intel_dp->psr.dc3co_exit_delay = val;
        intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
        intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+       intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
        intel_dp->psr.req_psr2_sdp_prior_scanline =
                crtc_state->req_psr2_sdp_prior_scanline;
 
               PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
 }
 
+/*
+ * Return the SF "continuous full frame" (CFF) bit for PSR2_MAN_TRK_CTL.
+ * Alder Lake-P places this field at a different bit position than earlier
+ * platforms, hence the platform check.  Note: "CONTINUOS" is the spelling
+ * used by the existing register #defines, so it is kept here for consistency.
+ */
+static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
+{
+       return IS_ALDERLAKE_P(dev_priv) ?
+              ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
+              PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
+}
+
 static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
                lockdep_assert_held(&intel_dp->psr.lock);
+               if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+                       return;
                break;
        }
 
        mutex_unlock(&intel_dp->psr.lock);
 }
 
+/*
+ * Frontbuffer-invalidate handler.  With PSR2 selective fetch enabled, arm
+ * continuous full frame (CFF) fetches in PSR2_MAN_TRK_CTL so panel updates
+ * keep flowing while frontbuffer bits are dirty; without selective fetch,
+ * simply exit PSR.  Caller is expected to hold psr.lock (matching the
+ * lockdep assertion used elsewhere in this file) — TODO confirm.
+ */
+static void _psr_invalidate_handle(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       if (intel_dp->psr.psr2_sel_fetch_enabled) {
+               u32 val;
+
+               /* CFF already armed by an earlier invalidate: nothing to do */
+               if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+                       return;
+
+               val = man_trk_ctl_enable_bit_get(dev_priv) |
+                     man_trk_ctl_partial_frame_bit_get(dev_priv) |
+                     man_trk_ctl_continuos_full_frame(dev_priv);
+               intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), val);
+               /*
+                * Dummy CURSURFLIVE write — presumably makes the MAN_TRK_CTL
+                * change take effect; verify against bspec/other call sites.
+                */
+               intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+               intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
+       } else {
+               intel_psr_exit(intel_dp);
+       }
+}
+
 /**
  * intel_psr_invalidate - Invalidate PSR
  * @dev_priv: i915 device
                intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
 
                if (pipe_frontbuffer_bits)
-                       intel_psr_exit(intel_dp);
+                       _psr_invalidate_handle(intel_dp);
 
                mutex_unlock(&intel_dp->psr.lock);
        }
                         intel_dp->psr.dc3co_exit_delay);
 }
 
+/*
+ * Frontbuffer-flush handler, the counterpart of _psr_invalidate_handle().
+ * With PSR2 selective fetch: if continuous full frame (CFF) was armed by a
+ * prior invalidate, disarm it once no frontbuffer bits remain busy and send
+ * one single full frame to push the final state to the panel; if CFF was
+ * never armed, a single full frame via the HW-tracking exit suffices.
+ * Without selective fetch, fall back to the exit + deferred re-activate path.
+ */
+static void _psr_flush_handle(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+       if (intel_dp->psr.psr2_sel_fetch_enabled) {
+               if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+                       /* can we turn CFF off? */
+                       if (intel_dp->psr.busy_frontbuffer_bits == 0) {
+                               u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
+                                         man_trk_ctl_partial_frame_bit_get(dev_priv) |
+                                         man_trk_ctl_single_full_frame_bit_get(dev_priv);
+
+                               /*
+                                * turn continuous full frame off and do a single
+                                * full frame
+                                */
+                               intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+                                              val);
+                               /*
+                                * Dummy CURSURFLIVE write — presumably latches
+                                * the MAN_TRK_CTL change; confirm with bspec.
+                                */
+                               intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+                               intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
+                       }
+               } else {
+                       /*
+                        * continuous full frame is disabled, only a single full
+                        * frame is required
+                        */
+                       psr_force_hw_tracking_exit(intel_dp);
+               }
+       } else {
+               psr_force_hw_tracking_exit(intel_dp);
+
+               /*
+                * PSR is off and nothing is pending: schedule psr.work —
+                * NOTE(review): assumed to re-activate PSR; verify worker.
+                */
+               if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
+                       schedule_work(&intel_dp->psr.work);
+       }
+}
+
 /**
  * intel_psr_flush - Flush PSR
  * @dev_priv: i915 device
                 * we have to ensure that the PSR is not activated until
                 * intel_psr_resume() is called.
                 */
-               if (intel_dp->psr.paused) {
-                       mutex_unlock(&intel_dp->psr.lock);
-                       continue;
-               }
+               if (intel_dp->psr.paused)
+                       goto unlock;
 
                if (origin == ORIGIN_FLIP ||
                    (origin == ORIGIN_CURSOR_UPDATE &&
                     !intel_dp->psr.psr2_sel_fetch_enabled)) {
                        tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
-                       mutex_unlock(&intel_dp->psr.lock);
-                       continue;
+                       goto unlock;
                }
 
-               /* By definition flush = invalidate + flush */
-               if (pipe_frontbuffer_bits)
-                       psr_force_hw_tracking_exit(intel_dp);
+               if (pipe_frontbuffer_bits == 0)
+                       goto unlock;
 
-               if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
-                       schedule_work(&intel_dp->psr.work);
+               /* By definition flush = invalidate + flush */
+               _psr_flush_handle(intel_dp);
+unlock:
                mutex_unlock(&intel_dp->psr.lock);
        }
 }