        PIPE_CONF_CHECK_X(lane_lat_optim_mask);
 
        if (HAS_DOUBLE_BUFFERED_M_N(dev_priv)) {
-               PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
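+               /* M/N may be updated seamlessly during fastset */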
+               if (!fastset || !pipe_config->seamless_m_n)
+                       PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
        } else {
                PIPE_CONF_CHECK_M_N(dp_m_n);
                PIPE_CONF_CHECK_M_N(dp_m2_n2);
        if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
                PIPE_CONF_CHECK_I(pipe_bpp);
 
-       PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
-       PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
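+       /* the dotclock may change during fastset with seamless M/N updates */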
+       if (!fastset || !pipe_config->seamless_m_n) {
+               PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_clock);
+               PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_clock);
+       }
        PIPE_CONF_CHECK_I(port_clock);
 
        PIPE_CONF_CHECK_I(min_voltage_level);
        if (DISPLAY_VER(dev_priv) >= 9 ||
            IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                hsw_set_linetime_wm(new_crtc_state);
+
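+       /* update M1/N1 on the fly for a seamless refresh rate change */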
+       if (new_crtc_state->seamless_m_n)
+               intel_cpu_transcoder_set_m1_n1(crtc, new_crtc_state->cpu_transcoder,
+                                              &new_crtc_state->dp_m_n);
 }
 
 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
 
        }
 }
 
+static bool has_seamless_m_n(struct intel_connector *connector)
+{
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+       /*
+        * Seamless M/N reprogramming only implemented
+        * for BDW+ double buffered M/N registers so far.
+        */
+       return HAS_DOUBLE_BUFFERED_M_N(i915) &&
+               intel_panel_drrs_type(connector) == DRRS_TYPE_SEAMLESS;
+}
+
+static int intel_dp_mode_clock(const struct intel_crtc_state *crtc_state,
+                              const struct drm_connector_state *conn_state)
+{
+       struct intel_connector *connector = to_intel_connector(conn_state->connector);
+       const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+       /* FIXME a bit of a mess wrt clock vs. crtc_clock */
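+       /*
+        * With seamless M/N the link is configured for the panel's highest
+        * refresh rate mode, so lower rates can be reached later by
+        * changing M/N alone.
+        */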
+       if (has_seamless_m_n(connector))
+               return intel_panel_highest_mode(connector, adjusted_mode)->clock;
+       else
+               return adjusted_mode->crtc_clock;
+}
+
 /* Optimize link config in order: max bpp, min clock, min lanes */
 static int
 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
                                  struct intel_crtc_state *pipe_config,
+                                 const struct drm_connector_state *conn_state,
                                  const struct link_config_limits *limits)
 {
-       struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
-       int bpp, i, lane_count;
+       int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
        int mode_rate, link_rate, link_avail;
 
        for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
                int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
 
-               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                  output_bpp);
+               mode_rate = intel_dp_link_required(clock, output_bpp);
 
                for (i = 0; i < intel_dp->num_common_rates; i++) {
                        link_rate = intel_dp_common_rate(intel_dp, i);
         * Optimize for slow and wide for everything, because there are some
         * eDP 1.3 and 1.4 panels that don't work well with fast and narrow.
         */
-       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+       ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, conn_state, &limits);
 
        if (ret || joiner_needs_dsc || intel_dp->force_dsc_en) {
                drm_dbg_kms(&i915->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
                intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
        int pixel_clock;
 
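+       /* M/N can be changed without a full modeset on this platform/panel */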
+       if (has_seamless_m_n(connector))
+               pipe_config->seamless_m_n = true;
+
        if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
                if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder))
                        intel_zero_m_n(&pipe_config->dp_m2_n2);