                        return ret;
        }
 
+       if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+           intel_can_enable_sagv(dev_priv, old_bw_state)) {
+               ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+               if (ret)
+                       return ret;
+       } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+               ret = intel_atomic_lock_global_state(&new_bw_state->base);
+               if (ret)
+                       return ret;
+       }
+
        for_each_new_intel_crtc_in_state(state, crtc,
                                         new_crtc_state, i) {
                struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
 
                pipe_wm->use_sagv_wm = INTEL_GEN(dev_priv) >= 12 &&
                                       intel_can_enable_sagv(dev_priv, new_bw_state);
        }
 
-       if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
-           intel_can_enable_sagv(dev_priv, old_bw_state)) {
-               ret = intel_atomic_serialize_global_state(&new_bw_state->base);
-               if (ret)
-                       return ret;
-       } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
-               ret = intel_atomic_lock_global_state(&new_bw_state->base);
-               if (ret)
-                       return ret;
-       }
-
        return 0;
 }
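
For reference, the relocated block calls intel_atomic_serialize_global_state() when the overall SAGV verdict flips between the old and new bw state, and falls back to intel_atomic_lock_global_state() when only the per-pipe reject mask changes. Below is a minimal userspace sketch of that decision, assuming a simplified "no pipe rejects SAGV" check; sagv_state, serialize_global_state() and lock_global_state() are hypothetical stand-ins for intel_bw_state and the intel_atomic_*_global_state() helpers, not i915 API.

/*
 * Toy model only -- not i915 code.  sagv_state stands in for intel_bw_state,
 * serialize_global_state()/lock_global_state() for the
 * intel_atomic_*_global_state() helpers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sagv_state {
        uint8_t pipe_sagv_reject;       /* bitmask of pipes that veto SAGV */
};

/* Simplification: SAGV may be enabled only when no pipe rejects it. */
static bool can_enable_sagv(const struct sagv_state *s)
{
        return s->pipe_sagv_reject == 0;
}

static int serialize_global_state(void)
{
        puts("serialize: the SAGV on/off verdict changed");
        return 0;
}

static int lock_global_state(void)
{
        puts("lock: only the per-pipe reject mask changed");
        return 0;
}

/* Mirrors the decision made by the relocated hunk. */
static int check_sagv_transition(const struct sagv_state *old_state,
                                 const struct sagv_state *new_state)
{
        if (can_enable_sagv(new_state) != can_enable_sagv(old_state))
                return serialize_global_state();
        if (new_state->pipe_sagv_reject != old_state->pipe_sagv_reject)
                return lock_global_state();
        return 0;       /* nothing SAGV-related changed */
}

int main(void)
{
        struct sagv_state old_state = { .pipe_sagv_reject = 0x0 };
        struct sagv_state new_state = { .pipe_sagv_reject = 0x2 };      /* one pipe now rejects */

        return check_sagv_transition(&old_state, &new_state);
}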