struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *for_crtc = cstate->base.crtc;
-       u16 pipe_size, ddb_size;
-       int nth_active_pipe;
+       const struct drm_crtc_state *crtc_state;
+       const struct drm_crtc *crtc;
+       u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+       enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
+       u16 ddb_size;
+       u32 i;
 
        if (WARN_ON(!state) || !cstate->base.active) {
                alloc->start = 0;
                                      *num_active, ddb);
 
        /*
-        * If the state doesn't change the active CRTC's, then there's
-        * no need to recalculate; the existing pipe allocation limits
-        * should remain unchanged.  Note that we're safe from racing
-        * commits since any racing commit that changes the active CRTC
-        * list would need to grab _all_ crtc locks, including the one
-        * we currently hold.
+        * If the state doesn't change the active CRTCs or there is no
+        * modeset request, then there's no need to recalculate;
+        * the existing pipe allocation limits should remain unchanged.
+        * Note that we're safe from racing commits since any racing commit
+        * that changes the active CRTC list or does a modeset would need to
+        * grab _all_ crtc locks, including the one we currently hold.
         */
-       if (!intel_state->active_pipe_changes) {
+       if (!intel_state->active_pipe_changes && !intel_state->modeset) {
                /*
                 * alloc may be cleared by clear_intel_crtc_state,
                 * copy from old state to be sure
                return;
        }
 
-       nth_active_pipe = hweight32(intel_state->active_crtcs &
-                                   (drm_crtc_mask(for_crtc) - 1));
-       pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
-       alloc->start = nth_active_pipe * ddb_size / *num_active;
-       alloc->end = alloc->start + pipe_size;
+       /*
+        * Watermark/ddb requirement highly depends upon width of the
+        * framebuffer, so instead of allocating DDB equally among pipes,
+        * distribute DDB based on resolution/width of the display.
+        */
+       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+               const struct drm_display_mode *adjusted_mode;
+               int hdisplay, vdisplay;
+               enum pipe pipe;
+
+               if (!crtc_state->enable)
+                       continue;
+
+               pipe = to_intel_crtc(crtc)->pipe;
+               adjusted_mode = &crtc_state->adjusted_mode;
+               drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
+               total_width += hdisplay;
+
+               if (pipe < for_pipe)
+                       width_before_pipe += hdisplay;
+               else if (pipe == for_pipe)
+                       pipe_width = hdisplay;
+       }
+
+       alloc->start = ddb_size * width_before_pipe / total_width;
+       alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
 }
 
 static unsigned int skl_cursor_allocation(int num_active)
         * any other display updates race with this transaction, so we need
         * to grab the lock on *all* CRTC's.
         */
-       if (intel_state->active_pipe_changes) {
+       if (intel_state->active_pipe_changes || intel_state->modeset) {
                realloc_pipes = ~0;
                intel_state->wm_results.dirty_pipes = ~0;
        }