 #include "intel_frontbuffer.h"
 #include "i915_drv.h"
 
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                              enum fb_op_origin origin)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (!obj->frontbuffer_bits)
-               return;
-
        if (origin == ORIGIN_CS) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
-               dev_priv->fb_tracking.busy_bits
-                       |= obj->frontbuffer_bits;
-               dev_priv->fb_tracking.flip_bits
-                       &= ~obj->frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
+               spin_lock(&dev_priv->fb_tracking.lock);
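+               /*
+                * CS rendering keeps these planes busy until the request
+                * retires; clearing flip_bits cancels any pending flip
+                * flush made stale by the new rendering.
+                */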
+               dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
+               dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+               spin_unlock(&dev_priv->fb_tracking.lock);
        }
 
        intel_psr_invalidate(dev, obj->frontbuffer_bits);
 }
 
 /**
  * intel_frontbuffer_flush - flush frontbuffer
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
  * This function gets called every time rendering on the given planes has
  * completed and frontbuffer caching can be started again. Flushes will get
  * delayed if they're blocked by some outstanding asynchronous rendering.
  *
  * Can be called without any locks held.
  */
 static void intel_frontbuffer_flush(struct drm_device *dev,
                                     unsigned frontbuffer_bits,
                                     enum fb_op_origin origin)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Delay flushing when rings are still busy.*/
-       mutex_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&dev_priv->fb_tracking.lock);
        frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
+       spin_unlock(&dev_priv->fb_tracking.lock);
 
        if (!frontbuffer_bits)
                return;
        intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
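 
+/*
+ * Illustrative flow: an ORIGIN_CS invalidate sets busy_bits for the
+ * object's planes, so ordinary flushes of those planes are filtered out
+ * above; only a flush with retire=true clears busy_bits again and lets
+ * the deferred flush through.
+ */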
 
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                       bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                         bool retire,
+                         enum fb_op_origin origin)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned frontbuffer_bits;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       if (!obj->frontbuffer_bits)
-               return;
-
        frontbuffer_bits = obj->frontbuffer_bits;
 
        if (retire) {
-               mutex_lock(&dev_priv->fb_tracking.lock);
+               spin_lock(&dev_priv->fb_tracking.lock);
                /* Filter out new bits since rendering started. */
                frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
                dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-               mutex_unlock(&dev_priv->fb_tracking.lock);
+               spin_unlock(&dev_priv->fb_tracking.lock);
        }
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+       if (frontbuffer_bits)
+               intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. The actual
  * frontbuffer flushing will be delayed until completion is signalled with
  * intel_frontbuffer_flip_complete. If an invalidate happens in between this
  * flush will be cancelled.
  *
  * Can be called without any locks held.
  */
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
                                     unsigned frontbuffer_bits)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
-       mutex_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&dev_priv->fb_tracking.lock);
        dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
        /* Remove stale busy bits due to the old buffer. */
        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
+       spin_unlock(&dev_priv->fb_tracking.lock);
 
        intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
 
 /**
  * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after the flip has been latched and will complete
  * on the next vblank. It will execute the flush if it hasn't been cancelled
  * yet.
  *
  * Can be called without any locks held.
  */
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
                                      unsigned frontbuffer_bits)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
-       mutex_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&dev_priv->fb_tracking.lock);
        /* Mask any cancelled flips. */
        frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
        dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
+       spin_unlock(&dev_priv->fb_tracking.lock);
 
-       intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+       if (frontbuffer_bits)
+               intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
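 
+/*
+ * Illustrative pairing for a hypothetical asynchronous flip path:
+ *
+ *      intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);
+ *      ... flip is queued and later latched by the hardware ...
+ *      intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
+ *
+ * An invalidate in between cancels the deferred flush via flip_bits.
+ */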
 
 /**
  * intel_frontbuffer_flip - synchronous frontbuffer flip
  * @dev: DRM device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. This is for
  * synchronous plane updates which will happen on the next vblank and which
  * will not get delayed by pending gpu rendering.
  *
  * Can be called without any locks held.
  */
 void intel_frontbuffer_flip(struct drm_device *dev,
                             unsigned frontbuffer_bits)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
-       mutex_lock(&dev_priv->fb_tracking.lock);
+       spin_lock(&dev_priv->fb_tracking.lock);
        /* Remove stale busy bits due to the old buffer. */
        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-       mutex_unlock(&dev_priv->fb_tracking.lock);
+       spin_unlock(&dev_priv->fb_tracking.lock);
 
        intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 struct drm_i915_private;
 struct drm_i915_gem_object;
 
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                            enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
                                    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
                                     unsigned frontbuffer_bits);
 void intel_frontbuffer_flip(struct drm_device *dev,
                            unsigned frontbuffer_bits);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-                       enum fb_op_origin origin);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                              enum fb_op_origin origin);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                         bool retire,
+                         enum fb_op_origin origin);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+                                          enum fb_op_origin origin)
+{
+       if (!obj->frontbuffer_bits)
+               return;
+
+       __intel_fb_obj_invalidate(obj, origin);
+}
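+
+/*
+ * Illustrative use for a hypothetical direct CPU write (ORIGIN_CPU is
+ * one of the fb_op_origin values):
+ *
+ *      intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+ *      ... write through the CPU mapping ...
+ */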
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+                                     bool retire,
+                                     enum fb_op_origin origin)
+{
+       if (!obj->frontbuffer_bits)
+               return;
+
+       __intel_fb_obj_flush(obj, retire, origin);
+}
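+
+/*
+ * Illustrative use: the CPU write above is flushed with retire=false
+ * once the access is done, while request retirement passes retire=true
+ * to unblock flushes that were delayed behind busy_bits:
+ *
+ *      intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ *      intel_fb_obj_flush(obj, true, ORIGIN_CS);
+ */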
 
 #endif /* __INTEL_FRONTBUFFER_H__ */