unsigned int *needs_clflush);
 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
                                     unsigned int *needs_clflush);
-#define CLFLUSH_BEFORE 0x1
-#define CLFLUSH_AFTER 0x2
-#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+#define CLFLUSH_BEFORE BIT(0)
+#define CLFLUSH_AFTER  BIT(1)
+#define CLFLUSH_FLAGS  (CLFLUSH_BEFORE | CLFLUSH_AFTER)
 
 static inline void
 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
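
The two helpers above bracket direct CPU access to a shmem-backed object:
prepare pins the backing pages and reports, via the CLFLUSH_* mask, which
cache flushes the caller still owes, and finish is the matching release. A
minimal sketch of that bracket, assuming it runs in the usual i915_gem.c
context; everything except the two helpers and the flag bits is illustrative,
not part of this patch:

        unsigned int needs_clflush;
        int ret;

        ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
        if (ret)
                return ret;

        /* ... per-page copy loop, honouring CLFLUSH_BEFORE/CLFLUSH_AFTER ... */

        i915_gem_obj_finish_shmem_access(obj); /* drop the pin taken by prepare */
        return 0;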
 
        if (ret)
                return ret;
 
+       if (i915_gem_object_is_coherent(obj) ||
+           !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+               ret = i915_gem_object_set_to_cpu_domain(obj, false);
+               if (ret)
+                       goto err_unpin;
+               else
+                       goto out;
+       }
+
        i915_gem_object_flush_gtt_write_domain(obj);
 
        /* If we're not in the cpu read domain, set ourself into the gtt
         * read domain and manually flush cachelines (if required). This
         * optimizes for the case when the gpu will dirty the data
         * anyway again before the next pread happens.
         */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
-               *needs_clflush = !i915_gem_object_is_coherent(obj);
-
-       if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
-               ret = i915_gem_object_set_to_cpu_domain(obj, false);
-               if (ret)
-                       goto err_unpin;
-
-               *needs_clflush = 0;
-       }
+               *needs_clflush = CLFLUSH_BEFORE;
 
+out:
        /* return with the pages pinned */
        return 0;
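
On the read side only CLFLUSH_BEFORE can be reported, and with the early exit
above a coherent object (or a CPU without clflush) is simply pulled into the
CPU read domain and reports 0. A per-page sketch of how a pread-style caller
might consume the flag; the function name and parameters are illustrative,
not the driver's actual helper, and the kmap/copy_to_user/drm_clflush calls
assume the headers i915_gem.c already pulls in:

static int shmem_pread_page_sketch(struct page *page,
                                   unsigned int offset, unsigned int len,
                                   char __user *user_data,
                                   unsigned int needs_clflush)
{
        char *vaddr = kmap(page);
        unsigned long unread;

        /* Drop stale CPU cachelines so the copy sees what the GPU wrote. */
        if (needs_clflush & CLFLUSH_BEFORE)
                drm_clflush_virt_range(vaddr + offset, len);

        unread = copy_to_user(user_data, vaddr + offset, len);

        kunmap(page);

        return unread ? -EFAULT : 0;
}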
 
        if (ret)
                return ret;
 
+       if (i915_gem_object_is_coherent(obj) ||
+           !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+               ret = i915_gem_object_set_to_cpu_domain(obj, true);
+               if (ret)
+                       goto err_unpin;
+               else
+                       goto out;
+       }
+
        i915_gem_object_flush_gtt_write_domain(obj);
 
        /* If we're not in the cpu write domain, set ourself into the
         * gtt write domain and manually flush cachelines (as required).
         * This optimizes for the case when the gpu will use the data
         * right away and we therefore have to clflush anyway.
         */
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
-               *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
+               *needs_clflush |= CLFLUSH_AFTER;
 
        /* Same trick applies to invalidate partially written cachelines read
         * before writing.
         */
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
-               *needs_clflush |= !i915_gem_object_is_coherent(obj);
-
-       if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
-               ret = i915_gem_object_set_to_cpu_domain(obj, true);
-               if (ret)
-                       goto err_unpin;
-
-               *needs_clflush = 0;
-       }
-
-       if ((*needs_clflush & CLFLUSH_AFTER) == 0)
-               obj->cache_dirty = true;
+               *needs_clflush |= CLFLUSH_BEFORE;
 
+out:
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */
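
On the write side both bits matter: CLFLUSH_BEFORE drops stale cachelines so
a partial-cacheline write merges with what is actually in memory, and
CLFLUSH_AFTER pushes the dirty lines back out for a GPU that will not snoop
them. A matching per-page sketch of a pwrite-style consumer; again the
function name and parameters are illustrative, not the driver's actual
helper:

static int shmem_pwrite_page_sketch(struct page *page,
                                    unsigned int offset, unsigned int len,
                                    const char __user *user_data,
                                    unsigned int needs_clflush)
{
        char *vaddr = kmap(page);
        unsigned long unwritten;

        /* Invalidate stale cachelines covering the range before a
         * (possibly partial) write lands in them.
         */
        if (needs_clflush & CLFLUSH_BEFORE)
                drm_clflush_virt_range(vaddr + offset, len);

        unwritten = copy_from_user(vaddr + offset, user_data, len);

        /* The object is not CPU-coherent on this path, so write the
         * dirty lines back to memory for the GPU.
         */
        if (needs_clflush & CLFLUSH_AFTER)
                drm_clflush_virt_range(vaddr + offset, len);

        kunmap(page);

        return unwritten ? -EFAULT : 0;
}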