intel_init_display_hooks(dev_priv);
        intel_init_clock_gating_hooks(dev_priv);
        intel_init_audio_hooks(dev_priv);
+       i915_gem_load_init(dev);
 
        intel_runtime_pm_get(dev_priv);
 
 
        intel_opregion_setup(dev);
 
-       i915_gem_load_init(dev);
+       i915_gem_load_init_fences(dev_priv);
+
        i915_gem_shrinker_init(dev_priv);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
        if (INTEL_INFO(dev)->num_pipes) {
                ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
                if (ret)
-                       goto out_gem_unload;
+                       goto out_cleanup_shrinker;
        }
 
        ret = i915_load_modeset_init(dev);
 out_power_well:
        intel_power_domains_fini(dev_priv);
        drm_vblank_cleanup(dev);
-out_gem_unload:
+out_cleanup_shrinker:
        i915_gem_shrinker_cleanup(dev_priv);
 
        if (dev->pdev->msi_enabled)
        i915_mmio_cleanup(dev);
 put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
-       i915_gem_load_cleanup(dev);
 out_runtime_pm_put:
        intel_runtime_pm_put(dev_priv);
+       i915_gem_load_cleanup(dev);
        i915_workqueues_cleanup(dev_priv);
 out_free_priv:
        kfree(dev_priv);
        intel_uncore_fini(dev);
        i915_mmio_cleanup(dev);
 
-       i915_gem_load_cleanup(dev);
        pci_dev_put(dev_priv->bridge_dev);
+       i915_gem_load_cleanup(dev);
        i915_workqueues_cleanup(dev_priv);
        kfree(dev_priv);
 
 
                        struct drm_file *file_priv);
 void i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
+void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
        INIT_LIST_HEAD(&engine->request_list);
 }
 
+/*
+ * i915_gem_load_init_fences - size and reset the HW fence-register pool.
+ *
+ * Picks dev_priv->num_fence_regs from the GPU generation, lets a vGPU
+ * host override it, then restores the fence registers and detects the
+ * bit-6 swizzling mode.
+ *
+ * NOTE(review): split out of i915_gem_load_init(), presumably because the
+ * I915_READ in the vGPU path needs MMIO/uncore set up first — confirm
+ * against the reordered call site in i915_driver_load().
+ */
+void
+i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+
+       /* Gen7+ exposes 32 fence registers, except VLV/CHV. */
+       if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+           !IS_CHERRYVIEW(dev_priv))
+               dev_priv->num_fence_regs = 32;
+       /* Gen4+ and the 945G/945GM/G33 chipsets have 16. */
+       else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
+                IS_I945GM(dev_priv) || IS_G33(dev_priv))
+               dev_priv->num_fence_regs = 16;
+       else
+               dev_priv->num_fence_regs = 8;
+
+       /* Under a vGPU, the hypervisor advertises how many fences we own. */
+       if (intel_vgpu_active(dev))
+               dev_priv->num_fence_regs =
+                               I915_READ(vgtif_reg(avail_rs.fence_num));
+
+       /* Initialize fence registers to zero */
+       i915_gem_restore_fences(dev);
+
+       i915_gem_detect_bit_6_swizzle(dev);
+}
+
 void
 i915_gem_load_init(struct drm_device *dev)
 {
 
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-       if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
-               dev_priv->num_fence_regs = 32;
-       else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-               dev_priv->num_fence_regs = 16;
-       else
-               dev_priv->num_fence_regs = 8;
-
-       if (intel_vgpu_active(dev))
-               dev_priv->num_fence_regs =
-                               I915_READ(vgtif_reg(avail_rs.fence_num));
-
        /*
         * Set initial sequence number for requests.
         * Using this number allows the wraparound to happen early,
        dev_priv->next_seqno = ((u32)~0 - 0x1100);
        dev_priv->last_seqno = ((u32)~0 - 0x1101);
 
-       /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-       i915_gem_restore_fences(dev);
 
-       i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
 
        dev_priv->mm.interruptible = true;