struct i915_address_space base;
        struct io_mapping mappable;     /* Mapping to our CPU mappable region */
 
+       /* Stolen memory is segmented in hardware with different portions
+        * off-limits to certain functions.
+        *
+        * The drm_mm is initialised to the total accessible range, as found
+        * from the PCI config. On Broadwell+, this is further restricted to
+        * avoid the first page! The upper end of stolen memory is reserved for
+        * hardware functions and similarly removed from the accessible range.
+        */
        size_t stolen_size;             /* Total size of stolen memory */
-       size_t stolen_usable_size;      /* Total size minus BIOS reserved */
+       size_t stolen_usable_size;      /* Total size minus reserved ranges */
        size_t stolen_reserved_base;
        size_t stolen_reserved_size;
        u64 mappable_end;               /* End offset that we can CPU map */
 
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;
 
-       /* See the comment at the drm_mm_init() call for more about this check.
-        * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
-        */
-       if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
-               start = 4096;
-
        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
                                          alignment, start, end,
                                struct drm_mm_node *node, u64 size,
                                unsigned alignment)
 {
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
        return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
-                                                   alignment, 0,
-                                                   ggtt->stolen_usable_size);
+                                                   alignment, 0, U64_MAX);
 }
 
 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned long reserved_total, reserved_base = 0, reserved_size;
-       unsigned long stolen_top;
+       unsigned long stolen_usable_start, stolen_top;
 
        mutex_init(&dev_priv->mm.stolen_lock);
 
                      ggtt->stolen_size >> 10,
                      (ggtt->stolen_size - reserved_total) >> 10);
 
-       ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
+       stolen_usable_start = 0;
+       /* WaSkipStolenMemoryFirstPage:bdw+ */
+       if (INTEL_GEN(dev_priv) >= 8)
+               stolen_usable_start = 4096;
 
-       /*
-        * Basic memrange allocator for stolen space.
-        *
-        * TODO: Notice that some platforms require us to not use the first page
-        * of the stolen memory but their BIOSes may still put the framebuffer
-        * on the first page. So we don't reserve this page for now because of
-        * that. Our current solution is to just prevent new nodes from being
-        * inserted on the first page - see the check we have at
-        * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
-        * problem later.
-        */
-       drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
+       ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total -
+                                  stolen_usable_start;
+
+       /* Basic memrange allocator for stolen space. */
+       drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
+                   ggtt->stolen_usable_size);
 
        return 0;
 }