*/
 
        gte = (gen8_pte_t __iomem *)ggtt->gsm;
-       gte += vma_res->start / I915_GTT_PAGE_SIZE;
-       end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+       gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
+       end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
+       while (gte < end)
+               gen8_set_pte(gte++, vm->scratch[0]->encode);
+       end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
 
        for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
                gen8_set_pte(gte++, pte_encode | addr);
        dma_addr_t addr;
 
        gte = (gen6_pte_t __iomem *)ggtt->gsm;
-       gte += vma_res->start / I915_GTT_PAGE_SIZE;
-       end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+       gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
 
+       end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
+       while (gte < end)
+               iowrite32(vm->scratch[0]->encode, gte++);
+       end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
        for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
                iowrite32(vm->pte_encode(addr, level, flags), gte++);
        GEM_BUG_ON(gte > end);
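Both insert_entries hunks above follow the same pattern: step the PTE pointer back over the leading guard, point those entries at scratch, and extend the end marker so the pre-existing fill-to-end loop (not shown in these hunks) also scrubs the trailing guard. A minimal user-space sketch of that index arithmetic, with illustrative names only (this is not kernel code); vma_res->start is assumed to already point past the leading guard:

    #include <stdint.h>

    #define GTT_PAGE_SIZE 4096ull

    struct guard_layout {
            uint64_t first_guard_pte; /* first scratch entry (leading guard) */
            uint64_t first_real_pte;  /* first entry backed by object pages  */
            uint64_t end_pte;         /* one past the trailing guard         */
    };

    /* Mirrors the pointer arithmetic in the insert_entries hunks above. */
    static struct guard_layout compute_guard_layout(uint64_t start,
                                                    uint64_t node_size,
                                                    uint32_t guard)
    {
            struct guard_layout l;

            l.first_guard_pte = (start - guard) / GTT_PAGE_SIZE;
            l.first_real_pte = l.first_guard_pte + guard / GTT_PAGE_SIZE;
            l.end_pte = l.first_real_pte + (node_size + guard) / GTT_PAGE_SIZE;
            return l;
    }

For example, with start = 0x11000, node_size = 0x4000 and guard = 0x1000, entry 0x10 is the leading scratch PTE, entries 0x11 through 0x14 receive the object's pages, and entry 0x15 is the trailing guard below end_pte = 0x16.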
 
                               obj->mm.rsgt, i915_gem_object_is_readonly(obj),
                               i915_gem_object_is_lmem(obj), obj->mm.region,
                               vma->ops, vma->private, __i915_vma_offset(vma),
-                              __i915_vma_size(vma), vma->size);
+                              __i915_vma_size(vma), vma->size, vma->guard);
 }
 
 /**
            i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
                return true;
 
+       if (flags & PIN_OFFSET_GUARD &&
+           vma->guard < (flags & PIN_OFFSET_MASK))
+               return true;
+
        return false;
 }
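For context, a caller requests the padding through the same PIN_OFFSET_MASK bits used by PIN_OFFSET_BIAS and PIN_OFFSET_FIXED, and the new check above forces a rebind whenever an already-bound vma carries a smaller guard than requested. A hedged sketch of such a caller, assuming the usual GGTT pin helper and that the pin_with_guard() wrapper is purely illustrative:

    /*
     * Illustrative caller only (not part of this patch): ask for at least
     * one page of scratch padding on either side of the binding.  The
     * guard size travels in the PIN_OFFSET_MASK bits of the pin flags.
     */
    static struct i915_vma *pin_with_guard(struct drm_i915_gem_object *obj,
                                           struct i915_gem_ww_ctx *ww)
    {
            return i915_gem_object_ggtt_pin_ww(obj, ww, NULL, 0, 0,
                                               PIN_GLOBAL |
                                               PIN_OFFSET_GUARD | SZ_4K);
    }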
 
 i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                u64 size, u64 alignment, u64 flags)
 {
-       unsigned long color;
+       unsigned long color, guard;
        u64 start, end;
        int ret;
 
        GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+       GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
 
        size = max(size, vma->size);
-       alignment = max(alignment, vma->display_alignment);
+       alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));
 
+       guard = vma->guard; /* retain guard across rebinds */
+       if (flags & PIN_OFFSET_GUARD) {
+               GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
+               guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
+       }
+       /*
+        * We align the node upon insertion, but the hardware is given
+        * node.start + guard, so the easiest way to keep both aligned
+        * is to make the guard a multiple of the alignment size.
+        */
+       guard = ALIGN(guard, alignment);
+
        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 
 
        alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
 
-       /* If binding the object/GGTT view requires more space than the entire
+       /*
+        * If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
-       if (size > end) {
+       if (size > end - 2 * guard) {
                drm_dbg(&to_i915(vma->obj->base.dev)->drm,
                        "Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
                        size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end))
                        return -EINVAL;
+               /*
+                * The caller knows nothing of the guard added by others
+                * and asks for the offset of the start of its buffer to
+                * be fixed, which may not be the same as the position of
+                * the vma->node due to the guard pages.
+                */
+               if (offset < guard || offset + size > end - guard)
+                       return -ENOSPC;
 
                ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
-                                          size, offset, color,
-                                          flags);
+                                          size + 2 * guard,
+                                          offset - guard,
+                                          color, flags);
                if (ret)
                        return ret;
        } else {
+               size += 2 * guard;
                /*
                 * We only support huge gtt pages through the 48b PPGTT,
                 * however we also don't want to force any alignment for
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
        list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+       vma->guard = guard;
 
        return 0;
 }
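After insertion the drm_mm node therefore spans size + 2 * guard bytes, while the rest of the driver keeps seeing the unpadded offset and size (routed through __i915_vma_offset()/__i915_vma_size() in the resource-init hunk above). A sketch of that relationship, with illustrative names only:

    #include <stdint.h>

    /* Illustrative only: how the padded node maps onto the values passed
     * to i915_vma_resource_init() above. */
    struct padded_node {
            uint64_t node_start; /* drm_mm node start, includes leading guard */
            uint64_t node_size;  /* size + 2 * guard, as reserved above       */
            uint32_t guard;
    };

    /* Offset of the first real page, i.e. what vma_res->start records. */
    static uint64_t bind_offset(const struct padded_node *n)
    {
            return n->node_start + n->guard;
    }

    /* Bind size with the padding subtracted, i.e. vma_res->node_size. */
    static uint64_t bind_size(const struct padded_node *n)
    {
            return n->node_size - 2 * n->guard;
    }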
 
  * @node_size: Size of the allocated range manager node with padding
  * subtracted.
  * @vma_size: Bind size.
+ * @guard: The size of the guard area preceding and trailing the bind.
  * @page_sizes_gtt: Resulting page sizes from the bind operation.
  * @bound_flags: Flags indicating binding status.
  * @allocated: Backend private data. TODO: Should move into @private.
        u64 start;
        u64 node_size;
        u64 vma_size;
+       u32 guard;
        u32 page_sizes_gtt;
 
        u32 bound_flags;
  * @start: Offset into the address space of bind range start after padding.
  * @node_size: Size of the allocated range manager node minus padding.
  * @size: Bind size.
+ * @guard: The size of the guard area preceding and trailing the bind.
  *
  * Initializes a vma resource allocated using i915_vma_resource_alloc().
  * The reason for having separate allocate and initialize function is that
                                          void *private,
                                          u64 start,
                                          u64 node_size,
-                                         u64 size)
+                                         u64 size,
+                                         u32 guard)
 {
        __i915_vma_resource_init(vma_res);
        vma_res->vm = vm;
        vma_res->start = start;
        vma_res->node_size = node_size;
        vma_res->vma_size = size;
+       vma_res->guard = guard;
 }
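Storing the guard alongside start and node_size lets any consumer of the resource, such as the insert paths in the first hunks, recompute the full padded extent of the underlying node. A sketch of that arithmetic under the field semantics documented above; the helper names are hypothetical, not from the patch:

    /* Hypothetical helpers: full extent of the node, guard included,
     * reconstructed from the documented vma_res fields. */
    static inline u64 padded_start(const struct i915_vma_resource *vma_res)
    {
            return vma_res->start - vma_res->guard;
    }

    static inline u64 padded_size(const struct i915_vma_resource *vma_res)
    {
            return vma_res->node_size + 2 * vma_res->guard;
    }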
 
 static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)