If the user has created a read-only object, they should not be allowed
to circumvent the write protection by using a GGTT mmapping. Deny it.

Also, most machines do not support read-only GGTT PTEs, so again we
have to reject attempted writes. Fortunately, this is known a priori,
so we can at least reject it in the call to create the mmap (with a
sanity check in the fault handler).

v2: Check the vma->vm_flags during mmap() to allow readonly access.
v3: Remove VM_MAYWRITE to curtail mprotect()
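
An illustrative sketch of the expected userspace-visible behaviour
(assuming <sys/mman.h>, <assert.h> and <errno.h>; drm_fd, gtt_offset
and size are placeholders for a DRM fd, the fake offset returned by
the GTT mmap ioctl for a read-only object, and its size):

	static void check_readonly_ggtt_mmap(int drm_fd, off_t gtt_offset,
					     size_t size)
	{
		void *ptr;

		/* A writable GGTT mmap of a read-only object is refused */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   drm_fd, gtt_offset);
		assert(ptr == MAP_FAILED && errno == EINVAL);

		/* A read-only mapping is still permitted... */
		ptr = mmap(NULL, size, PROT_READ, MAP_SHARED,
			   drm_fd, gtt_offset);
		assert(ptr != MAP_FAILED);

		/* ...but cannot be upgraded later: VM_MAYWRITE is cleared */
		assert(mprotect(ptr, size, PROT_READ | PROT_WRITE) == -1 &&
		       errno == EACCES);

		munmap(ptr, size);
	}
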
Testcase: igt/gem_userptr_blits/readonly_mmap*
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com> #v1
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180712185315.3288-4-chris@chris-wilson.co.uk
                return -EACCES;
        }
 
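+       /*
+        * A read-only object must not become writable via its GGTT mmap:
+        * refuse PROT_WRITE mappings outright and clear VM_MAYWRITE so
+        * that mprotect() cannot add write access later.
+        */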
+       if (node->readonly) {
+               if (vma->vm_flags & VM_WRITE) {
+                       drm_gem_object_put_unlocked(obj);
+                       return -EINVAL;
+               }
+
+               vma->vm_flags &= ~VM_MAYWRITE;
+       }
+
        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);
 
 
        pgoff_t page_offset;
        int ret;
 
+       /* Sanity check that we allow writing into this object */
+       if (i915_gem_object_is_readonly(obj) && write)
+               return VM_FAULT_SIGBUS;
+
        /* We don't use vmf->pgoff since that has the fake offset */
        page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
 
 
 
        /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
        vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
        const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
        dma_addr_t addr;
 
-       /* The GTT does not support read-only mappings */
-       GEM_BUG_ON(flags & PTE_READ_ONLY);
+       /*
+        * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+        * not to allow the user to override access to a read only page.
+        */
 
        gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
        gtt_entries += vma->node.start >> PAGE_SHIFT;
 
        /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
        pte_flags = 0;
-       if (obj->gt_ro)
+       if (i915_gem_object_is_readonly(obj))
                pte_flags |= PTE_READ_ONLY;
 
        intel_runtime_pm_get(i915);
 
        /* Currently applicable only to VLV */
        pte_flags = 0;
-       if (vma->obj->gt_ro)
+       if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;
 
        if (flags & I915_VMA_LOCAL_BIND) {
 
         * Is the object to be mapped as read-only to the GPU
         * Only honoured if hardware has relevant pte bit
         */
-       unsigned long gt_ro:1;
        unsigned int cache_level:3;
        unsigned int cache_coherent:2;
 #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
        reservation_object_unlock(obj->resv);
 }
 
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+       obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+       return obj->base.vma_node.readonly;
+}
+
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
 
         * if supported by the platform's GGTT.
         */
        if (vm->has_read_only)
-               obj->gt_ro = 1;
+               i915_gem_object_set_readonly(obj);
 
        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma))
 
                                        goto out_unlock;
                                }
 
-                               obj->gt_ro = prandom_u32_state(&prng);
+                               if (prandom_u32_state(&prng) & 1)
+                                       i915_gem_object_set_readonly(obj);
                        }
 
                        intel_runtime_pm_get(i915);
                unsigned int num_writes;
 
                num_writes = rem;
-               if (obj->gt_ro)
+               if (i915_gem_object_is_readonly(obj))
                        num_writes = 0;
 
                err = cpu_check(obj, num_writes);
 
        rwlock_t vm_lock;
        struct drm_mm_node vm_node;
        struct rb_root vm_files;
+       bool readonly:1;
 };
 
 struct drm_vma_offset_manager {