#include "i915_gem_object.h"
 #include "i915_vma.h"
 #include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
 
 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 {
 
                           struct drm_file *file);
 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
-int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
-                           struct drm_file *file);
+int i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file);
 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file);
 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 
  */
 
 #include <linux/mman.h>
+#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include "gt/intel_gt.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
+#include "i915_gem_mman.h"
 #include "i915_trace.h"
 #include "i915_vma.h"
 
  * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
  *     pagefault; swapin remains transparent.
  *
+ * 4 - Support multiple fault handlers per object depending on the object's
+ *     backing storage (a.k.a. MMAP_OFFSET).
+ *
  * Restrictions:
  *
  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
  */
 int i915_gem_mmap_gtt_version(void)
 {
-       return 3;
+       return 4;
 }
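
For reference, userspace can detect the new MMAP_OFFSET fault handlers through the existing getparam interface once the reported version reaches 4. A minimal, hypothetical sketch (not part of this patch; assumes fd is an open i915 DRM file descriptor):

#include <stdbool.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static bool has_mmap_offset(int fd)
{
        int version = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_MMAP_GTT_VERSION,
                .value = &version,
        };

        /* gtt mmap version 4 is the one that advertises MMAP_OFFSET */
        return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && version >= 4;
}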
 
 static inline struct i915_ggtt_view
        return view;
 }
 
-/**
- * i915_gem_fault - fault a page into the GTT
- * @vmf: fault info
- *
- * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
- * from userspace.  The fault handler takes care of binding the object to
- * the GTT (if needed), allocating and programming a fence register (again,
- * only if needed based on whether the old reg is still valid or the object
- * is tiled) and inserting a new PTE into the faulting process.
- *
- * Note that the faulting process may involve evicting existing objects
- * from the GTT and/or fence registers to make room.  So performance may
- * suffer if the GTT working set is large or there are few fence registers
- * left.
- *
- * The current feature set supported by i915_gem_fault() and thus GTT mmaps
- * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
- */
-vm_fault_t i915_gem_fault(struct vm_fault *vmf)
+static vm_fault_t i915_error_to_vmf_fault(int err)
+{
+       switch (err) {
+       default:
+               WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
+               /* fallthrough */
+       case -EIO: /* shmemfs failure from swap device */
+       case -EFAULT: /* purged object */
+       case -ENODEV: /* bad object, how did you get here! */
+               return VM_FAULT_SIGBUS;
+
+       case -ENOSPC: /* shmemfs allocation failure */
+       case -ENOMEM: /* our allocation failure */
+               return VM_FAULT_OOM;
+
+       case 0:
+       case -EAGAIN:
+       case -ERESTARTSYS:
+       case -EINTR:
+       case -EBUSY:
+               /*
+                * EBUSY is ok: this just means that another thread
+                * already did the job.
+                */
+               return VM_FAULT_NOPAGE;
+       }
+}
+
+static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
+{
+       struct vm_area_struct *area = vmf->vma;
+       struct i915_mmap_offset *mmo = area->vm_private_data;
+       struct drm_i915_gem_object *obj = mmo->obj;
+       unsigned long i, size = area->vm_end - area->vm_start;
+       bool write = area->vm_flags & VM_WRITE;
+       vm_fault_t ret = VM_FAULT_SIGBUS;
+       int err;
+
+       if (!i915_gem_object_has_struct_page(obj))
+               return ret;
+
+       /* Sanity check that we allow writing into this object */
+       if (i915_gem_object_is_readonly(obj) && write)
+               return ret;
+
+       err = i915_gem_object_pin_pages(obj);
+       if (err)
+               return i915_error_to_vmf_fault(err);
+
+       /* PTEs are revoked in obj->ops->put_pages() */
+       for (i = 0; i < size >> PAGE_SHIFT; i++) {
+               struct page *page = i915_gem_object_get_page(obj, i);
+
+               ret = vmf_insert_pfn(area,
+                                    (unsigned long)area->vm_start + i * PAGE_SIZE,
+                                    page_to_pfn(page));
+               if (ret != VM_FAULT_NOPAGE)
+                       break;
+       }
+
+       if (write) {
+               GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+               obj->cache_dirty = true; /* XXX flush after PAT update? */
+               obj->mm.dirty = true;
+       }
+
+       i915_gem_object_unpin_pages(obj);
+
+       return ret;
+}
+
+static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
        struct vm_area_struct *area = vmf->vma;
-       struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+       struct i915_mmap_offset *mmo = area->vm_private_data;
+       struct drm_i915_gem_object *obj = mmo->obj;
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *i915 = to_i915(dev);
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
                list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
        mutex_unlock(&i915->ggtt.vm.mutex);
 
+       /* Track the mmo associated with the fenced vma */
+       vma->mmo = mmo;
+
        if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
                intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
                                   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
        intel_runtime_pm_put(rpm, wakeref);
        i915_gem_object_unpin_pages(obj);
 err:
-       switch (ret) {
-       default:
-               WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
-               /* fallthrough */
-       case -EIO: /* shmemfs failure from swap device */
-       case -EFAULT: /* purged object */
-       case -ENODEV: /* bad object, how did you get here! */
-               return VM_FAULT_SIGBUS;
-
-       case -ENOSPC: /* shmemfs allocation failure */
-       case -ENOMEM: /* our allocation failure */
-               return VM_FAULT_OOM;
-
-       case 0:
-       case -EAGAIN:
-       case -ERESTARTSYS:
-       case -EINTR:
-       case -EBUSY:
-               /*
-                * EBUSY is ok: this just means that another thread
-                * already did the job.
-                */
-               return VM_FAULT_NOPAGE;
-       }
+       return i915_error_to_vmf_fault(ret);
 }
 
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
 
        GEM_BUG_ON(!obj->userfault_count);
 
-       obj->userfault_count = 0;
-       list_del(&obj->userfault_link);
-       drm_vma_node_unmap(&obj->base.vma_node,
-                          obj->base.dev->anon_inode->i_mapping);
-
        for_each_ggtt_vma(vma, obj)
-               i915_vma_unset_userfault(vma);
+               i915_vma_revoke_mmap(vma);
+
+       GEM_BUG_ON(obj->userfault_count);
 }
 
-/**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
+/*
  * It is vital that we remove the page mapping if we have mapped a tiled
  * object through the GTT and then lose the fence register due to
  * resource pressure. Similarly if the object has been moved out of the
  * aperture, then pages mapped into userspace must be revoked. Removing the
  * mapping will then trigger a page fault on the next user access, allowing
- * fixup by i915_gem_fault().
+ * fixup by vm_fault_gtt().
  */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        intel_wakeref_t wakeref;
 
-       /* Serialisation between user GTT access and our code depends upon
+       /*
+        * Serialisation between user GTT access and our code depends upon
         * revoking the CPU's PTE whilst the mutex is held. The next user
         * pagefault then has to wait until we release the mutex.
         *
        if (!obj->userfault_count)
                goto out;
 
-       __i915_gem_object_release_mmap(obj);
+       __i915_gem_object_release_mmap_gtt(obj);
 
-       /* Ensure that the CPU's PTE are revoked and there are not outstanding
+       /*
+        * Ensure that the CPU's PTE are revoked and there are not outstanding
         * memory transactions from userspace before we return. The TLB
         * flushing implied above by changing the PTE above *should* be
         * sufficient, an extra barrier here just provides us with a bit
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+       struct i915_mmap_offset *mmo;
+
+       spin_lock(&obj->mmo.lock);
+       list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+               /*
+                * vma_node_unmap for GTT mmaps is already handled in
+                * __i915_gem_object_release_mmap_gtt
+                */
+               if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
+                       continue;
+
+               spin_unlock(&obj->mmo.lock);
+               drm_vma_node_unmap(&mmo->vma_node,
+                                  obj->base.dev->anon_inode->i_mapping);
+               spin_lock(&obj->mmo.lock);
+       }
+       spin_unlock(&obj->mmo.lock);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_release_mmap_gtt(obj);
+       i915_gem_object_release_mmap_offset(obj);
+}
+
+static struct i915_mmap_offset *
+mmap_offset_attach(struct drm_i915_gem_object *obj,
+                  enum i915_mmap_type mmap_type,
+                  struct drm_file *file)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct intel_gt *gt = &i915->gt;
+       struct i915_mmap_offset *mmo;
        int err;
 
-       err = drm_gem_create_mmap_offset(&obj->base);
+       mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
+       if (!mmo)
+               return ERR_PTR(-ENOMEM);
+
+       mmo->obj = obj;
+       mmo->dev = obj->base.dev;
+       mmo->file = file;
+       mmo->mmap_type = mmap_type;
+       drm_vma_node_reset(&mmo->vma_node);
+
+       err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
+                                obj->base.size / PAGE_SIZE);
        if (likely(!err))
-               return 0;
+               goto out;
 
        /* Attempt to reap some mmap space from dead objects */
-       err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
+       err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
        if (err)
-               return err;
+               goto err;
 
        i915_gem_drain_freed_objects(i915);
-       return drm_gem_create_mmap_offset(&obj->base);
+       err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
+                                obj->base.size / PAGE_SIZE);
+       if (err)
+               goto err;
+
+out:
+       if (file)
+               drm_vma_node_allow(&mmo->vma_node, file);
+
+       spin_lock(&obj->mmo.lock);
+       list_add(&mmo->offset, &obj->mmo.offsets);
+       spin_unlock(&obj->mmo.lock);
+
+       return mmo;
+
+err:
+       kfree(mmo);
+       return ERR_PTR(err);
 }
 
-int
-i915_gem_mmap_gtt(struct drm_file *file,
-                 struct drm_device *dev,
-                 u32 handle,
-                 u64 *offset)
+static int
+__assign_mmap_offset(struct drm_file *file,
+                    u32 handle,
+                    enum i915_mmap_type mmap_type,
+                    u64 *offset)
 {
        struct drm_i915_gem_object *obj;
-       int ret;
-
-       if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
-               return -ENODEV;
+       struct i915_mmap_offset *mmo;
+       int err;
 
        obj = i915_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;
 
-       if (i915_gem_object_never_bind_ggtt(obj)) {
-               ret = -ENODEV;
+       if (mmap_type == I915_MMAP_TYPE_GTT &&
+           i915_gem_object_never_bind_ggtt(obj)) {
+               err = -ENODEV;
                goto out;
        }
 
-       ret = create_mmap_offset(obj);
-       if (ret == 0)
-               *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+       if (mmap_type != I915_MMAP_TYPE_GTT &&
+           !i915_gem_object_has_struct_page(obj)) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       mmo = mmap_offset_attach(obj, mmap_type, file);
+       if (IS_ERR(mmo)) {
+               err = PTR_ERR(mmo);
+               goto out;
+       }
 
+       *offset = drm_vma_node_offset_addr(&mmo->vma_node);
+       err = 0;
 out:
        i915_gem_object_put(obj);
-       return ret;
+       return err;
+}
+
+int
+i915_gem_dumb_mmap_offset(struct drm_file *file,
+                         struct drm_device *dev,
+                         u32 handle,
+                         u64 *offset)
+{
+       enum i915_mmap_type mmap_type;
+
+       if (boot_cpu_has(X86_FEATURE_PAT))
+               mmap_type = I915_MMAP_TYPE_WC;
+       else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
+               return -ENODEV;
+       else
+               mmap_type = I915_MMAP_TYPE_GTT;
+
+       return __assign_mmap_offset(file, handle, mmap_type, offset);
 }
 
 /**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * i915_gem_mmap_offset_ioctl - prepare an object for mmap'ing via a fake offset
  * @dev: DRM device
  * @data: GTT mapping ioctl data
  * @file: GEM object info
  * userspace.
  */
 int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file)
+i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file)
 {
-       struct drm_i915_gem_mmap_gtt *args = data;
+       struct drm_i915_private *i915 = to_i915(dev);
+       struct drm_i915_gem_mmap_offset *args = data;
+       enum i915_mmap_type type;
+
+       if (args->extensions)
+               return -EINVAL;
+
+       switch (args->flags) {
+       case I915_MMAP_OFFSET_GTT:
+               if (!i915_ggtt_has_aperture(&i915->ggtt))
+                       return -ENODEV;
+               type = I915_MMAP_TYPE_GTT;
+               break;
+
+       case I915_MMAP_OFFSET_WC:
+               if (!boot_cpu_has(X86_FEATURE_PAT))
+                       return -ENODEV;
+               type = I915_MMAP_TYPE_WC;
+               break;
+
+       case I915_MMAP_OFFSET_WB:
+               type = I915_MMAP_TYPE_WB;
+               break;
+
+       case I915_MMAP_OFFSET_UC:
+               if (!boot_cpu_has(X86_FEATURE_PAT))
+                       return -ENODEV;
+               type = I915_MMAP_TYPE_UC;
+               break;
+
+       default:
+               return -EINVAL;
+       }
 
-       return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+       return __assign_mmap_offset(file, args->handle, type, &args->offset);
+}
+
+static void vm_open(struct vm_area_struct *vma)
+{
+       struct i915_mmap_offset *mmo = vma->vm_private_data;
+       struct drm_i915_gem_object *obj = mmo->obj;
+
+       GEM_BUG_ON(!obj);
+       i915_gem_object_get(obj);
+}
+
+static void vm_close(struct vm_area_struct *vma)
+{
+       struct i915_mmap_offset *mmo = vma->vm_private_data;
+       struct drm_i915_gem_object *obj = mmo->obj;
+
+       GEM_BUG_ON(!obj);
+       i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct vm_ops_gtt = {
+       .fault = vm_fault_gtt,
+       .open = vm_open,
+       .close = vm_close,
+};
+
+static const struct vm_operations_struct vm_ops_cpu = {
+       .fault = vm_fault_cpu,
+       .open = vm_open,
+       .close = vm_close,
+};
+
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data, since we need to be
+ * able to resolve multiple mmap offsets which could be tied to a
+ * single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_vma_offset_node *node;
+       struct drm_file *priv = filp->private_data;
+       struct drm_device *dev = priv->minor->dev;
+       struct i915_mmap_offset *mmo = NULL;
+       struct drm_gem_object *obj = NULL;
+
+       if (drm_dev_is_unplugged(dev))
+               return -ENODEV;
+
+       drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+       node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+                                                 vma->vm_pgoff,
+                                                 vma_pages(vma));
+       if (likely(node)) {
+               mmo = container_of(node, struct i915_mmap_offset,
+                                  vma_node);
+               /*
+                * In our dependency chain, the drm_vma_offset_node
+                * depends on the validity of the mmo, which depends on
+                * the gem object. However, the only reference we have
+                * at this point is the mmo (as the parent of the node).
+                * Try to check if the gem object was at least cleared.
+                */
+               if (!mmo || !mmo->obj) {
+                       drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+                       return -EINVAL;
+               }
+               /*
+                * Skip 0-refcnted objects as they are in the process of being
+                * destroyed and will be invalid when the vma manager lock
+                * is released.
+                */
+               obj = &mmo->obj->base;
+               if (!kref_get_unless_zero(&obj->refcount))
+                       obj = NULL;
+       }
+       drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+       if (!obj)
+               return -EINVAL;
+
+       if (!drm_vma_node_is_allowed(node, priv)) {
+               drm_gem_object_put_unlocked(obj);
+               return -EACCES;
+       }
+
+       if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
+               if (vma->vm_flags & VM_WRITE) {
+                       drm_gem_object_put_unlocked(obj);
+                       return -EINVAL;
+               }
+               vma->vm_flags &= ~VM_MAYWRITE;
+       }
+
+       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_private_data = mmo;
+
+       switch (mmo->mmap_type) {
+       case I915_MMAP_TYPE_WC:
+               vma->vm_page_prot =
+                       pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+               vma->vm_ops = &vm_ops_cpu;
+               break;
+
+       case I915_MMAP_TYPE_WB:
+               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+               vma->vm_ops = &vm_ops_cpu;
+               break;
+
+       case I915_MMAP_TYPE_UC:
+               vma->vm_page_prot =
+                       pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+               vma->vm_ops = &vm_ops_cpu;
+               break;
+
+       case I915_MMAP_TYPE_GTT:
+               vma->vm_page_prot =
+                       pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+               vma->vm_ops = &vm_ops_gtt;
+               break;
+       }
+       vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+
+       return 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_MMAN_H__
+#define __I915_GEM_MMAN_H__
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_file;
+struct drm_i915_gem_object;
+struct file;
+struct i915_mmap_offset;
+struct mutex;
+
+int i915_gem_mmap_gtt_version(void);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
+                             struct drm_device *dev,
+                             u32 handle, u64 *offset);
+
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+
+#endif
 
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
+#include "i915_gem_mman.h"
 #include "i915_gem_object.h"
 #include "i915_globals.h"
 #include "i915_trace.h"
 
        INIT_LIST_HEAD(&obj->lut_list);
 
+       spin_lock_init(&obj->mmo.lock);
+       INIT_LIST_HEAD(&obj->mmo.offsets);
+
        init_rcu_head(&obj->rcu);
 
        obj->ops = ops;
        struct drm_i915_gem_object *obj = to_intel_bo(gem);
        struct drm_i915_file_private *fpriv = file->driver_priv;
        struct i915_lut_handle *lut, *ln;
+       struct i915_mmap_offset *mmo;
        LIST_HEAD(close);
 
        i915_gem_object_lock(obj);
        }
        i915_gem_object_unlock(obj);
 
+       spin_lock(&obj->mmo.lock);
+       list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+               if (mmo->file != file)
+                       continue;
+
+               spin_unlock(&obj->mmo.lock);
+               drm_vma_node_revoke(&mmo->vma_node, file);
+               spin_lock(&obj->mmo.lock);
+       }
+       spin_unlock(&obj->mmo.lock);
+
        list_for_each_entry_safe(lut, ln, &close, obj_link) {
                struct i915_gem_context *ctx = lut->ctx;
                struct i915_vma *vma;
 
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        llist_for_each_entry_safe(obj, on, freed, freed) {
+               struct i915_mmap_offset *mmo, *mn;
+
                trace_i915_gem_object_destroy(obj);
 
                if (!list_empty(&obj->vma.list)) {
                        spin_unlock(&obj->vma.lock);
                }
 
+               i915_gem_object_release_mmap(obj);
+
+               list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+                       drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+                                             &mmo->vma_node);
+                       kfree(mmo);
+               }
+               INIT_LIST_HEAD(&obj->mmo.offsets);
+
                GEM_BUG_ON(atomic_read(&obj->bind_count));
                GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
-       obj->base.vma_node.readonly = true;
+       obj->flags |= I915_BO_READONLY;
 }
 
 static inline bool
 i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
 {
-       return obj->base.vma_node.readonly;
+       return obj->flags & I915_BO_READONLY;
 }
 
 static inline bool
        i915_gem_object_unpin_pages(obj);
 }
 
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
-
 void
 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains);
 
        void (*release)(struct drm_i915_gem_object *obj);
 };
 
+enum i915_mmap_type {
+       I915_MMAP_TYPE_GTT = 0,
+       I915_MMAP_TYPE_WC,
+       I915_MMAP_TYPE_WB,
+       I915_MMAP_TYPE_UC,
+};
+
+struct i915_mmap_offset {
+       struct drm_device *dev;
+       struct drm_vma_offset_node vma_node;
+       struct drm_i915_gem_object *obj;
+       struct drm_file *file;
+       enum i915_mmap_type mmap_type;
+
+       struct list_head offset;
+};
+
 struct drm_i915_gem_object {
        struct drm_gem_object base;
 
        unsigned int userfault_count;
        struct list_head userfault_link;
 
+       struct {
+               spinlock_t lock; /* Protects access to mmo offsets */
+               struct list_head offsets;
+       } mmo;
+
        I915_SELFTEST_DECLARE(struct list_head st_link);
 
        unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
 #define I915_BO_ALLOC_VOLATILE   BIT(1)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
+#define I915_BO_READONLY         BIT(2)
 
        /*
         * Is the object to be mapped as read-only to the GPU
 
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 #include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                goto unlock;
        }
 
+       i915_gem_object_release_mmap_offset(obj);
+
        /*
         * ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
 
 #include "i915_drv.h"
 #include "i915_gem.h"
 #include "i915_gem_ioctls.h"
+#include "i915_gem_mman.h"
 #include "i915_gem_object.h"
 
 /**
 
                               int expected)
 {
        struct drm_i915_gem_object *obj;
-       int err;
+       struct i915_mmap_offset *mmo;
 
        obj = i915_gem_object_create_internal(i915, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       err = create_mmap_offset(obj);
+       mmo = mmap_offset_attach(obj, I915_MMAP_TYPE_GTT, NULL);
        i915_gem_object_put(obj);
 
-       return err == expected;
+       return PTR_ERR_OR_ZERO(mmo) == expected;
 }
 
 static void disable_retire_worker(struct drm_i915_private *i915)
        struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *hole, *next;
-       int loop, err;
+       struct i915_mmap_offset *mmo;
+       int loop, err = 0;
 
        /* Disable background reaper */
        disable_retire_worker(i915);
                goto out;
        }
 
-       err = create_mmap_offset(obj);
-       if (err) {
+       mmo = mmap_offset_attach(obj, I915_MMAP_TYPE_GTT, NULL);
+       if (IS_ERR(mmo)) {
                pr_err("Unable to insert object into reclaimed hole\n");
+               err = PTR_ERR(mmo);
                goto err_obj;
        }
 
 }
 
 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
-static int igt_mmap_gtt(void *arg)
+static int igt_mmap(void *arg, enum i915_mmap_type type)
 {
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
+       struct i915_mmap_offset *mmo;
        struct vm_area_struct *area;
        unsigned long addr;
        void *vaddr;
-       int err, i;
+       int err = 0, i;
 
        if (!i915_ggtt_has_aperture(&i915->ggtt))
                return 0;
        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
 
-       err = create_mmap_offset(obj);
-       if (err)
+       mmo = mmap_offset_attach(obj, type, NULL);
+       if (IS_ERR(mmo)) {
+               err = PTR_ERR(mmo);
                goto out;
+       }
 
-       addr = igt_mmap_node(i915, &obj->base.vma_node,
-                            0, PROT_WRITE, MAP_SHARED);
+       addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
        if (IS_ERR_VALUE(addr)) {
                err = addr;
                goto out;
        }
 
-       pr_debug("igt_mmap(obj:gtt) @ %lx\n", addr);
+       pr_debug("igt_mmap() @ %lx\n", addr);
 
        area = find_vma(current->mm, addr);
        if (!area) {
                goto out_unmap;
        }
 
-       if (area->vm_private_data != obj) {
-               pr_err("vm_area_struct did not point back to our object!\n");
+       if (area->vm_private_data != mmo) {
+               pr_err("vm_area_struct did not point back to our mmap_offset object!\n");
                err = -EINVAL;
                goto out_unmap;
        }
                u32 x;
 
                if (get_user(x, ux)) {
-                       pr_err("Unable to read from GTT mmap, offset:%zd\n",
+                       pr_err("Unable to read from mmap, offset:%zd\n",
                               i * sizeof(x));
                        err = -EFAULT;
                        break;
                }
 
                if (x != expand32(POISON_INUSE)) {
-                       pr_err("Read incorrect value from GTT mmap, offset:%zd, found:%x, expected:%x\n",
+                       pr_err("Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
                               i * sizeof(x), x, expand32(POISON_INUSE));
                        err = -EINVAL;
                        break;
 
                x = expand32(POISON_FREE);
                if (put_user(x, ux)) {
-                       pr_err("Unable to write to GTT mmap, offset:%zd\n",
+                       pr_err("Unable to write to mmap, offset:%zd\n",
                               i * sizeof(x));
                        err = -EFAULT;
                        break;
                goto out;
        }
        if (err == 0 && memchr_inv(vaddr, POISON_FREE, PAGE_SIZE)) {
-               pr_err("Write via GGTT mmap did not land in backing store\n");
+               pr_err("Write via mmap did not land in backing store\n");
                err = -EINVAL;
        }
        i915_gem_object_unpin_map(obj);
        return err;
 }
 
+static int igt_mmap_gtt(void *arg)
+{
+       return igt_mmap(arg, I915_MMAP_TYPE_GTT);
+}
+
+static int igt_mmap_cpu(void *arg)
+{
+       return igt_mmap(arg, I915_MMAP_TYPE_WC);
+}
+
 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
 {
        if (!pte_present(*pte) || pte_none(*pte)) {
        return __get_user(c, end - 1);
 }
 
-static int igt_mmap_gtt_revoke(void *arg)
+static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
 {
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_object *obj;
+       struct i915_mmap_offset *mmo;
        unsigned long addr;
        int err;
 
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       err = create_mmap_offset(obj);
-       if (err)
+       mmo = mmap_offset_attach(obj, type, NULL);
+       if (IS_ERR(mmo)) {
+               err = PTR_ERR(mmo);
                goto out;
+       }
 
-       addr = igt_mmap_node(i915, &obj->base.vma_node,
-                            0, PROT_WRITE, MAP_SHARED);
+       addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
        if (IS_ERR_VALUE(addr)) {
                err = addr;
                goto out;
        if (err)
                goto out_unmap;
 
-       GEM_BUG_ON(!atomic_read(&obj->bind_count));
+       GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
+                  !atomic_read(&obj->bind_count));
 
        err = check_present(addr, obj->base.size);
        if (err)
        }
        GEM_BUG_ON(atomic_read(&obj->bind_count));
 
+       if (type != I915_MMAP_TYPE_GTT) {
+               __i915_gem_object_put_pages(obj);
+               if (i915_gem_object_has_pages(obj)) {
+                       pr_err("Failed to put-pages object!\n");
+                       err = -EINVAL;
+                       goto out_unmap;
+               }
+       }
+
        err = check_absent(addr, obj->base.size);
        if (err)
                goto out_unmap;
        return err;
 }
 
+static int igt_mmap_gtt_revoke(void *arg)
+{
+       return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT);
+}
+
+static int igt_mmap_cpu_revoke(void *arg)
+{
+       return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC);
+}
+
 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_smoke_tiling),
                SUBTEST(igt_mmap_offset_exhaustion),
                SUBTEST(igt_mmap_gtt),
+               SUBTEST(igt_mmap_cpu),
                SUBTEST(igt_mmap_gtt_revoke),
+               SUBTEST(igt_mmap_cpu_revoke),
        };
 
        return i915_subtests(tests, i915);
 
                        continue;
 
                GEM_BUG_ON(vma->fence != >->ggtt->fence_regs[i]);
-               node = &vma->obj->base.vma_node;
+
+               if (!vma->mmo)
+                       continue;
+
+               node = &vma->mmo->vma_node;
                vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+
                unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
                                    drm_vma_node_offset_addr(node) + vma_offset,
                                    vma->size,
 
 
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_rc6.h"
        .runtime_resume = intel_runtime_resume,
 };
 
-static const struct vm_operations_struct i915_gem_vm_ops = {
-       .fault = i915_gem_fault,
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
-
 static const struct file_operations i915_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = drm_ioctl,
-       .mmap = drm_gem_mmap,
+       .mmap = i915_gem_mmap,
        .poll = drm_poll,
        .read = drm_read,
        .compat_ioctl = i915_compat_ioctl,
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
 
        .gem_close_object = i915_gem_close_object,
        .gem_free_object_unlocked = i915_gem_free_object,
-       .gem_vm_ops = &i915_gem_vm_ops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .get_scanout_position = i915_get_crtc_scanoutpos,
 
        .dumb_create = i915_gem_dumb_create,
-       .dumb_map_offset = i915_gem_mmap_gtt,
+       .dumb_map_offset = i915_gem_dumb_mmap_offset,
+
        .ioctls = i915_ioctls,
        .num_ioctls = ARRAY_SIZE(i915_ioctls),
        .fops = &i915_driver_fops,
 
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
-                     u32 handle, u64 *offset);
-int i915_gem_mmap_gtt_version(void);
 
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
 
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
-vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 #include "gem/i915_gem_clflush.h"
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_user.h"
 
        list_for_each_entry_safe(obj, on,
                                 &i915->ggtt.userfault_list, userfault_link)
-               __i915_gem_object_release_mmap(obj);
+               __i915_gem_object_release_mmap_gtt(obj);
 
        /*
         * The fence will be lost when the device powers down. If any were
 
  * SPDX-License-Identifier: MIT
  */
 
+#include "gem/i915_gem_mman.h"
 #include "gt/intel_engine_user.h"
 
 #include "i915_drv.h"
 
 
 void i915_vma_revoke_mmap(struct i915_vma *vma)
 {
-       struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+       struct drm_vma_offset_node *node;
        u64 vma_offset;
 
-       lockdep_assert_held(&vma->vm->mutex);
-
        if (!i915_vma_has_userfault(vma))
                return;
 
        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
        GEM_BUG_ON(!vma->obj->userfault_count);
 
+       node = &vma->mmo->vma_node;
        vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
        unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
                            drm_vma_node_offset_addr(node) + vma_offset,
 
        u64 display_alignment;
        struct i915_page_sizes page_sizes;
 
+       /* mmap-offset associated with fencing for this vma */
+       struct i915_mmap_offset *mmo;
+
        u32 fence_size;
        u32 fence_alignment;
 
 
 #define DRM_IOCTL_I915_GEM_PWRITE      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
 #define DRM_IOCTL_I915_GEM_MMAP                DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
 #define DRM_IOCTL_I915_GEM_MMAP_GTT    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
 #define DRM_IOCTL_I915_GEM_SET_DOMAIN  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
 #define DRM_IOCTL_I915_GEM_SW_FINISH   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
 #define DRM_IOCTL_I915_GEM_SET_TILING  DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
        __u64 offset;
 };
 
+struct drm_i915_gem_mmap_offset {
+       /** Handle for the object being mapped. */
+       __u32 handle;
+       __u32 pad;
+       /**
+        * Fake offset to use for subsequent mmap call
+        *
+        * This is a fixed-size type for 32/64 compatibility.
+        */
+       __u64 offset;
+
+       /**
+        * Flags for extended behaviour.
+        *
+        * One of the MMAP_OFFSET types (GTT, WC, WB, UC, etc)
+        * must be included.
+        */
+       __u64 flags;
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC  1
+#define I915_MMAP_OFFSET_WB  2
+#define I915_MMAP_OFFSET_UC  3
+
+       /*
+        * Zero-terminated chain of extensions.
+        *
+        * No current extensions defined; mbz.
+        */
+       __u64 extensions;
+};
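
For illustration, the expected userspace flow is to request a fake offset with the desired caching mode and then mmap() that offset against the DRM fd. A minimal, hypothetical sketch (not part of this patch; fd, handle and size are assumed to describe an existing GEM object):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: map a GEM object write-combined via MMAP_OFFSET. */
static void *gem_mmap_wc(int fd, uint32_t handle, size_t size)
{
        struct drm_i915_gem_mmap_offset arg = {
                .handle = handle,
                .flags = I915_MMAP_OFFSET_WC, /* returns -ENODEV without PAT */
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
                return MAP_FAILED;

        /* The fake offset is only meaningful when mmapped on the DRM fd. */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, arg.offset);
}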
+
 struct drm_i915_gem_set_domain {
        /** Handle for the object */
        __u32 handle;