        gt/intel_sseu.o \
        gt/intel_timeline.o \
        gt/intel_workarounds.o \
+       gt/shmem_utils.o \
        gt/sysfs_engines.o
 # autogenerated null render state
 gt-y += \
 
        intel_engine_cleanup_cmd_parser(engine);
 
        if (engine->default_state)
-               i915_gem_object_put(engine->default_state);
+               fput(engine->default_state);
 
        if (engine->kernel_context) {
                intel_context_unpin(engine->kernel_context);
 
 #include "intel_gt_pm.h"
 #include "intel_rc6.h"
 #include "intel_ring.h"
+#include "shmem_utils.h"
 
 static int __engine_unpark(struct intel_wakeref *wf)
 {
        /* Pin the default state for fast resets from atomic context. */
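+       /* The default state may be absent or fail to pin; both are tolerated. */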
        map = NULL;
        if (engine->default_state)
-               map = i915_gem_object_pin_map(engine->default_state,
-                                             I915_MAP_WB);
-       if (!IS_ERR_OR_NULL(map))
-               engine->pinned_default_state = map;
+               map = shmem_pin_map(engine->default_state);
+       engine->pinned_default_state = map;
 
        /* Discard stale context state from across idling */
        ce = engine->kernel_context;
                engine->park(engine);
 
        if (engine->pinned_default_state) {
-               i915_gem_object_unpin_map(engine->default_state);
+               shmem_unpin_map(engine->default_state,
+                               engine->pinned_default_state);
                engine->pinned_default_state = NULL;
        }
 
 
 
        unsigned long wakeref_serial;
        struct intel_wakeref wakeref;
-       struct drm_i915_gem_object *default_state;
+       struct file *default_state;
        void *pinned_default_state;
 
        struct {
 
 #include "intel_rps.h"
 #include "intel_uncore.h"
 #include "intel_pm.h"
+#include "shmem_utils.h"
 
 void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 {
                return i915_vm_get(&gt->ggtt->vm);
 }
 
-static int __intel_context_flush_retire(struct intel_context *ce)
-{
-       struct intel_timeline *tl;
-
-       tl = intel_context_timeline_lock(ce);
-       if (IS_ERR(tl))
-               return PTR_ERR(tl);
-
-       intel_context_timeline_unlock(tl);
-       return 0;
-}
-
 static int __engines_record_defaults(struct intel_gt *gt)
 {
        struct i915_request *requests[I915_NUM_ENGINES] = {};
 
        for (id = 0; id < ARRAY_SIZE(requests); id++) {
                struct i915_request *rq;
-               struct i915_vma *state;
-               void *vaddr;
+               struct file *state;
 
                rq = requests[id];
                if (!rq)
                }
 
                GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
-               state = rq->context->state;
-               if (!state)
+               if (!rq->context->state)
                        continue;
 
-               /* Serialise with retirement on another CPU */
-               GEM_BUG_ON(!i915_request_completed(rq));
-               err = __intel_context_flush_retire(rq->context);
-               if (err)
-                       goto out;
-
-               /* We want to be able to unbind the state from the GGTT */
-               GEM_BUG_ON(intel_context_is_pinned(rq->context));
-
-               /*
-                * As we will hold a reference to the logical state, it will
-                * not be torn down with the context, and importantly the
-                * object will hold onto its vma (making it possible for a
-                * stray GTT write to corrupt our defaults). Unmap the vma
-                * from the GTT to prevent such accidents and reclaim the
-                * space.
-                */
-               err = i915_vma_unbind(state);
-               if (err)
-                       goto out;
-
-               i915_gem_object_lock(state->obj);
-               err = i915_gem_object_set_to_cpu_domain(state->obj, false);
-               i915_gem_object_unlock(state->obj);
-               if (err)
-                       goto out;
-
-               i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
-
-               /* Check we can acquire the image of the context state */
-               vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
-               if (IS_ERR(vaddr)) {
-                       err = PTR_ERR(vaddr);
+               /* Keep a copy of the state's backing pages; free the obj */
+               state = shmem_create_from_object(rq->context->state->obj);
+               if (IS_ERR(state)) {
+                       err = PTR_ERR(state);
                        goto out;
                }
-
-               rq->engine->default_state = i915_gem_object_get(state->obj);
-               i915_gem_object_unpin_map(state->obj);
+               rq->engine->default_state = state;
        }
 
 out:
 
 #include "intel_reset.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
+#include "shmem_utils.h"
 
 #define RING_EXECLIST_QFULL            (1 << 0x2)
 #define RING_EXECLIST1_VALID           (1 << 0x3)
 {
        bool inhibit = true;
        void *vaddr;
-       int ret;
 
        vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
-               ret = PTR_ERR(vaddr);
-               drm_dbg(&engine->i915->drm,
-                       "Could not map object pages! (%d)\n", ret);
-               return ret;
+               drm_dbg(&engine->i915->drm, "Could not map object pages!\n");
+               return PTR_ERR(vaddr);
        }
 
        set_redzone(vaddr, engine);
 
        if (engine->default_state) {
-               void *defaults;
-
-               defaults = i915_gem_object_pin_map(engine->default_state,
-                                                  I915_MAP_WB);
-               if (IS_ERR(defaults)) {
-                       ret = PTR_ERR(defaults);
-                       goto err_unpin_ctx;
-               }
-
-               memcpy(vaddr, defaults, engine->context_size);
-               i915_gem_object_unpin_map(engine->default_state);
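+               /* Load the saved default context image from shmemfs */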
+               shmem_read(engine->default_state, 0,
+                          vaddr, engine->context_size);
                __set_bit(CONTEXT_VALID_BIT, &ce->flags);
                inhibit = false;
        }
        execlists_init_reg_state(vaddr + LRC_STATE_OFFSET,
                                 ce, engine, ring, inhibit);
 
-       ret = 0;
-err_unpin_ctx:
        __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size);
        i915_gem_object_unpin_map(ctx_obj);
-       return ret;
+       return 0;
 }
 
 static int __execlists_context_alloc(struct intel_context *ce,
 
 #include "intel_reset.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
+#include "shmem_utils.h"
 
 /* Rough estimate of the typical request size, performing a flush,
  * set-context and then emitting the batch.
                i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
 
        if (engine->default_state) {
-               void *defaults, *vaddr;
+               void *vaddr;
 
                vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
                if (IS_ERR(vaddr)) {
                        goto err_obj;
                }
 
-               defaults = i915_gem_object_pin_map(engine->default_state,
-                                                  I915_MAP_WB);
-               if (IS_ERR(defaults)) {
-                       err = PTR_ERR(defaults);
-                       goto err_map;
-               }
-
-               memcpy(vaddr, defaults, engine->context_size);
-               i915_gem_object_unpin_map(engine->default_state);
+               shmem_read(engine->default_state, 0,
+                          vaddr, engine->context_size);
 
                i915_gem_object_flush_map(obj);
                i915_gem_object_unpin_map(obj);
 
        return vma;
 
-err_map:
-       i915_gem_object_unpin_map(obj);
 err_obj:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
 
 
        for_each_engine(engine, gt, id) {
                struct {
-                       struct drm_i915_gem_object *state;
+                       struct file *state;
                        void *pinned;
                } saved;
 
 
                if (!engine->default_state)
                        continue;
 
-               hw = i915_gem_object_pin_map(engine->default_state,
-                                            I915_MAP_WB);
+               hw = shmem_pin_map(engine->default_state);
-               if (IS_ERR(hw)) {
-                       err = PTR_ERR(hw);
+               if (!hw) {
+                       err = -ENOMEM;
                        break;
                        hexdump(lrc, PAGE_SIZE);
                }
 
-               i915_gem_object_unpin_map(engine->default_state);
+               shmem_unpin_map(engine->default_state, hw);
                if (err)
                        break;
        }
                if (!engine->default_state)
                        continue;
 
-               hw = i915_gem_object_pin_map(engine->default_state,
-                                            I915_MAP_WB);
+               hw = shmem_pin_map(engine->default_state);
-               if (IS_ERR(hw)) {
-                       err = PTR_ERR(hw);
+               if (!hw) {
+                       err = -ENOMEM;
                        break;
                        }
                }
 
-               i915_gem_object_unpin_map(engine->default_state);
+               shmem_unpin_map(engine->default_state, hw);
        }
 
        return err;
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+
+#include "gem/i915_gem_object.h"
+#include "shmem_utils.h"
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len)
+{
+       struct file *file;
+       int err;
+
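+       /* Create an unlinked shmemfs file sized to hold the data */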
+       file = shmem_file_setup(name, PAGE_ALIGN(len), VM_NORESERVE);
+       if (IS_ERR(file))
+               return file;
+
+       err = shmem_write(file, 0, data, len);
+       if (err) {
+               fput(file);
+               return ERR_PTR(err);
+       }
+
+       return file;
+}
+
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
+{
+       struct file *file;
+       void *ptr;
+
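+       /*
+        * A shmem-backed object already owns a shmemfs file; take another
+        * reference on that file rather than copying the pages.
+        */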
+       if (obj->ops == &i915_gem_shmem_ops) {
+               file = obj->base.filp;
+               atomic_long_inc(&file->f_count);
+               return file;
+       }
+
+       ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(ptr))
+               return ERR_CAST(ptr);
+
+       file = shmem_create_from_data("", ptr, obj->base.size);
+       i915_gem_object_unpin_map(obj);
+
+       return file;
+}
+
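+/* Number of whole pages backing the (page-aligned) shmemfs file */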
+static size_t shmem_npte(struct file *file)
+{
+       return file->f_mapping->host->i_size >> PAGE_SHIFT;
+}
+
+static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
+{
+       unsigned long pfn;
+
+       vunmap(ptr);
+
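+       /*
+        * shmem_pin_map() left a reference on each page; the lookup below
+        * takes a second one, so drop both to finally release the page.
+        */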
+       for (pfn = 0; pfn < n_pte; pfn++) {
+               struct page *page;
+
+               page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+                                                  GFP_KERNEL);
+               if (!WARN_ON(IS_ERR(page))) {
+                       put_page(page);
+                       put_page(page);
+               }
+       }
+}
+
+void *shmem_pin_map(struct file *file)
+{
+       const size_t n_pte = shmem_npte(file);
+       pte_t *stack[32], **ptes, **mem;
+       struct vm_struct *area;
+       unsigned long pfn;
+
+       mem = stack;
+       if (n_pte > ARRAY_SIZE(stack)) {
+               mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
+               if (!mem)
+                       return NULL;
+       }
+
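+       /* Reserve a contiguous kernel address range and collect its PTE slots */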
+       area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
+       if (!area) {
+               if (mem != stack)
+                       kvfree(mem);
+               return NULL;
+       }
+
+       ptes = mem;
+       for (pfn = 0; pfn < n_pte; pfn++) {
+               struct page *page;
+
+               page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+                                                  GFP_KERNEL);
+               if (IS_ERR(page))
+                       goto err_page;
+
+               **ptes++ = mk_pte(page, PAGE_KERNEL);
+       }
+
+       if (mem != stack)
+               kvfree(mem);
+
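+       /* The pages are pinned; keep them off the reclaim lists */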
+       mapping_set_unevictable(file->f_mapping);
+       return area->addr;
+
+err_page:
+       if (mem != stack)
+               kvfree(mem);
+
+       __shmem_unpin_map(file, area->addr, pfn);
+       return NULL;
+}
+
+void shmem_unpin_map(struct file *file, void *ptr)
+{
+       mapping_clear_unevictable(file->f_mapping);
+       __shmem_unpin_map(file, ptr, shmem_npte(file));
+}
+
+static int __shmem_rw(struct file *file, loff_t off,
+                     void *ptr, size_t len,
+                     bool write)
+{
+       unsigned long pfn;
+
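+       /* Copy to/from the file one page at a time via the page cache */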
+       for (pfn = off >> PAGE_SHIFT; len; pfn++) {
+               unsigned int this =
+                       min_t(size_t, PAGE_SIZE - offset_in_page(off), len);
+               struct page *page;
+               void *vaddr;
+
+               page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
+                                                  GFP_KERNEL);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
+               vaddr = kmap(page);
+               if (write) {
+                       memcpy(vaddr + offset_in_page(off), ptr, this);
+                       /* Mark dirty so the write is not lost to reclaim */
+                       set_page_dirty(page);
+               } else {
+                       memcpy(ptr, vaddr + offset_in_page(off), this);
+               }
+               mark_page_accessed(page);
+               kunmap(page);
+               put_page(page);
+
+               len -= this;
+               ptr += this;
+               off = 0;
+       }
+
+       return 0;
+}
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len)
+{
+       return __shmem_rw(file, off, dst, len, false);
+}
+
+int shmem_write(struct file *file, loff_t off, void *src, size_t len)
+{
+       return __shmem_rw(file, off, src, len, true);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "st_shmem_utils.c"
+#endif
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.h
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef SHMEM_UTILS_H
+#define SHMEM_UTILS_H
+
+#include <linux/types.h>
+
+struct drm_i915_gem_object;
+struct file;
+
+struct file *shmem_create_from_data(const char *name, void *data, size_t len);
+struct file *shmem_create_from_object(struct drm_i915_gem_object *obj);
+
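+/* Pin all backing pages and vmap them contiguously; returns NULL on error */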
+void *shmem_pin_map(struct file *file);
+void shmem_unpin_map(struct file *file, void *ptr);
+
+int shmem_read(struct file *file, loff_t off, void *dst, size_t len);
+int shmem_write(struct file *file, loff_t off, void *src, size_t len);
+
+#endif /* SHMEM_UTILS_H */
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/st_shmem_utils.c
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/* Just a quick and casual check of the shmem_utils API */
+
+static int igt_shmem_basic(void *ignored)
+{
+       u32 datum = 0xdeadbeef, result;
+       struct file *file;
+       u32 *map;
+       int err;
+
+       file = shmem_create_from_data("mock", &datum, sizeof(datum));
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       result = 0;
+       err = shmem_read(file, 0, &result, sizeof(result));
+       if (err)
+               goto out_file;
+
+       if (result != datum) {
+               pr_err("Incorrect read back from shmemfs: %x != %x\n",
+                      result, datum);
+               err = -EINVAL;
+               goto out_file;
+       }
+
+       result = 0xc0ffee;
+       err = shmem_write(file, 0, &result, sizeof(result));
+       if (err)
+               goto out_file;
+
+       map = shmem_pin_map(file);
+       if (!map) {
+               err = -ENOMEM;
+               goto out_file;
+       }
+
+       if (*map != result) {
+               pr_err("Incorrect read back via mmap of last write: %x != %x\n",
+                      *map, result);
+               err = -EINVAL;
+               goto out_map;
+       }
+
+out_map:
+       shmem_unpin_map(file, map);
+out_file:
+       fput(file);
+       return err;
+}
+
+int shmem_utils_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_shmem_basic),
+       };
+
+       return i915_subtests(tests, NULL);
+}
 
        return capture;
 }
 
-static struct i915_vma_coredump *
-capture_object(const struct intel_gt *gt,
-              struct drm_i915_gem_object *obj,
-              const char *name,
-              struct i915_vma_compress *compress)
-{
-       if (obj && i915_gem_object_has_pages(obj)) {
-               struct i915_vma fake = {
-                       .node = { .start = U64_MAX, .size = obj->base.size },
-                       .size = obj->base.size,
-                       .pages = obj->mm.pages,
-                       .obj = obj,
-               };
-
-               return i915_vma_coredump_create(gt, &fake, name, compress);
-       } else {
-               return NULL;
-       }
-}
-
 static void add_vma(struct intel_engine_coredump *ee,
                    struct i915_vma_coredump *vma)
 {
                                         engine->wa_ctx.vma,
                                         "WA context",
                                         compress));
-
-       add_vma(ee,
-               capture_object(engine->gt,
-                              engine->default_state,
-                              "NULL context",
-                              compress));
 }
 
 static struct intel_engine_coredump *
 
  * Tests are executed in order by igt/drv_selftest
  */
 selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(shmem, shmem_utils_mock_selftests)
 selftest(fence, i915_sw_fence_mock_selftests)
 selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)