i915_utils.o \
          intel_csr.o \
          intel_device_info.o \
+         intel_memory_region.o \
          intel_pch.o \
          intel_pm.o \
          intel_runtime_pm.o \
        gem/i915_gem_pages.o \
        gem/i915_gem_phys.o \
        gem/i915_gem_pm.o \
+       gem/i915_gem_region.o \
        gem/i915_gem_shmem.o \
        gem/i915_gem_shrinker.o \
        gem/i915_gem_stolen.o \
 
                atomic_t pages_pin_count;
                atomic_t shrink_pin;
 
+               /**
+                * Memory region for this object.
+                */
+               struct intel_memory_region *region;
+               /**
+                * List of memory region blocks allocated for this object.
+                */
+               struct list_head blocks;
+
                struct sg_table *pages;
                void *mapping;
 
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_gem_region.h"
+#include "i915_drv.h"
+
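+/*
+ * Hand the object's buddy blocks back to the owning memory region and
+ * free the scatterlist that described them.
+ */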
+void
+i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+                               struct sg_table *pages)
+{
+       __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+
+       obj->mm.dirty = false;
+       sg_free_table(pages);
+       kfree(pages);
+}
+
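+/*
+ * Allocate a list of buddy blocks from the object's memory region and
+ * publish them to the object as a scatterlist, merging physically
+ * contiguous blocks into a single sg entry where possible.
+ */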
+int
+i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
+{
+       struct intel_memory_region *mem = obj->mm.region;
+       struct list_head *blocks = &obj->mm.blocks;
+       unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
+       resource_size_t size = obj->base.size;
+       resource_size_t prev_end;
+       struct i915_buddy_block *block;
+       struct sg_table *st;
+       struct scatterlist *sg;
+       unsigned int sg_page_sizes;
+       int ret;
+
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return -ENOMEM;
+
+       if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
+               kfree(st);
+               return -ENOMEM;
+       }
+
+       ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
+       if (ret)
+               goto err_free_sg;
+
+       GEM_BUG_ON(list_empty(blocks));
+
+       sg = st->sgl;
+       st->nents = 0;
+       sg_page_sizes = 0;
+       prev_end = (resource_size_t)-1;
+
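+       /*
+        * prev_end starts at an impossible offset so the first block always
+        * opens a new sg entry; later blocks are merged into the current
+        * entry only if they are physically contiguous with it and the
+        * combined length does not overflow sg->length.
+        */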
+       list_for_each_entry(block, blocks, link) {
+               u64 block_size, offset;
+
+               block_size = i915_buddy_block_size(&mem->mm, block);
+               offset = i915_buddy_block_offset(block);
+
+               GEM_BUG_ON(overflows_type(block_size, sg->length));
+
+               if (offset != prev_end ||
+                   add_overflows_t(typeof(sg->length), sg->length, block_size)) {
+                       if (st->nents) {
+                               sg_page_sizes |= sg->length;
+                               sg = __sg_next(sg);
+                       }
+
+                       sg_dma_address(sg) = mem->region.start + offset;
+                       sg_dma_len(sg) = block_size;
+
+                       sg->length = block_size;
+
+                       st->nents++;
+               } else {
+                       sg->length += block_size;
+                       sg_dma_len(sg) += block_size;
+               }
+
+               prev_end = offset + block_size;
+       }
+
+       sg_page_sizes |= sg->length;
+       sg_mark_end(sg);
+       i915_sg_trim(st);
+
+       __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+       return 0;
+
+err_free_sg:
+       sg_free_table(st);
+       kfree(st);
+       return ret;
+}
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+                                       struct intel_memory_region *mem)
+{
+       INIT_LIST_HEAD(&obj->mm.blocks);
+       obj->mm.region = intel_memory_region_get(mem);
+}
+
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+       intel_memory_region_put(obj->mm.region);
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+                             resource_size_t size,
+                             unsigned int flags)
+{
+       struct drm_i915_gem_object *obj;
+
+       /*
+        * NB: Our use of resource_size_t for the size stems from using struct
+        * resource for the mem->region. We might need to revisit this in the
+        * future.
+        */
+
+       if (!mem)
+               return ERR_PTR(-ENODEV);
+
+       size = round_up(size, mem->min_page_size);
+
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
+
+       /*
+        * XXX: There is a prevalence of the assumption that we fit the
+        * object's page count inside a 32bit _signed_ variable. Let's document
+        * this and catch if we ever need to fix it. In the meantime, if you do
+        * spot such a local variable, please consider fixing!
+        */
+
+       if (size >> PAGE_SHIFT > INT_MAX)
+               return ERR_PTR(-E2BIG);
+
+       if (overflows_type(size, obj->base.size))
+               return ERR_PTR(-E2BIG);
+
+       return mem->ops->create_object(mem, size, flags);
+}
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_REGION_H__
+#define __I915_GEM_REGION_H__
+
+#include <linux/types.h>
+
+struct intel_memory_region;
+struct drm_i915_gem_object;
+struct sg_table;
+
+int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
+void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
+                                    struct sg_table *pages);
+
+void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
+                                       struct intel_memory_region *mem);
+void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+i915_gem_object_create_region(struct intel_memory_region *mem,
+                             resource_size_t size,
+                             unsigned int flags);
+
+#endif
 
 
 #include "i915_selftest.h"
 
+#include "gem/i915_gem_region.h"
 #include "gem/i915_gem_pm.h"
 
 #include "gt/intel_gt.h"
 
 #include "selftests/mock_drm.h"
 #include "selftests/mock_gem_device.h"
+#include "selftests/mock_region.h"
 #include "selftests/i915_random.h"
 
 static const unsigned int page_sizes[] = {
        return err;
 }
 
+static int igt_mock_memory_region_huge_pages(void *arg)
+{
+       struct i915_ppgtt *ppgtt = arg;
+       struct drm_i915_private *i915 = ppgtt->vm.i915;
+       unsigned long supported = INTEL_INFO(i915)->page_sizes;
+       struct intel_memory_region *mem;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int bit;
+       int err = 0;
+
+       mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+       if (IS_ERR(mem)) {
+               pr_err("%s failed to create memory region\n", __func__);
+               return PTR_ERR(mem);
+       }
+
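+       /*
+        * For each page size the device supports, allocate a single page
+        * from the region and check both the alignment of the DMA address
+        * and the page size chosen for the GTT mapping.
+        */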
+       for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+               unsigned int page_size = BIT(bit);
+               resource_size_t phys;
+
+               obj = i915_gem_object_create_region(mem, page_size, 0);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       goto out_region;
+               }
+
+               vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+               if (IS_ERR(vma)) {
+                       err = PTR_ERR(vma);
+                       goto out_put;
+               }
+
+               err = i915_vma_pin(vma, 0, 0, PIN_USER);
+               if (err)
+                       goto out_close;
+
+               err = igt_check_page_sizes(vma);
+               if (err)
+                       goto out_unpin;
+
+               phys = i915_gem_object_get_dma_address(obj, 0);
+               if (!IS_ALIGNED(phys, page_size)) {
+                       pr_err("%s addr misaligned(%pa) page_size=%u\n",
+                              __func__, &phys, page_size);
+                       err = -EINVAL;
+                       goto out_unpin;
+               }
+
+               if (vma->page_sizes.gtt != page_size) {
+                       pr_err("%s page_sizes.gtt=%u, expected=%u\n",
+                              __func__, vma->page_sizes.gtt, page_size);
+                       err = -EINVAL;
+                       goto out_unpin;
+               }
+
+               i915_vma_unpin(vma);
+               i915_vma_close(vma);
+
+               i915_gem_object_put(obj);
+       }
+
+       goto out_region;
+
+out_unpin:
+       i915_vma_unpin(vma);
+out_close:
+       i915_vma_close(vma);
+out_put:
+       i915_gem_object_put(obj);
+out_region:
+       intel_memory_region_put(mem);
+       return err;
+}
+
 static int igt_mock_ppgtt_misaligned_dma(void *arg)
 {
        struct i915_ppgtt *ppgtt = arg;
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_mock_exhaust_device_supported_pages),
+               SUBTEST(igt_mock_memory_region_huge_pages),
                SUBTEST(igt_mock_ppgtt_misaligned_dma),
                SUBTEST(igt_mock_ppgtt_huge_fill),
                SUBTEST(igt_mock_ppgtt_64K),
 
 #include "intel_device_info.h"
 #include "intel_pch.h"
 #include "intel_runtime_pm.h"
+#include "intel_memory_region.h"
 #include "intel_uncore.h"
 #include "intel_wakeref.h"
 #include "intel_wopcm.h"
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "intel_memory_region.h"
+#include "i915_drv.h"
+
+static u64
+intel_memory_region_free_pages(struct intel_memory_region *mem,
+                              struct list_head *blocks)
+{
+       struct i915_buddy_block *block, *on;
+       u64 size = 0;
+
+       list_for_each_entry_safe(block, on, blocks, link) {
+               size += i915_buddy_block_size(&mem->mm, block);
+               i915_buddy_free(&mem->mm, block);
+       }
+       INIT_LIST_HEAD(blocks);
+
+       return size;
+}
+
+void
+__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+                                     struct list_head *blocks)
+{
+       mutex_lock(&mem->mm_lock);
+       intel_memory_region_free_pages(mem, blocks);
+       mutex_unlock(&mem->mm_lock);
+}
+
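+/*
+ * Free a single block by placing it on a temporary list and reusing the
+ * multi-block path above, which also takes care of the region lock.
+ */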
+void
+__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
+{
+       struct list_head blocks;
+
+       INIT_LIST_HEAD(&blocks);
+       list_add(&block->link, &blocks);
+       __intel_memory_region_put_pages_buddy(block->private, &blocks);
+}
+
+int
+__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+                                     resource_size_t size,
+                                     unsigned int flags,
+                                     struct list_head *blocks)
+{
+       unsigned long n_pages = size >> ilog2(mem->mm.chunk_size);
+       unsigned int min_order = 0;
+
+       GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
+       GEM_BUG_ON(!list_empty(blocks));
+
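+       /*
+        * chunk_size is the smallest unit the buddy allocator hands out; if
+        * the region demands a larger minimum page size, translate it into
+        * a minimum buddy order.
+        */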
+       if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
+               min_order = ilog2(mem->min_page_size) -
+                           ilog2(mem->mm.chunk_size);
+       }
+
+       mutex_lock(&mem->mm_lock);
+
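+       /*
+        * Greedily allocate the largest power-of-two block that still fits
+        * the remaining size, retrying with progressively smaller orders
+        * (down to min_order) on failure, until the request is satisfied.
+        */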
+       do {
+               struct i915_buddy_block *block;
+               unsigned int order;
+
+               order = fls(n_pages) - 1;
+               GEM_BUG_ON(order > mem->mm.max_order);
+               GEM_BUG_ON(order < min_order);
+
+               do {
+                       block = i915_buddy_alloc(&mem->mm, order);
+                       if (!IS_ERR(block))
+                               break;
+
+                       if (order-- == min_order)
+                               goto err_free_blocks;
+               } while (1);
+
+               n_pages -= BIT(order);
+
+               block->private = mem;
+               list_add(&block->link, blocks);
+
+               if (!n_pages)
+                       break;
+       } while (1);
+
+       mutex_unlock(&mem->mm_lock);
+       return 0;
+
+err_free_blocks:
+       intel_memory_region_free_pages(mem, blocks);
+       mutex_unlock(&mem->mm_lock);
+       return -ENXIO;
+}
+
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+                                     resource_size_t size,
+                                     unsigned int flags)
+{
+       struct i915_buddy_block *block;
+       LIST_HEAD(blocks);
+       int ret;
+
+       ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
+       if (ret)
+               return ERR_PTR(ret);
+
+       block = list_first_entry(&blocks, typeof(*block), link);
+       list_del_init(&block->link);
+       return block;
+}
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem)
+{
+       return i915_buddy_init(&mem->mm, resource_size(&mem->region),
+                              PAGE_SIZE);
+}
+
+void intel_memory_region_release_buddy(struct intel_memory_region *mem)
+{
+       i915_buddy_fini(&mem->mm);
+}
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+                          resource_size_t start,
+                          resource_size_t size,
+                          resource_size_t min_page_size,
+                          resource_size_t io_start,
+                          const struct intel_memory_region_ops *ops)
+{
+       struct intel_memory_region *mem;
+       int err;
+
+       mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+       if (!mem)
+               return ERR_PTR(-ENOMEM);
+
+       mem->i915 = i915;
+       mem->region = (struct resource)DEFINE_RES_MEM(start, size);
+       mem->io_start = io_start;
+       mem->min_page_size = min_page_size;
+       mem->ops = ops;
+
+       mutex_init(&mem->mm_lock);
+
+       if (ops->init) {
+               err = ops->init(mem);
+               if (err)
+                       goto err_free;
+       }
+
+       kref_init(&mem->kref);
+       return mem;
+
+err_free:
+       kfree(mem);
+       return ERR_PTR(err);
+}
+
+static void __intel_memory_region_destroy(struct kref *kref)
+{
+       struct intel_memory_region *mem =
+               container_of(kref, typeof(*mem), kref);
+
+       if (mem->ops->release)
+               mem->ops->release(mem);
+
+       mutex_destroy(&mem->mm_lock);
+       kfree(mem);
+}
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem)
+{
+       kref_get(&mem->kref);
+       return mem;
+}
+
+void intel_memory_region_put(struct intel_memory_region *mem)
+{
+       kref_put(&mem->kref, __intel_memory_region_destroy);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_memory_region.c"
+#include "selftests/mock_region.c"
+#endif
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_MEMORY_REGION_H__
+#define __INTEL_MEMORY_REGION_H__
+
+#include <linux/kref.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/io-mapping.h>
+
+#include "i915_buddy.h"
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+struct intel_memory_region;
+struct sg_table;
+
+#define I915_ALLOC_MIN_PAGE_SIZE   BIT(0)
+
+struct intel_memory_region_ops {
+       unsigned int flags;
+
+       int (*init)(struct intel_memory_region *mem);
+       void (*release)(struct intel_memory_region *mem);
+
+       struct drm_i915_gem_object *
+       (*create_object)(struct intel_memory_region *mem,
+                        resource_size_t size,
+                        unsigned int flags);
+};
+
+struct intel_memory_region {
+       struct drm_i915_private *i915;
+
+       const struct intel_memory_region_ops *ops;
+
+       struct io_mapping iomap;
+       struct resource region;
+
+       struct i915_buddy_mm mm;
+       struct mutex mm_lock;
+
+       struct kref kref;
+
+       /* Start of the region within the CPU-visible aperture, if mappable */
+       resource_size_t io_start;
+       /* Minimum allocation granularity for objects backed by this region */
+       resource_size_t min_page_size;
+
+       unsigned int type;
+       unsigned int instance;
+       unsigned int id;
+};
+
+int intel_memory_region_init_buddy(struct intel_memory_region *mem);
+void intel_memory_region_release_buddy(struct intel_memory_region *mem);
+
+int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
+                                         resource_size_t size,
+                                         unsigned int flags,
+                                         struct list_head *blocks);
+struct i915_buddy_block *
+__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
+                                     resource_size_t size,
+                                     unsigned int flags);
+void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
+                                          struct list_head *blocks);
+void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
+
+struct intel_memory_region *
+intel_memory_region_create(struct drm_i915_private *i915,
+                          resource_size_t start,
+                          resource_size_t size,
+                          resource_size_t min_page_size,
+                          resource_size_t io_start,
+                          const struct intel_memory_region_ops *ops);
+
+struct intel_memory_region *
+intel_memory_region_get(struct intel_memory_region *mem);
+void intel_memory_region_put(struct intel_memory_region *mem);
+
+#endif
 
 selftest(hugepages, i915_gem_huge_page_mock_selftests)
 selftest(contexts, i915_gem_context_mock_selftests)
 selftest(buddy, i915_buddy_mock_selftests)
+selftest(memory_region, intel_memory_region_mock_selftests)
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+#include "mock_region.h"
+
+#include "gem/i915_gem_region.h"
+#include "gem/selftests/mock_context.h"
+
+static void close_objects(struct intel_memory_region *mem,
+                         struct list_head *objects)
+{
+       struct drm_i915_private *i915 = mem->i915;
+       struct drm_i915_gem_object *obj, *on;
+
+       list_for_each_entry_safe(obj, on, objects, st_link) {
+               if (i915_gem_object_has_pinned_pages(obj))
+                       i915_gem_object_unpin_pages(obj);
+               /* Avoid polluting the memory region between tests */
+               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+               list_del(&obj->st_link);
+               i915_gem_object_put(obj);
+       }
+
+       cond_resched();
+
+       i915_gem_drain_freed_objects(i915);
+}
+
+static int igt_mock_fill(void *arg)
+{
+       struct intel_memory_region *mem = arg;
+       resource_size_t total = resource_size(&mem->region);
+       resource_size_t page_size;
+       resource_size_t rem;
+       unsigned long max_pages;
+       unsigned long page_num;
+       LIST_HEAD(objects);
+       int err = 0;
+
+       page_size = mem->mm.chunk_size;
+       max_pages = div64_u64(total, page_size);
+       rem = total;
+
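+       /*
+        * Create and pin objects of every prime page count up to the region
+        * size; an allocation failure is only acceptable once there really
+        * is not enough space left in the region.
+        */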
+       for_each_prime_number_from(page_num, 1, max_pages) {
+               resource_size_t size = page_num * page_size;
+               struct drm_i915_gem_object *obj;
+
+               obj = i915_gem_object_create_region(mem, size, 0);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       break;
+               }
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err) {
+                       i915_gem_object_put(obj);
+                       break;
+               }
+
+               list_add(&obj->st_link, &objects);
+               rem -= size;
+       }
+
+       if (err == -ENOMEM)
+               err = 0;
+       if (err == -ENXIO) {
+               if (page_num * page_size <= rem) {
+                       pr_err("%s failed, space still left in region\n",
+                              __func__);
+                       err = -EINVAL;
+               } else {
+                       err = 0;
+               }
+       }
+
+       close_objects(mem, &objects);
+
+       return err;
+}
+
+int intel_memory_region_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_mock_fill),
+       };
+       struct intel_memory_region *mem;
+       struct drm_i915_private *i915;
+       int err;
+
+       i915 = mock_gem_device();
+       if (!i915)
+               return -ENOMEM;
+
+       mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+       if (IS_ERR(mem)) {
+               pr_err("failed to create memory region\n");
+               err = PTR_ERR(mem);
+               goto out_unref;
+       }
+
+       err = i915_subtests(tests, mem);
+
+       intel_memory_region_put(mem);
+out_unref:
+       drm_dev_put(&i915->drm);
+       return err;
+}
 
 #include "mock_gem_device.h"
 #include "mock_gtt.h"
 #include "mock_uncore.h"
+#include "mock_region.h"
 
 #include "gem/selftests/mock_context.h"
 #include "gem/selftests/mock_gem_object.h"
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gem/i915_gem_region.h"
+#include "intel_memory_region.h"
+
+#include "mock_region.h"
+
+static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
+       .get_pages = i915_gem_object_get_pages_buddy,
+       .put_pages = i915_gem_object_put_pages_buddy,
+       .release = i915_gem_object_release_memory_region,
+};
+
+static struct drm_i915_gem_object *
+mock_object_create(struct intel_memory_region *mem,
+                  resource_size_t size,
+                  unsigned int flags)
+{
+       struct drm_i915_private *i915 = mem->i915;
+       struct drm_i915_gem_object *obj;
+
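+       /* The mock region refuses objects larger than its biggest buddy block. */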
+       if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
+               return ERR_PTR(-E2BIG);
+
+       obj = i915_gem_object_alloc();
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       drm_gem_private_object_init(&i915->drm, &obj->base, size);
+       i915_gem_object_init(obj, &mock_region_obj_ops);
+
+       obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+
+       i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
+       i915_gem_object_init_memory_region(obj, mem);
+
+       return obj;
+}
+
+static const struct intel_memory_region_ops mock_region_ops = {
+       .init = intel_memory_region_init_buddy,
+       .release = intel_memory_region_release_buddy,
+       .create_object = mock_object_create,
+};
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+                  resource_size_t start,
+                  resource_size_t size,
+                  resource_size_t min_page_size,
+                  resource_size_t io_start)
+{
+       return intel_memory_region_create(i915, start, size, min_page_size,
+                                         io_start, &mock_region_ops);
+}
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_region.h
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __MOCK_REGION_H
+#define __MOCK_REGION_H
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_memory_region;
+
+struct intel_memory_region *
+mock_region_create(struct drm_i915_private *i915,
+                  resource_size_t start,
+                  resource_size_t size,
+                  resource_size_t min_page_size,
+                  resource_size_t io_start);
+
+#endif /* !__MOCK_REGION_H */