struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_device *pfdev = obj->dev->dev_private;
 
+       if (bo->sgts) {
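+               /*
+                * Heap BO: one sg_table per 2MB chunk, populated on demand
+                * by the fault handler; unmap and free only the chunks that
+                * were actually faulted in.
+                */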
+               int i;
+               int n_sgt = bo->base.base.size / SZ_2M;
+
+               for (i = 0; i < n_sgt; i++) {
+                       if (bo->sgts[i].sgl) {
+                               dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
+                                            bo->sgts[i].nents, DMA_BIDIRECTIONAL);
+                               sg_free_table(&bo->sgts[i]);
+                       }
+               }
+               kvfree(bo->sgts);
+       }
+
        mutex_lock(&pfdev->shrinker_lock);
        if (!list_empty(&bo->base.madv_list))
                list_del(&bo->base.madv_list);
        if (ret)
                goto out;
 
-       ret = panfrost_mmu_map(bo);
-       if (ret)
-               drm_mm_remove_node(&bo->node);
-
+       if (!bo->is_heap) {
+               ret = panfrost_mmu_map(bo);
+               if (ret)
+                       drm_mm_remove_node(&bo->node);
+       }
 out:
        spin_unlock(&pfdev->mm_lock);
        return ret;
 }
 
+static int panfrost_gem_pin(struct drm_gem_object *obj)
+{
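+       /*
+        * Heap BOs are only partially backed at any given time, so they
+        * cannot be pinned (and thus cannot be exported).
+        */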
+       if (to_panfrost_bo(obj)->is_heap)
+               return -EINVAL;
+
+       return drm_gem_shmem_pin(obj);
+}
+
 static const struct drm_gem_object_funcs panfrost_gem_funcs = {
        .free = panfrost_gem_free_object,
        .open = panfrost_gem_open,
        .close = panfrost_gem_close,
        .print_info = drm_gem_shmem_print_info,
-       .pin = drm_gem_shmem_pin,
+       .pin = panfrost_gem_pin,
        .unpin = drm_gem_shmem_unpin,
        .get_sg_table = drm_gem_shmem_get_sg_table,
        .vmap = drm_gem_shmem_vmap,
        struct drm_gem_shmem_object *shmem;
        struct panfrost_gem_object *bo;
 
+       /* Round up heap allocations to 2MB to keep fault handling simple */
+       if (flags & PANFROST_BO_HEAP)
+               size = roundup(size, SZ_2M);
+
        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return ERR_CAST(shmem);
 
        bo = to_panfrost_bo(&shmem->base);
        bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
+       bo->is_heap = !!(flags & PANFROST_BO_HEAP);
 
        /*
         * Allocate an id of idr table where the obj is registered
 
 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
 #include <linux/bitfield.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/shmem_fs.h>
 #include <linux/sizes.h>
 
 #include "panfrost_device.h"
                size_t unmapped_page;
                size_t pgsize = get_pgsize(iova, len - unmapped_len);
 
-               unmapped_page = ops->unmap(ops, iova, pgsize);
-               if (!unmapped_page)
-                       break;
-
-               iova += unmapped_page;
-               unmapped_len += unmapped_page;
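+               /*
+                * Heap BOs may be only partially mapped, so check that a
+                * page is actually mapped before unmapping it.
+                */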
+               if (ops->iova_to_phys(ops, iova)) {
+                       unmapped_page = ops->unmap(ops, iova, pgsize);
+                       WARN_ON(unmapped_page != pgsize);
+               }
+               iova += pgsize;
+               unmapped_len += pgsize;
        }
 
        mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
        .tlb_sync       = mmu_tlb_sync_context,
 };
 
+static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev,
+                                               int as, u64 addr)
+{
+       struct drm_mm_node *node;
+       u64 offset = addr >> PAGE_SHIFT;
+
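+       /* Linear walk of all nodes; faults are rare, so O(n) is acceptable */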
+       drm_mm_for_each_node(node, &pfdev->mm) {
+               if (offset >= node->start && offset < (node->start + node->size))
+                       return node;
+       }
+       return NULL;
+}
+
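+/* Growable heap BOs are faulted in one 2MB chunk at a time */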
+#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
+
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+                                       u64 addr)
+{
+       int ret, i;
+       struct drm_mm_node *node;
+       struct panfrost_gem_object *bo;
+       struct address_space *mapping;
+       pgoff_t page_offset;
+       struct sg_table *sgt;
+       struct page **pages;
+
+       node = addr_to_drm_mm_node(pfdev, as, addr);
+       if (!node)
+               return -ENOENT;
+
+       bo = drm_mm_node_to_panfrost_bo(node);
+       if (!bo->is_heap) {
+               dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+                        node->start << PAGE_SHIFT);
+               return -EINVAL;
+       }
+       /*
+        * Heap BOs are 2MB-aligned and a multiple of 2MB in size, so the
+        * faulting address can safely be rounded down to its 2MB chunk.
+        */
+       addr &= ~((u64)SZ_2M - 1);
+       page_offset = addr >> PAGE_SHIFT;
+       page_offset -= node->start;
+
+       mutex_lock(&bo->base.pages_lock);
+
+       if (!bo->base.pages) {
+               bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
+                                         sizeof(struct sg_table),
+                                         GFP_KERNEL | __GFP_ZERO);
+               if (!bo->sgts) {
+                       mutex_unlock(&bo->base.pages_lock);
+                       return -ENOMEM;
+               }
+
+               pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+                                      sizeof(struct page *),
+                                      GFP_KERNEL | __GFP_ZERO);
+               if (!pages) {
+                       kvfree(bo->sgts);
+                       bo->sgts = NULL;
+                       mutex_unlock(&bo->base.pages_lock);
+                       return -ENOMEM;
+               }
+               bo->base.pages = pages;
+               bo->base.pages_use_count = 1;
+       } else {
+               pages = bo->base.pages;
+       }
+
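+       /*
+        * The GPU can access these pages at any time once they are mapped,
+        * and nothing unmaps them on memory pressure, so they must not be
+        * reclaimed while the BO exists.
+        */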
+       mapping = bo->base.base.filp->f_mapping;
+       mapping_set_unevictable(mapping);
+
+       for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+               pages[i] = shmem_read_mapping_page(mapping, i);
+               if (IS_ERR(pages[i])) {
+                       mutex_unlock(&bo->base.pages_lock);
+                       ret = PTR_ERR(pages[i]);
+                       goto err_pages;
+               }
+       }
+
+       mutex_unlock(&bo->base.pages_lock);
+
+       sgt = &bo->sgts[page_offset / NUM_FAULT_PAGES];
+       ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
+                                       NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
+       if (ret)
+               goto err_pages;
+
+       if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
+               ret = -EINVAL;
+               goto err_map;
+       }
+
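+       /* Heap memory is mapped read/write but never executable */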
+       mmu_map_sg(pfdev, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+
+       bo->is_mapped = true;
+
+       dev_dbg(pfdev->dev, "mapped page fault @ %llx\n", addr);
+
+       return 0;
+
+err_map:
+       sg_free_table(sgt);
+err_pages:
+       drm_gem_shmem_put_pages(&bo->base);
+       return ret;
+}
+
 static const char *access_type_name(struct panfrost_device *pfdev,
                u32 fault_status)
 {
 {
        struct panfrost_device *pfdev = data;
        u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
-       int i;
-
-       dev_err(pfdev->dev, "mmu irq status=%x\n", status);
+       int i, ret;
 
        for (i = 0; status; i++) {
                u32 mask = BIT(i) | BIT(i + 16);
                access_type = (fault_status >> 8) & 0x3;
                source_id = (fault_status >> 16);
 
+               /* Page fault only */
+               if ((status & mask) == BIT(i)) {
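+                       /* 0xC1..0xC4: translation fault, levels 1-4 */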
+                       WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
+
+                       ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+                       if (!ret) {
+                               mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
+                               status &= ~mask;
+                               continue;
+                       }
+               }
+
                /* terminal fault, print info about the fault */
                dev_err(pfdev->dev,
                        "Unhandled Page fault in AS%d at VA 0x%016llX\n"