*/
 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
 
+static size_t amdgpu_amdkfd_acc_size(uint64_t size)
+{
+       size >>= PAGE_SHIFT;
+       size *= sizeof(dma_addr_t) + sizeof(void *);
+
+       return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
+               __roundup_pow_of_two(sizeof(struct ttm_tt)) +
+               PAGE_ALIGN(size);
+}
+
 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
                uint64_t size, u32 domain, bool sg)
 {
        size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
        int ret = 0;
 
-       acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
-                                      sizeof(struct amdgpu_bo));
+       acc_size = amdgpu_amdkfd_acc_size(size);
 
        vram_needed = 0;
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 {
        size_t acc_size;
 
-       acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
-                                      sizeof(struct amdgpu_bo));
+       acc_size = amdgpu_amdkfd_acc_size(size);
 
        spin_lock(&kfd_mem_limit.mem_limit_lock);
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 
        };
        struct amdgpu_bo *bo;
        unsigned long page_align, size = bp->size;
-       size_t acc_size;
        int r;
 
        /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
 
        *bo_ptr = NULL;
 
-       acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
-                                      sizeof(struct amdgpu_bo));
-
        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
                bo->tbo.priority = 1;
 
        r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
-                                &bo->placement, page_align, &ctx, acc_size,
-                                NULL, bp->resv, &amdgpu_bo_destroy);
+                                &bo->placement, page_align, &ctx, NULL,
+                                bp->resv, &amdgpu_bo_destroy);
        if (unlikely(r != 0))
                return r;
 
 
        struct drm_vram_mm *vmm = dev->vram_mm;
        struct ttm_device *bdev;
        int ret;
-       size_t acc_size;
 
        if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
                return ERR_PTR(-EINVAL);
        }
 
        bdev = &vmm->bdev;
-       acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
 
        gbo->bo.bdev = bdev;
        drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
         * to release gbo->bo.base and kfree gbo.
         */
        ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
-                         &gbo->placement, pg_align, false, acc_size,
-                         NULL, NULL, ttm_buffer_object_destroy);
+                         &gbo->placement, pg_align, false, NULL, NULL,
+                         ttm_buffer_object_destroy);
        if (ret)
                return ERR_PTR(ret);
 
 
                struct sg_table *sg, struct dma_resv *robj)
 {
        int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
-       size_t acc_size;
        int ret;
 
-       acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
-
        nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
        nouveau_bo_placement_set(nvbo, domain, 0);
        INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
        ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
-                         &nvbo->placement, align >> PAGE_SHIFT, false,
-                         acc_size, sg, robj, nouveau_bo_del_ttm);
+                         &nvbo->placement, align >> PAGE_SHIFT, false, sg,
+                         robj, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
 
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
 
 #include <drm/drm_audio_component.h>
 
 
        qxl_ttm_placement_from_domain(bo, domain);
 
        r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
-                                &bo->placement, 0, &ctx, size,
-                                NULL, NULL, &qxl_ttm_bo_destroy);
+                                &bo->placement, 0, &ctx, NULL, NULL,
+                                &qxl_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(qdev->ddev.dev,
 
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-       size_t acc_size;
        int r;
 
        size = ALIGN(size, PAGE_SIZE);
        }
        *bo_ptr = NULL;
 
-       acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
-                                      sizeof(struct radeon_bo));
-
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        /* Kernel allocation are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-                       &bo->placement, page_align, !kernel, acc_size,
-                       sg, resv, &radeon_ttm_bo_destroy);
+                       &bo->placement, page_align, !kernel, sg, resv,
+                       &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
 
 #
 # Makefile for the drm device driver.  This driver provides support for the
 
-ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
-       ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-       ttm_execbuf_util.o ttm_range_manager.o \
-       ttm_resource.o ttm_pool.o ttm_device.o
+ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
+       ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
+       ttm_device.o
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
 
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_device *bdev = bo->bdev;
-       size_t acc_size = bo->acc_size;
        int ret;
 
        if (!bo->deleted) {
        if (!ttm_bo_uses_embedded_gem_object(bo))
                dma_resv_fini(&bo->base._resv);
        bo->destroy(bo);
-       ttm_mem_global_free(&ttm_mem_glob, acc_size);
 }
 
 void ttm_bo_put(struct ttm_buffer_object *bo)
                         struct ttm_placement *placement,
                         uint32_t page_alignment,
                         struct ttm_operation_ctx *ctx,
-                        size_t acc_size,
                         struct sg_table *sg,
                         struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
 {
-       struct ttm_mem_global *mem_glob = &ttm_mem_glob;
        bool locked;
        int ret = 0;
 
-       ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
-       if (ret) {
-               pr_err("Out of kernel memory\n");
-               if (destroy)
-                       (*destroy)(bo);
-               else
-                       kfree(bo);
-               return -ENOMEM;
-       }
-
        bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
 
        kref_init(&bo->kref);
        bo->mem.bus.addr = NULL;
        bo->moving = NULL;
        bo->mem.placement = 0;
-       bo->acc_size = acc_size;
        bo->pin_count = 0;
        bo->sg = sg;
        if (resv) {
                struct ttm_placement *placement,
                uint32_t page_alignment,
                bool interruptible,
-               size_t acc_size,
                struct sg_table *sg,
                struct dma_resv *resv,
                void (*destroy) (struct ttm_buffer_object *))
        int ret;
 
        ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
-                                  page_alignment, &ctx, acc_size,
-                                  sg, resv, destroy);
+                                  page_alignment, &ctx, sg, resv, destroy);
        if (ret)
                return ret;
 
 }
 EXPORT_SYMBOL(ttm_bo_init);
 
-size_t ttm_bo_dma_acc_size(struct ttm_device *bdev,
-                          unsigned long bo_size,
-                          unsigned struct_size)
-{
-       unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
-       size_t size = 0;
-
-       size += ttm_round_pot(struct_size);
-       size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
-       size += ttm_round_pot(sizeof(struct ttm_tt));
-       return size;
-}
-EXPORT_SYMBOL(ttm_bo_dma_acc_size);
-
 /*
  * buffer object vm functions.
  */
 
 
        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
-       fbo->base.acc_size = 0;
        fbo->base.pin_count = 0;
        if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;
 
 
 #define pr_fmt(fmt) "[TTM DEVICE] " fmt
 
+#include <linux/mm.h>
+
 #include <drm/ttm/ttm_device.h>
-#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_tt.h>
 #include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo_api.h>
 
 #include "ttm_module.h"
 
        if (--ttm_glob_use_count > 0)
                goto out;
 
-       ttm_mem_global_release(&ttm_mem_glob);
+       ttm_pool_mgr_fini();
+       ttm_tt_mgr_fini();
+
        __free_page(glob->dummy_read_page);
        memset(glob, 0, sizeof(*glob));
 out:
 static int ttm_global_init(void)
 {
        struct ttm_global *glob = &ttm_glob;
+       unsigned long num_pages;
+       struct sysinfo si;
        int ret = 0;
        unsigned i;
 
        if (++ttm_glob_use_count > 1)
                goto out;
 
-       ret = ttm_mem_global_init(&ttm_mem_glob);
-       if (ret)
-               goto out;
+       si_meminfo(&si);
+
+       /* Limit the number of pages in the pool to about 50% of the total
+        * system memory.
+        */
+       num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
+       ttm_pool_mgr_init(num_pages * 50 / 100);
+       ttm_tt_mgr_init();
 
        spin_lock_init(&glob->lru_lock);
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
                        caching = pages + (1 << order);
                }
 
-               r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
-                                             (1 << order) * PAGE_SIZE,
-                                             ctx);
-               if (r)
-                       goto error_free_page;
-
                if (dma_addr) {
                        r = ttm_pool_map(pool, order, p, &dma_addr);
                        if (r)
-                               goto error_global_free;
+                               goto error_free_page;
                }
 
                num_pages -= 1 << order;
 
        return 0;
 
-error_global_free:
-       ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);
-
 error_free_page:
        ttm_pool_free_page(pool, tt->caching, order, p);
 
 
                order = ttm_pool_page_order(pool, p);
                num_pages = 1ULL << order;
-               ttm_mem_global_free_page(&ttm_mem_glob, p,
-                                        num_pages * PAGE_SIZE);
                if (tt->dma_address)
                        ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
 
 
            vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
            vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
            vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
-           ttm_object.o ttm_lock.o
+           ttm_object.o ttm_lock.o ttm_memory.o
 
 vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
 
 
 #define pr_fmt(fmt) "[TTM] " fmt
 
-#include <drm/ttm/ttm_memory.h>
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
-#include <drm/ttm/ttm_pool.h>
-#include <drm/ttm/ttm_tt.h>
 
-#include "ttm_module.h"
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
+#include "ttm_memory.h"
 
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
 }
 #endif
 
-int ttm_mem_global_init(struct ttm_mem_global *glob)
+int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
 {
        struct sysinfo si;
        int ret;
        spin_lock_init(&glob->lock);
        glob->swap_queue = create_singlethread_workqueue("ttm_swap");
        INIT_WORK(&glob->work, ttm_shrink_work);
-       ret = kobject_init_and_add(
-               &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
+
+       ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type,
+                                  &dev->kobj, "memory_accounting");
        if (unlikely(ret != 0)) {
                kobject_put(&glob->kobj);
                return ret;
                pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
                        zone->name, (unsigned long long)zone->max_mem >> 10);
        }
-       ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
-       ttm_tt_mgr_init();
        return 0;
 out_no_zone:
        ttm_mem_global_release(glob);
        struct ttm_mem_zone *zone;
        unsigned int i;
 
-       /* let the page allocator first stop the shrink work. */
-       ttm_pool_mgr_fini();
-       ttm_tt_mgr_fini();
-
        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
        glob->swap_queue = NULL;
 
 #include <linux/errno.h>
 #include <linux/kobject.h>
 #include <linux/mm.h>
-#include "ttm_bo_api.h"
+
+#include <drm/ttm/ttm_bo_api.h>
 
 /**
  * struct ttm_mem_global - Global memory accounting structure.
 #endif
 } ttm_mem_glob;
 
-int ttm_mem_global_init(struct ttm_mem_global *glob);
+int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev);
 void ttm_mem_global_release(struct ttm_mem_global *glob);
 int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                         struct ttm_operation_ctx *ctx);
 
 #include <linux/rcupdate.h>
 
 #include <drm/drm_hashtab.h>
-#include <drm/ttm/ttm_memory.h>
+
+#include "ttm_memory.h"
 
 /**
  * enum ttm_ref_type
 
        acc_size = ttm_round_pot(sizeof(*bo));
        acc_size += ttm_round_pot(npages * sizeof(void *));
        acc_size += ttm_round_pot(sizeof(struct ttm_tt));
+
+       ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+       if (unlikely(ret))
+               goto error_free;
+
        ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
                                   ttm_bo_type_device, placement, 0,
-                                  &ctx, acc_size, NULL, NULL, NULL);
+                                  &ctx, NULL, NULL, NULL);
        if (unlikely(ret))
-               goto error_free;
+               goto error_account;
 
        ttm_bo_pin(bo);
        ttm_bo_unreserve(bo);
 
        return 0;
 
+error_account:
+       ttm_mem_global_free(&ttm_mem_glob, acc_size);
+
 error_free:
        kfree(bo);
        return ret;
        vmw_bo->base.priority = 3;
        vmw_bo->res_tree = RB_ROOT;
 
+       ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+       if (unlikely(ret))
+               return ret;
+
        ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
                                   ttm_bo_type_device, placement,
-                                  0, &ctx, acc_size, NULL, NULL, bo_free);
-       if (unlikely(ret))
+                                  0, &ctx, NULL, NULL, bo_free);
+       if (unlikely(ret)) {
+               ttm_mem_global_free(&ttm_mem_glob, acc_size);
                return ret;
+       }
 
        if (pin)
                ttm_bo_pin(&vmw_bo->base);
 
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
 
+       ttm_mem_global_release(&ttm_mem_glob);
        drm_dev_unregister(dev);
        vmw_driver_unload(dev);
 }
 
        pci_set_drvdata(pdev, &vmw->drm);
 
+       ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
+       if (ret)
+               return ret;
+
        ret = vmw_driver_load(vmw, ent->device);
        if (ret)
                return ret;
 
 static int vmw_ttm_populate(struct ttm_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
+       unsigned int i;
+       int ret;
+
        /* TODO: maybe completely drop this ? */
        if (ttm_tt_is_populated(ttm))
                return 0;
 
-       return ttm_pool_alloc(&bdev->pool, ttm, ctx);
+       ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < ttm->num_pages; ++i) {
+               ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
+                                               PAGE_SIZE, ctx);
+               if (ret)
+                       goto error;
+       }
+       return 0;
+
+error:
+       while (i--)
+               ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
+                                        PAGE_SIZE);
+       ttm_pool_free(&bdev->pool, ttm);
+       return ret;
 }
 
 static void vmw_ttm_unpopulate(struct ttm_device *bdev,
 {
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm);
+       unsigned int i;
 
        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
        }
 
        vmw_ttm_unmap_dma(vmw_tt);
+
+       for (i = 0; i < ttm->num_pages; ++i)
+               ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
+                                        PAGE_SIZE);
+
        ttm_pool_free(&bdev->pool, ttm);
 }
 
 
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
- * @acc_size: Accounted size for this object.
  * @kref: Reference count of this buffer object. When this refcount reaches
  * zero, the object is destroyed or put on the delayed delete list.
  * @mem: structure describing current placement.
        struct ttm_device *bdev;
        enum ttm_bo_type type;
        void (*destroy) (struct ttm_buffer_object *);
-       size_t acc_size;
 
        /**
        * Members not needing protection.
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
                              const struct ttm_place *place);
 
-size_t ttm_bo_dma_acc_size(struct ttm_device *bdev,
-                          unsigned long bo_size,
-                          unsigned struct_size);
-
 /**
  * ttm_bo_init_reserved
  *
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
  * @ctx: TTM operation context for memory allocation.
- * @acc_size: Accounted size for this object.
  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *
                         struct ttm_placement *placement,
                         uint32_t page_alignment,
                         struct ttm_operation_ctx *ctx,
-                        size_t acc_size, struct sg_table *sg,
-                        struct dma_resv *resv,
+                        struct sg_table *sg, struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *));
 
 /**
  * holds a pointer to a persistent shmem object. Typically, this would
  * point to the shmem object backing a GEM object if TTM is used to back a
  * GEM user interface.
- * @acc_size: Accounted size for this object.
  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *
 int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo,
                size_t size, enum ttm_bo_type type,
                struct ttm_placement *placement,
-               uint32_t page_alignment, bool interrubtible, size_t acc_size,
+               uint32_t page_alignment, bool interrubtible,
                struct sg_table *sg, struct dma_resv *resv,
                void (*destroy) (struct ttm_buffer_object *));
 
 
 #include <drm/ttm/ttm_device.h>
 
 #include "ttm_bo_api.h"
-#include "ttm_memory.h"
 #include "ttm_placement.h"
 #include "ttm_tt.h"
 #include "ttm_pool.h"
 
 #include <linux/types.h>
 #include <drm/ttm/ttm_caching.h>
 
+struct ttm_bo_device;
 struct ttm_tt;
 struct ttm_resource;
 struct ttm_buffer_object;