vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
            vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
            vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
-           vmwgfx_devcaps.o ttm_object.o ttm_memory.o vmwgfx_system_manager.o
+           vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o
 
 vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
 vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
 
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/ttm/ttm_device.h>
-
-#include "ttm_memory.h"
-
-#define TTM_MEMORY_ALLOC_RETRIES 4
-
-struct ttm_mem_global ttm_mem_glob;
-EXPORT_SYMBOL(ttm_mem_glob);
-
-struct ttm_mem_zone {
-       struct kobject kobj;
-       struct ttm_mem_global *glob;
-       const char *name;
-       uint64_t zone_mem;
-       uint64_t emer_mem;
-       uint64_t max_mem;
-       uint64_t swap_limit;
-       uint64_t used_mem;
-};
-
-static struct attribute ttm_mem_sys = {
-       .name = "zone_memory",
-       .mode = S_IRUGO
-};
-static struct attribute ttm_mem_emer = {
-       .name = "emergency_memory",
-       .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_max = {
-       .name = "available_memory",
-       .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_swap = {
-       .name = "swap_limit",
-       .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_mem_used = {
-       .name = "used_memory",
-       .mode = S_IRUGO
-};
-
-static void ttm_mem_zone_kobj_release(struct kobject *kobj)
-{
-       struct ttm_mem_zone *zone =
-               container_of(kobj, struct ttm_mem_zone, kobj);
-
-       pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
-               zone->name, (unsigned long long)zone->used_mem >> 10);
-       kfree(zone);
-}
-
-static ssize_t ttm_mem_zone_show(struct kobject *kobj,
-                                struct attribute *attr,
-                                char *buffer)
-{
-       struct ttm_mem_zone *zone =
-               container_of(kobj, struct ttm_mem_zone, kobj);
-       uint64_t val = 0;
-
-       spin_lock(&zone->glob->lock);
-       if (attr == &ttm_mem_sys)
-               val = zone->zone_mem;
-       else if (attr == &ttm_mem_emer)
-               val = zone->emer_mem;
-       else if (attr == &ttm_mem_max)
-               val = zone->max_mem;
-       else if (attr == &ttm_mem_swap)
-               val = zone->swap_limit;
-       else if (attr == &ttm_mem_used)
-               val = zone->used_mem;
-       spin_unlock(&zone->glob->lock);
-
-       return snprintf(buffer, PAGE_SIZE, "%llu\n",
-                       (unsigned long long) val >> 10);
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob);
-
-static ssize_t ttm_mem_zone_store(struct kobject *kobj,
-                                 struct attribute *attr,
-                                 const char *buffer,
-                                 size_t size)
-{
-       struct ttm_mem_zone *zone =
-               container_of(kobj, struct ttm_mem_zone, kobj);
-       int chars;
-       unsigned long val;
-       uint64_t val64;
-
-       chars = sscanf(buffer, "%lu", &val);
-       if (chars == 0)
-               return size;
-
-       val64 = val;
-       val64 <<= 10;
-
-       spin_lock(&zone->glob->lock);
-       if (val64 > zone->zone_mem)
-               val64 = zone->zone_mem;
-       if (attr == &ttm_mem_emer) {
-               zone->emer_mem = val64;
-               if (zone->max_mem > val64)
-                       zone->max_mem = val64;
-       } else if (attr == &ttm_mem_max) {
-               zone->max_mem = val64;
-               if (zone->emer_mem < val64)
-                       zone->emer_mem = val64;
-       } else if (attr == &ttm_mem_swap)
-               zone->swap_limit = val64;
-       spin_unlock(&zone->glob->lock);
-
-       ttm_check_swapping(zone->glob);
-
-       return size;
-}
-
-static struct attribute *ttm_mem_zone_attrs[] = {
-       &ttm_mem_sys,
-       &ttm_mem_emer,
-       &ttm_mem_max,
-       &ttm_mem_swap,
-       &ttm_mem_used,
-       NULL
-};
-
-static const struct sysfs_ops ttm_mem_zone_ops = {
-       .show = &ttm_mem_zone_show,
-       .store = &ttm_mem_zone_store
-};
-
-static struct kobj_type ttm_mem_zone_kobj_type = {
-       .release = &ttm_mem_zone_kobj_release,
-       .sysfs_ops = &ttm_mem_zone_ops,
-       .default_attrs = ttm_mem_zone_attrs,
-};
-static struct kobj_type ttm_mem_glob_kobj_type = {0};
-
-static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
-                                       bool from_wq, uint64_t extra)
-{
-       unsigned int i;
-       struct ttm_mem_zone *zone;
-       uint64_t target;
-
-       for (i = 0; i < glob->num_zones; ++i) {
-               zone = glob->zones[i];
-
-               if (from_wq)
-                       target = zone->swap_limit;
-               else if (capable(CAP_SYS_ADMIN))
-                       target = zone->emer_mem;
-               else
-                       target = zone->max_mem;
-
-               target = (extra > target) ? 0ULL : target;
-
-               if (zone->used_mem > target)
-                       return true;
-       }
-       return false;
-}
-
-/*
- * At this point we only support a single shrink callback.
- * Extend this if needed, perhaps using a linked list of callbacks.
- * Note that this function is reentrant:
- * many threads may try to swap out at any given time.
- */
-
-static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
-                       uint64_t extra, struct ttm_operation_ctx *ctx)
-{
-       int ret;
-
-       spin_lock(&glob->lock);
-
-       while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
-               spin_unlock(&glob->lock);
-               ret = ttm_global_swapout(ctx, GFP_KERNEL);
-               spin_lock(&glob->lock);
-               if (unlikely(ret <= 0))
-                       break;
-       }
-
-       spin_unlock(&glob->lock);
-}
-
-static void ttm_shrink_work(struct work_struct *work)
-{
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
-       struct ttm_mem_global *glob =
-           container_of(work, struct ttm_mem_global, work);
-
-       ttm_shrink(glob, true, 0ULL, &ctx);
-}
-
-static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
-                                   const struct sysinfo *si)
-{
-       struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
-       uint64_t mem;
-       int ret;
-
-       if (unlikely(!zone))
-               return -ENOMEM;
-
-       mem = si->totalram - si->totalhigh;
-       mem *= si->mem_unit;
-
-       zone->name = "kernel";
-       zone->zone_mem = mem;
-       zone->max_mem = mem >> 1;
-       zone->emer_mem = (mem >> 1) + (mem >> 2);
-       zone->swap_limit = zone->max_mem - (mem >> 3);
-       zone->used_mem = 0;
-       zone->glob = glob;
-       glob->zone_kernel = zone;
-       ret = kobject_init_and_add(
-               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
-       if (unlikely(ret != 0)) {
-               kobject_put(&zone->kobj);
-               return ret;
-       }
-       glob->zones[glob->num_zones++] = zone;
-       return 0;
-}
-
-#ifdef CONFIG_HIGHMEM
-static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
-                                    const struct sysinfo *si)
-{
-       struct ttm_mem_zone *zone;
-       uint64_t mem;
-       int ret;
-
-       if (si->totalhigh == 0)
-               return 0;
-
-       zone = kzalloc(sizeof(*zone), GFP_KERNEL);
-       if (unlikely(!zone))
-               return -ENOMEM;
-
-       mem = si->totalram;
-       mem *= si->mem_unit;
-
-       zone->name = "highmem";
-       zone->zone_mem = mem;
-       zone->max_mem = mem >> 1;
-       zone->emer_mem = (mem >> 1) + (mem >> 2);
-       zone->swap_limit = zone->max_mem - (mem >> 3);
-       zone->used_mem = 0;
-       zone->glob = glob;
-       glob->zone_highmem = zone;
-       ret = kobject_init_and_add(
-               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
-               zone->name);
-       if (unlikely(ret != 0)) {
-               kobject_put(&zone->kobj);
-               return ret;
-       }
-       glob->zones[glob->num_zones++] = zone;
-       return 0;
-}
-#else
-static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
-                                  const struct sysinfo *si)
-{
-       struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
-       uint64_t mem;
-       int ret;
-
-       if (unlikely(!zone))
-               return -ENOMEM;
-
-       mem = si->totalram;
-       mem *= si->mem_unit;
-
-       /**
-        * No special dma32 zone needed.
-        */
-
-       if (mem <= ((uint64_t) 1ULL << 32)) {
-               kfree(zone);
-               return 0;
-       }
-
-       /*
-        * Limit max dma32 memory to 4GB for now
-        * until we can figure out how big this
-        * zone really is.
-        */
-
-       mem = ((uint64_t) 1ULL << 32);
-       zone->name = "dma32";
-       zone->zone_mem = mem;
-       zone->max_mem = mem >> 1;
-       zone->emer_mem = (mem >> 1) + (mem >> 2);
-       zone->swap_limit = zone->max_mem - (mem >> 3);
-       zone->used_mem = 0;
-       zone->glob = glob;
-       glob->zone_dma32 = zone;
-       ret = kobject_init_and_add(
-               &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
-       if (unlikely(ret != 0)) {
-               kobject_put(&zone->kobj);
-               return ret;
-       }
-       glob->zones[glob->num_zones++] = zone;
-       return 0;
-}
-#endif
-
-int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
-{
-       struct sysinfo si;
-       int ret;
-       int i;
-       struct ttm_mem_zone *zone;
-
-       spin_lock_init(&glob->lock);
-       glob->swap_queue = create_singlethread_workqueue("ttm_swap");
-       INIT_WORK(&glob->work, ttm_shrink_work);
-
-       ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type,
-                                  &dev->kobj, "memory_accounting");
-       if (unlikely(ret != 0)) {
-               kobject_put(&glob->kobj);
-               return ret;
-       }
-
-       si_meminfo(&si);
-
-       ret = ttm_mem_init_kernel_zone(glob, &si);
-       if (unlikely(ret != 0))
-               goto out_no_zone;
-#ifdef CONFIG_HIGHMEM
-       ret = ttm_mem_init_highmem_zone(glob, &si);
-       if (unlikely(ret != 0))
-               goto out_no_zone;
-#else
-       ret = ttm_mem_init_dma32_zone(glob, &si);
-       if (unlikely(ret != 0))
-               goto out_no_zone;
-#endif
-       for (i = 0; i < glob->num_zones; ++i) {
-               zone = glob->zones[i];
-               pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
-                       zone->name, (unsigned long long)zone->max_mem >> 10);
-       }
-       return 0;
-out_no_zone:
-       ttm_mem_global_release(glob);
-       return ret;
-}
-
-void ttm_mem_global_release(struct ttm_mem_global *glob)
-{
-       struct ttm_mem_zone *zone;
-       unsigned int i;
-
-       destroy_workqueue(glob->swap_queue);
-       glob->swap_queue = NULL;
-       for (i = 0; i < glob->num_zones; ++i) {
-               zone = glob->zones[i];
-               kobject_del(&zone->kobj);
-               kobject_put(&zone->kobj);
-       }
-       kobject_del(&glob->kobj);
-       kobject_put(&glob->kobj);
-       memset(glob, 0, sizeof(*glob));
-}
-
-static void ttm_check_swapping(struct ttm_mem_global *glob)
-{
-       bool needs_swapping = false;
-       unsigned int i;
-       struct ttm_mem_zone *zone;
-
-       spin_lock(&glob->lock);
-       for (i = 0; i < glob->num_zones; ++i) {
-               zone = glob->zones[i];
-               if (zone->used_mem > zone->swap_limit) {
-                       needs_swapping = true;
-                       break;
-               }
-       }
-
-       spin_unlock(&glob->lock);
-
-       if (unlikely(needs_swapping))
-               (void)queue_work(glob->swap_queue, &glob->work);
-
-}
-
-static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
-                                    struct ttm_mem_zone *single_zone,
-                                    uint64_t amount)
-{
-       unsigned int i;
-       struct ttm_mem_zone *zone;
-
-       spin_lock(&glob->lock);
-       for (i = 0; i < glob->num_zones; ++i) {
-               zone = glob->zones[i];
-               if (single_zone && zone != single_zone)
-                       continue;
-               zone->used_mem -= amount;
-       }
-       spin_unlock(&glob->lock);
-}
-
-void ttm_mem_global_free(struct ttm_mem_global *glob,
-                        uint64_t amount)
-{
-       return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
-}
-EXPORT_SYMBOL(ttm_mem_global_free);
-
-static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
-                                 struct ttm_mem_zone *single_zone,
-                                 uint64_t amount, bool reserve)
-{
-       uint64_t limit;
-       int ret = -ENOMEM;
-       unsigned int i;
-       struct ttm_mem_zone *zone;
-
-       spin_lock(&glob->lock);
-       for (i = 0; i < glob->num_zones; ++i) {
-               zone = glob->zones[i];
-               if (single_zone && zone != single_zone)
-                       continue;
-
-               limit = (capable(CAP_SYS_ADMIN)) ?
-                       zone->emer_mem : zone->max_mem;
-
-               if (zone->used_mem > limit)
-                       goto out_unlock;
-       }
-
-       if (reserve) {
-               for (i = 0; i < glob->num_zones; ++i) {
-                       zone = glob->zones[i];
-                       if (single_zone && zone != single_zone)
-                               continue;
-                       zone->used_mem += amount;
-               }
-       }
-
-       ret = 0;
-out_unlock:
-       spin_unlock(&glob->lock);
-       ttm_check_swapping(glob);
-
-       return ret;
-}
-
-
-static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
-                                    struct ttm_mem_zone *single_zone,
-                                    uint64_t memory,
-                                    struct ttm_operation_ctx *ctx)
-{
-       int count = TTM_MEMORY_ALLOC_RETRIES;
-
-       while (unlikely(ttm_mem_global_reserve(glob,
-                                              single_zone,
-                                              memory, true)
-                       != 0)) {
-               if (ctx->no_wait_gpu)
-                       return -ENOMEM;
-               if (unlikely(count-- == 0))
-                       return -ENOMEM;
-               ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
-       }
-
-       return 0;
-}
-
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-                        struct ttm_operation_ctx *ctx)
-{
-       /**
-        * Normal allocations of kernel memory are registered in
-        * the kernel zone.
-        */
-
-       return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
-}
-EXPORT_SYMBOL(ttm_mem_global_alloc);
-
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-                             struct page *page, uint64_t size,
-                             struct ttm_operation_ctx *ctx)
-{
-       struct ttm_mem_zone *zone = NULL;
-
-       /**
-        * Page allocations may be registed in a single zone
-        * only if highmem or !dma32.
-        */
-
-#ifdef CONFIG_HIGHMEM
-       if (PageHighMem(page) && glob->zone_highmem != NULL)
-               zone = glob->zone_highmem;
-#else
-       if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
-               zone = glob->zone_kernel;
-#endif
-       return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
-}
-
-void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
-                             uint64_t size)
-{
-       struct ttm_mem_zone *zone = NULL;
-
-#ifdef CONFIG_HIGHMEM
-       if (PageHighMem(page) && glob->zone_highmem != NULL)
-               zone = glob->zone_highmem;
-#else
-       if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
-               zone = glob->zone_kernel;
-#endif
-       ttm_mem_global_free_zone(glob, zone, size);
-}
-
-size_t ttm_round_pot(size_t size)
-{
-       if ((size & (size - 1)) == 0)
-               return size;
-       else if (size > PAGE_SIZE)
-               return PAGE_ALIGN(size);
-       else {
-               size_t tmp_size = 4;
-
-               while (tmp_size < size)
-                       tmp_size <<= 1;
-
-               return tmp_size;
-       }
-       return 0;
-}
-EXPORT_SYMBOL(ttm_round_pot);
 
+++ /dev/null
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TTM_MEMORY_H
-#define TTM_MEMORY_H
-
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/bug.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-
-#include <drm/ttm/ttm_bo_api.h>
-
-/**
- * struct ttm_mem_global - Global memory accounting structure.
- *
- * @shrink: A single callback to shrink TTM memory usage. Extend this
- * to a linked list to be able to handle multiple callbacks when needed.
- * @swap_queue: A workqueue to handle shrinking in low memory situations. We
- * need a separate workqueue since it will spend a lot of time waiting
- * for the GPU, and this will otherwise block other workqueue tasks(?)
- * At this point we use only a single-threaded workqueue.
- * @work: The workqueue callback for the shrink queue.
- * @lock: Lock to protect the @shrink - and the memory accounting members,
- * that is, essentially the whole structure with some exceptions.
- * @zones: Array of pointers to accounting zones.
- * @num_zones: Number of populated entries in the @zones array.
- * @zone_kernel: Pointer to the kernel zone.
- * @zone_highmem: Pointer to the highmem zone if there is one.
- * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-#define TTM_MEM_MAX_ZONES 2
-struct ttm_mem_zone;
-extern struct ttm_mem_global {
-       struct kobject kobj;
-       struct workqueue_struct *swap_queue;
-       struct work_struct work;
-       spinlock_t lock;
-       struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
-       unsigned int num_zones;
-       struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
-       struct ttm_mem_zone *zone_highmem;
-#else
-       struct ttm_mem_zone *zone_dma32;
-#endif
-} ttm_mem_glob;
-
-int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev);
-void ttm_mem_global_release(struct ttm_mem_global *glob);
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-                        struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount);
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
-                             struct page *page, uint64_t size,
-                             struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free_page(struct ttm_mem_global *glob,
-                             struct page *page, uint64_t size);
-size_t ttm_round_pot(size_t size);
-
-#endif
 
        spinlock_t object_lock;
        struct vmwgfx_open_hash object_hash;
        atomic_t object_count;
-       struct ttm_mem_global *mem_glob;
        struct dma_buf_ops ops;
        void (*dmabuf_release)(struct dma_buf *dma_buf);
-       size_t dma_buf_size;
        struct idr idr;
 };
 
        struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct vmwgfx_hash_item *hash;
-       struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
        int ret = -EINVAL;
 
        if (base->tfile != tfile && !base->shareable)
                if (require_existed)
                        return -EPERM;
 
-               ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
-                                          &ctx);
-               if (unlikely(ret != 0))
-                       return ret;
                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
-               if (unlikely(ref == NULL)) {
-                       ttm_mem_global_free(mem_glob, sizeof(*ref));
-                       return -ENOMEM;
-               }
+               if (unlikely(ref == NULL))
+                       return -ENOMEM;
 
                spin_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);
 
-               ttm_mem_global_free(mem_glob, sizeof(*ref));
                kfree(ref);
        }
 
        struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
        struct vmwgfx_open_hash *ht;
-       struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
        ht = &tfile->ref_hash[ref->ref_type];
        (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash);
                base->ref_obj_release(base, ref->ref_type);
 
        ttm_base_object_unref(&ref->obj);
-       ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree_rcu(ref, rcu_head);
        spin_lock(&tfile->lock);
 }
 }
 
 struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
-                      unsigned int hash_order,
+ttm_object_device_init(unsigned int hash_order,
                       const struct dma_buf_ops *ops)
 {
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
        if (unlikely(tdev == NULL))
                return NULL;
 
-       tdev->mem_glob = mem_glob;
        spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);
        ret = vmwgfx_ht_create(&tdev->object_hash, hash_order);
        tdev->ops = *ops;
        tdev->dmabuf_release = tdev->ops.release;
        tdev->ops.release = ttm_prime_dmabuf_release;
-       tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
-               ttm_round_pot(sizeof(struct file));
        return tdev;
 
 out_no_object_hash:
        if (prime->dma_buf == dma_buf)
                prime->dma_buf = NULL;
        mutex_unlock(&prime->mutex);
-       ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
        ttm_base_object_unref(&base);
 }
 
        dma_buf = prime->dma_buf;
        if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-               struct ttm_operation_ctx ctx = {
-                       .interruptible = true,
-                       .no_wait_gpu = false
-               };
                exp_info.ops = &tdev->ops;
                exp_info.size = prime->size;
                exp_info.flags = flags;
                exp_info.priv = prime;
 
                /*
-                * Need to create a new dma_buf, with memory accounting.
+                * Need to create a new dma_buf.
                 */
-               ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
-                                          &ctx);
-               if (unlikely(ret != 0)) {
-                       mutex_unlock(&prime->mutex);
-                       goto out_unref;
-               }
 
                dma_buf = dma_buf_export(&exp_info);
                if (IS_ERR(dma_buf)) {
                        ret = PTR_ERR(dma_buf);
-                       ttm_mem_global_free(tdev->mem_glob,
-                                           tdev->dma_buf_size);
                        mutex_unlock(&prime->mutex);
                        goto out_unref;
                }
 
 #include <linux/list.h>
 #include <linux/rcupdate.h>
 
-#include "ttm_memory.h"
 #include "vmwgfx_hashtab.h"
 
 /**
 /**
  * ttm_object device init - initialize a struct ttm_object_device
  *
- * @mem_glob: struct ttm_mem_global for memory accounting.
  * @hash_order: Order of hash table used to hash the base objects.
  * @ops: DMA buf ops for prime objects of this device.
  *
  */
 
 extern struct ttm_object_device *
-ttm_object_device_init(struct ttm_mem_global *mem_glob,
-                      unsigned int hash_order,
+ttm_object_device_init(unsigned int hash_order,
                       const struct dma_buf_ops *ops);
 
 /**
 #define ttm_prime_object_kfree(__obj, __prime)         \
        kfree_rcu(__obj, __prime.base.rhead)
 
-/*
- * Extra memory required by the base object's idr storage, which is allocated
- * separately from the base object itself. We estimate an on-average 128 bytes
- * per idr.
- */
-#define TTM_OBJ_EXTRA_SIZE 128
-
 struct ttm_base_object *
 ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key);
 
 
 }
 
 /**
- * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
- * memory accounting.
+ * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state.
  *
  * @dev_priv: Pointer to a device private structure.
  *
 vmw_binding_state_alloc(struct vmw_private *dev_priv)
 {
        struct vmw_ctx_binding_state *cbs;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
-       int ret;
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
-                               &ctx);
-       if (ret)
-               return ERR_PTR(ret);
 
        cbs = vzalloc(sizeof(*cbs));
-       if (!cbs) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
-               return ERR_PTR(-ENOMEM);
-       }
+       if (!cbs)
+               return ERR_PTR(-ENOMEM);
 
 }
 
 /**
- * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
- * memory accounting info.
+ * vmw_binding_state_free - Free a struct vmw_ctx_binding_state.
  *
  * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
  */
 void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
 {
-       struct vmw_private *dev_priv = cbs->dev_priv;
-
        vfree(cbs);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
 }
 
 /**
 
 }
 
 
-/**
- * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
- *
- * @dev_priv: Pointer to a struct vmw_private identifying the device.
- * @size: The requested buffer size.
- * @user: Whether this is an ordinary dma buffer or a user dma buffer.
- */
-static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
-                             bool user)
-{
-       static size_t struct_size, user_struct_size;
-       size_t num_pages = PFN_UP(size);
-       size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
-
-       if (unlikely(struct_size == 0)) {
-               size_t backend_size = ttm_round_pot(vmw_tt_size);
-
-               struct_size = backend_size +
-                       ttm_round_pot(sizeof(struct vmw_buffer_object));
-               user_struct_size = backend_size +
-                 ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
-                                     TTM_OBJ_EXTRA_SIZE;
-       }
-
-       if (dev_priv->map_mode == vmw_dma_alloc_coherent)
-               page_array_size +=
-                       ttm_round_pot(num_pages * sizeof(dma_addr_t));
-
-       return ((user) ? user_struct_size : struct_size) +
-               page_array_size;
-}
-
-
 /**
  * vmw_bo_bo_free - vmw buffer object destructor
  *
                         struct ttm_placement *placement,
                         struct ttm_buffer_object **p_bo)
 {
-       struct ttm_operation_ctx ctx = { false, false };
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false
+       };
        struct ttm_buffer_object *bo;
-       size_t acc_size;
        int ret;
 
        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(!bo))
                return -ENOMEM;
 
-       acc_size = ttm_round_pot(sizeof(*bo));
-       acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
-       acc_size += ttm_round_pot(sizeof(struct ttm_tt));
-
-       ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
-       if (unlikely(ret))
-               goto error_free;
-
-
        bo->base.size = size;
        dma_resv_init(&bo->base._resv);
        drm_vma_node_reset(&bo->base.vma_node);
                                   ttm_bo_type_kernel, placement, 0,
                                   &ctx, NULL, NULL, NULL);
        if (unlikely(ret))
-               goto error_account;
+               goto error_free;
 
        ttm_bo_pin(bo);
        ttm_bo_unreserve(bo);
 
        return 0;
 
-error_account:
-       ttm_mem_global_free(&ttm_mem_glob, acc_size);
-
 error_free:
        kfree(bo);
        return ret;
                bool interruptible, bool pin,
                void (*bo_free)(struct ttm_buffer_object *bo))
 {
-       struct ttm_operation_ctx ctx = { interruptible, false };
+       struct ttm_operation_ctx ctx = {
+               .interruptible = interruptible,
+               .no_wait_gpu = false
+       };
        struct ttm_device *bdev = &dev_priv->bdev;
-       size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_bo_destroy);
 
        WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
-
-       acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));
        BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
        vmw_bo->base.priority = 3;
        vmw_bo->res_tree = RB_ROOT;
 
-       ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
-       if (unlikely(ret))
-               return ret;
-
        vmw_bo->base.base.size = size;
        dma_resv_init(&vmw_bo->base.base._resv);
                                   ttm_bo_type_device, placement,
                                   0, &ctx, NULL, NULL, bo_free);
-       if (unlikely(ret)) {
-               ttm_mem_global_free(&ttm_mem_glob, acc_size);
-               return ret;
-       }
+       if (unlikely(ret))
+               return ret;
 
 
        kfree(man);
 }
 
-/**
- * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
- * resource manager
- *
- * Returns the approximate allocation size of a command buffer managed
- * resource manager.
- */
-size_t vmw_cmdbuf_res_man_size(void)
-{
-       static size_t res_man_size;
-
-       if (unlikely(res_man_size == 0))
-               res_man_size =
-                       ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
-                       ttm_round_pot(sizeof(struct hlist_head) <<
-                                     VMW_CMDBUF_RES_MAN_HT_ORDER);
-
-       return res_man_size;
-}
 
                                 struct ttm_validate_buffer *val_buf);
 static int vmw_dx_context_destroy(struct vmw_resource *res);
 
-static uint64_t vmw_user_context_size;
-
 static const struct vmw_user_resource_conv user_context_conv = {
        .object_type = VMW_RES_CONTEXT,
        .base_obj_to_res = vmw_user_context_base_to_res,
 {
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);
-       struct vmw_private *dev_priv = res->dev_priv;
 
        if (ctx->cbs)
                vmw_binding_state_free(ctx->cbs);
        (void) vmw_context_bind_dx_query(res, NULL);
 
        ttm_base_object_kfree(ctx, base);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                           vmw_user_context_size);
 }
 
 /*
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct ttm_operation_ctx ttm_opt_ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        int ret;
 
        if (!has_sm4_context(dev_priv) && dx) {
                return -EINVAL;
        }
 
-       if (unlikely(vmw_user_context_size == 0))
-               vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
-                 ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
-                 + VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  vmw_user_context_size,
-                                  &ttm_opt_ctx);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for context"
-                                 " creation.\n");
-               goto out_ret;
-       }
-
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_user_context_size);
                ret = -ENOMEM;
                goto out_ret;
        }
 
        (void) vmw_cotable_destroy(res);
 }
 
-static size_t cotable_acc_size;
-
 /**
  * vmw_cotable_free - Cotable resource destructor
  *
  */
 static void vmw_cotable_free(struct vmw_resource *res)
 {
-       struct vmw_private *dev_priv = res->dev_priv;
-
        kfree(res);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
 }
 
 /**
                                       u32 type)
 {
        struct vmw_cotable *vcotbl;
-       struct ttm_operation_ctx ttm_opt_ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        int ret;
        u32 num_entries;
 
-       if (unlikely(cotable_acc_size == 0))
-               cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  cotable_acc_size, &ttm_opt_ctx);
-       if (unlikely(ret))
-               return ERR_PTR(ret);
-
        vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
        if (unlikely(!vcotbl)) {
                ret = -ENOMEM;
 out_no_init:
        kfree(vcotbl);
 out_no_alloc:
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
        return ERR_PTR(ret);
 }
 
 
 #define VMW_MIN_INITIAL_WIDTH 800
 #define VMW_MIN_INITIAL_HEIGHT 600
 
-#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-
-
 /*
  * Fully encoded drm commands. Might move to vmw_drm.h
  */
                goto out_err0;
        }
 
-       dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
-                                               &vmw_prime_dmabuf_ops);
+       dev_priv->tdev = ttm_object_device_init(12, &vmw_prime_dmabuf_ops);
 
        if (unlikely(dev_priv->tdev == NULL)) {
                drm_err(&dev_priv->drm,
                        dev_priv->sm_type = VMW_SM_4;
        }
 
-       vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
-
        /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */
        if (has_sm4_context(dev_priv) &&
            (dev_priv->capabilities2 & SVGA_CAP2_DX2)) {
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
 
-       ttm_mem_global_release(&ttm_mem_glob);
        drm_dev_unregister(dev);
        vmw_driver_unload(dev);
 }
 
        pci_set_drvdata(pdev, &vmw->drm);
 
-       ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
-       if (ret)
-               goto out_error;
-
        ret = vmw_driver_load(vmw, ent->device);
        if (ret)
-               goto out_release;
+               goto out_error;
 
        ret = drm_dev_register(&vmw->drm, 0);
        if (ret)
        return 0;
 out_unload:
        vmw_driver_unload(&vmw->drm);
-out_release:
-       ttm_mem_global_release(&ttm_mem_glob);
 out_error:
        return ret;
 }
 
        struct vmw_cmdbuf_man *cman;
        DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
 
-       /* Validation memory reservation */
-       struct vmw_validation_mem vvm;
-
        uint32 *devcaps;
 
        /*
 
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
-extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
-                                       size_t gran);
-
 /**
  * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */
                                       struct drm_file *file_priv);
 extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file_priv);
-int vmw_surface_gb_priv_define(struct drm_device *dev,
-                              uint32_t user_accounting_size,
-                              SVGA3dSurfaceAllFlags svga3d_flags,
-                              SVGA3dSurfaceFormat format,
-                              bool for_scanout,
-                              uint32_t num_mip_levels,
-                              uint32_t multisample_count,
-                              uint32_t array_size,
-                              struct drm_vmw_size size,
-                              SVGA3dMSPattern multisample_pattern,
-                              SVGA3dMSQualityLevel quality_level,
-                              struct vmw_surface **srf_out);
 extern int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev,
                                           void *data,
                                           struct drm_file *file_priv);
                                              struct drm_file *file_priv);
 
 int vmw_gb_surface_define(struct vmw_private *dev_priv,
-                         uint32_t user_accounting_size,
                          const struct vmw_surface_metadata *req,
                          struct vmw_surface **srf_out);
 
 extern struct vmw_cmdbuf_res_manager *
 vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
 extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
-extern size_t vmw_cmdbuf_res_man_size(void);
 extern struct vmw_resource *
 vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
                      enum vmw_cmdbuf_res_type res_type,
        return buf;
 }
 
-static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
-{
-       return &ttm_mem_glob;
-}
-
 static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
 {
        atomic_inc(&dev_priv->num_fifo_resources);
 
        struct sync_file *sync_file = NULL;
        DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
 
-       vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
-
        if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0) {
 
        spinlock_t lock;
        struct list_head fence_list;
        struct work_struct work;
-       u32 user_fence_size;
-       u32 fence_size;
-       u32 event_fence_action_size;
        bool fifo_down;
        struct list_head cleanup_list;
        uint32_t pending_actions[VMW_ACTION_MAX];
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
-       fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
-               TTM_OBJ_EXTRA_SIZE;
-       fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
-       fman->event_fence_action_size =
-               ttm_round_pot(sizeof(struct vmw_event_fence_action));
        mutex_init(&fman->goal_irq_mutex);
        fman->ctx = dma_fence_context_alloc(1);
 
 {
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);
-       struct vmw_fence_manager *fman = fman_from_fence(fence);
 
        ttm_base_object_kfree(ufence, base);
-       /*
-        * Free kernel space accounting.
-        */
-       ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
-                           fman->user_fence_size);
 }
 
 static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
-       struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
        int ret;
 
-       /*
-        * Kernel memory space accounting, since this object may
-        * be created by a user-space request.
-        */
-
-       ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
-                                  &ctx);
-       if (unlikely(ret != 0))
-               return ret;
-
        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(!ufence)) {
                ret = -ENOMEM;
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
 out_no_object:
-       ttm_mem_global_free(mem_glob, fman->user_fence_size);
        return ret;
 }
 
 
        metadata.base_size.depth = 1;
        metadata.scanout = true;
 
-       ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
+       ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
        if (ret) {
                DRM_ERROR("Failed to allocate proxy content buffer\n");
                return ret;
 
  * @mob:         Pointer to the mob the pagetable of which we want to
  *               populate.
  *
- * This function allocates memory to be used for the pagetable, and
- * adjusts TTM memory accounting accordingly. Returns ENOMEM if
- * memory resources aren't sufficient and may cause TTM buffer objects
- * to be swapped out by using the TTM memory accounting function.
+ * This function allocates memory to be used for the pagetable.
+ * Returns ENOMEM if memory resources aren't sufficient and may
+ * cause TTM buffer objects to be swapped out.
  */
 static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                               struct vmw_mob *mob)
 
  * @ref_count: Reference count for this structure
  * @bitmap_size: The size of the bitmap in bits. Typically equal to the
  * number of pages in the bo.
- * @size: The accounting size for this struct.
  * @bitmap: A bitmap where each bit represents a page. A set bit means a
  * dirty page.
  */
        unsigned int change_count;
        unsigned int ref_count;
        unsigned long bitmap_size;
-       size_t size;
        unsigned long bitmap[];
 };
 
 {
        struct vmw_bo_dirty *dirty = vbo->dirty;
        pgoff_t num_pages = vbo->base.resource->num_pages;
-       size_t size, acc_size;
+       size_t size;
        int ret;
-       static struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
 
        if (dirty) {
                dirty->ref_count++;
        }
 
        size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
-       acc_size = ttm_round_pot(size);
-       ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
-       if (ret) {
-               VMW_DEBUG_USER("Out of graphics memory for buffer object "
-                              "dirty tracker.\n");
-               return ret;
-       }
        dirty = kvzalloc(size, GFP_KERNEL);
        if (!dirty) {
                ret = -ENOMEM;
                goto out_no_dirty;
        }
 
-       dirty->size = acc_size;
        dirty->bitmap_size = num_pages;
        dirty->start = dirty->bitmap_size;
        dirty->end = 0;
        return 0;
 
 out_no_dirty:
-       ttm_mem_global_free(&ttm_mem_glob, acc_size);
        return ret;
 }
 
        struct vmw_bo_dirty *dirty = vbo->dirty;
 
        if (dirty && --dirty->ref_count == 0) {
-               size_t acc_size = dirty->size;
-
                kvfree(dirty);
-               ttm_mem_global_free(&ttm_mem_glob, acc_size);
                vbo->dirty = NULL;
        }
 }
 
        struct list_head cotable_head;
 };
 
-static uint64_t vmw_user_shader_size;
-static uint64_t vmw_shader_size;
-static size_t vmw_shader_dx_size;
-
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_shader_base_to_res(struct ttm_base_object *base);
                                        enum vmw_cmdbuf_res_state state);
 static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type);
 static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type);
-static uint64_t vmw_user_shader_size;
 
 static const struct vmw_user_resource_conv user_shader_conv = {
        .object_type = VMW_RES_SHADER,
  *
  * @res: The shader resource
  *
- * Frees the DX shader resource and updates memory accounting.
+ * Frees the DX shader resource.
  */
 static void vmw_dx_shader_res_free(struct vmw_resource *res)
 {
-       struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res);
 
        vmw_resource_unreference(&shader->cotable);
        kfree(shader);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
 }
 
 /**
        struct vmw_dx_shader *shader;
        struct vmw_resource *res;
        struct vmw_private *dev_priv = ctx->dev_priv;
-       struct ttm_operation_ctx ttm_opt_ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        int ret;
 
-       if (!vmw_shader_dx_size)
-               vmw_shader_dx_size = ttm_round_pot(sizeof(*shader));
-
        if (!vmw_shader_id_ok(user_key, shader_type))
                return -EINVAL;
 
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
-                                  &ttm_opt_ctx);
-       if (ret) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for shader "
-                                 "creation.\n");
-               return ret;
-       }
-
        shader = kmalloc(sizeof(*shader), GFP_KERNEL);
-       if (!shader) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size);
-               return -ENOMEM;
-       }
+       if (!shader)
+               return -ENOMEM;
 
 {
        struct vmw_user_shader *ushader =
                container_of(res, struct vmw_user_shader, shader.res);
-       struct vmw_private *dev_priv = res->dev_priv;
 
        ttm_base_object_kfree(ushader, base);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                           vmw_user_shader_size);
 }
 
 static void vmw_shader_free(struct vmw_resource *res)
 {
        struct vmw_shader *shader = vmw_res_to_shader(res);
-       struct vmw_private *dev_priv = res->dev_priv;
 
        kfree(shader);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                           vmw_shader_size);
 }
 
 /*
 {
        struct vmw_user_shader *ushader;
        struct vmw_resource *res, *tmp;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        int ret;
 
-       if (unlikely(vmw_user_shader_size == 0))
-               vmw_user_shader_size =
-                       ttm_round_pot(sizeof(struct vmw_user_shader)) +
-                       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  vmw_user_shader_size,
-                                  &ctx);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for shader "
-                                 "creation.\n");
-               goto out;
-       }
-
        ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
        if (unlikely(!ushader)) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_user_shader_size);
                ret = -ENOMEM;
                goto out;
        }
 {
        struct vmw_shader *shader;
        struct vmw_resource *res;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        int ret;
 
-       if (unlikely(vmw_shader_size == 0))
-               vmw_shader_size =
-                       ttm_round_pot(sizeof(struct vmw_shader)) +
-                       VMW_IDA_ACC_SIZE;
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  vmw_shader_size,
-                                  &ctx);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for shader "
-                                 "creation.\n");
-               goto out_err;
-       }
-
        shader = kzalloc(sizeof(*shader), GFP_KERNEL);
        if (unlikely(!shader)) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_shader_size);
                ret = -ENOMEM;
                goto out_err;
        }
 
  * struct vmw_user_simple_resource - User-space simple resource struct
  *
  * @base: The TTM base object implementing user-space visibility.
- * @account_size: How much memory was accounted for this object.
  * @simple: The embedded struct vmw_simple_resource.
  */
 struct vmw_user_simple_resource {
        struct ttm_base_object base;
-       size_t account_size;
        struct vmw_simple_resource simple;
 /*
  * Nothing to be placed after @simple, since size of @simple is
  *
  * @res: The struct vmw_resource member of the simple resource object.
  *
- * Frees memory and memory accounting for the object.
+ * Frees memory for the object.
  */
 static void vmw_simple_resource_free(struct vmw_resource *res)
 {
        struct vmw_user_simple_resource *usimple =
                container_of(res, struct vmw_user_simple_resource,
                             simple.res);
-       struct vmw_private *dev_priv = res->dev_priv;
-       size_t size = usimple->account_size;
 
        ttm_base_object_kfree(usimple, base);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 }
 
 /**
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        size_t alloc_size;
-       size_t account_size;
        int ret;
 
        alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
          func->size;
-       account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
-               TTM_OBJ_EXTRA_SIZE;
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
-                                  &ctx);
-       if (ret) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for %s"
-                                 " creation.\n", func->res_func.type_name);
-
-               goto out_ret;
-       }
 
        usimple = kzalloc(alloc_size, GFP_KERNEL);
        if (!usimple) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   account_size);
                ret = -ENOMEM;
                goto out_ret;
        }
 
        usimple->simple.func = func;
-       usimple->account_size = account_size;
        res = &usimple->simple.res;
        usimple->base.shareable = false;
        usimple->base.tfile = NULL;
 
  *
  * @res: Pointer to a struct vmw_resource
  *
- * Frees memory and memory accounting held by a struct vmw_view.
+ * Frees memory held by the struct vmw_view.
  */
 static void vmw_view_res_free(struct vmw_resource *res)
 {
        struct vmw_view *view = vmw_view(res);
-       size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size;
-       struct vmw_private *dev_priv = res->dev_priv;
 
        vmw_resource_unreference(&view->cotable);
        vmw_resource_unreference(&view->srf);
        kfree_rcu(view, rcu);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 }
 
 /**
        struct vmw_private *dev_priv = ctx->dev_priv;
        struct vmw_resource *res;
        struct vmw_view *view;
-       struct ttm_operation_ctx ttm_opt_ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        size_t size;
        int ret;
 
 
        size = offsetof(struct vmw_view, cmd) + cmd_size;
 
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
-       if (ret) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for view creation\n");
-               return ret;
-       }
-
        view = kmalloc(size, GFP_KERNEL);
-       if (!view) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-               return -ENOMEM;
-       }
+       if (!view)
+               return -ENOMEM;
 
 
                }
 
                if (!vps->surf) {
-                       ret = vmw_gb_surface_define(dev_priv, 0, &metadata,
+                       ret = vmw_gb_surface_define(dev_priv, &metadata,
                                                    &vps->surf);
                        if (ret != 0) {
                                DRM_ERROR("Couldn't allocate STDU surface.\n");
 
 static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
                                              enum vmw_cmdbuf_res_state state);
 
-static size_t vmw_streamoutput_size;
-
 static const struct vmw_res_func vmw_dx_streamoutput_func = {
        .res_type = vmw_res_streamoutput,
        .needs_backup = true,
 
 static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
 {
-       struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
 
        vmw_resource_unreference(&so->cotable);
        kfree(so);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
 }
 
 static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
        struct vmw_dx_streamoutput *so;
        struct vmw_resource *res;
        struct vmw_private *dev_priv = ctx->dev_priv;
-       struct ttm_operation_ctx ttm_opt_ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        int ret;
 
-       if (!vmw_streamoutput_size)
-               vmw_streamoutput_size = ttm_round_pot(sizeof(*so));
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  vmw_streamoutput_size, &ttm_opt_ctx);
-       if (ret) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for streamout.\n");
-               return ret;
-       }
-
        so = kmalloc(sizeof(*so), GFP_KERNEL);
        if (!so) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_streamoutput_size);
                return -ENOMEM;
        }
 
 
  * @prime:          The TTM prime object.
  * @base:           The TTM base object handling user-space visibility.
  * @srf:            The surface metadata.
- * @size:           TTM accounting size for the surface.
  * @master:         Master of the creating client. Used for security check.
  * @backup_base:    The TTM base object of the backup buffer.
  */
 struct vmw_user_surface {
        struct ttm_prime_object prime;
        struct vmw_surface srf;
-       uint32_t size;
        struct drm_master *master;
        struct ttm_base_object *backup_base;
 };
 /**
  * struct vmw_surface_dirty - Surface dirty-tracker
  * @cache: Cached layout information of the surface.
- * @size: Accounting size for the struct vmw_surface_dirty.
  * @num_subres: Number of subresources.
  * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
  */
 struct vmw_surface_dirty {
        struct vmw_surface_cache cache;
-       size_t size;
        u32 num_subres;
        SVGA3dBox boxes[];
 };
 const struct vmw_user_resource_conv *user_surface_converter =
        &user_surface_conv;
 
-
-static uint64_t vmw_user_surface_size;
-
 static const struct vmw_res_func vmw_legacy_surface_func = {
        .res_type = vmw_res_surface,
        .needs_backup = false,
  *              vmw_surface.
  *
  * Destroys the device surface associated with a struct vmw_surface if
- * any, and adjusts accounting and resource count accordingly.
+ * any, and adjusts resource count accordingly.
  */
 static void vmw_hw_surface_destroy(struct vmw_resource *res)
 {
        struct vmw_surface *srf = vmw_res_to_srf(res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);
-       struct vmw_private *dev_priv = srf->res.dev_priv;
-       uint32_t size = user_srf->size;
 
        WARN_ON_ONCE(res->dirty);
        if (user_srf->master)
        kfree(srf->metadata.sizes);
        kfree(srf->snooper.image);
        ttm_prime_object_kfree(user_srf, prime);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 }
 
 /**
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        int ret;
        int i, j;
        uint32_t cur_bo_offset;
        struct drm_vmw_size *cur_size;
        struct vmw_surface_offset *cur_offset;
        uint32_t num_sizes;
-       uint32_t size;
        const SVGA3dSurfaceDesc *desc;
 
-       if (unlikely(vmw_user_surface_size == 0))
-               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-                       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
        num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
            num_sizes == 0)
                return -EINVAL;
 
-       size = vmw_user_surface_size +
-               ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
-               ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
        desc = vmw_surface_get_desc(req->format);
        if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) {
                VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
                return -EINVAL;
        }
 
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  size, &ctx);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for surface.\n");
-               goto out_unlock;
-       }
-
        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(!user_srf)) {
                ret = -ENOMEM;
-               goto out_no_user_srf;
+               goto out_unlock;
        }
 
        srf = &user_srf->srf;
        memcpy(metadata->mip_levels, req->mip_levels,
               sizeof(metadata->mip_levels));
        metadata->num_sizes = num_sizes;
-       user_srf->size = size;
        metadata->sizes =
                memdup_user((struct drm_vmw_size __user *)(unsigned long)
                            req->size_addr,
        kfree(metadata->sizes);
 out_no_sizes:
        ttm_prime_object_kfree(user_srf, prime);
-out_no_user_srf:
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
        return ret;
 }
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        int ret = 0;
-       uint32_t size;
        uint32_t backup_handle = 0;
        SVGA3dSurfaceAllFlags svga3d_flags_64 =
                SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
                return -EINVAL;
        }
 
-       if (unlikely(vmw_user_surface_size == 0))
-               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-                       VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
-
-       size = vmw_user_surface_size;
-
        metadata.flags = svga3d_flags_64;
        metadata.format = req->base.format;
        metadata.mip_levels[0] = req->base.mip_levels;
                drm_vmw_surface_flag_scanout;
 
        /* Define a surface based on the parameters. */
-       ret = vmw_gb_surface_define(dev_priv, size, &metadata, &srf);
+       ret = vmw_gb_surface_define(dev_priv, &metadata, &srf);
        if (ret != 0) {
                VMW_DEBUG_USER("Failed to define surface.\n");
                return ret;
        u32 num_mip;
        u32 num_subres;
        u32 num_samples;
-       size_t dirty_size, acc_size;
-       static struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
+       size_t dirty_size;
        int ret;
 
        if (metadata->array_size)
 
        num_subres = num_layers * num_mip;
        dirty_size = struct_size(dirty, boxes, num_subres);
-       acc_size = ttm_round_pot(dirty_size);
-       ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
-                                  acc_size, &ctx);
-       if (ret) {
-               VMW_DEBUG_USER("Out of graphics memory for surface "
-                              "dirty tracker.\n");
-               return ret;
-       }
 
        dirty = kvzalloc(dirty_size, GFP_KERNEL);
        if (!dirty) {
 
        num_samples = max_t(u32, 1, metadata->multisample_count);
        ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format,
-                                       num_mip, num_layers, num_samples,
-                                       &dirty->cache);
+                                     num_mip, num_layers, num_samples,
+                                     &dirty->cache);
        if (ret)
                goto out_no_cache;
 
        dirty->num_subres = num_subres;
-       dirty->size = acc_size;
        res->dirty = (struct vmw_resource_dirty *) dirty;
 
        return 0;
 out_no_cache:
        kvfree(dirty);
 out_no_dirty:
-       ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
        return ret;
 }
 
 {
        struct vmw_surface_dirty *dirty =
                (struct vmw_surface_dirty *) res->dirty;
-       size_t acc_size = dirty->size;
 
        kvfree(dirty);
-       ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
        res->dirty = NULL;
 }
 
  * vmw_gb_surface_define - Define a private GB surface
  *
  * @dev_priv: Pointer to a device private.
- * @user_accounting_size:  Used to track user-space memory usage, set
- *                         to 0 for kernel mode only memory
  * @req: Metadata representing the surface to create.
  * @srf_out: allocated user_srf. Set to NULL on failure.
  *
  * it available to user mode drivers.
  */
 int vmw_gb_surface_define(struct vmw_private *dev_priv,
-                         uint32_t user_accounting_size,
                          const struct vmw_surface_metadata *req,
                          struct vmw_surface **srf_out)
 {
        struct vmw_surface_metadata *metadata;
        struct vmw_user_surface *user_srf;
        struct vmw_surface *srf;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        u32 sample_count = 1;
        u32 num_layers = 1;
        int ret;
        if (req->sizes != NULL)
                return -EINVAL;
 
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  user_accounting_size, &ctx);
-       if (ret != 0) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for surface.\n");
-               goto out_unlock;
-       }
-
        user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
        if (unlikely(!user_srf)) {
                ret = -ENOMEM;
-               goto out_no_user_srf;
+               goto out_unlock;
        }
 
        *srf_out  = &user_srf->srf;
-       user_srf->size = user_accounting_size;
        user_srf->prime.base.shareable = false;
        user_srf->prime.base.tfile = NULL;
 
 
        return ret;
 
-out_no_user_srf:
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
-
 out_unlock:
        return ret;
 }
 
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
-       uint64_t sg_alloc_size;
        bool mapped;
        bool bound;
 };
 static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 {
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
-       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
-       static size_t sgl_size;
-       static size_t sgt_size;
 
        if (vmw_tt->mapped)
                return 0;
        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
-               if (unlikely(!sgl_size)) {
-                       sgl_size = ttm_round_pot(sizeof(struct scatterlist));
-                       sgt_size = ttm_round_pot(sizeof(struct sg_table));
-               }
-               vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
-               ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
-               if (unlikely(ret != 0))
-                       return ret;
-
                ret = sg_alloc_table_from_pages_segment(
                        &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
                        (unsigned long)vsgt->num_pages << PAGE_SHIFT,
                if (ret)
                        goto out_sg_alloc_fail;
 
-               if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
-                       uint64_t over_alloc =
-                               sgl_size * (vsgt->num_pages -
-                                           vmw_tt->sgt.orig_nents);
-
-                       ttm_mem_global_free(glob, over_alloc);
-                       vmw_tt->sg_alloc_size -= over_alloc;
-               }
-
                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
 out_sg_alloc_fail:
-       ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
 }
 
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_tt->sg_alloc_size);
                break;
        default:
                break;
 static int vmw_ttm_populate(struct ttm_device *bdev,
                            struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-       unsigned int i;
        int ret;
 
        /* TODO: maybe completely drop this ? */
                return 0;
 
        ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < ttm->num_pages; ++i) {
-               ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
-                                               PAGE_SIZE, ctx);
-               if (ret)
-                       goto error;
-       }
-       return 0;
 
-error:
-       while (i--)
-               ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
-                                        PAGE_SIZE);
-       ttm_pool_free(&bdev->pool, ttm);
        return ret;
 }
 
 {
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm);
-       unsigned int i;
 
        vmw_ttm_unbind(bdev, ttm);
 
 
        vmw_ttm_unmap_dma(vmw_tt);
 
-       for (i = 0; i < ttm->num_pages; ++i)
-               ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
-                                        PAGE_SIZE);
-
        ttm_pool_free(&bdev->pool, ttm);
 }
 
 
        return ret;
 }
 
-/* struct vmw_validation_mem callback */
-static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
-{
-       static struct ttm_operation_ctx ctx = {.interruptible = false,
-                                              .no_wait_gpu = false};
-       struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
-       return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
-}
-
-/* struct vmw_validation_mem callback */
-static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
-{
-       struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-
-       return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_validation_mem_init_ttm - Interface the validation memory tracker
- * to ttm.
- * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
- * rather than a struct vmw_validation_mem is to make sure assumption in the
- * callbacks that struct vmw_private derives from struct vmw_validation_mem
- * holds true.
- * @gran: The recommended allocation granularity
- */
-void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
-{
-       struct vmw_validation_mem *vvm = &dev_priv->vvm;
-
-       vvm->reserve_mem = vmw_vmt_reserve;
-       vvm->unreserve_mem = vmw_vmt_unreserve;
-       vvm->gran = gran;
-}
 
 #include "vmwgfx_validation.h"
 #include "vmwgfx_drv.h"
 
+
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
 /**
  * struct vmw_validation_bo_node - Buffer object validation metadata.
  * @base: Metadata used for TTM reservation- and validation.
                struct page *page;
 
                if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
-                       int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
-
-                       if (ret)
-                               return NULL;
-
-                       ctx->vm_size_left += ctx->vm->gran;
-                       ctx->total_mem += ctx->vm->gran;
+                       ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN;
+                       ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN;
                }
 
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 
        ctx->mem_size_left = 0;
        if (ctx->vm && ctx->total_mem) {
-               ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
                ctx->total_mem = 0;
                ctx->vm_size_left = 0;
        }
 
 #define VMW_RES_DIRTY_SET BIT(0)
 #define VMW_RES_DIRTY_CLEAR BIT(1)
 
-/**
- * struct vmw_validation_mem - Custom interface to provide memory reservations
- * for the validation code.
- * @reserve_mem: Callback to reserve memory
- * @unreserve_mem: Callback to unreserve memory
- * @gran: Reservation granularity. Contains a hint how much memory should
- * be reserved in each call to @reserve_mem(). A slow implementation may want
- * reservation to be done in large batches.
- */
-struct vmw_validation_mem {
-       int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
-       void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
-       size_t gran;
-};
-
 /**
  * struct vmw_validation_context - Per command submission validation context
  * @ht: Hash table used to find resource- or buffer object duplicates
        return !list_empty(&ctx->bo_list);
 }
 
-/**
- * vmw_validation_set_val_mem - Register a validation mem object for
- * validation memory reservation
- * @ctx: The validation context
- * @vm: Pointer to a struct vmw_validation_mem
- *
- * Must be set before the first attempt to allocate validation memory.
- */
-static inline void
-vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
-                          struct vmw_validation_mem *vm)
-{
-       ctx->vm = vm;
-}
-
 /**
  * vmw_validation_set_ht - Register a hash table for duplicate finding
  * @ctx: The validation context
                                    (void *) fence);
 }
 
-/**
- * vmw_validation_context_init - Initialize a validation context
- * @ctx: Pointer to the validation context to initialize
- *
- * This function initializes a validation context with @merge_dups set
- * to false
- */
-static inline void
-vmw_validation_context_init(struct vmw_validation_context *ctx)
-{
-       memset(ctx, 0, sizeof(*ctx));
-       INIT_LIST_HEAD(&ctx->resource_list);
-       INIT_LIST_HEAD(&ctx->resource_ctx_list);
-       INIT_LIST_HEAD(&ctx->bo_list);
-}
-
 /**
  * vmw_validation_align - Align a validation memory allocation
  * @val: The size to be aligned