node->base.start = node->mm_nodes[0].start;
        } else {
                node->mm_nodes[0].start = 0;
-               node->mm_nodes[0].size = node->base.num_pages;
+               node->mm_nodes[0].size = PFN_UP(node->base.size);
                node->base.start = AMDGPU_BO_INVALID_OFFSET;
        }
 
 
                /* GWS and OA don't need any alignment. */
                page_align = bp->byte_align;
                size <<= PAGE_SHIFT;
+
        } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
                /* Both size and alignment must be a multiple of 4. */
                page_align = ALIGN(bp->byte_align, 4);
                return 0;
        }
 
-       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+       r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
        if (r)
                return r;
 
 
        if (!res)
                goto fallback;
 
-       BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+       BUG_ON(start + size > res->size);
 
        cur->mem_type = res->mem_type;
 
        cur->size = size;
        cur->remaining = size;
        cur->node = NULL;
-       WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+       WARN_ON(res && start + size > res->size);
        return;
 }
 
 
 
            TP_fast_assign(
                           __entry->bo = bo;
-                          __entry->pages = bo->tbo.resource->num_pages;
+                          __entry->pages = PFN_UP(bo->tbo.resource->size);
                           __entry->type = bo->tbo.resource->mem_type;
                           __entry->prefer = bo->preferred_domains;
                           __entry->allow = bo->allowed_domains;
 
        dst.offset = 0;
 
        r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
-                                      new_mem->num_pages << PAGE_SHIFT,
+                                      new_mem->size,
                                       amdgpu_bo_encrypted(abo),
                                       bo->base.resv, &fence);
        if (r)
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
                               struct ttm_resource *mem)
 {
-       u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+       u64 mem_size = (u64)mem->size;
        struct amdgpu_res_cursor cursor;
        u64 end;
 
                                     struct ttm_resource *mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-       size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+       size_t bus_size = (size_t)mem->size;
 
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
 
                /* Allocate blocks in desired range */
                vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
 
-       remaining_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+       remaining_size = (u64)vres->base.size;
 
        mutex_lock(&mgr->lock);
        while (remaining_size) {
                LIST_HEAD(temp);
 
                trim_list = &vres->blocks;
-               original_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+               original_size = (u64)vres->base.size;
 
                /*
                 * If size value is rounded up to min_block_size, trim the last
                        amdgpu_vram_mgr_block_size(block);
                start >>= PAGE_SHIFT;
 
-               if (start > vres->base.num_pages)
-                       start -= vres->base.num_pages;
+               if (start > PFN_UP(vres->base.size))
+                       start -= PFN_UP(vres->base.size);
                else
                        start = 0;
                vres->base.start = max(vres->base.start, start);
 
        if (!i915_ttm_cpu_maps_iomem(res))
                return true;
 
-       return bman_res->used_visible_size == bman_res->base.num_pages;
+       return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
 }
 
 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 
                                                     u32 page_alignment)
 {
        struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
-       const u64 size = res->num_pages << PAGE_SHIFT;
+       const u64 size = res->size;
        const u32 max_segment = round_down(UINT_MAX, page_alignment);
        struct drm_buddy *mm = bman_res->mm;
        struct list_head *blocks = &bman_res->blocks;
 
        i915_refct_sgt_init(rsgt, size);
        st = &rsgt->table;
-       if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
+       if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
                i915_refct_sgt_put(rsgt);
                return ERR_PTR(-ENOMEM);
        }
 
        if (place->fpfn || lpfn != man->size)
                bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
 
-       GEM_BUG_ON(!bman_res->base.num_pages);
-       size = bman_res->base.num_pages << PAGE_SHIFT;
+       GEM_BUG_ON(!bman_res->base.size);
+       size = bman_res->base.size;
 
        min_page_size = bman->default_page_size;
        if (bo->page_alignment)
        GEM_BUG_ON(min_page_size < mm->chunk_size);
        GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
 
-       if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+       if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
            place->flags & TTM_PL_FLAG_CONTIGUOUS) {
                unsigned long pages;
 
                goto err_free_blocks;
 
        if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-               u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
+               u64 original_size = (u64)bman_res->base.size;
 
                drm_buddy_block_trim(mm,
                                     original_size,
        }
 
        if (lpfn <= bman->visible_size) {
-               bman_res->used_visible_size = bman_res->base.num_pages;
+               bman_res->used_visible_size = PFN_UP(bman_res->base.size);
        } else {
                struct drm_buddy_block *block;
 
 
        if (!place->fpfn &&
            place->lpfn == i915_ttm_buddy_man_visible_size(man))
-               return bman_res->used_visible_size == res->num_pages;
+               return bman_res->used_visible_size == PFN_UP(res->size);
 
        /* Check each drm buddy block individually */
        list_for_each_entry(block, &bman_res->blocks, link) {
 
        struct ttm_resource_manager *man = mem->region_private;
        struct ttm_buffer_object mock_bo = {};
 
-       mock_bo.base.size = res->num_pages << PAGE_SHIFT;
+       mock_bo.base.size = res->size;
        mock_bo.bdev = &mem->i915->bdev;
        res->bo = &mock_bo;
 
 
        if (ret)
                return ret;
 
-       ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
+       ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
 
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
        } else {
                /* make sure bo is in mappable vram */
                if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-                   bo->resource->start + bo->resource->num_pages < mappable)
+                   bo->resource->start + PFN_UP(bo->resource->size) < mappable)
                        return 0;
 
                for (i = 0; i < nvbo->placement.num_placement; ++i) {
 
        u32 src_offset = old_reg->start << PAGE_SHIFT;
        u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
        u32 dst_offset = new_reg->start << PAGE_SHIFT;
-       u32 page_count = new_reg->num_pages;
+       u32 page_count = PFN_UP(new_reg->size);
        int ret;
 
        ret = PUSH_WAIT(push, 3);
        PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
                               SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);
 
-       page_count = new_reg->num_pages;
+       page_count = PFN_UP(new_reg->size);
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;
 
 
 {
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        struct nvif_push *push = chan->chan.push;
-       u64 length = (new_reg->num_pages << PAGE_SHIFT);
+       u64 length = new_reg->size;
        u64 src_offset = mem->vma[0].addr;
        u64 dst_offset = mem->vma[1].addr;
        int src_tiled = !!mem->kind;
 
        if (ret)
                return ret;
 
-       PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT,
+       PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->size,
                                0x0308, upper_32_bits(mem->vma[0].addr),
                                0x030c, lower_32_bits(mem->vma[0].addr),
                                0x0310, upper_32_bits(mem->vma[1].addr),
 
        struct nvif_push *push = chan->chan.push;
        u64 src_offset = mem->vma[0].addr;
        u64 dst_offset = mem->vma[1].addr;
-       u32 page_count = new_reg->num_pages;
+       u32 page_count = PFN_UP(new_reg->size);
        int ret;
 
-       page_count = new_reg->num_pages;
+       page_count = PFN_UP(new_reg->size);
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;
 
 
        struct nouveau_mem *mem = nouveau_mem(old_reg);
        u64 src_offset = mem->vma[0].addr;
        u64 dst_offset = mem->vma[1].addr;
-       u32 page_count = new_reg->num_pages;
+       u32 page_count = PFN_UP(new_reg->size);
        int ret;
 
-       page_count = new_reg->num_pages;
+       page_count = PFN_UP(new_reg->size);
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;
 
 
        struct nvif_push *push = chan->chan.push;
        u64 src_offset = mem->vma[0].addr;
        u64 dst_offset = mem->vma[1].addr;
-       u32 page_count = new_reg->num_pages;
+       u32 page_count = PFN_UP(new_reg->size);
        int ret;
 
-       page_count = new_reg->num_pages;
+       page_count = PFN_UP(new_reg->size);
        while (page_count) {
                int line_count = (page_count > 8191) ? 8191 : page_count;
 
 
                                PITCH_IN, PAGE_SIZE,
                                PITCH_OUT, PAGE_SIZE,
                                LINE_LENGTH_IN, PAGE_SIZE,
-                               LINE_COUNT, new_reg->num_pages);
+                               LINE_COUNT, PFN_UP(new_reg->size));
 
        PUSH_IMMD(push, NVA0B5, LAUNCH_DMA,
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
 
                }
 
                if (!nvbo->kmap.virtual) {
-                       ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
+                       ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
                                          &nvbo->kmap);
                        if (ret) {
                                NV_PRINTK(err, cli, "failed kmap for reloc\n");
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
-                                                         nvbo->bo.resource->
-                                                         num_pages,
+                                                         PFN_UP(nvbo->bo.base.size),
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
 
 
        mutex_lock(&drm->master.lock);
        ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
-                                reg->num_pages << PAGE_SHIFT,
+                                reg->size,
                                 &args, sizeof(args), &mem->mem);
        mutex_unlock(&drm->master.lock);
        return ret;
        struct nouveau_cli *cli = mem->cli;
        struct nouveau_drm *drm = cli->drm;
        struct nvif_mmu *mmu = &cli->mmu;
-       u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+       u64 size = ALIGN(reg->size, 1 << page);
        int ret;
 
        mutex_lock(&drm->master.lock);
 
        mem = nouveau_mem(*res);
        ttm_resource_init(bo, place, *res);
        ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-                          (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
+                          (long)(*res)->size, &mem->vma[0]);
        if (ret) {
                nouveau_mem_del(man, *res);
                return ret;
 
        struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
        /* Sort A before B if A is smaller. */
-       return (int)la->robj->tbo.resource->num_pages -
-               (int)lb->robj->tbo.resource->num_pages;
+       if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
+               return 1;
+       if (la->robj->tbo.base.size < lb->robj->tbo.base.size)
+               return -1;
+       return 0;
 }
 
 /**
 
                }
                return 0;
        }
-       r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+       r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
        if (r) {
                return r;
        }
        if (bo->resource->mem_type != TTM_PL_VRAM)
                return 0;
 
-       size = bo->resource->num_pages << PAGE_SHIFT;
+       size = bo->resource->size;
        offset = bo->resource->start << PAGE_SHIFT;
        if ((offset + size) <= rdev->mc.visible_vram_size)
                return 0;
 
 
            TP_fast_assign(
                           __entry->bo = bo;
-                          __entry->pages = bo->tbo.resource->num_pages;
+                          __entry->pages = PFN_UP(bo->tbo.resource->size);
                           ),
            TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
 
 
        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
-       num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+       num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
        fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
        if (IS_ERR(fence))
                return PTR_ERR(fence);
 static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 {
        struct radeon_device *rdev = radeon_get_rdev(bdev);
-       size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+       size_t bus_size = (size_t)mem->size;
 
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
 
        struct ttm_resource_manager *man;
        int i, mem_type;
 
-       drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
-                  bo, bo->resource->num_pages, bo->base.size >> 10,
-                  bo->base.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                mem_type = placement->placement[i].mem_type;
                drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
 
 
        clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-               ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+               ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
 
        if (!src_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
 
        map->virtual = NULL;
        map->bo = bo;
-       if (num_pages > bo->resource->num_pages)
+       if (num_pages > PFN_UP(bo->resource->size))
                return -EINVAL;
-       if ((start_page + num_pages) > bo->resource->num_pages)
+       if ((start_page + num_pages) > PFN_UP(bo->resource->size))
                return -EINVAL;
 
        ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
 
        page_last = vma_pages(vma) + vma->vm_pgoff -
                drm_vma_node_start(&bo->base.vma_node);
 
-       if (unlikely(page_offset >= bo->resource->num_pages))
+       if (unlikely(page_offset >= PFN_UP(bo->base.size)))
                return VM_FAULT_SIGBUS;
 
        prot = ttm_io_prot(bo, bo->resource, prot);
                 << PAGE_SHIFT);
        int ret;
 
-       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
+       if (len < 1 || (offset + len) > bo->base.size)
                return -EIO;
 
        ret = ttm_bo_reserve(bo, true, false, NULL);
 
 
        spin_lock(&rman->lock);
        ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-                                         node->base.num_pages,
+                                         PFN_UP(node->base.size),
                                          bo->page_alignment, 0,
                                          place->fpfn, lpfn, mode);
        spin_unlock(&rman->lock);
 
        struct ttm_resource_manager *man;
 
        res->start = 0;
-       res->num_pages = PFN_UP(bo->base.size);
+       res->size = bo->base.size;
        res->mem_type = place->mem_type;
        res->placement = place->flags;
        res->bus.addr = NULL;
                list_add_tail(&res->lru, &bo->bdev->pinned);
        else
                list_add_tail(&res->lru, &man->lru[bo->priority]);
-       man->usage += res->num_pages << PAGE_SHIFT;
+       man->usage += res->size;
        spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_init);
 
        spin_lock(&bdev->lru_lock);
        list_del_init(&res->lru);
-       man->usage -= res->num_pages << PAGE_SHIFT;
+       man->usage -= res->size;
        spin_unlock(&bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_fini);
                iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
                iter_io->needs_unmap = false;
        } else {
-               size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
-
                iter_io->needs_unmap = true;
                memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
                if (mem->bus.caching == ttm_write_combined)
                        iosys_map_set_vaddr_iomem(&iter_io->dmap,
                                                  ioremap_wc(mem->bus.offset,
-                                                            bus_size));
+                                                            mem->size));
                else if (mem->bus.caching == ttm_cached)
                        iosys_map_set_vaddr(&iter_io->dmap,
-                                           memremap(mem->bus.offset, bus_size,
+                                           memremap(mem->bus.offset, mem->size,
                                                     MEMREMAP_WB |
                                                     MEMREMAP_WT |
                                                     MEMREMAP_WC));
                if (iosys_map_is_null(&iter_io->dmap))
                        iosys_map_set_vaddr_iomem(&iter_io->dmap,
                                                  ioremap(mem->bus.offset,
-                                                         bus_size));
+                                                         mem->size));
 
                if (iosys_map_is_null(&iter_io->dmap)) {
                        ret = -ENOMEM;
 
        d.src_addr = NULL;
        d.dst_pages = dst->ttm->pages;
        d.src_pages = src->ttm->pages;
-       d.dst_num_pages = dst->resource->num_pages;
-       d.src_num_pages = src->resource->num_pages;
+       d.dst_num_pages = PFN_UP(dst->resource->size);
+       d.src_num_pages = PFN_UP(src->resource->size);
        d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
        d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
        d.diff = diff;
 
        int ret = 0;
 
        place = vmw_vram_placement.placement[0];
-       place.lpfn = bo->resource->num_pages;
+       place.lpfn = PFN_UP(bo->resource->size);
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
         * that situation.
         */
        if (bo->resource->mem_type == TTM_PL_VRAM &&
-           bo->resource->start < bo->resource->num_pages &&
+           bo->resource->start < PFN_UP(bo->resource->size) &&
            bo->resource->start > 0 &&
            buf->base.pin_count == 0) {
                ctx.interruptible = false;
        if (virtual)
                return virtual;
 
-       ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
+       ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);
 
 
         * Do a page by page copy of COTables. This eliminates slow vmap()s.
         * This should really be a TTM utility.
         */
-       for (i = 0; i < old_bo->resource->num_pages; ++i) {
+       for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
                bool dummy;
 
                ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
 
 
        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
-               if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
+               if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
                        VMW_DEBUG_USER("Query buffer too large.\n");
                        return -EINVAL;
                }
 
        spin_lock(&gman->lock);
 
        if (gman->max_gmr_pages > 0) {
-               gman->used_gmr_pages += (*res)->num_pages;
+               gman->used_gmr_pages += PFN_UP((*res)->size);
                /*
                 * Because the graphics memory is a soft limit we can try to
                 * expand it instead of letting the userspace apps crash.
        return 0;
 
 nospace:
-       gman->used_gmr_pages -= (*res)->num_pages;
+       gman->used_gmr_pages -= PFN_UP((*res)->size);
        spin_unlock(&gman->lock);
        ida_free(&gman->gmr_ida, id);
        ttm_resource_fini(man, *res);
 
        ida_free(&gman->gmr_ida, res->start);
        spin_lock(&gman->lock);
-       gman->used_gmr_pages -= res->num_pages;
+       gman->used_gmr_pages -= PFN_UP(res->size);
        spin_unlock(&gman->lock);
        ttm_resource_fini(man, res);
        kfree(res);
 
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
        struct vmw_bo_dirty *dirty = vbo->dirty;
-       pgoff_t num_pages = vbo->base.resource->num_pages;
+       pgoff_t num_pages = PFN_UP(vbo->base.resource->size);
        size_t size;
        int ret;
 
                return ret;
 
        page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-       if (unlikely(page_offset >= bo->resource->num_pages)) {
+       if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }
 
                page_offset = vmf->pgoff -
                        drm_vma_node_start(&bo->base.vma_node);
-               if (page_offset >= bo->resource->num_pages ||
+               if (page_offset >= PFN_UP(bo->resource->size) ||
                    vmw_resources_clean(vbo, page_offset,
                                        page_offset + PAGE_SIZE,
                                        &allowed_prefault)) {
 
  * struct ttm_resource
  *
  * @start: Start of the allocation.
- * @num_pages: Actual size of resource in pages.
+ * @size: Actual size of resource in bytes.
  * @mem_type: Resource type of the allocation.
  * @placement: Placement flags.
  * @bus: Placement on io bus accessible to the CPU
  */
 struct ttm_resource {
        unsigned long start;
-       unsigned long num_pages;
+       size_t size;
        uint32_t mem_type;
        uint32_t placement;
        struct ttm_bus_placement bus;
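
As a reference for drivers converted on top of this change: the resource now records only a byte size, so any caller that still needs a page count derives it with PFN_UP() (from <linux/pfn.h>), exactly as the call sites above do. A minimal sketch of the idiom; the helper name below is illustrative only and not part of the patch:

static inline unsigned long example_resource_num_pages(const struct ttm_resource *res)
{
	/* Round the byte size up to whole pages, i.e. DIV_ROUND_UP(res->size, PAGE_SIZE). */
	return PFN_UP(res->size);
}

Byte-oriented users read res->size directly; only genuinely page-granular code (kmap lengths, visible-size accounting, placement pfn limits) needs the PFN_UP() conversion.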