        ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
                        AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
-                       ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj);
+                       ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
 
        amdgpu_bo_unreserve(mem->bo);
 
                        va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));
 
        ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
-                                      bo_type, NULL, &gobj);
+                                      bo_type, NULL, &gobj, 0);
        if (ret) {
                pr_debug("Failed to create BO on domain %s. ret %d\n",
                         domain_string(alloc_domain), ret);
 
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
                             struct dma_resv *resv,
-                            struct drm_gem_object **obj)
+                            struct drm_gem_object **obj, int8_t mem_id_plus1)
 {
        struct amdgpu_bo *bo;
        struct amdgpu_bo_user *ubo;
        bp.flags = flags;
        bp.domain = initial_domain;
        bp.bo_ptr_size = sizeof(struct amdgpu_bo);
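+       /* memory partition id, offset by one; 0 means no partition requested */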
+       bp.mem_id_plus1 = mem_id_plus1;
 
        r = amdgpu_bo_create_user(adev, &bp, &ubo);
        if (r)
 retry:
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     initial_domain,
-                                    flags, ttm_bo_type_device, resv, &gobj);
+                                    flags, ttm_bo_type_device, resv, &gobj, 0);
        if (r && r != -ERESTARTSYS) {
                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
                        flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
        /* create a gem object to contain this object in */
        r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
-                                    0, ttm_bo_type_device, NULL, &gobj);
+                                    0, ttm_bo_type_device, NULL, &gobj, 0);
        if (r)
                return r;
 
        domain = amdgpu_bo_get_preferred_domain(adev,
                                amdgpu_display_supported_domains(adev, flags));
        r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
-                                    ttm_bo_type_device, NULL, &gobj);
+                                    ttm_bo_type_device, NULL, &gobj, 0);
        if (r)
                return -ENOMEM;
 
 
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
                             struct dma_resv *resv,
-                            struct drm_gem_object **obj);
-
+                            struct drm_gem_object **obj, int8_t mem_id_plus1);
 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args);
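
For reference, a caller that wants its BO placed on a specific memory partition would pass mem_id + 1 through the new parameter instead of 0. A minimal sketch, assuming the caller has already picked a valid mem_id (the helper name and its arguments are illustrative, not part of this patch):

/* Illustrative only: create a GEM object on memory partition mem_id by
 * passing mem_id + 1; every call site converted above passes 0 to keep the
 * previous "no partition requested" behaviour.
 */
static int example_create_bo_on_partition(struct amdgpu_device *adev,
                                          unsigned long size, u64 flags,
                                          int8_t mem_id,
                                          struct drm_gem_object **gobj)
{
        return amdgpu_gem_object_create(adev, size, PAGE_SIZE,
                                        AMDGPU_GEM_DOMAIN_VRAM, flags,
                                        ttm_bo_type_device, NULL, gobj,
                                        mem_id + 1);
}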