if (err)
goto err_ppgtt;
+ i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ i915_gem_object_unlock(ppgtt->vm.scratch[0]);
if (err)
goto err_stash;
mutex_unlock(&ggtt->vm.mutex);
i915_address_space_fini(&ggtt->vm);
+ dma_resv_fini(&ggtt->vm.resv);
arch_phys_wc_del(ggtt->mtrr);
ggtt->vm.gt = gt;
ggtt->vm.i915 = i915;
ggtt->vm.dma = i915->drm.dev;
+ dma_resv_init(&ggtt->vm.resv);
if (INTEL_GEN(i915) <= 5)
ret = i915_gmch_probe(ggtt);
else if (INTEL_GEN(i915) < 8)
ret = gen6_gmch_probe(ggtt);
else
ret = gen8_gmch_probe(ggtt);
- if (ret)
+ if (ret) {
+ dma_resv_fini(&ggtt->vm.resv);
return ret;
+ }
if ((ggtt->vm.total - 1) >> 32) {
drm_err(&i915->drm,
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
+ struct drm_i915_gem_object *obj;
+
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
- return i915_gem_object_create_internal(vm->i915, sz);
+ obj = i915_gem_object_create_internal(vm->i915, sz);
+ /* ensure all dma objects have the same reservation class */
+ if (!IS_ERR(obj))
+ obj->base.resv = &vm->resv;
+ return obj;
}
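
(Reviewer sketch, not part of the patch: with the hunk above, any two objects allocated through alloc_pt_dma() for the same vm share one reservation object, so locking a single page-table object serializes all of them. Error handling is elided and a valid vm is assumed.)

    struct drm_i915_gem_object *a = alloc_pt_dma(vm, SZ_4K);
    struct drm_i915_gem_object *b = alloc_pt_dma(vm, SZ_4K);

    /* both resv pointers alias &vm->resv, the vm-wide reservation class */
    GEM_BUG_ON(a->base.resv != b->base.resv);

    i915_gem_object_lock(a, NULL); /* also covers b and every other pd object */
    /* ... */
    i915_gem_object_unlock(a);
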
int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
int err;
+ i915_gem_object_lock(obj, NULL);
+ err = i915_gem_object_pin_pages(obj);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return err;
+
+ i915_gem_object_make_unshrinkable(obj);
+ return 0;
+}
+
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+{
+ int err;
+
err = i915_gem_object_pin_pages(obj);
if (err)
return err;

i915_gem_object_make_unshrinkable(obj);
return 0;
}
mutex_unlock(&vm->mutex);
}
+/* lock the vm into the current ww, if we lock one, we lock all */
+int i915_vm_lock_objects(struct i915_address_space *vm,
+ struct i915_gem_ww_ctx *ww)
+{
+ if (vm->scratch[0]->base.resv == &vm->resv) {
+ return i915_gem_object_lock(vm->scratch[0], ww);
+ } else {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ /* We borrowed the scratch page from ggtt, take the top level object */
+ return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
+ }
+}
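
(Usage sketch, mine rather than from this patch: the standard i915 ww retry idiom around the new helper; the wrapper name lock_all_pd_objects is hypothetical.)

    static int lock_all_pd_objects(struct i915_address_space *vm)
    {
    	struct i915_gem_ww_ctx ww;
    	int err;

    	i915_gem_ww_ctx_init(&ww, false);
    retry:
    	err = i915_vm_lock_objects(vm, &ww);
    	if (err == -EDEADLK) {
    		err = i915_gem_ww_ctx_backoff(&ww);
    		if (!err)
    			goto retry;
    	}
    	if (!err) {
    		/* every pt/pd object sharing &vm->resv is locked here */
    	}
    	i915_gem_ww_ctx_fini(&ww); /* drops all locks taken under ww */
    	return err;
    }
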
+
void i915_address_space_fini(struct i915_address_space *vm)
{
drm_mm_takedown(&vm->mm);
mutex_destroy(&vm->mutex);
}

static void __i915_vm_release(struct work_struct *work)
{
struct i915_address_space *vm =
container_of(work, struct i915_address_space, rcu.work);

vm->cleanup(vm);
i915_address_space_fini(vm);
+ dma_resv_fini(&vm->resv);
kfree(vm);
}
mutex_init(&vm->mutex);
lockdep_set_subclass(&vm->mutex, subclass);
i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+ dma_resv_init(&vm->resv);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
atomic_t open;
struct mutex mutex; /* protects vma and our lists */
+ struct dma_resv resv; /* reservation lock for all pd objects, and buffer pool */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
+int __must_check
+i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
+
static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
struct i915_page_directory *__alloc_pd(int npde);
int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
void free_px(struct i915_address_space *vm,
struct i915_page_table *pt, int lvl);
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
for (pt = stash->pt[n]; pt; pt = pt->stash) {
- err = pin_pt_dma(vm, pt->base);
+ err = pin_pt_dma_locked(vm, pt->base);
if (err)
return err;
}
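
(For context, a sketch mirroring the gen6 hunk at the top of this patch: pin_pt_dma_locked() no longer takes the object lock itself, so callers of i915_vm_pin_pt_stash() must already hold the vm-wide reservation, e.g. via the scratch object; stash is assumed to come from a prior i915_vm_alloc_pt_stash() call.)

    i915_gem_object_lock(vm->scratch[0], NULL); /* locks &vm->resv */
    err = i915_vm_pin_pt_stash(vm, &stash);
    i915_gem_object_unlock(vm->scratch[0]);
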
ppgtt->vm.dma = i915->drm.dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+ dma_resv_init(&ppgtt->vm.resv);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;