Fix the vm->mutex and ww_mutex conflicts.
vm->mutex is always taken first, then ww_mutex.
V2: remove unnecessary check for the pt bo.
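
The resulting ordering, as a rough sketch (resv here stands for a BO's
reservation object, as used in the hunks below):

    mutex_lock(&vm->mutex);           /* per-VM lock, always taken first */
    ww_mutex_lock(&resv->lock, NULL); /* reservation lock, always second */
    /* ... touch VM state and BOs ... */
    ww_mutex_unlock(&resv->lock);
    mutex_unlock(&vm->mutex);
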
Change-Id: Iea56e183752c02831126d06d2f5b7a474a6e4743
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
                }
        }
 
-       mutex_lock(&vm->mutex);
        r = amdgpu_bo_vm_update_pte(parser, vm);
        if (r) {
                goto out;
                                       parser->filp);
 
 out:
-       mutex_unlock(&vm->mutex);
        return r;
 }
 
 {
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_cs *cs = data;
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_cs_parser *parser;
        bool reserved_buffers = false;
        int i, r;
                r = amdgpu_cs_handle_lockup(adev, r);
                return r;
        }
-
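+       /* take vm->mutex before any BO reservation; held until submission */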
+       mutex_lock(&vm->mutex);
        r = amdgpu_cs_parser_relocs(parser);
        if (r == -ENOMEM)
                DRM_ERROR("Not enough memory for command submission!\n");
 
                mutex_unlock(&job->job_lock);
                amdgpu_cs_parser_fini_late(parser);
+               mutex_unlock(&vm->mutex);
                return 0;
        }
 
        cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
        amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+       mutex_unlock(&vm->mutex);
        r = amdgpu_cs_handle_lockup(adev, r);
        return r;
 }
 
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;
-
+       mutex_lock(&vm->mutex);
        r = amdgpu_bo_reserve(rbo, false);
        if (r) {
+               mutex_unlock(&vm->mutex);
                return r;
        }
 
                ++bo_va->ref_count;
        }
        amdgpu_bo_unreserve(rbo);
-
+       mutex_unlock(&vm->mutex);
        return 0;
 }
 
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va *bo_va;
        int r;
-
+       mutex_lock(&vm->mutex);
        r = amdgpu_bo_reserve(rbo, true);
        if (r) {
+               mutex_unlock(&vm->mutex);
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
                }
        }
        amdgpu_bo_unreserve(rbo);
+       mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
                        goto error_unreserve;
        }
 
-       mutex_lock(&bo_va->vm->mutex);
        r = amdgpu_vm_clear_freed(adev, bo_va->vm);
        if (r)
-               goto error_unlock;
-
+               goto error_unreserve;
 
        if (operation == AMDGPU_VA_OP_MAP)
                r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
-error_unlock:
-       mutex_unlock(&bo_va->vm->mutex);
-
 error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);
 
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
-
+       mutex_lock(&fpriv->vm.mutex);
        rbo = gem_to_amdgpu_bo(gobj);
        r = amdgpu_bo_reserve(rbo, false);
        if (r) {
+               mutex_unlock(&fpriv->vm.mutex);
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                amdgpu_bo_unreserve(rbo);
+               mutex_unlock(&fpriv->vm.mutex);
                return -ENOENT;
        }
 
 
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
                amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-
+       mutex_unlock(&fpriv->vm.mutex);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
 }
 
        struct amdgpu_bo_list_entry *list;
        unsigned i, idx;
 
-       mutex_lock(&vm->mutex);
        list = drm_malloc_ab(vm->max_pde_used + 2,
                             sizeof(struct amdgpu_bo_list_entry));
        if (!list) {
-               mutex_unlock(&vm->mutex);
                return NULL;
        }
 
                list[idx].tv.shared = true;
                list_add(&list[idx++].tv.head, head);
        }
-       mutex_unlock(&vm->mutex);
 
        return list;
 }
        INIT_LIST_HEAD(&bo_va->invalids);
        INIT_LIST_HEAD(&bo_va->vm_status);
 
-       mutex_lock(&vm->mutex);
        list_add_tail(&bo_va->bo_list, &bo->va);
-       mutex_unlock(&vm->mutex);
 
        return bo_va;
 }
                return -EINVAL;
        }
 
-       mutex_lock(&vm->mutex);
-
        saddr /= AMDGPU_GPU_PAGE_SIZE;
        eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
                        tmp->it.start, tmp->it.last + 1);
                amdgpu_bo_unreserve(bo_va->bo);
                r = -EINVAL;
-               goto error_unlock;
+               goto error;
        }
 
        mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
        if (!mapping) {
                amdgpu_bo_unreserve(bo_va->bo);
                r = -ENOMEM;
-               goto error_unlock;
+               goto error;
        }
 
        INIT_LIST_HEAD(&mapping->list);
                if (vm->page_tables[pt_idx].bo)
                        continue;
 
-               /* drop mutex to allocate and clear page table */
-               mutex_unlock(&vm->mutex);
-
                ww_mutex_lock(&resv->lock, NULL);
                r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                     AMDGPU_GPU_PAGE_SIZE, true,
                        goto error_free;
                }
 
-               /* aquire mutex again */
-               mutex_lock(&vm->mutex);
-               if (vm->page_tables[pt_idx].bo) {
-                       /* someone else allocated the pt in the meantime */
-                       mutex_unlock(&vm->mutex);
-                       amdgpu_bo_unref(&pt);
-                       mutex_lock(&vm->mutex);
-                       continue;
-               }
-
                vm->page_tables[pt_idx].addr = 0;
                vm->page_tables[pt_idx].bo = pt;
        }
 
-       mutex_unlock(&vm->mutex);
        return 0;
 
 error_free:
-       mutex_lock(&vm->mutex);
        list_del(&mapping->list);
        interval_tree_remove(&mapping->it, &vm->va);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
        kfree(mapping);
 
-error_unlock:
-       mutex_unlock(&vm->mutex);
+error:
        return r;
 }
 
                }
        }
 
-       mutex_lock(&vm->mutex);
        list_del(&mapping->list);
        interval_tree_remove(&mapping->it, &vm->va);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
                list_add(&mapping->list, &vm->freed);
        else
                kfree(mapping);
-       mutex_unlock(&vm->mutex);
        amdgpu_bo_unreserve(bo_va->bo);
 
        return 0;
 
        list_del(&bo_va->bo_list);
 
-       mutex_lock(&vm->mutex);
-
        spin_lock(&vm->status_lock);
        list_del(&bo_va->vm_status);
        spin_unlock(&vm->status_lock);
 
        fence_put(bo_va->last_pt_update);
        kfree(bo_va);
-
-       mutex_unlock(&vm->mutex);
 }
 
 /**