drm/panthor: Fix firmware initialization on systems with a page size > 4k
author    Boris Brezillon <boris.brezillon@collabora.com>
          Wed, 30 Oct 2024 15:02:31 +0000 (16:02 +0100)
committer Boris Brezillon <boris.brezillon@collabora.com>
          Wed, 30 Oct 2024 15:30:21 +0000 (16:30 +0100)
The system and GPU MMU page sizes might differ, which becomes a
problem for FW sections that need to be mapped at explicit addresses,
since our PAGE_SIZE alignment might cover a VA range that's
expected to be used for another section.

Make sure we never map more than we need.
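To make the failure mode concrete, here is a minimal sketch (the 64K
CPU page size, 4K GPU page size, and section size are assumed values,
not taken from a specific platform): rounding a section mapping up to
the CPU PAGE_SIZE can claim VA that the firmware layout reserves for
the next section.

    /* Hypothetical numbers: 64K CPU pages, 4K GPU MMU pages. */
    #include <linux/align.h>
    #include <linux/types.h>

    #define CPU_PAGE_SIZE 0x10000UL /* 64K, assumed */
    #define GPU_PAGE_SIZE 0x1000UL  /* 4K, assumed */

    u64 section_size = 0x3000; /* an FW section mapped at an explicit VA */

    u64 cpu_rounded = ALIGN(section_size, CPU_PAGE_SIZE); /* 0x10000 */
    u64 gpu_rounded = ALIGN(section_size, GPU_PAGE_SIZE); /* 0x3000 */

    /* Mapping cpu_rounded bytes spills 0xd000 bytes past the section,
     * overlapping the VA the FW expects for the section starting at
     * section_va + 0x3000; mapping gpu_rounded bytes does not.
     */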

Changes in v3:
- Add R-bs

Changes in v2:
- Plan for per-VM page sizes so the MCU VM and user VM can
  have different page sizes

Fixes: 2718d91816ee ("drm/panthor: Add the FW logical block")
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241030150231.768949-1-boris.brezillon@collabora.com
drivers/gpu/drm/panthor/panthor_fw.c
drivers/gpu/drm/panthor/panthor_gem.c
drivers/gpu/drm/panthor/panthor_mmu.c
drivers/gpu/drm/panthor/panthor_mmu.h

diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index ef232c0c204932c9b3e5f25841131c48dd566a98..4e2d3a02ea06894fc4c05892c5b9a63af5de2e38 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -487,6 +487,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
                                         struct panthor_fw_binary_iter *iter,
                                         u32 ehdr)
 {
+       ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm);
        struct panthor_fw_binary_section_entry_hdr hdr;
        struct panthor_fw_section *section;
        u32 section_size;
@@ -515,8 +516,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
                return -EINVAL;
        }
 
-       if ((hdr.va.start & ~PAGE_MASK) != 0 ||
-           (hdr.va.end & ~PAGE_MASK) != 0) {
+       if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) {
                drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
                        hdr.va.start, hdr.va.end);
                return -EINVAL;
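For reference, the kernel's IS_ALIGNED() (from <linux/align.h>) is a
mask test, so with vm_pgsz == PAGE_SIZE the new check is equivalent to
the old PAGE_MASK test. A minimal sketch of the open-coded form (the
helper name is ours, not the driver's):

    #include <linux/align.h>
    #include <linux/types.h>

    /* IS_ALIGNED(x, a) is ((x & (a - 1)) == 0) for power-of-two a. */
    static bool fw_va_range_aligned(u32 start, u32 end, u32 vm_pgsz)
    {
            return IS_ALIGNED(start, vm_pgsz) && IS_ALIGNED(end, vm_pgsz);
    }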
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 38f560864879c5f2bcd57df32daed0cf88dbd0a9..be97d56bc011d29e6237c8eb2df1912fd67ec423 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
                        to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
                goto out_free_bo;
 
-       ret = panthor_vm_unmap_range(vm, bo->va_node.start,
-                                    panthor_kernel_bo_size(bo));
+       ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
        if (ret)
                goto out_free_bo;
 
@@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
        }
 
        bo = to_panthor_bo(&obj->base);
-       size = obj->base.size;
        kbo->obj = &obj->base;
        bo->flags = bo_flags;
 
+       /* The system and GPU MMU page size might differ, which becomes a
+        * problem for FW sections that need to be mapped at explicit address
+        * since our PAGE_SIZE alignment might cover a VA range that's
+        * expected to be used for another section.
+        * Make sure we never map more than we need.
+        */
+       size = ALIGN(size, panthor_vm_page_size(vm));
        ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
        if (ret)
                goto err_put_obj;
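Two things change in panthor_gem.c: the VA reservation is rounded to
the GPU VM page size instead of inheriting the GEM object's
CPU-page-rounded size, and the destroy path above unmaps
bo->va_node.size so teardown always covers exactly what was reserved.
A hypothetical sketch of the resulting sizes (the 64K/4K split is an
assumption):

    /* Assumed: requested size 0x3000, 64K PAGE_SIZE, 4K GPU VM pages. */
    size_t req      = 0x3000;
    size_t gem_size = ALIGN(req, 0x10000); /* 0x10000: CPU-page rounded */
    size_t va_size  = ALIGN(req, 0x1000);  /* 0x3000: mapped and unmapped */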
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 3cd2bce59edcd70aa8dc8e059ed043e28a781525..5d5e25b1be95cb618460b18f41710855a475f51e 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm)
        mutex_unlock(&ptdev->mmu->as.slots_lock);
 }
 
+u32 panthor_vm_page_size(struct panthor_vm *vm)
+{
+       const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
+       u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
+
+       return 1u << pg_shift;
+}
+
 static void panthor_vm_stop(struct panthor_vm *vm)
 {
        drm_sched_stop(&vm->sched, NULL);
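The new helper reads the io-pgtable configuration: pgsize_bitmap has
one bit set per page size the page-table format supports, and ffs()
returns the 1-based index of the lowest set bit, so the helper yields
the smallest supported GPU page size. A worked example with an assumed
bitmap:

    #include <linux/sizes.h>

    /* Assumed bitmap advertising 4K, 2M and 1G pages. */
    unsigned long pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G; /* 0x40201000 */

    /* ffs(0x40201000) == 13, so pg_shift == 12 and the returned
     * page size is 1u << 12 == 4096 (4K).
     */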
@@ -1025,12 +1033,13 @@ int
 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
                    struct drm_mm_node *va_node)
 {
+       ssize_t vm_pgsz = panthor_vm_page_size(vm);
        int ret;
 
-       if (!size || (size & ~PAGE_MASK))
+       if (!size || !IS_ALIGNED(size, vm_pgsz))
                return -EINVAL;
 
-       if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
+       if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
                return -EINVAL;
 
        mutex_lock(&vm->mm_lock);
@@ -2366,11 +2375,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
                               const struct drm_panthor_vm_bind_op *op,
                               struct panthor_vm_op_ctx *op_ctx)
 {
+       ssize_t vm_pgsz = panthor_vm_page_size(vm);
        struct drm_gem_object *gem;
        int ret;
 
        /* Aligned on page size. */
-       if ((op->va | op->size) & ~PAGE_MASK)
+       if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
                return -EINVAL;
 
        switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
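Folding op->va and op->size into one IS_ALIGNED() works because OR
preserves low bits: for a power-of-two vm_pgsz, (op->va | op->size)
has a bit set below the page shift iff at least one operand does. A
minimal sketch of the identity (helper name is ours):

    /* (a | b) & (p - 1) == (a & (p - 1)) | (b & (p - 1)), so one test
     * is equivalent to checking a and b separately.
     */
    static bool both_aligned(u64 a, u64 b, u64 p)
    {
            return IS_ALIGNED(a | b, p);
    }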
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index 6788771071e35557ccde471301bf4aa2ef32ec8f..8d21e83d8aba1e203df11883122a9e9356787803 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
 
 int panthor_vm_active(struct panthor_vm *vm);
 void panthor_vm_idle(struct panthor_vm *vm);
+u32 panthor_vm_page_size(struct panthor_vm *vm);
 int panthor_vm_as(struct panthor_vm *vm);
 int panthor_vm_flush_all(struct panthor_vm *vm);