drm/amdgpu: add gfx v12 pte/pde support to gmc common helper

Gfx v12 uses new bit positions for the PRT, PDE-as-PTE and PDE block
fragment size fields. Introduce AMDGPU_PTE_PRT_FLAG(), AMDGPU_PDE_PTE_FLAG()
and AMDGPU_PDE_BFS_FLAG() selector macros that pick the gfx v12 encoding on
GC 12.0.0 and later (and the legacy encoding otherwise), and switch the
common GMC and VM helper code over to them.
v2: squash in fixes (Alex)
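
For reference, a minimal standalone sketch of the selection pattern the
new *_FLAG() macros implement: pick the gfx v12 bit encoding when the GC
IP version is 12.x or newer, fall back to the legacy encoding otherwise.
This is illustrative userspace code, not part of the driver; the helper
name, the gc_major parameter and the legacy bit value are made up for the
example.

  #include <stdint.h>
  #include <stdio.h>

  #define PTE_PRT_LEGACY  (1ULL << 51)  /* stand-in for the legacy AMDGPU_PTE_PRT bit */
  #define PTE_PRT_GFX12   (1ULL << 56)  /* AMDGPU_PTE_PRT_GFX12 from this patch */

  /* Mirrors AMDGPU_PTE_PRT_FLAG(adev): select the bit by GC major version. */
  static uint64_t pte_prt_flag(unsigned int gc_major)
  {
          return gc_major >= 12 ? PTE_PRT_GFX12 : PTE_PRT_LEGACY;
  }

  int main(void)
  {
          printf("pre-gfx12 PRT bit: 0x%llx\n", (unsigned long long)pte_prt_flag(11));
          printf("gfx12 PRT bit:     0x%llx\n", (unsigned long long)pte_prt_flag(12));
          return 0;
  }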
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Likun Gao <Likun.Gao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
        if (flags & AMDGPU_VM_PAGE_WRITEABLE)
                pte_flag |= AMDGPU_PTE_WRITEABLE;
        if (flags & AMDGPU_VM_PAGE_PRT)
-               pte_flag |= AMDGPU_PTE_PRT;
+               pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
        if (flags & AMDGPU_VM_PAGE_NOALLOC)
                pte_flag |= AMDGPU_PTE_NOALLOC;
 
 
        flags |= AMDGPU_PTE_WRITEABLE;
        flags |= AMDGPU_PTE_SNOOPED;
        flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
-       flags |= AMDGPU_PDE_PTE;
+       flags |= AMDGPU_PDE_PTE_FLAG(adev);
 
        /* The first n PDE0 entries are used as PTE,
         * pointing to vram

         * pointing to a 4K system page
         */
        flags = AMDGPU_PTE_VALID;
-       flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
+       flags |= AMDGPU_PTE_SNOOPED | AMDGPU_PDE_BFS_FLAG(adev, 0);
        /* Requires gart_ptb_gpu_pa to be 4K aligned */
        amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
        drm_dev_exit(idx);
 
                                params.pages_addr = NULL;
                        }
 
-               } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+               } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
                        addr = vram_base + cursor.start;
                } else {
                        addr = 0;
                                   struct amdgpu_bo_va_mapping *mapping,
                                   struct dma_fence *fence)
 {
-       if (mapping->flags & AMDGPU_PTE_PRT)
+       if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
                amdgpu_vm_add_prt_cb(adev, fence);
        kfree(mapping);
 }
        list_add(&mapping->list, &bo_va->invalids);
        amdgpu_vm_it_insert(mapping, &vm->va);
 
-       if (mapping->flags & AMDGPU_PTE_PRT)
+       if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
                amdgpu_vm_prt_get(adev);
 
        if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
                struct amdgpu_bo *bo = before->bo_va->base.bo;
 
                amdgpu_vm_it_insert(before, &vm->va);
-               if (before->flags & AMDGPU_PTE_PRT)
+               if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
                        amdgpu_vm_prt_get(adev);
 
                if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
                struct amdgpu_bo *bo = after->bo_va->base.bo;
 
                amdgpu_vm_it_insert(after, &vm->va);
-               if (after->flags & AMDGPU_PTE_PRT)
+               if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
                        amdgpu_vm_prt_get(adev);
 
                if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
        dma_fence_put(vm->last_tlb_flush);
 
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
-               if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
+               if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
                        amdgpu_vm_prt_fini(adev, vm);
                        prt_fini_needed = false;
                }
 
 
 /* gfx12 */
 #define AMDGPU_PTE_PRT_GFX12           (1ULL << 56)
+#define AMDGPU_PTE_PRT_FLAG(adev)      \
+       ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PTE_PRT_GFX12 : AMDGPU_PTE_PRT)
 
 #define AMDGPU_PTE_MTYPE_GFX12(a)      ((uint64_t)(a) << 54)
 #define AMDGPU_PTE_MTYPE_GFX12_MASK    AMDGPU_PTE_MTYPE_GFX12(3ULL)
 
 /* PDE Block Fragment Size for gfx v12 */
 #define AMDGPU_PDE_BFS_GFX12(a)                ((uint64_t)((a) & 0x1fULL) << 58)
+#define AMDGPU_PDE_BFS_FLAG(adev, a)   \
+       ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_BFS_GFX12(a) : AMDGPU_PDE_BFS(a))
 /* PDE is handled as PTE for gfx v12 */
 #define AMDGPU_PDE_PTE_GFX12           (1ULL << 63)
+#define AMDGPU_PDE_PTE_FLAG(adev)      \
+       ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_PTE_GFX12 : AMDGPU_PDE_PTE)
 
 /* How to program VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER     0
 
        if (adev->asic_type >= CHIP_VEGA10) {
                if (level != AMDGPU_VM_PTB) {
                        /* Handle leaf PDEs as PTEs */
-                       flags |= AMDGPU_PDE_PTE;
+                       flags |= AMDGPU_PDE_PTE_FLAG(adev);
                        amdgpu_gmc_get_vm_pde(adev, level,
                                              &value, &flags);
                } else {
        struct amdgpu_device *adev = params->adev;
 
        if (level != AMDGPU_VM_PTB) {
-               flags |= AMDGPU_PDE_PTE;
+               flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
                amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);
 
        } else if (adev->asic_type >= CHIP_VEGA10 &&
                   !(flags & AMDGPU_PTE_VALID) &&
-                  !(flags & AMDGPU_PTE_PRT)) {
+                  !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {
 
                /* Workaround for fault priority problem on GMC9 */
                flags |= AMDGPU_PTE_EXECUTABLE;