From 861b27584d9055e4e1763341474ce8ce9dc6a55d Mon Sep 17 00:00:00 2001
From: Oak Zeng
Date: Wed, 18 Dec 2024 11:48:32 -0500
Subject: [PATCH 01/16] drm/xe: Print vm flags in xe_vm trace print

Print vm flags in xe_vm trace print. This is helpful to diagnose the
VM mode of operation.

Signed-off-by: Oak Zeng
Reviewed-by: Matthew Brost
Reviewed-by: Himal Prasad Ghimiray
Link: https://patchwork.freedesktop.org/patch/msgid/20241218164833.2364049-3-oak.zeng@intel.com
Signed-off-by: Himal Prasad Ghimiray
---
 drivers/gpu/drm/xe/xe_trace_bo.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index 3d7e6c80b0aa..082fadb5f99b 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -190,16 +190,19 @@ DECLARE_EVENT_CLASS(xe_vm,
 		     __string(dev, __dev_name_vm(vm))
 		     __field(struct xe_vm *, vm)
 		     __field(u32, asid)
+		     __field(u32, flags)
 		     ),

 	    TP_fast_assign(
 			   __assign_str(dev);
 			   __entry->vm = vm;
 			   __entry->asid = vm->usm.asid;
+			   __entry->flags = vm->flags;
 			   ),

-	    TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
-		      __entry->vm, __entry->asid)
+	    TP_printk("dev=%s, vm=%p, asid=0x%05x, vm flags=0x%05x",
+		      __get_str(dev), __entry->vm, __entry->asid,
+		      __entry->flags)
 );

 DEFINE_EVENT(xe_vm, xe_vm_kill,
-- 2.51.0

From 22b1a53f282b1ad6692c6238a7446275854f0afb Mon Sep 17 00:00:00 2001
From: Oak Zeng
Date: Wed, 18 Dec 2024 11:48:33 -0500
Subject: [PATCH 02/16] drm/xe: Print vm parameter in xe_vma trace

Print the vm that the vma belongs to in the vma trace. This is useful
to correlate VMA operations with the VM.

Signed-off-by: Oak Zeng
Reviewed-by: Matthew Brost
Reviewed-by: Himal Prasad Ghimiray
Link: https://patchwork.freedesktop.org/patch/msgid/20241218164833.2364049-4-oak.zeng@intel.com
Signed-off-by: Himal Prasad Ghimiray
---
 drivers/gpu/drm/xe/xe_trace_bo.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index 082fadb5f99b..ccebd5f0878e 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -92,6 +92,7 @@ DECLARE_EVENT_CLASS(xe_vma,
 	    TP_STRUCT__entry(
 			     __string(dev, __dev_name_vma(vma))
 			     __field(struct xe_vma *, vma)
+			     __field(struct xe_vm *, vm)
 			     __field(u32, asid)
 			     __field(u64, start)
 			     __field(u64, end)
@@ -101,14 +102,16 @@ DECLARE_EVENT_CLASS(xe_vma,
 	    TP_fast_assign(
 			   __assign_str(dev);
 			   __entry->vma = vma;
+			   __entry->vm = xe_vma_vm(vma);
 			   __entry->asid = xe_vma_vm(vma)->usm.asid;
 			   __entry->start = xe_vma_start(vma);
 			   __entry->end = xe_vma_end(vma) - 1;
 			   __entry->ptr = xe_vma_userptr(vma);
 			   ),

-	    TP_printk("dev=%s, vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
-		      __get_str(dev), __entry->vma, __entry->asid, __entry->start,
+	    TP_printk("dev=%s, vma=%p, vm=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx",
+		      __get_str(dev), __entry->vma, __entry->vm,
+		      __entry->asid, __entry->start,
 		      __entry->end, __entry->ptr)
 )
-- 2.51.0

From b824709ee1d0dbfed4b1757279c97fc0edad1e1a Mon Sep 17 00:00:00 2001
From: Oak Zeng
Date: Mon, 13 Jan 2025 16:23:24 -0500
Subject: [PATCH 03/16] drm/xe: Fix a typo in xe_vm_doc.h

s/vm->ttm.base.resv->lock/vm->gpuvm.r_obj->resv->lock

Signed-off-by: Oak Zeng
Reviewed-by: Maciej Patelczyk
Link: https://patchwork.freedesktop.org/patch/msgid/20250113212324.3264218-1-oak.zeng@intel.com
Signed-off-by: Himal Prasad Ghimiray
---
 drivers/gpu/drm/xe/xe_vm_doc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
index 078786958403..1030ce214032 100644
--- a/drivers/gpu/drm/xe/xe_vm_doc.h
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -431,7 +431,7 @@
  * bind path also acquires this lock in write while the exec / compute mode
  * rebind worker acquires this lock in read mode.
  *
- * VM dma-resv lock (vm->ttm.base.resv->lock) - WW lock. Protects VM dma-resv
+ * VM dma-resv lock (vm->gpuvm.r_obj->resv->lock) - WW lock. Protects VM dma-resv
  * slots which is shared with any private BO in the VM. Expected to be acquired
  * during VM binds, execs, and compute mode rebind worker. This lock is also
  * held when private BOs are being evicted.
-- 2.51.0

From 474c4dd29f666145dee7b5dce56d024a26e9550c Mon Sep 17 00:00:00 2001
From: Francois Dugast
Date: Thu, 16 Jan 2025 13:45:32 +0100
Subject: [PATCH 04/16] drm/xe: Add missing SPDX license identifiers

Ensure all Xe driver files have a proper SPDX license identifier by
adding it in the files where it was missing.

Link: https://patchwork.freedesktop.org/patch/msgid/20250116124532.1480351-1-francois.dugast@intel.com
Signed-off-by: Francois Dugast
Reviewed-by: Lucas De Marchi
---
 drivers/gpu/drm/xe/Kconfig.profile | 1 +
 drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/xe/Kconfig.profile b/drivers/gpu/drm/xe/Kconfig.profile
index ba17a25e8db3..7530df998148 100644
--- a/drivers/gpu/drm/xe/Kconfig.profile
+++ b/drivers/gpu/drm/xe/Kconfig.profile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 config DRM_XE_JOB_TIMEOUT_MAX
 	int "Default max job timeout (ms)"
 	default 10000 # milliseconds
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index 9c4cf050059a..41d39d67817a 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -1,3 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
 #ifndef _I915_GEM_STOLEN_H_
 #define _I915_GEM_STOLEN_H_

-- 2.51.0

From bbd8429264baf8bc3c40cefda048560ae0eb7890 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Thu, 14 Nov 2024 18:59:54 +0100
Subject: [PATCH 05/16] drm/xe: Always setup GT MMIO adjustment data

While we believed that xe_gt_mmio_init() would be called just once per
GT, this might not be the case due to some tweaks that need to be
performed by the VF driver during early probe.

To avoid leaving any stale data in case of a re-run, reset the GT MMIO
adjustment data for the non-media GT case.

Signed-off-by: Michal Wajdeczko
Cc: Matt Roper
Reviewed-by: Lucas De Marchi
Link: https://patchwork.freedesktop.org/patch/msgid/20241114175955.2299-2-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_gt.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 26e64530ada2..b5c313a3e946 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -643,6 +643,9 @@ void xe_gt_mmio_init(struct xe_gt *gt)
 	if (gt->info.type == XE_GT_TYPE_MEDIA) {
 		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
 		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
+	} else {
+		gt->mmio.adj_offset = 0;
+		gt->mmio.adj_limit = 0;
 	}

 	if (IS_SRIOV_VF(gt_to_xe(gt)))
-- 2.51.0

From 13265fe7426ec9ba5aa86baab913417ca361e8a4 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Tue, 14 Jan 2025 22:13:47 +0100
Subject: [PATCH 06/16] drm/xe/vf: Perform early GT MMIO initialization to read GMDID
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

VFs need to communicate with the GuC to obtain the GMDID value, and the
existing GuC functions used for that assume that the GT has its MMIO
members already set up. However, due to recent refactoring, gt->mmio is
initialized later, and any attempt by the VF to use xe_mmio_read|write()
from GuC functions will lead to an NPD crash due to an unset MMIO
register address:

[] xe 0000:00:02.1: [drm] Running in SR-IOV VF mode
[] xe 0000:00:02.1: [drm] GT0: sending H2G MMIO 0x5507
[] BUG: unable to handle page fault for address: 0000000000190240

Since we are already tweaking the id and type of the primary GT to mimic
a Media GT before initializing the GuC communication, we can also call
xe_gt_mmio_init() to perform early setup of gt->mmio, which will make
those GuC functions work again.

Signed-off-by: Michal Wajdeczko
Cc: Matt Roper
Cc: Piotr Piórkowski
Reviewed-by: Piotr Piórkowski
Link: https://patchwork.freedesktop.org/patch/msgid/20250114211347.1083-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_pci.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 48d1c81d441e..bf35a18bf5e7 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -501,6 +501,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
 		gt->info.type = XE_GT_TYPE_MAIN;
 	}

+	xe_gt_mmio_init(gt);
 	xe_guc_comm_init_early(&gt->uc.guc);

 	/* Don't bother with GMDID if failed to negotiate the GuC ABI */
-- 2.51.0

From 9cd3f4efc870463f17f6c29114c61fb6bfcaa291 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:41:54 +0100
Subject: [PATCH 07/16] drm/xe/sa: Always call drm_suballoc_manager_fini()

After a successful call to drm_suballoc_manager_init() we should make
sure to call drm_suballoc_manager_fini(), as it may include some cleanup
code even if we didn't start using the suballocator for real.

As we can abort init() early due to a kvzalloc() failure, we should
either explicitly call drm_suballoc_manager_fini() or, even better,
postpone drm_suballoc_manager_init() until we finish all other
preparation steps, so we can rely on fini() to do the cleanup.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-2-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index e055bed7ae55..4e7aba445ebc 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -57,8 +57,6 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 	}
 	sa_manager->bo = bo;
 	sa_manager->is_iomem = bo->vmap.is_iomem;
-
-	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
 	sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);

 	if (bo->vmap.is_iomem) {
@@ -72,6 +70,7 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
 	}

+	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
 	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
 				       sa_manager);
 	if (ret)
-- 2.51.0

From d29cddd49bed2c880e7c17724bcf3604e865c23a Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:41:55 +0100
Subject: [PATCH 08/16] drm/xe/sa: Drop redundant NULL assignments

The sa_manager is drmm_kzalloc'ed, so all members are already zero.
And in case of a kvzalloc() failure we are not returning a pointer to
the sa_manager at all, so there is no point in resetting the .bo member.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-3-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 4e7aba445ebc..eb314ca75355 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -44,8 +44,6 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 	if (!sa_manager)
 		return ERR_PTR(-ENOMEM);

-	sa_manager->bo = NULL;
-
 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
 					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 					  XE_BO_FLAG_GGTT |
@@ -61,10 +59,8 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32

 	if (bo->vmap.is_iomem) {
 		sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
-		if (!sa_manager->cpu_ptr) {
-			sa_manager->bo = NULL;
+		if (!sa_manager->cpu_ptr)
 			return ERR_PTR(-ENOMEM);
-		}
 	} else {
 		sa_manager->cpu_ptr = bo->vmap.vaddr;
 		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
-- 2.51.0

From 97ee0e351f6ebbcb2a2dccdff726f75f728fede8 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:41:56 +0100
Subject: [PATCH 09/16] drm/xe/sa: Improve error message on init failure

Instead of a raw errno value, we can print a friendly error code and
also print the size of the buffer object that we failed to prepare.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-4-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index eb314ca75355..0b87599759d7 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -49,8 +49,8 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 					  XE_BO_FLAG_GGTT |
 					  XE_BO_FLAG_GGTT_INVALIDATE);
 	if (IS_ERR(bo)) {
-		drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
-			PTR_ERR(bo));
+		drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
+			size / SZ_1K, bo);
 		return ERR_CAST(bo);
 	}
 	sa_manager->bo = bo;
-- 2.51.0

From 7e937cdf18164ea276ce0f4bbc5755e0031280e0 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:41:57 +0100
Subject: [PATCH 10/16] drm/xe/sa: Tidy up coding style in init()

There is no need to use tile_to_xe() since we already have the xe.
And we should keep all variable declarations together; there is no
need for a separate sa_manager declaration.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-5-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 0b87599759d7..0e78ae46667e 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -34,13 +34,12 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
 struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
 {
 	struct xe_device *xe = tile_to_xe(tile);
+	struct xe_sa_manager *sa_manager;
 	u32 managed_size = size - SZ_4K;
 	struct xe_bo *bo;
 	int ret;

-	struct xe_sa_manager *sa_manager = drmm_kzalloc(&tile_to_xe(tile)->drm,
-							sizeof(*sa_manager),
-							GFP_KERNEL);
+	sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
 	if (!sa_manager)
 		return ERR_PTR(-ENOMEM);

-- 2.51.0

From 0e1871f61e71d7611196b04d1b133f18fef666dd Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:41:58 +0100
Subject: [PATCH 11/16] drm/xe/sa: Allow making suballocations using custom gfp flags

The current xe_sa_manager implementation uses a hardcoded GFP_KERNEL
flag when creating suballocations, but in an upcoming patch we want to
reuse the xe_sa_manager in places where GFP_KERNEL is not allowed.
Add another variant of the xe_sa_bo_new() function that accepts
arbitrary gfp flags.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-6-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.c | 15 ++++++++++++---
 drivers/gpu/drm/xe/xe_sa.h | 19 +++++++++++++++++--
 2 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 0e78ae46667e..5f89e32b0640 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -74,8 +74,17 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
 	return sa_manager;
 }

-struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
-				  unsigned int size)
+/**
+ * __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
+ * @sa_manager: the &xe_sa_manager
+ * @size: number of bytes we want to suballocate
+ * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL.
+ *
+ * Try to make a suballocation of size @size.
+ *
+ * Return: a &drm_suballoc, or an ERR_PTR.
+ */
+struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp)
 {
 	/*
 	 * BB to large, return -ENOBUFS indicating user should split
@@ -84,7 +93,7 @@ struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
 	if (size > sa_manager->base.size)
 		return ERR_PTR(-ENOBUFS);

-	return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
+	return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
 }

 void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index 4e96483057d7..a0341eafbe77 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -5,6 +5,7 @@
 #ifndef _XE_SA_H_
 #define _XE_SA_H_

+#include <linux/types.h>
 #include "xe_sa_types.h"

 struct dma_fence;
@@ -13,8 +14,22 @@ struct xe_tile;

 struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align);

-struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
-				  u32 size);
+struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp);
+
+/**
+ * xe_sa_bo_new() - Make a suballocation.
+ * @sa_manager: the &xe_sa_manager
+ * @size: number of bytes we want to suballocate
+ *
+ * Try to make a suballocation of size @size.
+ *
+ * Return: a &drm_suballoc, or an ERR_PTR.
+ */
+static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size)
+{
+	return __xe_sa_bo_new(sa_manager, size, GFP_KERNEL);
+}
+
 void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo);
 void xe_sa_bo_free(struct drm_suballoc *sa_bo,
 		   struct dma_fence *fence);
-- 2.51.0

From ae8b507fb8bbea2aa30783184d5728b14ce40c8f Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:41:59 +0100
Subject: [PATCH 12/16] drm/xe/sa: Allow creating suballocator with custom guard size

The current xe_sa_manager implementation uses a hardcoded 4K guard that
is excluded from suballocations, but in an upcoming patch we want to
reuse the xe_sa_manager where such a 4K guard is not needed.
Add another variant of the xe_sa_bo_manager_init() function that
accepts an arbitrary guard size.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-7-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.c | 18 ++++++++++++++++--
 drivers/gpu/drm/xe/xe_sa.h | 9 +++++++--
 2 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 5f89e32b0640..f8fe61e25518 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -31,14 +31,28 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
 	sa_manager->bo = NULL;
 }

-struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+/**
+ * __xe_sa_bo_manager_init() - Create and initialize the suballocator
+ * @tile: the &xe_tile where allocate
+ * @size: number of bytes to allocate
+ * @guard: number of bytes to exclude from suballocations
+ * @align: alignment for each suballocated chunk
+ *
+ * Prepares the suballocation manager for suballocations.
+ *
+ * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
+ */
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
 {
 	struct xe_device *xe = tile_to_xe(tile);
 	struct xe_sa_manager *sa_manager;
-	u32 managed_size = size - SZ_4K;
+	u32 managed_size;
 	struct xe_bo *bo;
 	int ret;

+	xe_tile_assert(tile, size > guard);
+	managed_size = size - guard;
+
 	sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
 	if (!sa_manager)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index a0341eafbe77..de0330eb36d4 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -5,6 +5,7 @@
 #ifndef _XE_SA_H_
 #define _XE_SA_H_

+#include <linux/sizes.h>
 #include <linux/types.h>
 #include "xe_sa_types.h"

@@ -12,10 +13,14 @@ struct dma_fence;
 struct xe_bo;
 struct xe_tile;

-struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align);
-
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align);
 struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp);

+static inline struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+{
+	return __xe_sa_bo_manager_init(tile, size, SZ_4K, align);
+}
+
 /**
  * xe_sa_bo_new() - Make a suballocation.
  * @sa_manager: the &xe_sa_manager
-- 2.51.0

From c49ca671818a325f2221f0bda8af96e339272a5e Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:42:00 +0100
Subject: [PATCH 13/16] drm/xe/sa: Minor header cleanups

Drop the unused struct xe_bo forward declaration and, while at it, fix
the unnecessary line split in the xe_sa_bo_free() declaration.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-8-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index de0330eb36d4..1170ee5a81a8 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -10,7 +10,6 @@
 #include "xe_sa_types.h"

 struct dma_fence;
-struct xe_bo;
 struct xe_tile;

 struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align);
@@ -36,8 +35,7 @@ static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager
 }

 void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo);
-void xe_sa_bo_free(struct drm_suballoc *sa_bo,
-		   struct dma_fence *fence);
+void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence);

 static inline struct xe_sa_manager *
 to_xe_sa_manager(struct drm_suballoc_manager *mng)
-- 2.51.0

From 696bfdf273eab9ce3dd2ff51d26ca30f7924a4bb Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:42:01 +0100
Subject: [PATCH 14/16] drm/xe/guc: Introduce the GuC Buffer Cache

The purpose of the GuC Buffer Cache is to maintain a set of reusable
buffers that could be used while sending some of the CTB H2G actions
that require a separate buffer with indirect data. Currently only a
few PF actions need this, so initialize it only when running as a PF.
Signed-off-by: Michal Wajdeczko Cc: Matthew Brost Cc: Rodrigo Vivi Acked-by: Rodrigo Vivi Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-9-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/Makefile | 1 + drivers/gpu/drm/xe/xe_guc.c | 5 + drivers/gpu/drm/xe/xe_guc_buf.c | 172 ++++++++++++++++++++++++++ drivers/gpu/drm/xe/xe_guc_buf.h | 47 +++++++ drivers/gpu/drm/xe/xe_guc_buf_types.h | 28 +++++ drivers/gpu/drm/xe/xe_guc_types.h | 3 + 6 files changed, 256 insertions(+) create mode 100644 drivers/gpu/drm/xe/xe_guc_buf.c create mode 100644 drivers/gpu/drm/xe/xe_guc_buf.h create mode 100644 drivers/gpu/drm/xe/xe_guc_buf_types.h diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 7730e0596299..2e50a697f549 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -56,6 +56,7 @@ xe-y += xe_bb.o \ xe_gt_topology.o \ xe_guc.o \ xe_guc_ads.o \ + xe_guc_buf.o \ xe_guc_capture.o \ xe_guc_ct.o \ xe_guc_db_mgr.o \ diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 408365dfe4ee..1619c0a52db9 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -23,6 +23,7 @@ #include "xe_gt_sriov_vf.h" #include "xe_gt_throttle.h" #include "xe_guc_ads.h" +#include "xe_guc_buf.h" #include "xe_guc_capture.h" #include "xe_guc_ct.h" #include "xe_guc_db_mgr.h" @@ -743,6 +744,10 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc) if (ret) return ret; + ret = xe_guc_buf_cache_init(&guc->buf); + if (ret) + return ret; + return xe_guc_ads_init_post_hwconfig(&guc->ads); } diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c new file mode 100644 index 000000000000..261c7c74417f --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_buf.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2024 Intel Corporation + */ + +#include +#include + +#include "xe_assert.h" +#include "xe_bo.h" +#include "xe_gt_printk.h" +#include "xe_guc.h" +#include "xe_guc_buf.h" +#include "xe_sa.h" + +static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache) +{ + return container_of(cache, struct xe_guc, buf); +} + +static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache) +{ + return guc_to_gt(cache_to_guc(cache)); +} + +/** + * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache. + * @cache: the &xe_guc_buf_cache to initialize + * + * The Buffer Cache allows to obtain a reusable buffer that can be used to pass + * indirect H2G data to GuC without a need to create a ad-hoc allocation. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache) +{ + struct xe_gt *gt = cache_to_gt(cache); + struct xe_sa_manager *sam; + + /* XXX: currently it's useful only for the PF actions */ + if (!IS_SRIOV_PF(gt_to_xe(gt))) + return 0; + + sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32)); + if (IS_ERR(sam)) + return PTR_ERR(sam); + cache->sam = sam; + + xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n", + xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo), + __builtin_return_address(0)); + return 0; +} + +/** + * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports. + * @cache: the &xe_guc_buf_cache to query + * + * Return: a size of the largest reusable buffer (in dwords) + */ +u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache) +{ + return cache->sam ? 
cache->sam->base.size / sizeof(u32) : 0; +} + +/** + * xe_guc_buf_reserve() - Reserve a new sub-allocation. + * @cache: the &xe_guc_buf_cache where reserve sub-allocation + * @dwords: the requested size of the buffer in dwords + * + * Use xe_guc_buf_is_valid() to check if returned buffer reference is valid. + * Must use xe_guc_buf_release() to release a sub-allocation. + * + * Return: a &xe_guc_buf of new sub-allocation. + */ +struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords) +{ + struct drm_suballoc *sa; + + if (cache->sam) + sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(32), GFP_ATOMIC); + else + sa = ERR_PTR(-EOPNOTSUPP); + + return (struct xe_guc_buf){ .sa = sa }; +} + +/** + * xe_guc_buf_from_data() - Reserve a new sub-allocation using data. + * @cache: the &xe_guc_buf_cache where reserve sub-allocation + * @data: the data to flush the sub-allocation + * @size: the size of the data + * + * Similar to xe_guc_buf_reserve() but flushes @data to the GPU memory. + * + * Return: a &xe_guc_buf of new sub-allocation. + */ +struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache, + const void *data, size_t size) +{ + struct drm_suballoc *sa; + + sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC); + if (!IS_ERR(sa)) + memcpy(xe_sa_bo_cpu_addr(sa), data, size); + + return (struct xe_guc_buf){ .sa = sa }; +} + +/** + * xe_guc_buf_release() - Release a sub-allocation. + * @buf: the &xe_guc_buf to release + * + * Releases a sub-allocation reserved by the xe_guc_buf_reserve(). + */ +void xe_guc_buf_release(const struct xe_guc_buf buf) +{ + if (xe_guc_buf_is_valid(buf)) + xe_sa_bo_free(buf.sa, NULL); +} + +/** + * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory. + * @buf: the &xe_guc_buf to flush + * + * Return: a GPU address of the sub-allocation. + */ +u64 xe_guc_buf_flush(const struct xe_guc_buf buf) +{ + xe_sa_bo_flush_write(buf.sa); + return xe_sa_bo_gpu_addr(buf.sa); +} + +/** + * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation. + * @buf: the &xe_guc_buf to query + * + * Return: a CPU pointer of the sub-allocation. + */ +void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf) +{ + return xe_sa_bo_cpu_addr(buf.sa); +} + +/** + * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation. + * @buf: the &xe_guc_buf to query + * + * Return: a GPU address of the sub-allocation. + */ +u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf) +{ + return xe_sa_bo_gpu_addr(buf.sa); +} + +/** + * xe_guc_cache_gpu_addr_from_ptr() - Lookup a GPU address using the pointer. + * @cache: the &xe_guc_buf_cache with sub-allocations + * @ptr: the CPU pointer of the sub-allocation + * @size: the size of the data + * + * Return: a GPU address on success or 0 if the pointer was unrelated. 
+ */ +u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size) +{ + ptrdiff_t offset = ptr - cache->sam->cpu_ptr; + + if (offset < 0 || offset + size > cache->sam->base.size) + return 0; + + return cache->sam->gpu_addr + offset; +} diff --git a/drivers/gpu/drm/xe/xe_guc_buf.h b/drivers/gpu/drm/xe/xe_guc_buf.h new file mode 100644 index 000000000000..0d67604d96bd --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_buf.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_GUC_BUF_H_ +#define _XE_GUC_BUF_H_ + +#include +#include + +#include "xe_guc_buf_types.h" + +int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache); +u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache); +struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords); +struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache, + const void *data, size_t size); +void xe_guc_buf_release(const struct xe_guc_buf buf); + +/** + * xe_guc_buf_is_valid() - Check if a buffer reference is valid. + * @buf: the &xe_guc_buf reference to check + * + * Return: true if @ref represents a valid sub-allication. + */ +static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf) +{ + return !IS_ERR_OR_NULL(buf.sa); +} + +void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf); +u64 xe_guc_buf_flush(const struct xe_guc_buf buf); +u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf); +u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size); + +DEFINE_CLASS(xe_guc_buf, struct xe_guc_buf, + xe_guc_buf_release(_T), + xe_guc_buf_reserve(cache, num), + struct xe_guc_buf_cache *cache, u32 num); + +DEFINE_CLASS(xe_guc_buf_from_data, struct xe_guc_buf, + xe_guc_buf_release(_T), + xe_guc_buf_from_data(cache, data, size), + struct xe_guc_buf_cache *cache, const void *data, size_t size); + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_buf_types.h b/drivers/gpu/drm/xe/xe_guc_buf_types.h new file mode 100644 index 000000000000..9e123d71c064 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_guc_buf_types.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2024 Intel Corporation + */ + +#ifndef _XE_GUC_BUF_TYPES_H_ +#define _XE_GUC_BUF_TYPES_H_ + +struct drm_suballoc; +struct xe_sa_manager; + +/** + * struct xe_guc_buf_cache - GuC Data Buffer Cache. + */ +struct xe_guc_buf_cache { + /* private: internal sub-allocation manager */ + struct xe_sa_manager *sam; +}; + +/** + * struct xe_guc_buf - GuC Data Buffer Reference. 
+ */ +struct xe_guc_buf { + /* private: internal sub-allocation reference */ + struct drm_suballoc *sa; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h index 83a41ebcdc91..573aa6308380 100644 --- a/drivers/gpu/drm/xe/xe_guc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_types.h @@ -11,6 +11,7 @@ #include "regs/xe_reg_defs.h" #include "xe_guc_ads_types.h" +#include "xe_guc_buf_types.h" #include "xe_guc_ct_types.h" #include "xe_guc_fwif.h" #include "xe_guc_log_types.h" @@ -58,6 +59,8 @@ struct xe_guc { struct xe_guc_ads ads; /** @ct: GuC ct */ struct xe_guc_ct ct; + /** @buf: GuC Buffer Cache manager */ + struct xe_guc_buf_cache buf; /** @capture: the error-state-capture module's data and objects */ struct xe_guc_state_capture *capture; /** @pc: GuC Power Conservation */ -- 2.51.0 From d8b2149ba8f184cd138b482289b16d8558787e99 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 20 Dec 2024 20:42:02 +0100 Subject: [PATCH 15/16] drm/xe/pf: Use GuC Buffer Cache during VFs provisioning Start using GuC buffer cache for the VF's configuration actions. Signed-off-by: Michal Wajdeczko Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-10-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 68 +++++++++++----------- 1 file changed, 35 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 878e96281c03..9db1c920219d 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -20,6 +20,7 @@ #include "xe_gt_sriov_pf_policy.h" #include "xe_gt_sriov_printk.h" #include "xe_guc.h" +#include "xe_guc_buf.h" #include "xe_guc_ct.h" #include "xe_guc_db_mgr.h" #include "xe_guc_fwif.h" @@ -71,48 +72,27 @@ static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid) * Return: number of KLVs that were successfully parsed and saved, * negative error code on failure. */ -static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords) +static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords) { - const u32 bytes = num_dwords * sizeof(u32); - struct xe_tile *tile = gt_to_tile(gt); - struct xe_device *xe = tile_to_xe(tile); struct xe_guc *guc = >->uc.guc; - struct xe_bo *bo; - int ret; - bo = xe_bo_create_pin_map(xe, tile, NULL, - ALIGN(bytes, PAGE_SIZE), - ttm_bo_type_kernel, - XE_BO_FLAG_VRAM_IF_DGFX(tile) | - XE_BO_FLAG_GGTT | - XE_BO_FLAG_GGTT_INVALIDATE); - if (IS_ERR(bo)) - return PTR_ERR(bo); - - xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes); - - ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords); - - xe_bo_unpin_map_no_vm(bo); - - return ret; + return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords); } /* * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed, * negative error code on failure. */ -static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs, - const u32 *klvs, u32 num_dwords) +static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs, + struct xe_guc_buf buf, u32 num_dwords) { int ret; - xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords)); - - ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords); + ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords); if (ret != num_klvs) { int err = ret < 0 ? ret : ret < num_klvs ? 
-ENOKEY : -EPROTO; + void *klvs = xe_guc_buf_cpu_ptr(buf); struct drm_printer p = xe_gt_info_printer(gt); char name[8]; @@ -125,13 +105,35 @@ static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { struct drm_printer p = xe_gt_info_printer(gt); + void *klvs = xe_guc_buf_cpu_ptr(buf); + char name[8]; + xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n", + xe_sriov_function_name(vfid, name, sizeof(name)), + num_klvs, str_plural(num_klvs)); xe_guc_klv_print(klvs, num_dwords, &p); } return 0; } +/* + * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data, + * negative error code on failure. + */ +static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs, + const u32 *klvs, u32 num_dwords) +{ + CLASS(xe_guc_buf_from_data, buf)(>->uc.guc.buf, klvs, num_dwords * sizeof(u32)); + + xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords)); + + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; + + return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords); +} + static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value) { u32 klv[] = { @@ -304,16 +306,17 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) { struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); - u32 max_cfg_dwords = SZ_4K / sizeof(u32); + u32 max_cfg_dwords = xe_guc_buf_cache_dwords(>->uc.guc.buf); + CLASS(xe_guc_buf, buf)(>->uc.guc.buf, max_cfg_dwords); u32 num_dwords; int num_klvs; u32 *cfg; int err; - cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL); - if (!cfg) - return -ENOMEM; + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; + cfg = xe_guc_buf_cpu_ptr(buf); num_dwords = encode_config(cfg, config, true); xe_gt_assert(gt, num_dwords <= max_cfg_dwords); @@ -330,9 +333,8 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) xe_gt_assert(gt, num_dwords <= max_cfg_dwords); num_klvs = xe_guc_klv_count(cfg, num_dwords); - err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords); + err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords); - kfree(cfg); return err; } -- 2.51.0 From f90b552dcbb4e142f2a15d2b4458ea601248b8e8 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 20 Dec 2024 20:42:03 +0100 Subject: [PATCH 16/16] drm/xe/kunit: Allow to replace xe_managed_bo_create_pin_map() MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit We want to use replacement functions in upcoming kunit tests. Signed-off-by: Michal Wajdeczko Reviewed-by: Michał Winiarski Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-11-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_bo.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index cf0dc9e9c53e..c32201123d44 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -15,6 +15,8 @@ #include #include +#include + #include "xe_device.h" #include "xe_dma_buf.h" #include "xe_drm_client.h" @@ -1797,6 +1799,8 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile struct xe_bo *bo; int ret; + KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags); + bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags); if (IS_ERR(bo)) return bo; -- 2.51.0
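
The last patch only adds the redirect hook; the series itself does not show a caller. Below is a minimal sketch of how a KUnit test might take advantage of it, assuming the standard kunit_activate_static_stub()/kunit_deactivate_static_stub() helpers from <kunit/static_stub.h> and the xe_managed_bo_create_pin_map() parameters used above (xe, tile, size, flags). The fake_managed_bo_create_pin_map() replacement and the test function name are hypothetical illustrations, not part of this series, and the size/flags parameter types are assumptions.

#include <linux/err.h>

#include <kunit/test.h>
#include <kunit/static_stub.h>

#include "xe_bo.h"

/* Hypothetical test double: pretend the managed BO allocation failed. */
static struct xe_bo *fake_managed_bo_create_pin_map(struct xe_device *xe,
						    struct xe_tile *tile,
						    size_t size, u32 flags)
{
	return ERR_PTR(-ENOMEM);
}

static void example_managed_bo_stub_test(struct kunit *test)
{
	/*
	 * While this stub is active, calls to xe_managed_bo_create_pin_map()
	 * made from the code under test are redirected to the fake above,
	 * via the KUNIT_STATIC_STUB_REDIRECT() added by the last patch.
	 */
	kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
				   fake_managed_bo_create_pin_map);

	/* ... exercise code that allocates a managed BO, e.g. error paths ... */

	kunit_deactivate_static_stub(test, xe_managed_bo_create_pin_map);
}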