From ae8b507fb8bbea2aa30783184d5728b14ce40c8f Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:41:59 +0100
Subject: [PATCH 01/16] drm/xe/sa: Allow creating suballocator with custom guard size

The current xe_sa_manager implementation uses a hardcoded 4K guard that is
excluded from making suballocations, but in an upcoming patch we want to
reuse the xe_sa_manager where such a 4K guard is not needed. Add another
variant of the xe_sa_bo_manager_init() function that accepts an arbitrary
guard size.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-7-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.c | 18 ++++++++++++++++--
 drivers/gpu/drm/xe/xe_sa.h |  9 +++++++--
 2 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 5f89e32b0640..f8fe61e25518 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -31,14 +31,28 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
 	sa_manager->bo = NULL;
 }
 
-struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+/**
+ * __xe_sa_bo_manager_init() - Create and initialize the suballocator
+ * @tile: the &xe_tile where to allocate
+ * @size: number of bytes to allocate
+ * @guard: number of bytes to exclude from suballocations
+ * @align: alignment for each suballocated chunk
+ *
+ * Prepares the suballocation manager for suballocations.
+ *
+ * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
+ */
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
 {
 	struct xe_device *xe = tile_to_xe(tile);
 	struct xe_sa_manager *sa_manager;
-	u32 managed_size = size - SZ_4K;
+	u32 managed_size;
 	struct xe_bo *bo;
 	int ret;
 
+	xe_tile_assert(tile, size > guard);
+	managed_size = size - guard;
+
 	sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
 	if (!sa_manager)
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index a0341eafbe77..de0330eb36d4 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -5,6 +5,7 @@
 #ifndef _XE_SA_H_
 #define _XE_SA_H_
 
+#include <linux/sizes.h>
 #include <drm/drm_suballoc.h>
 #include "xe_sa_types.h"
 
@@ -12,10 +13,14 @@ struct dma_fence;
 struct xe_bo;
 struct xe_tile;
 
-struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align);
-
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align);
 struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp);
 
+static inline struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+{
+	return __xe_sa_bo_manager_init(tile, size, SZ_4K, align);
+}
+
 /**
  * xe_sa_bo_new() - Make a suballocation.
  * @sa_manager: the &xe_sa_manager
-- 
2.51.0

From c49ca671818a325f2221f0bda8af96e339272a5e Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:42:00 +0100
Subject: [PATCH 02/16] drm/xe/sa: Minor header cleanups

Drop the unused struct xe_bo forward declaration and, while at it, fix
the unnecessary line split in the xe_sa_bo_free() declaration.
Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-8-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_sa.h | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index de0330eb36d4..1170ee5a81a8 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -10,7 +10,6 @@
 #include "xe_sa_types.h"
 
 struct dma_fence;
-struct xe_bo;
 struct xe_tile;
 
 struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align);
@@ -36,8 +35,7 @@ static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager
 }
 
 void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo);
-void xe_sa_bo_free(struct drm_suballoc *sa_bo,
-		   struct dma_fence *fence);
+void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence);
 
 static inline struct xe_sa_manager *
 to_xe_sa_manager(struct drm_suballoc_manager *mng)
-- 
2.51.0

From 696bfdf273eab9ce3dd2ff51d26ca30f7924a4bb Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:42:01 +0100
Subject: [PATCH 03/16] drm/xe/guc: Introduce the GuC Buffer Cache

The purpose of the GuC Buffer Cache is to maintain a set of reusable
buffers that can be used while sending some of the CTB H2G actions that
require a separate buffer with indirect data. Currently only a few PF
actions need this, so initialize it only when running as a PF.

Signed-off-by: Michal Wajdeczko
Cc: Matthew Brost
Cc: Rodrigo Vivi
Acked-by: Rodrigo Vivi
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-9-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/Makefile           |   1 +
 drivers/gpu/drm/xe/xe_guc.c           |   5 +
 drivers/gpu/drm/xe/xe_guc_buf.c       | 172 ++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_guc_buf.h       |  47 +++++++
 drivers/gpu/drm/xe/xe_guc_buf_types.h |  28 +++++
 drivers/gpu/drm/xe/xe_guc_types.h     |   3 +
 6 files changed, 256 insertions(+)
 create mode 100644 drivers/gpu/drm/xe/xe_guc_buf.c
 create mode 100644 drivers/gpu/drm/xe/xe_guc_buf.h
 create mode 100644 drivers/gpu/drm/xe/xe_guc_buf_types.h

diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 7730e0596299..2e50a697f549 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -56,6 +56,7 @@ xe-y += xe_bb.o \
 	xe_gt_topology.o \
 	xe_guc.o \
 	xe_guc_ads.o \
+	xe_guc_buf.o \
 	xe_guc_capture.o \
 	xe_guc_ct.o \
 	xe_guc_db_mgr.o \
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 408365dfe4ee..1619c0a52db9 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -23,6 +23,7 @@
 #include "xe_gt_sriov_vf.h"
 #include "xe_gt_throttle.h"
 #include "xe_guc_ads.h"
+#include "xe_guc_buf.h"
 #include "xe_guc_capture.h"
 #include "xe_guc_ct.h"
 #include "xe_guc_db_mgr.h"
@@ -743,6 +744,10 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
 	if (ret)
 		return ret;
 
+	ret = xe_guc_buf_cache_init(&guc->buf);
+	if (ret)
+		return ret;
+
 	return xe_guc_ads_init_post_hwconfig(&guc->ads);
 }
 
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
new file mode 100644
index 000000000000..261c7c74417f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/cleanup.h>
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_buf.h"
+#include "xe_sa.h"
+
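+/*
+ * The GuC Buffer Cache provides a small pool of reusable buffers, backed by
+ * a single suballocator, that can be used to pass indirect H2G data to the
+ * GuC without creating an ad-hoc buffer object for every such action.
+ */
+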
+static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
+{
+	return container_of(cache, struct xe_guc, buf);
+}
+
+static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
+{
+	return guc_to_gt(cache_to_guc(cache));
+}
+
+/**
+ * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
+ * @cache: the &xe_guc_buf_cache to initialize
+ *
+ * The Buffer Cache allows obtaining a reusable buffer that can be used to
+ * pass indirect H2G data to the GuC without the need to create an ad-hoc
+ * allocation.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
+{
+	struct xe_gt *gt = cache_to_gt(cache);
+	struct xe_sa_manager *sam;
+
+	/* XXX: currently it's useful only for the PF actions */
+	if (!IS_SRIOV_PF(gt_to_xe(gt)))
+		return 0;
+
+	sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
+	if (IS_ERR(sam))
+		return PTR_ERR(sam);
+	cache->sam = sam;
+
+	xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n",
+		  xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo),
+		  __builtin_return_address(0));
+	return 0;
+}
+
+/**
+ * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
+ * @cache: the &xe_guc_buf_cache to query
+ *
+ * Return: a size of the largest reusable buffer (in dwords)
+ */
+u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache)
+{
+	return cache->sam ? cache->sam->base.size / sizeof(u32) : 0;
+}
+
+/**
+ * xe_guc_buf_reserve() - Reserve a new sub-allocation.
+ * @cache: the &xe_guc_buf_cache where to reserve the sub-allocation
+ * @dwords: the requested size of the buffer in dwords
+ *
+ * Use xe_guc_buf_is_valid() to check if returned buffer reference is valid.
+ * Must use xe_guc_buf_release() to release a sub-allocation.
+ *
+ * Return: a &xe_guc_buf of the new sub-allocation.
+ */
+struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords)
+{
+	struct drm_suballoc *sa;
+
+	if (cache->sam)
+		sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(32), GFP_ATOMIC);
+	else
+		sa = ERR_PTR(-EOPNOTSUPP);
+
+	return (struct xe_guc_buf){ .sa = sa };
+}
+
+/**
+ * xe_guc_buf_from_data() - Reserve a new sub-allocation using data.
+ * @cache: the &xe_guc_buf_cache where to reserve the sub-allocation
+ * @data: the data to copy into the sub-allocation
+ * @size: the size of the data
+ *
+ * Similar to xe_guc_buf_reserve() but flushes @data to the GPU memory.
+ *
+ * Return: a &xe_guc_buf of the new sub-allocation.
+ */
+struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
+				       const void *data, size_t size)
+{
+	struct drm_suballoc *sa;
+
+	sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC);
+	if (!IS_ERR(sa))
+		memcpy(xe_sa_bo_cpu_addr(sa), data, size);
+
+	return (struct xe_guc_buf){ .sa = sa };
+}
+
+/**
+ * xe_guc_buf_release() - Release a sub-allocation.
+ * @buf: the &xe_guc_buf to release
+ *
+ * Releases a sub-allocation reserved by xe_guc_buf_reserve().
+ */
+void xe_guc_buf_release(const struct xe_guc_buf buf)
+{
+	if (xe_guc_buf_is_valid(buf))
+		xe_sa_bo_free(buf.sa, NULL);
+}
+
+/**
+ * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
+ * @buf: the &xe_guc_buf to flush
+ *
+ * Return: a GPU address of the sub-allocation.
+ */
+u64 xe_guc_buf_flush(const struct xe_guc_buf buf)
+{
+	xe_sa_bo_flush_write(buf.sa);
+	return xe_sa_bo_gpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation.
+ * @buf: the &xe_guc_buf to query
+ *
+ * Return: a CPU pointer of the sub-allocation.
+ */
+void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)
+{
+	return xe_sa_bo_cpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation.
+ * @buf: the &xe_guc_buf to query
+ *
+ * Return: a GPU address of the sub-allocation.
+ */
+u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)
+{
+	return xe_sa_bo_gpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_cache_gpu_addr_from_ptr() - Lookup a GPU address using the pointer.
+ * @cache: the &xe_guc_buf_cache with sub-allocations
+ * @ptr: the CPU pointer of the sub-allocation
+ * @size: the size of the data
+ *
+ * Return: a GPU address on success or 0 if the pointer was unrelated.
+ */
+u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size)
+{
+	ptrdiff_t offset = ptr - cache->sam->cpu_ptr;
+
+	if (offset < 0 || offset + size > cache->sam->base.size)
+		return 0;
+
+	return cache->sam->gpu_addr + offset;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.h b/drivers/gpu/drm/xe/xe_guc_buf.h
new file mode 100644
index 000000000000..0d67604d96bd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_BUF_H_
+#define _XE_GUC_BUF_H_
+
+#include <linux/cleanup.h>
+#include <linux/err.h>
+
+#include "xe_guc_buf_types.h"
+
+int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache);
+u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache);
+struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords);
+struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
+				       const void *data, size_t size);
+void xe_guc_buf_release(const struct xe_guc_buf buf);
+
+/**
+ * xe_guc_buf_is_valid() - Check if a buffer reference is valid.
+ * @buf: the &xe_guc_buf reference to check
+ *
+ * Return: true if @buf represents a valid sub-allocation.
+ */
+static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf)
+{
+	return !IS_ERR_OR_NULL(buf.sa);
+}
+
+void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf);
+u64 xe_guc_buf_flush(const struct xe_guc_buf buf);
+u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf);
+u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size);
+
+DEFINE_CLASS(xe_guc_buf, struct xe_guc_buf,
+	     xe_guc_buf_release(_T),
+	     xe_guc_buf_reserve(cache, num),
+	     struct xe_guc_buf_cache *cache, u32 num);
+
+DEFINE_CLASS(xe_guc_buf_from_data, struct xe_guc_buf,
+	     xe_guc_buf_release(_T),
+	     xe_guc_buf_from_data(cache, data, size),
+	     struct xe_guc_buf_cache *cache, const void *data, size_t size);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_buf_types.h b/drivers/gpu/drm/xe/xe_guc_buf_types.h
new file mode 100644
index 000000000000..9e123d71c064
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf_types.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_BUF_TYPES_H_
+#define _XE_GUC_BUF_TYPES_H_
+
+struct drm_suballoc;
+struct xe_sa_manager;
+
+/**
+ * struct xe_guc_buf_cache - GuC Data Buffer Cache.
+ */
+struct xe_guc_buf_cache {
+	/* private: internal sub-allocation manager */
+	struct xe_sa_manager *sam;
+};
+
+/**
+ * struct xe_guc_buf - GuC Data Buffer Reference.
+ */
+struct xe_guc_buf {
+	/* private: internal sub-allocation reference */
+	struct drm_suballoc *sa;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 83a41ebcdc91..573aa6308380 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -11,6 +11,7 @@
 #include "regs/xe_reg_defs.h"
 
 #include "xe_guc_ads_types.h"
+#include "xe_guc_buf_types.h"
 #include "xe_guc_ct_types.h"
 #include "xe_guc_fwif.h"
 #include "xe_guc_log_types.h"
@@ -58,6 +59,8 @@ struct xe_guc {
 	struct xe_guc_ads ads;
 	/** @ct: GuC ct */
 	struct xe_guc_ct ct;
+	/** @buf: GuC Buffer Cache manager */
+	struct xe_guc_buf_cache buf;
 	/** @capture: the error-state-capture module's data and objects */
 	struct xe_guc_state_capture *capture;
 	/** @pc: GuC Power Conservation */
-- 
2.51.0

From d8b2149ba8f184cd138b482289b16d8558787e99 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:42:02 +0100
Subject: [PATCH 04/16] drm/xe/pf: Use GuC Buffer Cache during VFs provisioning

Start using the GuC buffer cache for the VF configuration actions.

Signed-off-by: Michal Wajdeczko
Reviewed-by: Matthew Brost
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-10-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 68 +++++++++++-----------
 1 file changed, 35 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 878e96281c03..9db1c920219d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -20,6 +20,7 @@
 #include "xe_gt_sriov_pf_policy.h"
 #include "xe_gt_sriov_printk.h"
 #include "xe_guc.h"
+#include "xe_guc_buf.h"
 #include "xe_guc_ct.h"
 #include "xe_guc_db_mgr.h"
 #include "xe_guc_fwif.h"
@@ -71,48 +72,27 @@ static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
  * Return: number of KLVs that were successfully parsed and saved,
  *         negative error code on failure.
  */
-static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
+static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
 {
-	const u32 bytes = num_dwords * sizeof(u32);
-	struct xe_tile *tile = gt_to_tile(gt);
-	struct xe_device *xe = tile_to_xe(tile);
 	struct xe_guc *guc = &gt->uc.guc;
-	struct xe_bo *bo;
-	int ret;
 
-	bo = xe_bo_create_pin_map(xe, tile, NULL,
-				  ALIGN(bytes, PAGE_SIZE),
-				  ttm_bo_type_kernel,
-				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
-				  XE_BO_FLAG_GGTT |
-				  XE_BO_FLAG_GGTT_INVALIDATE);
-	if (IS_ERR(bo))
-		return PTR_ERR(bo);
-
-	xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
-
-	ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
-
-	xe_bo_unpin_map_no_vm(bo);
-
-	return ret;
+	return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
 }
 
 /*
  * Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
  *         negative error code on failure.
  */
-static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
-			       const u32 *klvs, u32 num_dwords)
+static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+			       struct xe_guc_buf buf, u32 num_dwords)
 {
 	int ret;
 
-	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
-
-	ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
+	ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);
 
 	if (ret != num_klvs) {
 		int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
+		void *klvs = xe_guc_buf_cpu_ptr(buf);
 		struct drm_printer p = xe_gt_info_printer(gt);
 		char name[8];
 
@@ -125,13 +105,35 @@ static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs
 
 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
 		struct drm_printer p = xe_gt_info_printer(gt);
+		void *klvs = xe_guc_buf_cpu_ptr(buf);
+		char name[8];
+
 		xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
+				 xe_sriov_function_name(vfid, name, sizeof(name)),
+				 num_klvs, str_plural(num_klvs));
 		xe_guc_klv_print(klvs, num_dwords, &p);
 	}
 
 	return 0;
 }
 
+/*
+ * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
+ *         negative error code on failure.
+ */
+static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+			       const u32 *klvs, u32 num_dwords)
+{
+	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
+
+	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+	if (!xe_guc_buf_is_valid(buf))
+		return -ENOBUFS;
+
+	return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
+}
+
 static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
 {
 	u32 klv[] = {
@@ -304,16 +306,17 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool
 static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
 {
 	struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
-	u32 max_cfg_dwords = SZ_4K / sizeof(u32);
+	u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
+	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
 	u32 num_dwords;
 	int num_klvs;
 	u32 *cfg;
 	int err;
 
-	cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
-	if (!cfg)
-		return -ENOMEM;
+	if (!xe_guc_buf_is_valid(buf))
+		return -ENOBUFS;
 
+	cfg = xe_guc_buf_cpu_ptr(buf);
 	num_dwords = encode_config(cfg, config, true);
 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
 
@@ -330,9 +333,8 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
 	xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
 
 	num_klvs = xe_guc_klv_count(cfg, num_dwords);
-	err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
+	err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
 
-	kfree(cfg);
 	return err;
 }
 
-- 
2.51.0

From f90b552dcbb4e142f2a15d2b4458ea601248b8e8 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Fri, 20 Dec 2024 20:42:03 +0100
Subject: [PATCH 05/16] drm/xe/kunit: Allow to replace xe_managed_bo_create_pin_map()
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

We want to use replacement functions in upcoming kunit tests.
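For illustration, a test can then redirect the call to its own fake; a
minimal sketch modelled on the test added later in this series (the
fake_bo_create() name is hypothetical):

	static struct xe_bo *fake_bo_create(struct xe_device *xe, struct xe_tile *tile,
					    size_t size, u32 flags)
	{
		struct kunit *test = kunit_get_current_test();
		struct xe_bo *bo = drmm_kzalloc(&xe->drm, sizeof(*bo), GFP_KERNEL);

		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
		return bo;	/* software-only bo, no real pinning or GGTT binding */
	}

	/* in the test init function: */
	kunit_activate_static_stub(test, xe_managed_bo_create_pin_map, fake_bo_create);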
Signed-off-by: Michal Wajdeczko
Reviewed-by: Michał Winiarski
Link: https://patchwork.freedesktop.org/patch/msgid/20241220194205.995-11-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_bo.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index cf0dc9e9c53e..c32201123d44 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -15,6 +15,8 @@
 #include <drm/ttm/ttm_tt.h>
 #include <uapi/drm/xe_drm.h>
 
+#include <kunit/static_stub.h>
+
 #include "xe_device.h"
 #include "xe_dma_buf.h"
 #include "xe_drm_client.h"
@@ -1797,6 +1799,8 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
 	struct xe_bo *bo;
 	int ret;
 
+	KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
+
 	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
 	if (IS_ERR(bo))
 		return bo;
-- 
2.51.0

From 238f96315ada9e2183b04df90c9714b1da68455c Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Tue, 14 Jan 2025 20:21:40 +0100
Subject: [PATCH 06/16] drm/xe/kunit: Add KUnit tests for GuC Buffer Cache
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Add tests to make sure that the recently added GuC Buffer Cache
component is working as expected.

Signed-off-by: Michal Wajdeczko
Reviewed-by: Michał Winiarski
Link: https://patchwork.freedesktop.org/patch/msgid/20250114192140.1039-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c | 334 ++++++++++++++++++++
 drivers/gpu/drm/xe/xe_guc_buf.c             |   4 +
 2 files changed, 338 insertions(+)
 create mode 100644 drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c

diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
new file mode 100644
index 000000000000..6faffcd74869
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_guc_ct.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+#define DUT_GGTT_START SZ_1M
+#define DUT_GGTT_SIZE SZ_2M
+
+static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *xe,
+							      struct xe_tile *tile,
+							      size_t size, u32 flags)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct xe_bo *bo;
+	void *buf;
+
+	bo = drmm_kzalloc(&xe->drm, sizeof(*bo), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+
+	buf = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+	bo->tile = tile;
+	bo->ttm.bdev = &xe->ttm;
+	bo->size = size;
+	iosys_map_set_vaddr(&bo->vmap, buf);
+
+	if (flags & XE_BO_FLAG_GGTT) {
+		struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+		bo->ggtt_node[tile->id] = xe_ggtt_node_init(ggtt);
+		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
+
+		KUNIT_ASSERT_EQ(test, 0,
+				drm_mm_insert_node_in_range(&ggtt->mm,
+							    &bo->ggtt_node[tile->id]->base,
+							    bo->size, SZ_4K,
+							    0, 0, U64_MAX, 0));
+	}
+
+	return bo;
+}
+
+static int guc_buf_test_init(struct kunit *test)
+{
+	struct xe_pci_fake_data fake = {
+		.sriov_mode = XE_SRIOV_MODE_PF,
+		.platform = XE_TIGERLAKE, /* some random platform */
+		.subplatform = XE_SUBPLATFORM_NONE,
+	};
+	struct xe_ggtt *ggtt;
+	struct xe_guc *guc;
+
+	test->priv = &fake;
+	xe_kunit_helper_xe_device_test_init(test);
+
+	ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
+	guc = &xe_device_get_gt(test->priv, 0)->uc.guc;
+
+	drm_mm_init(&ggtt->mm, DUT_GGTT_START,
DUT_GGTT_SIZE); + mutex_init(&ggtt->lock); + + kunit_activate_static_stub(test, xe_managed_bo_create_pin_map, + replacement_xe_managed_bo_create_pin_map); + + KUNIT_ASSERT_EQ(test, 0, xe_guc_buf_cache_init(&guc->buf)); + + test->priv = &guc->buf; + return 0; +} + +static void test_smallest(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf buf; + + buf = xe_guc_buf_reserve(cache, 1); + KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf)); + KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf)); + KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf)); + KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf)); + KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf)); + xe_guc_buf_release(buf); +} + +static void test_largest(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf buf; + + buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache)); + KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf)); + KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf)); + KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf)); + KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf)); + KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf)); + xe_guc_buf_release(buf); +} + +static void test_granular(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf *bufs; + int n, dwords; + + dwords = xe_guc_buf_cache_dwords(cache); + bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL); + KUNIT_EXPECT_NOT_NULL(test, bufs); + + for (n = 0; n < dwords; n++) + bufs[n] = xe_guc_buf_reserve(cache, 1); + + for (n = 0; n < dwords; n++) + KUNIT_EXPECT_TRUE_MSG(test, xe_guc_buf_is_valid(bufs[n]), "n=%d", n); + + for (n = 0; n < dwords; n++) + xe_guc_buf_release(bufs[n]); +} + +static void test_unique(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf *bufs; + int n, m, dwords; + + dwords = xe_guc_buf_cache_dwords(cache); + bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL); + KUNIT_EXPECT_NOT_NULL(test, bufs); + + for (n = 0; n < dwords; n++) + bufs[n] = xe_guc_buf_reserve(cache, 1); + + for (n = 0; n < dwords; n++) { + for (m = n + 1; m < dwords; m++) { + KUNIT_EXPECT_PTR_NE_MSG(test, xe_guc_buf_cpu_ptr(bufs[n]), + xe_guc_buf_cpu_ptr(bufs[m]), "n=%d, m=%d", n, m); + KUNIT_ASSERT_NE_MSG(test, xe_guc_buf_gpu_addr(bufs[n]), + xe_guc_buf_gpu_addr(bufs[m]), "n=%d, m=%d", n, m); + } + } + + for (n = 0; n < dwords; n++) + xe_guc_buf_release(bufs[n]); +} + +static void test_overlap(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf b1, b2; + u32 dwords = xe_guc_buf_cache_dwords(cache) / 2; + u32 bytes = dwords * sizeof(u32); + void *p1, *p2; + u64 a1, a2; + + b1 = xe_guc_buf_reserve(cache, dwords); + b2 = xe_guc_buf_reserve(cache, dwords); + + p1 = xe_guc_buf_cpu_ptr(b1); + p2 = xe_guc_buf_cpu_ptr(b2); + + a1 = xe_guc_buf_gpu_addr(b1); + a2 = xe_guc_buf_gpu_addr(b2); + + KUNIT_EXPECT_PTR_NE(test, p1, p2); + if (p1 < p2) + KUNIT_EXPECT_LT(test, (uintptr_t)(p1 + bytes - 1), (uintptr_t)p2); + else + KUNIT_EXPECT_LT(test, (uintptr_t)(p2 + bytes - 1), (uintptr_t)p1); + + KUNIT_EXPECT_NE(test, a1, a2); + if (a1 < a2) + KUNIT_EXPECT_LT(test, a1 + bytes - 1, a2); + else + KUNIT_EXPECT_LT(test, a2 + bytes - 1, a1); + + xe_guc_buf_release(b1); + xe_guc_buf_release(b2); +} + +static void test_reusable(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct 
xe_guc_buf b1, b2; + void *p1; + u64 a1; + + b1 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache)); + KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(b1)); + KUNIT_EXPECT_NOT_NULL(test, p1 = xe_guc_buf_cpu_ptr(b1)); + KUNIT_EXPECT_NE(test, 0, a1 = xe_guc_buf_gpu_addr(b1)); + xe_guc_buf_release(b1); + + b2 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache)); + KUNIT_EXPECT_PTR_EQ(test, p1, xe_guc_buf_cpu_ptr(b2)); + KUNIT_EXPECT_EQ(test, a1, xe_guc_buf_gpu_addr(b2)); + xe_guc_buf_release(b2); +} + +static void test_too_big(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf buf; + + buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache) + 1); + KUNIT_EXPECT_FALSE(test, xe_guc_buf_is_valid(buf)); + xe_guc_buf_release(buf); /* shouldn't crash */ +} + +static void test_flush(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf buf; + const u32 dwords = xe_guc_buf_cache_dwords(cache); + const u32 bytes = dwords * sizeof(u32); + u32 *s, *p, *d; + int n; + + KUNIT_ASSERT_NOT_NULL(test, s = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL)); + KUNIT_ASSERT_NOT_NULL(test, d = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL)); + + for (n = 0; n < dwords; n++) + s[n] = n; + + buf = xe_guc_buf_reserve(cache, dwords); + KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf)); + KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf)); + KUNIT_EXPECT_PTR_NE(test, p, s); + KUNIT_EXPECT_PTR_NE(test, p, d); + + memcpy(p, s, bytes); + KUNIT_EXPECT_NE(test, 0, xe_guc_buf_flush(buf)); + + iosys_map_memcpy_from(d, &cache->sam->bo->vmap, 0, bytes); + KUNIT_EXPECT_MEMEQ(test, s, d, bytes); + + xe_guc_buf_release(buf); +} + +static void test_lookup(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf buf; + u32 dwords; + u64 addr; + u32 *p; + int n; + + dwords = xe_guc_buf_cache_dwords(cache); + buf = xe_guc_buf_reserve(cache, dwords); + KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf)); + KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf)); + KUNIT_ASSERT_NE(test, 0, addr = xe_guc_buf_gpu_addr(buf)); + + KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p - 1, sizeof(u32))); + KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p + dwords, sizeof(u32))); + + for (n = 0; n < dwords; n++) + KUNIT_EXPECT_EQ_MSG(test, xe_guc_cache_gpu_addr_from_ptr(cache, p + n, sizeof(u32)), + addr + n * sizeof(u32), "n=%d", n); + + xe_guc_buf_release(buf); +} + +static void test_data(struct kunit *test) +{ + static const u32 data[] = { 1, 2, 3, 4, 5, 6 }; + struct xe_guc_buf_cache *cache = test->priv; + struct xe_guc_buf buf; + void *p; + + buf = xe_guc_buf_from_data(cache, data, sizeof(data)); + KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf)); + KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf)); + KUNIT_EXPECT_MEMEQ(test, p, data, sizeof(data)); + + xe_guc_buf_release(buf); +} + +static void test_class(struct kunit *test) +{ + struct xe_guc_buf_cache *cache = test->priv; + u32 dwords = xe_guc_buf_cache_dwords(cache); + + { + CLASS(xe_guc_buf, buf)(cache, dwords); + KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf)); + KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf)); + KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf)); + KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf)); + KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf)); + } + + { + CLASS(xe_guc_buf, buf)(cache, dwords); + KUNIT_ASSERT_TRUE(test, 
xe_guc_buf_is_valid(buf));
+		KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+		KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+		KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+		KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+	}
+}
+
+static struct kunit_case guc_buf_test_cases[] = {
+	KUNIT_CASE(test_smallest),
+	KUNIT_CASE(test_largest),
+	KUNIT_CASE(test_granular),
+	KUNIT_CASE(test_unique),
+	KUNIT_CASE(test_overlap),
+	KUNIT_CASE(test_reusable),
+	KUNIT_CASE(test_too_big),
+	KUNIT_CASE(test_flush),
+	KUNIT_CASE(test_lookup),
+	KUNIT_CASE(test_data),
+	KUNIT_CASE(test_class),
+	{}
+};
+
+static struct kunit_suite guc_buf_suite = {
+	.name = "guc_buf",
+	.test_cases = guc_buf_test_cases,
+	.init = guc_buf_test_init,
+};
+
+kunit_test_suites(&guc_buf_suite);
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 261c7c74417f..ce6d9830e13b 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -170,3 +170,7 @@ u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *p
 
 	return cache->sam->gpu_addr + offset;
 }
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_buf_kunit.c"
+#endif
-- 
2.51.0

From 173baa1b2dc44e3551d9414f4919a48fe5da4880 Mon Sep 17 00:00:00 2001
From: Satyanarayana K V P
Date: Thu, 16 Jan 2025 11:26:17 +0530
Subject: [PATCH 07/16] drm/xe: Suppress printing of mode when running in non-sriov mode
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

The xe_sriov_probe_early() function prints the SR-IOV PF/VF mode on
driver probe. When running in non-SR-IOV mode, the debug message
"Running in none mode" is seen, which does not convey any useful
information. Suppress this debug message and print the mode only when
running in PF or VF mode.

Signed-off-by: Satyanarayana K V P
Cc: Michał Wajdeczko
Reviewed-by: Lucas De Marchi
Signed-off-by: Michal Wajdeczko
Link: https://patchwork.freedesktop.org/patch/msgid/20250116055617.20611-1-satyanarayana.k.v.p@intel.com
---
 drivers/gpu/drm/xe/xe_sriov.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index 04e2f539ccd9..a0eab44c0e76 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -81,7 +81,7 @@ void xe_sriov_probe_early(struct xe_device *xe)
 	xe->sriov.__mode = mode;
 	xe_assert(xe, xe->sriov.__mode);
 
-	if (has_sriov)
+	if (IS_SRIOV(xe))
 		drm_info(&xe->drm, "Running in %s mode\n",
 			 xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
 }
-- 
2.51.0

From f3b59457808f61d88178b0afa67cbd017d7ce79e Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 10 Dec 2024 09:31:11 +0100
Subject: [PATCH 08/16] drm/xe: Do not attempt to bootstrap VF in execlists mode

It was mentioned in a review that there is a possibility of choosing to
load the module with VF in execlists mode. Of course this doesn't work;
just bomb out as hard as possible.
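The check relies on xe_device_uc_enabled(), which reports whether the
GuC/HuC path is in use at all; assuming the current xe_device.h helper,
it reduces to (sketch):

	static inline bool xe_device_uc_enabled(struct xe_device *xe)
	{
		return !xe->info.force_execlist;
	}

so a VF probed with force_execlist enabled now fails
xe_gt_sriov_vf_bootstrap() with -ENODEV instead of attempting a GuC
handshake that cannot work.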
Reviewed-by: Lucas De Marchi
Link: https://patchwork.freedesktop.org/patch/msgid/20241210083111.230484-12-dev@lankhorst.se
Signed-off-by: Maarten Lankhorst
---
 drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index cca5d5732802..6671030439fd 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -213,6 +213,9 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
 {
 	int err;
 
+	if (!xe_device_uc_enabled(gt_to_xe(gt)))
+		return -ENODEV;
+
 	err = vf_reset_guc_state(gt);
 	if (unlikely(err))
 		return err;
-- 
2.51.0

From a46ea12eca59fd3741ddfec3042d43f87fadf58f Mon Sep 17 00:00:00 2001
From: Rodrigo Vivi
Date: Fri, 17 Jan 2025 14:38:27 -0500
Subject: [PATCH 09/16] drm/xe/uapi: Fix documentation indentation

Fix these issues:

Documentation/gpu/driver-uapi:29: include/uapi/drm/xe_drm.h:817: WARNING:
Bullet list ends without a blank line; unexpected unindent.
Documentation/gpu/driver-uapi:29: include/uapi/drm/xe_drm.h:835: WARNING:
Definition list ends without a blank line; unexpected unindent.

Fixes: 75d37750a753 ("drm/xe/mmap: Add mmap support for PCI memory barrier")
Reported-by: Stephen Rothwell
Closes: https://lore.kernel.org/intel-xe/20250117164023.3fdc00b9@canb.auug.org.au/
Cc: Tejas Upadhyay
Tested-by: Bagas Sanjaya
Reviewed-by: Tejas Upadhyay
Link: https://patchwork.freedesktop.org/patch/msgid/20250117193827.91779-1-rodrigo.vivi@intel.com
Signed-off-by: Rodrigo Vivi
---
 include/uapi/drm/xe_drm.h | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index cac607a30f6d..e2160330ad01 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -814,29 +814,29 @@ struct drm_xe_gem_create {
  *
  * The @flags can be:
  *  - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For user to query special offset
- * for use in mmap ioctl. Writing to the returned mmap address will generate a
- * PCI memory barrier with low overhead (avoiding IOCTL call as well as writing
- * to VRAM which would also add overhead), acting like an MI_MEM_FENCE
- * instruction.
+ *    for use in mmap ioctl. Writing to the returned mmap address will generate a
+ *    PCI memory barrier with low overhead (avoiding IOCTL call as well as writing
+ *    to VRAM which would also add overhead), acting like an MI_MEM_FENCE
+ *    instruction.
 *
- * Note: The mmap size can be at most 4K, due to HW limitations. As a result
- * this interface is only supported on CPU architectures that support 4K page
- * size. The mmap_offset ioctl will detect this and gracefully return an
- * error, where userspace is expected to have a different fallback method for
- * triggering a barrier.
+ *    Note: The mmap size can be at most 4K, due to HW limitations. As a result
+ *    this interface is only supported on CPU architectures that support 4K page
+ *    size. The mmap_offset ioctl will detect this and gracefully return an
+ *    error, where userspace is expected to have a different fallback method for
+ *    triggering a barrier.
 *
- * Roughly the usage would be as follows:
+ *    Roughly the usage would be as follows:
 *
- * .. code-block:: C
+ *    .. code-block:: C
 *
- *     struct drm_xe_gem_mmap_offset mmo = {
- *         .handle = 0, // must be set to 0
- *         .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
- *     };
+ *        struct drm_xe_gem_mmap_offset mmo = {
+ *            .handle = 0, // must be set to 0
+ *            .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
+ *        };
 *
- *     err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
- *     map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
- *     map[i] = 0xdeadbeaf; // issue barrier
+ *        err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
+ *        map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
+ *        map[i] = 0xdeadbeaf; // issue barrier
  */
 struct drm_xe_gem_mmap_offset {
 	/** @extensions: Pointer to the first extension struct, if any */
-- 
2.51.0

From 380b0cdaa76bc8f5c16db16eaf48751e792ff041 Mon Sep 17 00:00:00 2001
From: Maarten Lankhorst
Date: Tue, 10 Dec 2024 09:31:03 +0100
Subject: [PATCH 10/16] drm/xe: Move suballocator init to after display init

No allocations should be done before we have had a chance to preserve
the display fb.

Reviewed-by: Rodrigo Vivi
Link: https://patchwork.freedesktop.org/patch/msgid/20241210083111.230484-4-dev@lankhorst.se
Signed-off-by: Maarten Lankhorst
---
 drivers/gpu/drm/xe/xe_device.c |  6 ++++++
 drivers/gpu/drm/xe/xe_tile.c   | 12 ++++++++----
 drivers/gpu/drm/xe/xe_tile.h   |  1 +
 3 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 6ecbf7dd396c..af9b2bb62dee 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -840,6 +840,12 @@ int xe_device_probe(struct xe_device *xe)
 	if (err)
 		goto err;
 
+	for_each_tile(tile, xe, id) {
+		err = xe_tile_init(tile);
+		if (err)
+			goto err;
+	}
+
 	for_each_gt(gt, xe, id) {
 		last_gt = id;
 
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 07cf7cfe4abd..2825553b568f 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -170,10 +170,6 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
 	if (err)
 		return err;
 
-	tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
-	if (IS_ERR(tile->mem.kernel_bb_pool))
-		return PTR_ERR(tile->mem.kernel_bb_pool);
-
 	xe_wa_apply_tile_workarounds(tile);
 
 	err = xe_tile_sysfs_init(tile);
@@ -181,6 +177,14 @@
 	return 0;
 }
 
+int xe_tile_init(struct xe_tile *tile)
+{
+	tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
+	if (IS_ERR(tile->mem.kernel_bb_pool))
+		return PTR_ERR(tile->mem.kernel_bb_pool);
+
+	return 0;
+}
 void xe_tile_migrate_wait(struct xe_tile *tile)
 {
 	xe_migrate_wait(tile->migrate);
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index 1c9e42ade6b0..eb939316d55b 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -12,6 +12,7 @@ struct xe_tile;
 
 int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
 int xe_tile_init_noalloc(struct xe_tile *tile);
+int xe_tile_init(struct xe_tile *tile);
 
 void xe_tile_migrate_wait(struct xe_tile *tile);
 
-- 
2.51.0

From cfa9d40db8c30d894171010fe765d96e9bc6a47e Mon Sep 17 00:00:00 2001
From: Ashutosh Dixit
Date: Thu, 16 Jan 2025 19:21:55 -0800
Subject: [PATCH 11/16] drm/xe/oa: Preserve oa_ctrl unused bits

UMDs have an interest in setting unused bits of the oa_ctrl register
"out of band" for certain experiments. To facilitate this, don't
clobber the previous oa_ctrl unused bits, i.e. rmw the values rather
than simply write them.
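For reference, the rmw helper ends up doing the equivalent of the
following (a sketch of the xe_mmio_rmw32() semantics, not code added by
this patch):

	u32 old = xe_mmio_read32(&stream->gt->mmio, regs->oa_ctrl);

	xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl,
			(old & ~__oactrl_used_bits(stream)) | val);

so whatever a UMD placed in the remaining oa_ctrl bits survives both
xe_oa_enable() and xe_oa_disable().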
Fixes: e936f885f1e9 ("drm/xe/oa/uapi: Expose OA stream fd")
Signed-off-by: Ashutosh Dixit
Reviewed-by: Umesh Nerlige Ramappa
Link: https://patchwork.freedesktop.org/patch/msgid/20250117032155.3048063-1-ashutosh.dixit@intel.com
---
 drivers/gpu/drm/xe/regs/xe_oa_regs.h |  6 ++++++
 drivers/gpu/drm/xe/xe_oa.c           | 12 ++++++++++--
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a49561e9f3c3..a79ad2da070c 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -51,6 +51,10 @@
 /* Common to all OA units */
 #define OA_OACONTROL_REPORT_BC_MASK	REG_GENMASK(9, 9)
 #define OA_OACONTROL_COUNTER_SIZE_MASK	REG_GENMASK(8, 8)
+#define OAG_OACONTROL_USED_BITS \
+	(OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
+	 OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
+	 OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)
 
 #define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
 #define OAG_OA_DEBUG_DISABLE_MMIO_TRG		REG_BIT(14)
@@ -78,6 +82,8 @@
 #define OAM_CONTEXT_CONTROL_OFFSET		(0x1bc)
 #define OAM_CONTROL_OFFSET			(0x194)
 #define OAM_CONTROL_COUNTER_SEL_MASK		REG_GENMASK(3, 1)
+#define OAM_OACONTROL_USED_BITS \
+	(OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
 #define OAM_DEBUG_OFFSET			(0x198)
 #define OAM_STATUS_OFFSET			(0x19c)
 #define OAM_MMIO_TRG_OFFSET			(0x1d0)
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eeb96b5f49e2..6a08e6c92835 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -452,6 +452,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
 	return val;
 }
 
+static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
+{
+	return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+		OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
+}
+
 static void xe_oa_enable(struct xe_oa_stream *stream)
 {
 	const struct xe_oa_format *format = stream->oa_buffer.format;
@@ -472,14 +478,14 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
 	    stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
 		val |= OAG_OACONTROL_OA_PES_DISAG_EN;
 
-	xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val);
+	xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
 }
 
 static void xe_oa_disable(struct xe_oa_stream *stream)
 {
 	struct xe_mmio *mmio = &stream->gt->mmio;
 
-	xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0);
+	xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
 	if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
 			   OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
 		drm_err(&stream->oa->xe->drm,
@@ -2534,6 +2540,8 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
 			u->type = DRM_XE_OA_UNIT_TYPE_OAM;
 		}
 
+		xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
+
 		/* Ensure MMIO trigger remains disabled till there is a stream */
 		xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
 				oag_configure_mmio_trigger(NULL, false));
-- 
2.51.0

From 9ebb5846e1a3b1705f8a7cbc528888a1aa0b163e Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Tue, 21 Jan 2025 00:24:43 +0100
Subject: [PATCH 12/16] drm/xe/pf: Fix migration initialization
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

The migration support only needs to be initialized once, but it was
incorrectly called from xe_gt_sriov_pf_init_hw(), which is part of the
reset flow and may be called multiple times.
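The resulting split of responsibilities is (a sketch of the intended
contract, not code from this patch):

	xe_gt_sriov_pf_init(gt);	/* one-time PF data setup, now incl. migration */
	xe_gt_sriov_pf_init_hw(gt);	/* HW (re)programming, safe to repeat on reset */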
Fixes: d86e3737c7ab ("drm/xe/pf: Add functions to save and restore VF GuC state")
Signed-off-by: Michal Wajdeczko
Cc: Michał Winiarski
Reviewed-by: Rodrigo Vivi
Link: https://patchwork.freedesktop.org/patch/msgid/20250120232443.544-1-michal.wajdeczko@intel.com
---
 drivers/gpu/drm/xe/xe_gt.c          |  4 +++-
 drivers/gpu/drm/xe/xe_gt_sriov_pf.c | 14 +++++++++++++-
 drivers/gpu/drm/xe/xe_gt_sriov_pf.h |  6 ++++++
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index b5c313a3e946..01a4a852b8f4 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -532,8 +532,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
 	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
 		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
 
-	if (IS_SRIOV_PF(gt_to_xe(gt)))
+	if (IS_SRIOV_PF(gt_to_xe(gt))) {
+		xe_gt_sriov_pf_init(gt);
 		xe_gt_sriov_pf_init_hw(gt);
+	}
 
 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
 
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index e71fc3d2bda2..6f906c8e8108 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -68,6 +68,19 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
 	return 0;
 }
 
+/**
+ * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Late one-time initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+	return xe_gt_sriov_pf_migration_init(gt);
+}
+
 static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
 {
 	return GRAPHICS_VERx100(xe) == 1200;
@@ -90,7 +103,6 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
 		pf_enable_ggtt_guest_update(gt);
 
 	xe_gt_sriov_pf_service_update(gt);
-	xe_gt_sriov_pf_migration_init(gt);
 }
 
 static u32 pf_get_vf_regs_stride(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index 96fab779a906..f474509411c0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -10,6 +10,7 @@ struct xe_gt;
 
 #ifdef CONFIG_PCI_IOV
 int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_pf_init(struct xe_gt *gt);
 void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
 void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
 void xe_gt_sriov_pf_restart(struct xe_gt *gt);
@@ -19,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
 	return 0;
 }
 
+static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+	return 0;
+}
+
 static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
 {
 }
-- 
2.51.0

From 5994018ecffc9e70e192bede2270819d8c93b2d4 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko
Date: Tue, 21 Jan 2025 10:48:32 +0100
Subject: [PATCH 13/16] drm/xe/guc: Fix sizeof(32) typo

A small typo leads to the following static code checker warning:

	drivers/gpu/drm/xe/xe_guc_buf.c:81 xe_guc_buf_reserve()
	warn: sizeof(NUMBER)?
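For reference, sizeof applied to a bare integer constant measures the
constant's type, so the typo was only harmless by accident (a sketch,
not code from this patch):

	sizeof(32)	/* == sizeof(int) == 4 on all kernel targets */
	sizeof(u32)	/* what was meant; also 4, so no functional change */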
Reported-by: Dan Carpenter Closes: https://lore.kernel.org/intel-xe/0d5bcbf1-79f9-4a10-a221-ddbaec9f6122@stanley.mountain/ Fixes: 696bfdf273ea ("drm/xe/guc: Introduce the GuC Buffer Cache") Signed-off-by: Michal Wajdeczko Cc: Dan Carpenter Cc: Matthew Brost Reviewed-by: Stuart Summers Link: https://patchwork.freedesktop.org/patch/msgid/20250121094832.588-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_guc_buf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c index ce6d9830e13b..0193c94dd6a0 100644 --- a/drivers/gpu/drm/xe/xe_guc_buf.c +++ b/drivers/gpu/drm/xe/xe_guc_buf.c @@ -78,7 +78,7 @@ struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords) struct drm_suballoc *sa; if (cache->sam) - sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(32), GFP_ATOMIC); + sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(u32), GFP_ATOMIC); else sa = ERR_PTR(-EOPNOTSUPP); -- 2.51.0 From e0a4cd6aceca0e9164d7304ac1a9b061e1d96529 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 17 Jan 2025 08:45:29 -0800 Subject: [PATCH 14/16] MAINTAINERS: Also exclude xe for drm-misc When the xe driver was added, it didn't extend the exclude entries for drm-misc, as done in commit 5a44d50f0072 ("MAINTAINERS: Update drm-misc entry to match all drivers"). Exclude it like is done for i915 and other drivers with dedicated maintainers. Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20250117164529.393503-1-lucas.demarchi@intel.com Signed-off-by: Lucas De Marchi --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 17daa9ee9384..6664d39ab4c6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7598,6 +7598,7 @@ X: drivers/gpu/drm/nouveau/ X: drivers/gpu/drm/radeon/ X: drivers/gpu/drm/renesas/rcar-du/ X: drivers/gpu/drm/tegra/ +X: drivers/gpu/drm/xe/ DRM DRIVERS FOR ALLWINNER A10 M: Maxime Ripard -- 2.51.0 From dddc53806dd2a10e210d5ea08caec6d3f92440b2 Mon Sep 17 00:00:00 2001 From: Vinay Belgaumkar Date: Thu, 16 Jan 2025 10:46:59 -0800 Subject: [PATCH 15/16] drm/xe/ptl: Apply Wa_13011645652 Extend Wa_13011645652 to PTL. Signed-off-by: Vinay Belgaumkar Reviewed-by: Stuart Summers Signed-off-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20250116184659.384874-1-vinay.belgaumkar@intel.com --- drivers/gpu/drm/xe/xe_wa_oob.rules | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index 40438c3d9b72..228436532282 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -28,6 +28,7 @@ 16022287689 GRAPHICS_VERSION(2001) GRAPHICS_VERSION(2004) 13011645652 GRAPHICS_VERSION(2004) + GRAPHICS_VERSION(3001) 14022293748 GRAPHICS_VERSION(2001) GRAPHICS_VERSION(2004) 22019794406 GRAPHICS_VERSION(2001) -- 2.51.0 From d3fedff828bb7e4a422c42caeafd5d974e24ee43 Mon Sep 17 00:00:00 2001 From: Ashutosh Dixit Date: Wed, 15 Jan 2025 14:20:29 -0800 Subject: [PATCH 16/16] drm/xe/oa: Set stream->pollin in xe_oa_buffer_check_unlocked We rely on stream->pollin to decide whether or not to block during poll/read calls. However, currently there are blocking read code paths which don't even set stream->pollin. The best place to consistently set stream->pollin for all code paths is therefore to set it in xe_oa_buffer_check_unlocked. 
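For context, the poll callback reports readiness purely from that flag;
roughly (a sketch, simplified from the xe_oa poll path):

	static __poll_t xe_oa_poll_locked(struct xe_oa_stream *stream,
					  struct file *file, poll_table *wait)
	{
		__poll_t events = 0;

		poll_wait(file, &stream->poll_wq, wait);
		if (stream->pollin)
			events |= EPOLLIN;
		return events;
	}

so any path that detects available OA data must keep stream->pollin in
sync, which setting it inside xe_oa_buffer_check_unlocked() guarantees.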
Fixes: e936f885f1e9 ("drm/xe/oa/uapi: Expose OA stream fd") Signed-off-by: Ashutosh Dixit Acked-by: Rodrigo Vivi Reviewed-by: Jonathan Cavitt Reviewed-by: Umesh Nerlige Ramappa Link: https://patchwork.freedesktop.org/patch/msgid/20250115222029.3002103-1-ashutosh.dixit@intel.com --- drivers/gpu/drm/xe/xe_oa.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 6a08e6c92835..fa873f3d0a9d 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -237,7 +237,6 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream) u32 tail, hw_tail, partial_report_size, available; int report_size = stream->oa_buffer.format->size; unsigned long flags; - bool pollin; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); @@ -282,11 +281,11 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream) stream->oa_buffer.tail = tail; available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head); - pollin = available >= stream->wait_num_reports * report_size; + stream->pollin = available >= stream->wait_num_reports * report_size; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); - return pollin; + return stream->pollin; } static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer) @@ -294,10 +293,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer) struct xe_oa_stream *stream = container_of(hrtimer, typeof(*stream), poll_check_timer); - if (xe_oa_buffer_check_unlocked(stream)) { - stream->pollin = true; + if (xe_oa_buffer_check_unlocked(stream)) wake_up(&stream->poll_wq); - } hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns)); -- 2.51.0