enum xe_sriov_vf_ccs_rw_ctxs ctx_id)
{
struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);
- struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
struct xe_sa_manager *bb_pool;
int err;
* So, this extra DW acts as a guard here.
*/
- bb_pool = tile->sriov.vf.ccs[ctx_id].mem.ccs_bb_pool;
+ bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
bb->bo = xe_sa_bo_new(bb_pool, 4 * (dwords + 1));
if (IS_ERR(bb->bo)) {
struct {
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
struct xe_ggtt_node *ggtt_balloon[2];
-
- /** @sriov.vf.ccs: CCS read and write contexts for VF. */
- struct xe_tile_vf_ccs ccs[XE_SRIOV_VF_CCS_CTX_COUNT];
} vf;
} sriov;
static int sa_info_vf_ccs(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
struct xe_sa_manager *bb_pool;
enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
xe_pm_runtime_get(gt_to_xe(gt));
for_each_ccs_rw_ctx(ctx_id) {
- bb_pool = tile->sriov.vf.ccs[ctx_id].mem.ccs_bb_pool;
+ bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
if (!bb_pool)
break;
return round_up(bb_pool_size * 2, SZ_1M);
}
-static int alloc_bb_pool(struct xe_tile *tile, struct xe_tile_vf_ccs *ctx)
+static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx)
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_sa_manager *sa_manager;
return 0;
}
-static void ccs_rw_update_ring(struct xe_tile_vf_ccs *ctx)
+static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
{
u64 addr = xe_sa_manager_gpu_addr(ctx->mem.ccs_bb_pool);
struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
}
-static int register_save_restore_context(struct xe_tile_vf_ccs *ctx)
+static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx)
{
int ctx_type;
*/
int xe_sriov_vf_ccs_register_context(struct xe_device *xe)
{
- struct xe_tile *tile = xe_device_get_root_tile(xe);
enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
- struct xe_tile_vf_ccs *ctx;
+ struct xe_sriov_vf_ccs_ctx *ctx;
int err;
xe_assert(xe, IS_VF_CCS_READY(xe));
for_each_ccs_rw_ctx(ctx_id) {
- ctx = &tile->sriov.vf.ccs[ctx_id];
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
err = register_save_restore_context(ctx);
if (err)
return err;
static void xe_sriov_vf_ccs_fini(void *arg)
{
- struct xe_tile_vf_ccs *ctx = arg;
+ struct xe_sriov_vf_ccs_ctx *ctx = arg;
struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
/*
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
- struct xe_tile_vf_ccs *ctx;
+ struct xe_sriov_vf_ccs_ctx *ctx;
struct xe_exec_queue *q;
u32 flags;
int err;
return 0;
for_each_ccs_rw_ctx(ctx_id) {
- ctx = &tile->sriov.vf.ccs[ctx_id];
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
ctx->ctx_id = ctx_id;
flags = EXEC_QUEUE_FLAG_KERNEL |
{
struct xe_device *xe = xe_bo_device(bo);
enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
- struct xe_tile_vf_ccs *ctx;
+ struct xe_sriov_vf_ccs_ctx *ctx;
struct xe_tile *tile;
struct xe_bb *bb;
int err = 0;
/* bb should be NULL here. Assert if not NULL */
xe_assert(xe, !bb);
- ctx = &tile->sriov.vf.ccs[ctx_id];
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
err = xe_migrate_ccs_rw_copy(tile, ctx->mig_q, bo, ctx_id);
}
return err;
#ifndef _XE_SRIOV_VF_CCS_TYPES_H_
#define _XE_SRIOV_VF_CCS_TYPES_H_
+#include <linux/types.h>
+
#define for_each_ccs_rw_ctx(id__) \
for ((id__) = 0; (id__) < XE_SRIOV_VF_CCS_CTX_COUNT; (id__)++)
struct xe_migrate;
struct xe_sa_manager;
-struct xe_tile_vf_ccs {
- /** @id: Id to which context it belongs to */
+/**
+ * struct xe_sriov_vf_ccs_ctx - VF CCS migration context data.
+ */
+struct xe_sriov_vf_ccs_ctx {
+ /** @ctx_id: ID of the CCS read/write context */
enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
/** @mig_q: exec queues used for migration */
struct xe_exec_queue *mig_q;
+ /** @mem: memory used by this CCS context */
struct {
- /** @ccs_bb_pool: Pool from which batch buffers are allocated. */
+ /** @mem.ccs_bb_pool: Pool from which batch buffers are allocated. */
struct xe_sa_manager *ccs_bb_pool;
} mem;
};
+/**
+ * struct xe_sriov_vf_ccs - The VF CCS migration support data.
+ */
+struct xe_sriov_vf_ccs {
+ /** @contexts: CCS read and write contexts for VF. */
+ struct xe_sriov_vf_ccs_ctx contexts[XE_SRIOV_VF_CCS_CTX_COUNT];
+
+ /** @initialized: Set once VF CCS initialization has completed. */
+ bool initialized;
+};
+
#endif
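For reference, a minimal sketch (not part of this patch) of a lookup against the new per-device layout; xe_sriov_vf_ccs_get_ctx() is a hypothetical helper name, while struct xe_device, struct xe_sriov_vf_ccs_ctx and the contexts[] array come from the declarations above:

static inline struct xe_sriov_vf_ccs_ctx *
xe_sriov_vf_ccs_get_ctx(struct xe_device *xe, enum xe_sriov_vf_ccs_rw_ctxs ctx_id)
{
	/* The contexts now live on the device, not on the root tile. */
	return &xe->sriov.vf.ccs.contexts[ctx_id];
}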
#include <linux/types.h>
#include <linux/workqueue_types.h>
+#include "xe_sriov_vf_ccs_types.h"
+
/**
* struct xe_sriov_vf_relay_version - PF ABI version details.
*/
} migration;
/** @ccs: VF CCS state data */
- struct {
- /** @ccs.initialized: Initilalization of VF CCS is completed or not */
- bool initialized;
- } ccs;
+ struct xe_sriov_vf_ccs ccs;
};
#endif
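A short usage sketch of the relocated state, assuming only the declarations introduced above; the function and its logging are illustrative, not part of this patch:

static void vf_ccs_dump_contexts(struct xe_device *xe)
{
	enum xe_sriov_vf_ccs_rw_ctxs ctx_id;

	/* The initialized flag moved along with the contexts array. */
	if (!xe->sriov.vf.ccs.initialized)
		return;

	for_each_ccs_rw_ctx(ctx_id)
		drm_info(&xe->drm, "CCS ctx %d: bb pool %s\n", ctx_id,
			 xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool ?
			 "allocated" : "missing");
}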