size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
+ if (!IS_DGFX(xe) && !XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
{
struct xe_device *xe = to_xe_device(display->drm);
- return XE_WA(xe_root_mmio_gt(xe), 16023588340);
+ return XE_GT_WA(xe_root_mmio_gt(xe), 16023588340);
}
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
- if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display))
+ if (XE_GT_WA(xe_root_mmio_gt(xe), 22019338487_display))
return NULL;
/*
}
if (xe->tiles->media_gt &&
- XE_WA(xe->tiles->media_gt, 15015404425_disable))
+ XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
XE_DEVICE_WA_DISABLE(xe, 15015404425);
err = xe_devcoredump_init(xe);
gt = xe_root_mmio_gt(xe);
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
return;
root_gt = xe_root_mmio_gt(xe);
- if (XE_WA(root_gt, 16023588340)) {
+ if (XE_GT_WA(root_gt, 16023588340)) {
/* A transient flush is not sufficient: flush the L2 */
xe_device_l2_flush(xe);
} else {
return -ETIMEDOUT;
}
- if (XE_WA(gt, 22016596838))
+ if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
cancel_delayed_work_sync(&stream->buf_poll_work);
- if (XE_WA(gt, 22016596838))
+ if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
_MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
struct xe_tile *tile = ggtt->tile;
- struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
+ struct xe_gt *affected_gt = XE_GT_WA(tile->primary_gt, 22019338487) ?
tile->primary_gt : tile->media_gt;
struct xe_mmio *mmio = &affected_gt->mmio;
- u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
+ u32 max_gtt_writes = XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
/*
* Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
* to wait for completion of prior GTT writes before letting this through.
if (GRAPHICS_VERx100(xe) >= 1270)
ggtt->pt_ops = (ggtt->tile->media_gt &&
- XE_WA(ggtt->tile->media_gt, 22019338487)) ||
- XE_WA(ggtt->tile->primary_gt, 22019338487) ?
+ XE_GT_WA(ggtt->tile->media_gt, 22019338487)) ||
+ XE_GT_WA(ggtt->tile->primary_gt, 22019338487) ?
&xelpg_pt_wa_ops : &xelpg_pt_ops;
else
ggtt->pt_ops = &xelp_pt_ops;
unsigned int fw_ref;
int ret;
- if (XE_WA(tile->primary_gt, 14018094691)) {
+ if (XE_GT_WA(tile->primary_gt, 14018094691)) {
fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
/*
ret = gsc_upload(gsc);
- if (XE_WA(tile->primary_gt, 14018094691))
+ if (XE_GT_WA(tile->primary_gt, 14018094691))
xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);
if (ret)
u32 gs1_clr = prep ? 0 : HECI_H_GS1_ER_PREP;
/* WA only applies if the GSC is loaded */
- if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
+ if (!XE_GT_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
return;
xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
unsigned int fw_ref;
u32 reg;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
unsigned int fw_ref;
u32 reg;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
if (xe_gt_is_media_type(gt))
if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
- XE_WA(gt, 22019338487))
+ XE_GT_WA(gt, 22019338487))
ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
return ret;
* but there's no tracking number assigned yet so we use a custom
* OOB workaround descriptor.
*/
- if (XE_WA(gt, no_media_l3))
+ if (XE_GT_WA(gt, no_media_l3))
return;
if (GRAPHICS_VER(xe) >= 30) {
unsigned long *oob;
/**
* @wa_active.oob_initialized: mark oob as initialized to help
- * detecting misuse of XE_WA() - it can only be called on
+ * detecting misuse of XE_GT_WA() - it can only be called on
* initialization after OOB WAs have been processed
*/
bool oob_initialized;
* on RCS and CCSes with different address spaces, which on DG2 is
* required as a WA for an HW bug.
*/
- if (XE_WA(gt, 22011391025))
+ if (XE_GT_WA(gt, 22011391025))
return true;
/*
struct xe_gt *gt = guc_to_gt(guc);
u32 flags = 0;
- if (XE_WA(gt, 22012773006))
+ if (XE_GT_WA(gt, 22012773006))
flags |= GUC_WA_POLLCS;
- if (XE_WA(gt, 14014475959))
+ if (XE_GT_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
if (needs_wa_dual_queue(gt))
if (GRAPHICS_VERx100(xe) < 1270)
flags |= GUC_WA_PRE_PARSER;
- if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
+ if (XE_GT_WA(gt, 22012727170) || XE_GT_WA(gt, 22012727685))
flags |= GUC_WA_CONTEXT_ISOLATION;
- if (XE_WA(gt, 18020744125) &&
+ if (XE_GT_WA(gt, 18020744125) &&
!xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
- if (XE_WA(gt, 1509372804))
+ if (XE_GT_WA(gt, 1509372804))
flags |= GUC_WA_RENDER_RST_RC6_EXIT;
- if (XE_WA(gt, 14018913170))
+ if (XE_GT_WA(gt, 14018913170))
flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
return flags;
count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
- if (XE_WA(gt, 1607983814))
+ if (XE_GT_WA(gt, 1607983814))
count += LNCFCMOCS_REG_COUNT;
return count * sizeof(struct guc_mmio_reg);
offset = guc_ads_waklv_offset(ads);
remain = guc_ads_waklv_size(ads);
- if (XE_WA(gt, 14019882105) || XE_WA(gt, 16021333562))
+ if (XE_GT_WA(gt, 14019882105) || XE_GT_WA(gt, 16021333562))
guc_waklv_enable(ads, NULL, 0, &offset, &remain,
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED);
- if (XE_WA(gt, 18024947630))
+ if (XE_GT_WA(gt, 18024947630))
guc_waklv_enable(ads, NULL, 0, &offset, &remain,
GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING);
- if (XE_WA(gt, 16022287689))
+ if (XE_GT_WA(gt, 16022287689))
guc_waklv_enable(ads, NULL, 0, &offset, &remain,
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE);
- if (XE_WA(gt, 14022866841))
+ if (XE_GT_WA(gt, 14022866841))
guc_waklv_enable(ads, NULL, 0, &offset, &remain,
GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO);
* the default value for this register is determined to be 0xC40. This could change in the
* future, so GuC depends on KMD to send it the correct value.
*/
- if (XE_WA(gt, 13011645652)) {
+ if (XE_GT_WA(gt, 13011645652)) {
u32 data = 0xC40;
guc_waklv_enable(ads, &data, sizeof(data) / sizeof(u32), &offset, &remain,
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE);
}
- if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406))
+ if (XE_GT_WA(gt, 14022293748) || XE_GT_WA(gt, 22019794406))
guc_waklv_enable(ads, NULL, 0, &offset, &remain,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET);
- if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_GT_WA(gt, 16026508708))
guc_waklv_enable(ads, NULL, 0, &offset, &remain,
GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH);
- if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 47, 0) && XE_WA(gt, 16026007364)) {
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 47, 0) && XE_GT_WA(gt, 16026007364)) {
u32 data[] = {
0x0,
0xF,
guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
}
- if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_GT_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
guc_mmio_regset_write_one(ads, regset_map,
XELP_LNCFCMOCS(i), count++);
*/
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
- if (XE_WA(pc_to_gt(pc), 22019338487)) {
+ if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
if (wait_for_flush_complete(pc) != 0)
return -EAGAIN;
}
{
struct xe_gt *gt = pc_to_gt(pc);
- if (XE_WA(gt, 22019338487)) {
+ if (XE_GT_WA(gt, 22019338487)) {
if (xe_gt_is_media_type(gt))
return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
else
if (pc_get_min_freq(pc) > pc->rp0_freq)
ret = pc_set_min_freq(pc, pc->rp0_freq);
- if (XE_WA(tile->primary_gt, 14022085890))
+ if (XE_GT_WA(tile->primary_gt, 14022085890))
ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
out:
{
struct xe_gt *gt = pc_to_gt(pc);
- return XE_WA(gt, 22019338487) &&
+ return XE_GT_WA(gt, 22019338487) &&
pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
}
{
int ret;
- if (!XE_WA(pc_to_gt(pc), 22019338487))
+ if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
return 0;
guard(mutex)(&pc->freq_lock);
u32 maxcnt_units_ns = 640;
bool inhibit_switch = 0;
- if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) {
+ if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_GT_WA(gt, 16023105232)) {
idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));
static bool
gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class)
{
- if (XE_WA(gt, 16010904313) &&
+ if (XE_GT_WA(gt, 16010904313) &&
(class == XE_ENGINE_CLASS_RENDER ||
class == XE_ENGINE_CLASS_COMPUTE))
return true;
const u32 ts_addr = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
u32 *cmd = batch;
- if (!XE_WA(lrc->gt, 16010904313) ||
+ if (!XE_GT_WA(lrc->gt, 16010904313) ||
!(hwe->class == XE_ENGINE_CLASS_RENDER ||
hwe->class == XE_ENGINE_CLASS_COMPUTE ||
hwe->class == XE_ENGINE_CLASS_COPY ||
{
u32 *cmd = batch;
- if (!XE_WA(lrc->gt, 18022495364) ||
+ if (!XE_GT_WA(lrc->gt, 18022495364) ||
hwe->class != XE_ENGINE_CLASS_RENDER)
return 0;
* continue to emit all of the SVG state since it's best not to leak
* any of the state between contexts, even if that leakage is harmless.
*/
- if (XE_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_GT_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
state_table = xe_hpg_svg_state;
state_table_size = ARRAY_SIZE(xe_hpg_svg_state);
}
u32 sqcnt1;
/* Enable thread stall DOP gating and EU DOP gating. */
- if (XE_WA(stream->gt, 1508761755)) {
+ if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (XE_WA(stream->gt, 1508761755)) {
+ if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
* GuC reset of engines causes OA to lose configuration
* state. Prevent this by overriding GUCRC mode.
*/
- if (XE_WA(stream->gt, 1509372804)) {
+ if (XE_GT_WA(stream->gt, 1509372804)) {
ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc,
SLPC_GUCRC_MODE_GUCRC_NO_RC6);
if (ret)
{
u32 reg, shift;
- if (XE_WA(gt, 18013179988) || XE_WA(gt, 14015568240)) {
+ if (XE_GT_WA(gt, 18013179988) || XE_GT_WA(gt, 14015568240)) {
xe_pm_runtime_get(gt_to_xe(gt));
reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
/* L3bank mask may not be available for some GTs */
- if (!XE_WA(gt, no_media_l3))
+ if (!XE_GT_WA(gt, no_media_l3))
query_size += sizeof(struct drm_xe_query_topology_mask) +
sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
}
* mask, then it's better to omit L3 from the query rather than
* reporting bogus or zeroed information to userspace.
*/
- if (!XE_WA(gt, no_media_l3)) {
+ if (!XE_GT_WA(gt, no_media_l3)) {
topo.type = DRM_XE_TOPO_L3_BANK;
err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
sizeof(gt->fuse_topo.l3_bank_mask));
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 flags;
- if (XE_WA(gt, 14016712196))
+ if (XE_GT_WA(gt, 14016712196))
i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
- if (XE_WA(gt, 1409600907))
+ if (XE_GT_WA(gt, 1409600907))
flags |= PIPE_CONTROL_DEPTH_STALL;
if (lacks_render)
if (hwe->class != XE_ENGINE_CLASS_RENDER)
return i;
- if (XE_WA(hwe->gt, 16020292621))
+ if (XE_GT_WA(hwe->gt, 16020292621))
i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_LRI_POST_SYNC,
RING_NOPID(hwe->mmio_base).addr, 0);
stolen_size -= wopcm_size;
- if (media_gt && XE_WA(media_gt, 14019821291)) {
+ if (media_gt && XE_GT_WA(media_gt, 14019821291)) {
u64 gscpsmi_base = xe_mmio_read64_2x32(&media_gt->mmio, GSCPSMI_BASE)
& ~GENMASK_ULL(5, 0);
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
- if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
+ if (XE_GT_WA(xe_root_mmio_gt(xe), 14016763929))
args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
if (IS_SRIOV_VF(tile->xe))
return;
- if (XE_WA(tile->primary_gt, 22010954014))
+ if (XE_GT_WA(tile->primary_gt, 22010954014))
xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
/**
- * XE_WA - Out-of-band workarounds, to be queried and called as needed.
+ * XE_GT_WA - Out-of-band GT workarounds, to be queried and called as needed.
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
-#define XE_WA(gt__, id__) ({ \
+#define XE_GT_WA(gt__, id__) ({ \
xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \
test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
})
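For reference, a minimal usage sketch of the renamed macro, mirroring the Wa_16023588340 caller in the hunks above. This is illustrative only and not part of the patch: the helper name maybe_flush_l2 is made up, the usual xe driver header includes are omitted, and it assumes OOB workarounds have already been processed, since XE_GT_WA() asserts wa_active.oob_initialized.

/* Query an OOB GT workaround, then apply it only where it is needed. */
static void maybe_flush_l2(struct xe_device *xe)
{
	struct xe_gt *gt = xe_root_mmio_gt(xe);

	/* Unknown ids fail the build; the macro also asserts OOB WAs were processed. */
	if (!XE_GT_WA(gt, 16023588340))
		return;

	/* A transient flush is not sufficient: flush the L2. */
	xe_device_l2_flush(xe);
}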