// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <linux/debugfs.h>
.write = ivpu_resume_engine_fn,
};
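+/* Report the currently configured DCT duty cycle; 0 means DCT is disabled */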
+static int dct_active_get(void *data, u64 *active_percent)
+{
+ struct ivpu_device *vdev = data;
+
+ *active_percent = vdev->pm->dct_active_percent;
+
+ return 0;
+}
+
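+/*
+ * Set the DCT duty cycle from debugfs: 0 disables DCT, 1-100 requests that
+ * percentage of active time. The device is woken with runtime PM first
+ * because the request is sent to the firmware over IPC.
+ */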
+static int dct_active_set(void *data, u64 active_percent)
+{
+ struct ivpu_device *vdev = data;
+ int ret;
+
+ if (active_percent > 100)
+ return -EINVAL;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret)
+ return ret;
+
+ if (active_percent)
+ ret = ivpu_pm_dct_enable(vdev, active_percent);
+ else
+ ret = ivpu_pm_dct_disable(vdev);
+
+ ivpu_rpm_put(vdev);
+
+ return ret;
+}
+
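+/* Read-write "dct" debugfs attribute backed by the get/set helpers above */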
+DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+
void ivpu_debugfs_init(struct ivpu_device *vdev)
{
struct dentry *debugfs_root = vdev->drm.debugfs_root;
debugfs_create_file("resume_engine", 0200, debugfs_root, vdev,
&ivpu_resume_engine_fops);
- if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) {
debugfs_create_file("fw_profiling_freq_drive", 0200,
debugfs_root, vdev, &fw_profiling_freq_fops);
+ debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
+ }
}
ivpu_hw_irq_enable(vdev);
ivpu_ipc_enable(vdev);
- if (ivpu_fw_is_cold_boot(vdev))
+ if (ivpu_fw_is_cold_boot(vdev)) {
+ ret = ivpu_pm_dct_init(vdev);
+ if (ret)
+ return ret;
+
return ivpu_hw_sched_init(vdev);
+ }
return 0;
}
case IVPU_HW_IRQ_SRC_MMU_EVTQ:
ivpu_context_abort_invalid(vdev);
break;
+ case IVPU_HW_IRQ_SRC_DCT:
+ ivpu_pm_dct_irq_thread_handler(vdev);
+ break;
default:
ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
break;
#define IVPU_HW_IRQ_SRC_IPC 1
#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
+#define IVPU_HW_IRQ_SRC_DCT 3
struct ivpu_addr_range {
resource_size_t start;
if (!status)
return false;
- if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status))
+ if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
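+ /* Queue DCT request handling to the IRQ thread via the IRQ source FIFO */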
+ if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
+ ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
+ }
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));
return true;
}
-static void dct_drive_40xx(struct ivpu_device *vdev, u32 dct_val)
+int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
{
- u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX);
+ u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW);
+ u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val);
+ u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val);
- val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, CMD, DCT_REQ, val);
- val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, PARAM1,
- dct_val ? DCT_ENABLE : DCT_DISABLE, val);
- val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX, PARAM2, dct_val, val);
+ if (cmd != DCT_REQ) {
+ ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd);
+ return -EBADR;
+ }
- REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX, val);
+ switch (param1) {
+ case DCT_ENABLE:
+ *enable = true;
+ return 0;
+ case DCT_DISABLE:
+ *enable = false;
+ return 0;
+ default:
+ ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1);
+ return -EINVAL;
+ }
}
-void ivpu_hw_btrs_dct_drive(struct ivpu_device *vdev, u32 dct_val)
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
{
- return dct_drive_40xx(vdev, dct_val);
+ u32 val = 0;
+ u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;
+
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val);
+ val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val);
+
+ REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}
static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
#define PLL_PROFILING_FREQ_HIGH 400000000
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
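+/*
+ * DCT (duty cycle throttling) defaults: when throttling is requested, the NPU
+ * spends active_percent of each DCT_PERIOD_US window in D0 and the rest in D0i2.
+ */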
+#define DCT_DEFAULT_ACTIVE_PERCENT 15u
+#define DCT_PERIOD_US 35300u
+
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev);
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev);
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
-void ivpu_hw_btrs_dct_drive(struct ivpu_device *vdev, u32 dct_val);
+int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent);
u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH 0x0000005cu
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR 0x00000060u
-#define VPU_HW_BTRS_LNL_PCODE_MAILBOX 0x00000070u
-#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_CMD_MASK GENMASK(7, 0)
-#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_PARAM1_MASK GENMASK(15, 8)
-#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_PARAM2_MASK GENMASK(23, 16)
-#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_PARAM3_MASK GENMASK(31, 24)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS 0x00000070u
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_CMD_MASK GENMASK(7, 0)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM1_MASK GENMASK(15, 8)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM2_MASK GENMASK(23, 16)
+#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM3_MASK GENMASK(31, 24)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW 0x00000074u
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_CMD_MASK GENMASK(7, 0)
return ret;
}
+
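+/* Ask the firmware to duty-cycle the NPU: active_us in D0, then inactive_us in D0i2 */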
+int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
+ struct vpu_jsm_msg resp;
+
+ req.payload.pwr_dct_control.dct_active_us = active_us;
+ req.payload.pwr_dct_control.dct_inactive_us = inactive_us;
+
+ return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
+ &resp, VPU_IPC_CHAN_ASYNC_CMD,
+ vdev->timeout.jsm);
+}
+
+int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
+ struct vpu_jsm_msg resp;
+
+ return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
+ &resp, VPU_IPC_CHAN_ASYNC_CMD,
+ vdev->timeout.jsm);
+}
u64 buffer_addr, u64 buffer_size, u64 *bytes_written);
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
u64 buffer_size, u32 *sample_size, u64 *info_size);
+int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us);
+int ivpu_jsm_dct_disable(struct ivpu_device *vdev);
#endif
ivpu_dbg(vdev, PM, "Runtime suspend..\n");
- is_idle = ivpu_hw_is_idle(vdev);
+ is_idle = ivpu_hw_is_idle(vdev) || vdev->pm->dct_active_percent;
if (!is_idle)
ivpu_err(vdev, "NPU is not idle before autosuspend\n");
pm_runtime_get_noresume(vdev->drm.dev);
pm_runtime_forbid(vdev->drm.dev);
}
+
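+/* Restore a previously configured DCT duty cycle after a firmware cold boot */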
+int ivpu_pm_dct_init(struct ivpu_device *vdev)
+{
+ if (vdev->pm->dct_active_percent)
+ return ivpu_pm_dct_enable(vdev, vdev->pm->dct_active_percent);
+
+ return 0;
+}
+
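+/* Split DCT_PERIOD_US into active/inactive time by percentage and pass it to firmware */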
+int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent)
+{
+ u32 active_us, inactive_us;
+ int ret;
+
+ if (active_percent == 0 || active_percent > 100)
+ return -EINVAL;
+
+ active_us = (DCT_PERIOD_US * active_percent) / 100;
+ inactive_us = DCT_PERIOD_US - active_us;
+
+ ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us);
+ if (ret) {
+ ivpu_err_ratelimited(vdev, "Filed to enable DCT: %d\n", ret);
+ return ret;
+ }
+
+ vdev->pm->dct_active_percent = active_percent;
+
+ ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n",
+ active_percent, active_us, inactive_us);
+ return 0;
+}
+
+int ivpu_pm_dct_disable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_jsm_dct_disable(vdev);
+ if (ret) {
+ ivpu_err_ratelimited(vdev, "Filed to disable DCT: %d\n", ret);
+ return ret;
+ }
+
+ vdev->pm->dct_active_percent = 0;
+
+ ivpu_dbg(vdev, PM, "DCT disabled\n");
+ return 0;
+}
+
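+/*
+ * Thread context handler for a pcode DCT request: decode the request latched
+ * in the shadow mailbox, enable DCT at the default duty cycle or disable it,
+ * then acknowledge the resulting state through the status mailbox.
+ */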
+void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev)
+{
+ bool enable;
+ int ret;
+
+ if (ivpu_hw_btrs_dct_get_request(vdev, &enable))
+ return;
+
+ if (vdev->pm->dct_active_percent)
+ ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT);
+ else
+ ret = ivpu_pm_dct_disable(vdev);
+
+ if (!ret)
+ ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent);
+}
atomic_t reset_counter;
atomic_t reset_pending;
bool is_warmboot;
+ u8 dct_active_percent; /* requested DCT duty cycle, 0 when DCT is disabled */
};
void ivpu_pm_init(struct ivpu_device *vdev);
void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
+int ivpu_pm_dct_init(struct ivpu_device *vdev);
+int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent);
+int ivpu_pm_dct_disable(struct ivpu_device *vdev);
+void ivpu_pm_dct_irq_thread_handler(struct ivpu_device *vdev);
+
#endif /* __IVPU_PM_H__ */