#define RTW8852B_IQK_SS 2
 #define RTW8852B_RXK_GROUP_NR 4
 #define RTW8852B_TSSI_PATH_NR 2
+/* RF parameter release version and DPK (digital pre-distortion K) metadata */
+#define RTW8852B_RF_REL_VERSION 34
+#define RTW8852B_DPK_VER 0x0d
+#define RTW8852B_DPK_RF_PATH 2
+#define RTW8852B_DPK_KIP_REG_NUM 2
 
 #define _TSSI_DE_MASK GENMASK(21, 12)
 #define ADDC_T_AVG 100
+/* Legal TXAGC window for DPK; DPK_TXAGC_INVAL flags a failed calibration */
+#define DPK_TXAGC_LOWER 0x2e
+#define DPK_TXAGC_UPPER 0x3f
+#define DPK_TXAGC_INVAL 0xff
+/* RF0 field masks: RXBB gain in bits [9:5], RF mode in bits [19:16] */
+#define RFREG_MASKRXBB 0x003e0
+#define RFREG_MASKMODE 0xf0000
+
+/* Sub-command ids issued to the NCTL engine by _dpk_one_shot().
+ * NOTE(review): the D_-prefixed ids (0x28..0x31) appear to be a second
+ * command set; their semantics are not visible in this file.
+ */
+enum rtw8852b_dpk_id {
+       LBK_RXIQK       = 0x06,
+       SYNC            = 0x10,
+       MDPK_IDL        = 0x11,
+       MDPK_MPA        = 0x12,
+       GAIN_LOSS       = 0x13,
+       GAIN_CAL        = 0x14,
+       DPK_RXAGC       = 0x15,
+       KIP_PRESET      = 0x16,
+       KIP_RESTORE     = 0x17,
+       DPK_TXAGC       = 0x19,
+       D_KIP_PRESET    = 0x28,
+       D_TXAGC         = 0x29,
+       D_RXAGC         = 0x2a,
+       D_SYNC          = 0x2b,
+       D_GAIN_LOSS     = 0x2c,
+       D_MDPK_IDL      = 0x2d,
+       D_GAIN_NORM     = 0x2f,
+       D_KIP_THERMAL   = 0x30,
+       D_KIP_RESTORE   = 0x31
+};
+
+/* States of the _dpk_agc() state machine */
+enum dpk_agc_step {
+       DPK_AGC_STEP_SYNC_DGAIN,
+       DPK_AGC_STEP_GAIN_ADJ,
+       DPK_AGC_STEP_GAIN_LOSS_IDX,
+       DPK_AGC_STEP_GL_GT_CRITERION,
+       DPK_AGC_STEP_GL_LT_CRITERION,
+       DPK_AGC_STEP_SET_TX_GAIN,
+};
 
 enum rtw8852b_iqk_type {
        ID_TXAGC = 0x0,
        }
 }
 
+static void _rfk_rf_direct_cntrl(struct rtw89_dev *rtwdev,
+                                enum rtw89_rf_path path, bool is_bybb)
+{
+       /* Route RF control: 1 = BB direct control, 0 = RF-internal control */
+       rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, is_bybb ? 0x1 : 0x0);
+}
+
+static void _rfk_drf_direct_cntrl(struct rtw89_dev *rtwdev,
+                                 enum rtw89_rf_path path, bool is_bybb)
+{
+       /* Route digital-RF (BBDC) control: 1 = BB direct, 0 = RF-internal */
+       rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, is_bybb ? 0x1 : 0x0);
+}
+
 static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path)
 {
        bool fail = true;
        }
 }
 
+static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, const u32 reg[],
+                         u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
+{
+       u32 addr;
+       u8 i;
+
+       /* Save the per-path KIP registers so they can be restored after DPK */
+       for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
+               addr = reg[i] + (path << 8);
+               reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD);
+               rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
+                           addr, reg_bkup[path][i]);
+       }
+}
+
+static void _dpk_reload_kip(struct rtw89_dev *rtwdev, const u32 reg[],
+                           const u32 reg_bkup[][RTW8852B_DPK_KIP_REG_NUM], u8 path)
+{
+       u32 addr;
+       u8 i;
+
+       /* Write back the KIP register values saved by _dpk_bkup_kip() */
+       for (i = 0; i < RTW8852B_DPK_KIP_REG_NUM; i++) {
+               addr = reg[i] + (path << 8);
+               rtw89_phy_write32_mask(rtwdev, addr, MASKDWORD, reg_bkup[path][i]);
+               rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
+                           addr, reg_bkup[path][i]);
+       }
+}
+
+static u8 _dpk_order_convert(struct rtw89_dev *rtwdev)
+{
+       u8 order;
+       u8 val;
+
+       /* Translate the MDPD order programmed in B_LDL_NORM_OP into the
+        * bitmap written to the DPD result registers:
+        * order 0 -> 0x3, 1 -> 0x1, 2 (or more) -> 0x0.
+        */
+       order = rtw89_phy_read32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP);
+       val = 0x3 >> order;
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] convert MDPD order to 0x%x\n", val);
+
+       return val;
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       u8 val, kidx = dpk->cur_idx[path];
+
+       /* Enable only when DPK is globally on, not being forced off, and the
+        * stored K result for this path/index is valid.
+        */
+       val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
+
+       /* Byte 3 of the per-path/kidx DPD slot: order bits << 1 | enable */
+       rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+                              MASKBYTE3, _dpk_order_convert(rtwdev) << 1 | val);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
+                   kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
+}
+
+static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                         enum rtw89_rf_path path, enum rtw8852b_dpk_id id)
+{
+       u16 dpk_cmd;
+       u32 val;
+       int ret;
+
+       /* Issue a one-shot DPK sub-command to the NCTL engine and poll for
+        * completion.  Command word: sub-command id in bits [15:8], per-path
+        * base 0x19 + (path << 4) in the low byte.
+        */
+       dpk_cmd = (id << 8) | (0x19 + (path << 4));
+       rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
+
+       ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+                                      1, 20000, false,
+                                      rtwdev, 0xbff8, MASKBYTE0);
+       if (ret)
+               rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 20ms!!!!\n");
+
+       udelay(1);
+
+       rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);
+
+       /* Second handshake is shorter: 2000 us */
+       ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
+                                      1, 2000, false,
+                                      rtwdev, 0x80fc, MASKLWORD);
+       if (ret)
+               rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot over 2ms!!!!\n");
+
+       rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] one-shot for %s = 0x%x\n",
+                   id == LBK_RXIQK ? "LBK_RXIQK" :
+                   id == SYNC ? "SYNC" :
+                   id == MDPK_IDL ? "MDPK_IDL" :
+                   id == MDPK_MPA ? "MDPK_MPA" :
+                   id == GAIN_LOSS ? "GAIN_LOSS" :
+                   id == GAIN_CAL ? "GAIN_CAL" :
+                   id == DPK_RXAGC ? "DPK_RXAGC" :
+                   id == KIP_PRESET ? "KIP_PRESET" :
+                   id == KIP_RESTORE ? "KIP_RESTORE" : "DPK_TXAGC",
+                   dpk_cmd);
+}
+
+static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                       enum rtw89_rf_path path)
+{
+       /* Run RX DC offset calibration with RR_EN_TIA_IDA forced to 0x3 */
+       rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
+       _set_rx_dck(rtwdev, phy, path);
+}
+
+static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                            enum rtw89_rf_path path)
+{
+       const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+       u8 kidx = dpk->cur_idx[path];
+
+       /* Snapshot the channel this K runs on so a later pass can decide
+        * whether a stored result still applies (see _dpk_reload_check()).
+        */
+       dpk->bp[path][kidx].band = chan->band_type;
+       dpk->bp[path][kidx].ch = chan->channel;
+       dpk->bp[path][kidx].bw = chan->band_width;
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
+                   path, dpk->cur_idx[path], phy,
+                   rtwdev->is_tssi_mode[path] ? "on" : "off",
+                   rtwdev->dbcc_en ? "on" : "off",
+                   dpk->bp[path][kidx].band == 0 ? "2G" :
+                   dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
+                   dpk->bp[path][kidx].ch,
+                   dpk->bp[path][kidx].bw == 0 ? "20M" :
+                   dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
+}
+
+static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
+                               enum rtw89_phy_idx phy,
+                               enum rtw89_rf_path path, u8 kpath)
+{
+       const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+       /* Bulk BB/AFE DPK setup comes from a static parameter table */
+       rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_defs_tbl);
+
+       /* 80 MHz additionally needs the extended channel-filter bandwidth */
+       if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
+               rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x1);
+               rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, B_PATH1_BW_SEL_EX, 0x1);
+       }
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
+}
+
+static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
+                               enum rtw89_phy_idx phy,
+                               enum rtw89_rf_path path, u8 kpath)
+{
+       const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+       /* Undo _dpk_bb_afe_setting(): table restore plus the 80 MHz extras */
+       rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_afe_restore_defs_tbl);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
+
+       if (chan->band_width == RTW89_CHANNEL_WIDTH_80) {
+               rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_EX, 0x0);
+               rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,  B_PATH1_BW_SEL_EX, 0x0);
+       }
+}
+
+static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
+                           enum rtw89_rf_path path, bool is_pause)
+{
+       /* B_P0_TSSI_TRK_EN takes is_pause directly, so 1 pauses TSSI
+        * tracking and 0 resumes it (per the debug message below).
+        */
+       rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+                              B_P0_TSSI_TRK_EN, is_pause);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
+                   is_pause ? "pause" : "resume");
+}
+
+static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
+                            enum rtw89_rf_path path)
+{
+       rtw89_rfk_parser(rtwdev, &rtw8852b_dpk_kip_defs_tbl);
+
+       /* B_DPD_COM_OF is only set on chip cuts newer than CAV */
+       if (rtwdev->hal.cv > CHIP_CAV)
+               rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), B_DPD_COM_OF, 0x1);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
+}
+
+static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                          enum rtw89_rf_path path)
+{
+       u8 cur_rxbb;
+       u32 tmp;
+
+       cur_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
+
+       rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
+       rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);
+
+       /* Preserve the channel config word across the mode switch */
+       tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+       rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
+       /* Switch RF mode to 0xd for the loopback RX IQK; restored to 0x5
+        * at the end of this function.
+        */
+       rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
+       rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
+
+       /* Loopback attenuation depends on the current RXBB gain */
+       if (cur_rxbb >= 0x11)
+               rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13)1;
+       else if (cur_rxbb <= 0xa)
+               rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
+       else
+               rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);
+
+       rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
+       rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+       rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
+       /* NOTE(review): fixed settle delay, presumably for the RXK PLL */
+       udelay(70);
+
+       rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+       rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);
+
+       _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
+                   rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
+
+       /* Tear down the loopback and return RF to its previous mode */
+       rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+       rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
+       rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
+       rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
+       rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
+       rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
+}
+
+static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+       /* Toggle the thermal-meter trigger (1 -> 0 -> 1) and give the sensor
+        * time before sampling the value recorded for this K slot.
+        */
+       rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
+       rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
+       rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
+
+       udelay(200);
+
+       dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
+                   dpk->bp[path][kidx].ther_dpk);
+}
+
+static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+                           enum rtw89_rf_path path, u8 kidx)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+       /* Band-specific RF front-end setup for the DPK loop.  The @gain
+        * parameter is currently unused in this function.
+        */
+       if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
+               rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
+               rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
+               rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+               rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+       } else {
+               /* 5G/6G needs extra LNA/IQ-generator configuration */
+               rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
+               rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
+               rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+               rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+               rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
+               rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
+               rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
+       }
+
+       rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
+       /* TX BB filter bandwidth follows the calibrated channel bandwidth */
+       rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
+       rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] ARF 0x0/0x11/0x1a = 0x%x/ 0x%x/ 0x%x\n",
+                   rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
+                   rtw89_read_rf(rtwdev, path, RR_TXIG, RFREG_MASK),
+                   rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
+}
+
+static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
+                              enum rtw89_rf_path path, bool is_bypass)
+{
+       u32 addr = R_RXIQC + (path << 8);
+
+       /* Either short-circuit the RX CFIR during DPK or put it back */
+       if (is_bypass) {
+               rtw89_phy_write32_mask(rtwdev, addr, B_RXIQC_BYPASS2, 0x1);
+               rtw89_phy_write32_mask(rtwdev, addr, B_RXIQC_BYPASS, 0x1);
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
+                           rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+       } else {
+               rtw89_phy_write32_clr(rtwdev, addr, B_RXIQC_BYPASS2);
+               rtw89_phy_write32_clr(rtwdev, addr, B_RXIQC_BYPASS);
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
+                           rtw89_phy_read32_mask(rtwdev, addr, MASKDWORD));
+       }
+}
+
+static
+void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       u8 bw = dpk->bp[path][kidx].bw;
+
+       /* Pick the tone-pattern generator matching the calibrated bandwidth */
+       switch (bw) {
+       case RTW89_CHANNEL_WIDTH_80:
+               rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
+               break;
+       case RTW89_CHANNEL_WIDTH_40:
+               rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
+               break;
+       default:
+               rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
+               break;
+       }
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
+                   bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
+                   bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
+}
+
+static void _dpk_table_select(struct rtw89_dev *rtwdev,
+                             enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+       u8 tbl;
+
+       /* LUT selector: base 0x80, 0x20 per K index, 0x10 per gain index */
+       tbl = 0x80 + (kidx << 5) + (gain << 4);
+       rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, tbl);
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
+                   gain, tbl);
+}
+
+static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+/* Pass/fail thresholds for the SYNC report */
+#define DPK_SYNC_TH_DC_I 200
+#define DPK_SYNC_TH_DC_Q 200
+#define DPK_SYNC_TH_CORR 170
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       u16 dc_i, dc_q;
+       u8 corr_val, corr_idx;
+
+       /* Report page 0: correlation index and value */
+       rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
+
+       corr_idx = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
+       corr_val = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
+                   path, corr_idx, corr_val);
+
+       dpk->corr_idx[path][kidx] = corr_idx;
+       dpk->corr_val[path][kidx] = corr_val;
+
+       /* Report page 9: DC offset as 12-bit signed I/Q */
+       rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
+
+       dc_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+       dc_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
+
+       dc_i = abs(sign_extend32(dc_i, 11));
+       dc_q = abs(sign_extend32(dc_q, 11));
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
+                   path, dc_i, dc_q);
+
+       dpk->dc_i[path][kidx] = dc_i;
+       dpk->dc_q[path][kidx] = dc_q;
+
+       /* Return true on failure: DC too large or correlation too weak */
+       if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
+           corr_val < DPK_SYNC_TH_CORR)
+               return true;
+       else
+               return false;
+}
+
+static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                     enum rtw89_rf_path path, u8 kidx)
+{
+       /* Run the SYNC one-shot, then report true when the result is out of
+        * spec (see _dpk_sync_check()).
+        */
+       _dpk_one_shot(rtwdev, phy, path, SYNC);
+
+       return _dpk_sync_check(rtwdev, path, kidx);
+}
+
+static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
+{
+       u16 dgain;
+
+       /* Report page 0 exposes the digital gain in the DCI field */
+       rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+
+       dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);
+
+       return dgain;
+}
+
+static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
+{
+       /* Digital-gain thresholds (descending) and the RXBB gain offset used
+        * when dgain >= bnd[i].  bnd[0] and bnd[1] intentionally map to the
+        * same offset, matching the original hand-written chain; anything
+        * below bnd[14] maps to -8.
+        */
+       static const u16 bnd[15] = {
+               0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
+               0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
+       };
+       static const s8 gain_ofst[15] = {
+               6, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5, -6, -7
+       };
+       s8 offset = -8;
+       u8 i;
+
+       for (i = 0; i < ARRAY_SIZE(bnd); i++) {
+               if (dgain >= bnd[i]) {
+                       offset = gain_ofst[i];
+                       break;
+               }
+       }
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
+
+       return offset;
+}
+
+static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
+{
+       /* Select report page 6 and set B_DPK_CFG2_ST before reading the
+        * gain-loss index.
+        */
+       rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+       rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+
+       return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
+}
+
+static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                         enum rtw89_rf_path path, u8 kidx)
+{
+       /* Run a GAIN_LOSS one-shot against the gain-index-1 table */
+       _dpk_table_select(rtwdev, path, kidx, 1);
+       _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+}
+
+static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                           enum rtw89_rf_path path, u8 kidx)
+{
+       /* Choose the TPG for the current bandwidth, then preset the KIP */
+       _dpk_tpg_sel(rtwdev, path, kidx);
+       _dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
+}
+
+static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
+                               enum rtw89_rf_path path)
+{
+       /* Fixed power/clock enable sequence for the KIP block */
+       rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+       rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
+       rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
+}
+
+static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                              enum rtw89_rf_path path, u8 txagc)
+{
+       /* Write the RF TX gain, then run the DPK_TXAGC one-shot with RFCTM
+        * enabled around it.
+        */
+       rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, txagc);
+       rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+       _dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
+       rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
+}
+
+static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                              enum rtw89_rf_path path)
+{
+       u32 tmp;
+
+       /* Mirror the current RF0 word into the KIP, run the DPK_RXAGC
+        * one-shot with RFCTM enabled, then select report page 8.
+        */
+       tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+       rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, tmp);
+       rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+       _dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
+       rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+       rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL_V1, 0x8);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] set RXBB = 0x%x (RF0x0[9:5] = 0x%x)\n",
+                   rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_RXBB_V1),
+                   rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB));
+}
+
+static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                         enum rtw89_rf_path path, s8 gain_offset)
+{
+       u8 txagc;
+
+       txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);
+
+       /* Subtract the gain offset, clamped to the legal TXAGC window */
+       if (txagc - gain_offset < DPK_TXAGC_LOWER)
+               txagc = DPK_TXAGC_LOWER;
+       else if (txagc - gain_offset > DPK_TXAGC_UPPER)
+               txagc = DPK_TXAGC_UPPER;
+       else
+               txagc = txagc - gain_offset;
+
+       _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
+                   gain_offset, txagc);
+       return txagc;
+}
+
+static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
+{
+       u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+       u8 i;
+
+       rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
+       rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
+       rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
+
+       if (is_check) {
+               /* Compare |sample 0|^2 against |sample 31|^2 (12-bit signed
+                * I/Q per sample).
+                */
+               rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
+               val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+               val1_i = abs(sign_extend32(val1_i, 11));
+               val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+               val1_q = abs(sign_extend32(val1_q, 11));
+
+               rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
+               val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+               val2_i = abs(sign_extend32(val2_i, 11));
+               val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+               val2_q = abs(sign_extend32(val2_q, 11));
+
+               rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
+                           phy_div(val1_i * val1_i + val1_q * val1_q,
+                                   val2_i * val2_i + val2_q * val2_q));
+
+               /* Fail when the first point carries >= 1.6x the power of the
+                * last one.
+                */
+               return val1_i * val1_i + val1_q * val1_q >=
+                      (val2_i * val2_i + val2_q * val2_q) * 8 / 5;
+       }
+
+       /* Dump mode: print all 32 PAS samples; nothing to judge, so never
+        * report a failure (the original vacuously compared 0 >= 0 here).
+        */
+       for (i = 0; i < 32; i++) {
+               rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
+                           rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+       }
+
+       return false;
+}
+
+static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                  enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
+                  bool loss_only)
+{
+       const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+       u8 step = DPK_AGC_STEP_SYNC_DGAIN;
+       u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
+       u8 goout = 0, agc_cnt = 0, limited_rxbb = 0;
+       u16 dgain = 0;
+       s8 offset;
+       int limit = 200;
+
+       /* AGC state machine: sync, adjust RXBB from the digital gain,
+        * measure gain loss, and step TXAGC until it settles inside
+        * [DPK_TXAGC_LOWER, DPK_TXAGC_UPPER].  Returns the final TXAGC, or
+        * DPK_TXAGC_INVAL when sync failed.
+        */
+       tmp_txagc = init_txagc;
+
+       do {
+               switch (step) {
+               case DPK_AGC_STEP_SYNC_DGAIN:
+                       if (_dpk_sync(rtwdev, phy, path, kidx)) {
+                               tmp_txagc = DPK_TXAGC_INVAL;
+                               goout = 1;
+                               break;
+                       }
+
+                       dgain = _dpk_dgain_read(rtwdev);
+
+                       if (loss_only || limited_rxbb)
+                               step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+                       else
+                               step = DPK_AGC_STEP_GAIN_ADJ;
+                       break;
+
+               case DPK_AGC_STEP_GAIN_ADJ:
+                       tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD,
+                                                RFREG_MASKRXBB);
+                       offset = _dpk_dgain_mapping(rtwdev, dgain);
+
+                       /* Clamp RXBB to its 5-bit range; once clamped, skip
+                        * further gain adjustment in later rounds.
+                        */
+                       if (tmp_rxbb + offset > 0x1f) {
+                               tmp_rxbb = 0x1f;
+                               limited_rxbb = 1;
+                       } else if (tmp_rxbb + offset < 0) {
+                               tmp_rxbb = 0;
+                               limited_rxbb = 1;
+                       } else {
+                               tmp_rxbb = tmp_rxbb + offset;
+                       }
+
+                       rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB,
+                                      tmp_rxbb);
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                                   "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);
+                       if (offset || agc_cnt == 0) {
+                               if (chan->band_width < RTW89_CHANNEL_WIDTH_80)
+                                       _dpk_bypass_rxcfir(rtwdev, path, true);
+                               else
+                                       _dpk_lbk_rxiqk(rtwdev, phy, path);
+                       }
+                       /* Re-sync when the digital gain is far out of range */
+                       if (dgain > 1922 || dgain < 342)
+                               step = DPK_AGC_STEP_SYNC_DGAIN;
+                       else
+                               step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+
+                       agc_cnt++;
+                       break;
+
+               case DPK_AGC_STEP_GAIN_LOSS_IDX:
+                       _dpk_gainloss(rtwdev, phy, path, kidx);
+                       tmp_gl_idx = _dpk_gainloss_read(rtwdev);
+
+                       if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
+                           tmp_gl_idx >= 7)
+                               step = DPK_AGC_STEP_GL_GT_CRITERION;
+                       else if (tmp_gl_idx == 0)
+                               step = DPK_AGC_STEP_GL_LT_CRITERION;
+                       else
+                               step = DPK_AGC_STEP_SET_TX_GAIN;
+                       break;
+
+               case DPK_AGC_STEP_GL_GT_CRITERION:
+                       if (tmp_txagc == DPK_TXAGC_LOWER) {
+                               goout = 1;
+                               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                                           "[DPK] Txagc@lower bound!!\n");
+                       } else {
+                               tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0x3);
+                       }
+                       step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+                       agc_cnt++;
+                       break;
+
+               case DPK_AGC_STEP_GL_LT_CRITERION:
+                       if (tmp_txagc == DPK_TXAGC_UPPER) {
+                               goout = 1;
+                               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                                           "[DPK] Txagc@upper bound!!\n");
+                       } else {
+                               /* 0xfe is -2 as s8: raise TXAGC by 2 (offset
+                                * is subtracted in _dpk_set_offset()).
+                                */
+                               tmp_txagc = _dpk_set_offset(rtwdev, phy, path, 0xfe);
+                       }
+                       step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+                       agc_cnt++;
+                       break;
+               case DPK_AGC_STEP_SET_TX_GAIN:
+                       tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_gl_idx);
+                       goout = 1;
+                       agc_cnt++;
+                       break;
+
+               default:
+                       goout = 1;
+                       break;
+               }
+       } while (!goout && agc_cnt < 6 && limit-- > 0);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
+                   tmp_rxbb);
+
+       return tmp_txagc;
+}
+
+static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
+{
+       /* Program the MDPD model order for the IDL stage.  Orders 1 and 2
+        * share identical register settings apart from the order field, so
+        * they fall through to one case body.
+        */
+       switch (order) {
+       case 0:
+               rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+               rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
+               rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
+               break;
+       case 1:
+       case 2:
+               rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+               rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
+               rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
+               break;
+       default:
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[DPK] Wrong MDPD order!!(0x%x)\n", order);
+               break;
+       }
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] Set MDPD order to 0x%x for IDL\n", order);
+}
+
+static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                        enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+       /* 5G below 80 MHz uses MDPD order 2, everything else order 0 */
+       if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
+           dpk->bp[path][kidx].band == RTW89_BAND_5G)
+               _dpk_set_mdpd_para(rtwdev, 0x2);
+       else
+               _dpk_set_mdpd_para(rtwdev, 0x0);
+
+       _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+}
+
+static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                            enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       const u16 pwsf = 0x78;
+       u8 gs = dpk->dpk_gs[phy];
+
+       /* Point the coefficient engine at this K index before writing */
+       rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+                              B_COEF_SEL_MDPD, kidx);
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
+                   pwsf, gs);
+
+       /* 6-bit TXAGC field; position selected by gain and kidx */
+       dpk->bp[path][kidx].txagc_dpk = txagc;
+       rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
+                              0x3F << ((gain << 3) + (kidx << 4)), txagc);
+
+       /* 9-bit power scaling factor */
+       dpk->bp[path][kidx].pwsf = pwsf;
+       rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+                              0x1FF << (gain << 4), pwsf);
+
+       /* Pulse B_LOAD_COEF_MDPD to latch the MDPD coefficients */
+       rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
+       rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
+
+       /* Gain-scale pattern depends on whether dpk_gs is the 0x7f value */
+       dpk->bp[path][kidx].gs = gs;
+       if (dpk->dpk_gs[phy] == 0x7f)
+               rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+                                      MASKDWORD, 0x007f7f7f);
+       else
+               rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+                                      MASKDWORD, 0x005b5b5b);
+
+       rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+                              B_DPD_ORDER_V1, _dpk_order_convert(rtwdev));
+       rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
+       rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
+}
+
+/* Search the DPK backup slots for an entry matching the current band and
+ * channel; on a match, reselect that MDPD page and make it the current
+ * index. Returns true when a result was reloaded. If several slots match,
+ * the last one wins (same as scanning all slots without breaking).
+ */
+static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                             enum rtw89_rf_path path)
+{
+       const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       bool found = false;
+       u8 idx;
+
+       for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
+               if (dpk->bp[path][idx].band != chan->band_type ||
+                   dpk->bp[path][idx].ch != chan->channel)
+                       continue;
+
+               rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+                                      B_COEF_SEL_MDPD, idx);
+               dpk->cur_idx[path] = idx;
+               found = true;
+               rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                           "[DPK] reload S%d[%d] success\n", path, idx);
+       }
+
+       return found;
+}
+
+/* Run the full DPK calibration sequence on one RF path: take direct RF
+ * control, configure KIP/RF/RX-DCK, run AGC to find a working txagc, and
+ * on success capture thermal, run IDL MDPK and commit the result.
+ * Returns true on failure (AGC reported an invalid txagc).
+ */
+static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+                     enum rtw89_rf_path path, u8 gain)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       u8 txagc = 0x38, kidx = dpk->cur_idx[path];
+       bool is_fail = false;
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
+
+       /* Take RF control away from BB for the duration of the calibration. */
+       _rfk_rf_direct_cntrl(rtwdev, path, false);
+       _rfk_drf_direct_cntrl(rtwdev, path, false);
+
+       _dpk_kip_pwr_clk_on(rtwdev, path);
+       _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
+       _dpk_rf_setting(rtwdev, gain, path, kidx);
+       _dpk_rx_dck(rtwdev, phy, path);
+
+       _dpk_kip_preset(rtwdev, phy, path, kidx);
+       _dpk_kip_set_rxagc(rtwdev, phy, path);
+       _dpk_table_select(rtwdev, path, kidx, gain);
+
+       txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Adjust txagc = 0x%x\n", txagc);
+
+       /* Use the named sentinel rather than the bare 0xff literal. */
+       if (txagc == DPK_TXAGC_INVAL) {
+               is_fail = true;
+       } else {
+               _dpk_get_thermal(rtwdev, kidx, path);
+
+               _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+
+               rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+               _dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
+       }
+
+       dpk->bp[path][kidx].path_ok = !is_fail;
+
+       rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
+                   is_fail ? "Check" : "Success");
+
+       return is_fail;
+}
+
+/* Drive DPK across both RF paths: handle reload/slot selection, back up
+ * BB/RF/KIP register state, run _dpk_main per path, then restore state and
+ * resume TSSI where it had been paused.
+ */
+static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
+                           enum rtw89_phy_idx phy, u8 kpath)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120};
+       u32 kip_bkup[RTW8852B_DPK_RF_PATH][RTW8852B_DPK_KIP_REG_NUM] = {};
+       u32 backup_rf_val[RTW8852B_DPK_RF_PATH][BACKUP_RF_REGS_NR];
+       u32 backup_bb_val[BACKUP_BB_REGS_NR];
+       bool is_fail = true, reloaded[RTW8852B_DPK_RF_PATH] = {};
+       u8 path;
+
+       /* NOTE(review): 'force' is currently unused in this body — presumably
+        * reserved for a future forced-recalibration mode; confirm.
+        */
+       if (dpk->is_dpk_reload_en) {
+               for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+                       /* No reload hit but slot 0 in use: flip to the other
+                        * backup slot; otherwise disable DPK on this path.
+                        */
+                       reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+                       if (!reloaded[path] && dpk->bp[path][0].ch)
+                               dpk->cur_idx[path] = !dpk->cur_idx[path];
+                       else
+                               _dpk_onoff(rtwdev, path, false);
+               }
+       } else {
+               for (path = 0; path < RTW8852B_DPK_RF_PATH; path++)
+                       dpk->cur_idx[path] = 0;
+       }
+
+       _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
+
+       /* Back up KIP/RF state and pause TSSI on each path before touching
+        * the AFE.
+        */
+       for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+               _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
+               _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+               _dpk_information(rtwdev, phy, path);
+               if (rtwdev->is_tssi_mode[path])
+                       _dpk_tssi_pause(rtwdev, path, true);
+       }
+
+       /* NOTE(review): 'path' equals RTW8852B_DPK_RF_PATH here (loop ran to
+        * completion); _dpk_bb_afe_setting/_restore presumably key off
+        * 'kpath' and ignore this argument — confirm.
+        */
+       _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+
+       for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+               is_fail = _dpk_main(rtwdev, phy, path, 1);
+               _dpk_onoff(rtwdev, path, is_fail);
+       }
+
+       _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
+       _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
+
+       /* Undo the per-path setup in reverse: KIP restore, reload saved
+        * registers, resume TSSI.
+        */
+       for (path = 0; path < RTW8852B_DPK_RF_PATH; path++) {
+               _dpk_kip_restore(rtwdev, path);
+               _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
+               _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+               if (rtwdev->is_tssi_mode[path])
+                       _dpk_tssi_pause(rtwdev, path, false);
+       }
+}
+
+/* Return true when DPK must be skipped because an external PA is fitted
+ * for the band currently in use.
+ */
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+       const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+       struct rtw89_fem_info *fem = &rtwdev->fem;
+
+       switch (chan->band_type) {
+       case RTW89_BAND_2G:
+               if (fem->epa_2g) {
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                                   "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
+                       return true;
+               }
+               break;
+       case RTW89_BAND_5G:
+               if (fem->epa_5g) {
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                                   "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
+                       return true;
+               }
+               break;
+       case RTW89_BAND_6G:
+               if (fem->epa_6g) {
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                                   "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
+                       return true;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return false;
+}
+
+/* Mark every RF path covered by this PHY as DPK-bypassed. */
+static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+       u8 kpath = _kpath(rtwdev, phy);
+       u8 path = 0;
+
+       while (path < RTW8852B_DPK_RF_PATH) {
+               if (kpath & BIT(path))
+                       _dpk_onoff(rtwdev, path, true);
+               path++;
+       }
+}
+
+/* Top-level internal DPK entry: log version info, then either run the full
+ * calibration on both paths or force bypass when an external PA is present.
+ */
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+{
+       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                   "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
+                   RTW8852B_DPK_VER, rtwdev->hal.cv,
+                   RTW8852B_RF_REL_VERSION);
+
+       if (!_dpk_bypass_check(rtwdev, phy))
+               _dpk_cal_select(rtwdev, force, phy, RF_AB);
+       else
+               _dpk_force_bypass(rtwdev, phy);
+}
+
+/* Periodic DPK thermal tracking: per path, derive a thermal delta relative
+ * to the temperature captured at calibration time, adjust the stored power
+ * scaling factor (pwsf), and write the new values when tracking is enabled
+ * and TX AGC is active.
+ */
+static void _dpk_track(struct rtw89_dev *rtwdev)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
+       s8 delta_ther[2] = {};
+       u8 trk_idx, txagc_rf;
+       u8 path, kidx;
+       u16 pwsf[2];
+       u8 cur_ther;
+       u32 tmp;
+
+       for (path = 0; path < RF_PATH_NUM_8852B; path++) {
+               kidx = dpk->cur_idx[path];
+
+               rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+                           "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
+                           path, kidx, dpk->bp[path][kidx].ch);
+
+               cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+               rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+                           "[DPK_TRK] thermal now = %d\n", cur_ther);
+
+               /* Delta vs. the thermal value stored at calibration time;
+                * only when this slot holds a valid channel and the reading
+                * is non-zero.
+                */
+               if (dpk->bp[path][kidx].ch && cur_ther)
+                       delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;
+
+               /* Scale the delta: x1.5 on 2G, x2.5 elsewhere (integer math). */
+               if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
+                       delta_ther[path] = delta_ther[path] * 3 / 2;
+               else
+                       delta_ther[path] = delta_ther[path] * 5 / 2;
+
+               txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+                                                0x0000003f);
+
+               /* TSSI mode: fold BB/TP txagc readbacks and the txagc offset
+                * into the correction; otherwise apply the thermal delta only.
+                */
+               if (rtwdev->is_tssi_mode[path]) {
+                       trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);
+
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+                                   "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
+                                   txagc_rf, trk_idx);
+
+                       txagc_bb =
+                               rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+                                                     MASKBYTE2);
+                       txagc_bb_tp =
+                               rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
+                                                     B_TXAGC_TP);
+
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+                                   "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
+                                   txagc_bb_tp, txagc_bb);
+
+                       txagc_ofst =
+                               rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+                                                     MASKBYTE3);
+
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+                                   "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
+                                   txagc_ofst, delta_ther[path]);
+                       /* HW applies the offset itself in this mode, so do
+                        * not apply it again in software.
+                        */
+                       tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
+                                                   B_DPD_COM_OF);
+                       if (tmp == 0x1) {
+                               txagc_ofst = 0;
+                               rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+                                           "[DPK_TRK] HW txagc offset mode\n");
+                       }
+
+                       if (txagc_rf && cur_ther)
+                               ini_diff = txagc_ofst + (delta_ther[path]);
+
+                       tmp = rtw89_phy_read32_mask(rtwdev,
+                                                   R_P0_TXDPD + (path << 13),
+                                                   B_P0_TXDPD);
+                       if (tmp == 0x0) {
+                               pwsf[0] = dpk->bp[path][kidx].pwsf +
+                                         txagc_bb_tp - txagc_bb + ini_diff;
+                               pwsf[1] = dpk->bp[path][kidx].pwsf +
+                                         txagc_bb_tp - txagc_bb + ini_diff;
+                       } else {
+                               pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
+                               pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
+                       }
+
+               } else {
+                       pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+                       pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+               }
+
+               /* Commit only when tracking is not disabled and TX AGC is
+                * non-zero.
+                */
+               tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
+               if (!tmp && txagc_rf) {
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+                                   "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
+                                   pwsf[0], pwsf[1]);
+
+                       rtw89_phy_write32_mask(rtwdev,
+                                              R_DPD_BND + (path << 8) + (kidx << 2),
+                                              B_DPD_BND_0, pwsf[0]);
+                       rtw89_phy_write32_mask(rtwdev,
+                                              R_DPD_BND + (path << 8) + (kidx << 2),
+                                              B_DPD_BND_1, pwsf[1]);
+               }
+       }
+}
+
+/* Choose the DPD gain scaling (dpk_gs) for this PHY: when the BB's OFDM
+ * backoff plus TX scale reaches the threshold (44), move the DPD backoff
+ * into the BB (gs = 0x7f) on each active path; otherwise keep gs = 0x5b.
+ */
+static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+       struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+       u8 tx_scale, ofdm_bkof, path, kpath;
+
+       kpath = _kpath(rtwdev, phy);
+
+       ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
+       tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
+
+       if (ofdm_bkof + tx_scale >= 44) {
+               /* move dpd backoff to bb, and set dpd backoff to 0 */
+               dpk->dpk_gs[phy] = 0x7f;
+               for (path = 0; path < RF_PATH_NUM_8852B; path++) {
+                       /* Only touch paths that belong to this PHY. */
+                       if (!(kpath & BIT(path)))
+                               continue;
+
+                       rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
+                                              B_DPD_CFG, 0x7f7f7f);
+                       rtw89_debug(rtwdev, RTW89_DBG_RFK,
+                                   "[RFK] Set S%d DPD backoff to 0dB\n", path);
+               }
+       } else {
+               dpk->dpk_gs[phy] = 0x5b;
+       }
+}
+
 static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
                             enum rtw89_rf_path path)
 {
                    tssi_info->tssi_alimk_time);
 }
 
+/* One-time DPK initialization: select the DPD backoff / gain-scaling
+ * configuration for PHY 0.
+ */
+void rtw8852b_dpk_init(struct rtw89_dev *rtwdev)
+{
+       _set_dpd_backoff(rtwdev, RTW89_PHY_0);
+}
+
 void rtw8852b_rck(struct rtw89_dev *rtwdev)
 {
        u8 path;
        rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
 }
 
+/* Public DPK entry point for one PHY. Brackets the calibration with BTC
+ * (BT-coex) START/STOP notifications, stops scheduled TX (state saved in
+ * tx_en) and waits for RX idle before running, then resumes TX afterwards.
+ */
+void rtw8852b_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+       u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+       u32 tx_en;
+
+       rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
+       rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+       _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+       /* Fresh run: enable DPK and do not attempt to reload prior results. */
+       rtwdev->dpk.is_dpk_enable = true;
+       rtwdev->dpk.is_dpk_reload_en = false;
+       _dpk(rtwdev, phy_idx, false);
+
+       rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+       rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
+}
+
+/* Public wrapper around the internal DPK thermal tracking routine. */
+void rtw8852b_dpk_track(struct rtw89_dev *rtwdev)
+{
+       _dpk_track(rtwdev);
+}
+
 void rtw8852b_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
 {
        u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);