return err;
 }
 
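+/* Reconfigure the current power mode with the requested number of lanes */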
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+       struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+       int ret;
+
+       pwr_info.lane_rx = lanes;
+       pwr_info.lane_tx = lanes;
+       ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+       if (ret)
+               dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+                       __func__, lanes, ret);
+       return ret;
+}
+
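+/*
+ * Switch to 2 lanes before entering a high-speed mode, and after a
+ * high-speed mode change completes, re-read the peer's granularity.
+ */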
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+                               enum ufs_notify_change_status status,
+                               struct ufs_pa_layer_attr *dev_max_params,
+                               struct ufs_pa_layer_attr *dev_req_params)
+{
+       int err = 0;
+
+       switch (status) {
+       case PRE_CHANGE:
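+               /* High-speed modes are entered with both lanes active */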
+               if (ufshcd_is_hs_mode(dev_max_params) &&
+                   (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+                       ufs_intel_set_lanes(hba, 2);
+               memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+               break;
+       case POST_CHANGE:
+               if (ufshcd_is_hs_mode(dev_req_params)) {
+                       u32 peer_granularity;
+
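+                       /* Allow the mode change to settle before reading the peer */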
+                       usleep_range(1000, 1250);
+                       err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+                                                 &peer_granularity);
+               }
+               break;
+       default:
+               break;
+       }
+
+       return err;
+}
+
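+/*
+ * When host and device report the same PA_GRANULARITY, give the device a
+ * little extra margin by raising the peer's PA_TACTIVATE above our own.
+ */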
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+       u32 granularity, peer_granularity;
+       u32 pa_tactivate, peer_pa_tactivate;
+       int ret;
+
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+       if (ret)
+               goto out;
+
+       ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+       if (ret)
+               goto out;
+
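+       /* The peer value is unused below; a failed read skips the quirk */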
+       ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+       if (ret)
+               goto out;
+
+       if (granularity == peer_granularity) {
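+               /* Two extra units of margin over the local PA_TACTIVATE */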
+               u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+               ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+       }
+out:
+       return ret;
+}
+
 #define INTEL_ACTIVELTR        0x804
 #define INTEL_IDLELTR          0x808
 
        struct ufs_host *ufs_host;
        int err;
 
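+       /* LKF needs a longer NOP OUT timeout than the core default */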
+       hba->nop_out_timeout = 200;
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        err = ufs_intel_common_init(hba);
        .exit                   = ufs_intel_common_exit,
        .hce_enable_notify      = ufs_intel_hce_enable_notify,
        .link_startup_notify    = ufs_intel_link_startup_notify,
+       .pwr_change_notify      = ufs_intel_lkf_pwr_change_notify,
+       .apply_dev_quirks       = ufs_intel_lkf_apply_dev_quirks,
        .resume                 = ufs_intel_resume,
        .device_reset           = ufs_intel_device_reset,
 };