 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
 {
-       unsigned long flags;
        u32 ah_ms;
 
        if (ufshcd_is_clkgating_allowed(hba)) {
                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
                        ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
                                          hba->ahit);
                else
                        ah_ms = 10;
-               spin_lock_irqsave(hba->host->host_lock, flags);
-               hba->clk_gating.delay_ms = ah_ms + 5;
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
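+               /* Gate clocks 5 ms after the auto-hibern8 (or default 10 ms) idle period */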
+               ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
        }
 }
 
 
        return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
 }
 
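+/**
+ * ufshcd_clkgate_delay_set - set the clock gating delay
+ * @dev: device associated with the UFS host controller
+ * @value: new delay, in milliseconds, before idle clocks are gated
+ *
+ * Update clk_gating.delay_ms under the host lock so the write does not
+ * race with readers of the clock gating state.
+ */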
+void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->clk_gating.delay_ms = value;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
+
 static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct ufs_hba *hba = dev_get_drvdata(dev);
-       unsigned long flags, value;
+       unsigned long value;
 
        if (kstrtoul(buf, 0, &value))
                return -EINVAL;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->clk_gating.delay_ms = value;
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ufshcd_clkgate_delay_set(dev, value);
        return count;
 }
 
 
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
 
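+/* Set the clock gating idle delay, in milliseconds */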
+void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value);
+
 void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
                                  int *desc_length);
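
For illustration only, a minimal sketch of how another UFS host driver could use
the new helper once this patch is applied. The hook name
example_ufs_init_clk_gating and the fixed 30 ms value are hypothetical;
ufshcd_clkgate_delay_set(), ufshcd_is_clkgating_allowed() and hba->dev come from
the patch and the existing UFS core API.

	#include <ufs/ufshcd.h>

	/*
	 * Hypothetical vendor hook: pick a fixed 30 ms clock gating delay
	 * instead of deriving it from the auto-hibern8 timer as
	 * ufs_mtk_setup_clk_gating() does above.
	 */
	static int example_ufs_init_clk_gating(struct ufs_hba *hba)
	{
		if (ufshcd_is_clkgating_allowed(hba))
			ufshcd_clkgate_delay_set(hba->dev, 30);

		return 0;
	}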