 static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
 {
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.suspend_work);
-       unsigned long irq_flags;
 
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               return;
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (hba->clk_scaling.active_reqs ||
+                   hba->clk_scaling.is_suspended)
+                       return;
+
+               hba->clk_scaling.is_suspended = true;
+               hba->clk_scaling.window_start_t = 0;
        }
-       hba->clk_scaling.is_suspended = true;
-       hba->clk_scaling.window_start_t = 0;
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
        devfreq_suspend_device(hba->devfreq);
 }
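
The conversion relies on scoped_guard() from <linux/cleanup.h>: the lock is
held for exactly the braced scope, and any exit from that scope, including
the early return above, drops it. Below is a minimal userspace analogue of
the pattern, built on the same __attribute__((cleanup)) mechanism the kernel
helper uses; pthread_mutex_t stands in for the spinlock, and all names here
are illustrative, not the kernel API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int is_suspended;

static void unlock_cleanup(pthread_mutex_t **l)
{
	pthread_mutex_unlock(*l);
}

/*
 * One-iteration for-loop whose guard variable unlocks on any scope
 * exit, including early return -- the core idea behind scoped_guard().
 */
#define scoped_lock(l) \
	for (pthread_mutex_t *_g __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(l), (l)), *_once = (l); \
	     _once; _once = NULL)

static void suspend_work(void)
{
	scoped_lock(&lock) {
		if (is_suspended)
			return;		/* unlock_cleanup() still runs */
		is_suspended = 1;
	}
	/* Like devfreq_suspend_device(): called with the lock dropped. */
	puts("suspending device outside the lock");
}

int main(void)
{
	suspend_work();
	suspend_work();	/* returns early; the lock is still released */
	return 0;
}

On the second call the function returns from inside the guarded scope, yet
the mutex is still released, which is why the early returns in the two work
functions here need no explicit unlock.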
 static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
 {
        struct ufs_hba *hba = container_of(work, struct ufs_hba,
                                           clk_scaling.resume_work);
-       unsigned long irq_flags;
 
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (!hba->clk_scaling.is_suspended) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               return;
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (!hba->clk_scaling.is_suspended)
+                       return;
+               hba->clk_scaling.is_suspended = false;
        }
-       hba->clk_scaling.is_suspended = false;
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
        devfreq_resume_device(hba->devfreq);
 }
 static int ufshcd_devfreq_target(struct device *dev,
                                 unsigned long *freq, u32 flags)
 {
        int ret = 0;
        struct ufs_hba *hba = dev_get_drvdata(dev);
        ktime_t start;
        bool scale_up = false, sched_clk_scaling_suspend_work = false;
        struct list_head *clk_list = &hba->clk_list_head;
        struct ufs_clk_info *clki;
-       unsigned long irq_flags;
 
        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
                *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
        }
 
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (ufshcd_eh_in_progress(hba)) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               return 0;
-       }
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (ufshcd_eh_in_progress(hba))
+                       return 0;
 
-       /* Skip scaling clock when clock scaling is suspended */
-       if (hba->clk_scaling.is_suspended) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               dev_warn(hba->dev, "clock scaling is suspended, skip");
-               return 0;
-       }
+               /* Skip scaling clock when clock scaling is suspended */
+               if (hba->clk_scaling.is_suspended) {
+                       dev_warn(hba->dev, "clock scaling is suspended, skip");
+                       return 0;
+               }
 
-       if (!hba->clk_scaling.active_reqs)
-               sched_clk_scaling_suspend_work = true;
+               if (!hba->clk_scaling.active_reqs)
+                       sched_clk_scaling_suspend_work = true;
 
-       if (list_empty(clk_list)) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               goto out;
-       }
+               if (list_empty(clk_list))
+                       goto out;
 
-       /* Decide based on the target or rounded-off frequency and update */
-       if (hba->use_pm_opp)
-               scale_up = *freq > hba->clk_scaling.target_freq;
-       else
-               scale_up = *freq == clki->max_freq;
+               /* Decide based on the target or rounded-off frequency and update */
+               if (hba->use_pm_opp)
+                       scale_up = *freq > hba->clk_scaling.target_freq;
+               else
+                       scale_up = *freq == clki->max_freq;
 
-       if (!hba->use_pm_opp && !scale_up)
-               *freq = clki->min_freq;
+               if (!hba->use_pm_opp && !scale_up)
+                       *freq = clki->min_freq;
 
-       /* Update the frequency */
-       if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               ret = 0;
-               goto out; /* no state change required */
+               /* Update the frequency */
+               if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
+                       ret = 0;
+                       goto out; /* no state change required */
+               }
        }
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
 
        start = ktime_get();
        ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
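
One subtlety in this hunk: "goto out" jumps out of the scoped_guard scope,
and the compiler-generated cleanup releases the lock on that path just as it
does on the returns. A standalone sketch of that property of
__attribute__((cleanup)), on which the kernel guards are built (the resource
and names are illustrative):

#include <stdio.h>

static void release(int *r)
{
	printf("released %d\n", *r);
}

static int try_scale(int skip)
{
	{
		int res __attribute__((cleanup(release))) = 42;

		if (skip)
			goto out;	/* release() still runs here */
		printf("scaling with %d\n", res);
	}
out:
	return 0;
}

int main(void)
{
	try_scale(1);	/* prints "released 42" only */
	try_scale(0);	/* prints "scaling with 42", then "released 42" */
	return 0;
}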
 static int ufshcd_devfreq_get_dev_status(struct device *dev,
                 struct devfreq_dev_status *stat)
 {
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-       unsigned long flags;
        ktime_t curr_t;
 
        if (!ufshcd_is_clkscaling_supported(hba))
                return -EINVAL;
 
        memset(stat, 0, sizeof(*stat));
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
+       guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
        curr_t = ktime_get();
        if (!scaling->window_start_t)
                goto start_window;
                scaling->busy_start_t = 0;
                scaling->is_busy_started = false;
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
        return 0;
 }
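
Unlike the scoped_guard() conversions above, this function uses the
declaration-style guard(), which pins the lock from the declaration to
function exit, so both the goto start_window path and the normal return drop
it without explicit unlock calls. A userspace analogue of that form, again
with a pthread mutex standing in and illustrative names:

#include <pthread.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long window_start, total_time;

static void unlock_cleanup(pthread_mutex_t **l)
{
	pthread_mutex_unlock(*l);
}

/* Declaration-style guard: held until the enclosing scope (here, the
 * whole function) is left, on every return path. */
#define guard_lock(l) \
	pthread_mutex_t *_guard __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(l), (l))

static int get_dev_status(unsigned long long now)
{
	guard_lock(&stats_lock);

	if (!window_start) {
		window_start = now;	/* open a new window... */
		return 0;		/* ...unlocked automatically */
	}
	total_time = now - window_start;
	return 0;
}

int main(void)
{
	get_dev_status(100);	/* starts the window */
	get_dev_status(250);	/* total_time = 150 */
	return total_time != 150;
}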
 
 
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 {
-       unsigned long flags;
        bool suspend = false;
 
        cancel_work_sync(&hba->clk_scaling.suspend_work);
        cancel_work_sync(&hba->clk_scaling.resume_work);
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       if (!hba->clk_scaling.is_suspended) {
-               suspend = true;
-               hba->clk_scaling.is_suspended = true;
-               hba->clk_scaling.window_start_t = 0;
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (!hba->clk_scaling.is_suspended) {
+                       suspend = true;
+                       hba->clk_scaling.is_suspended = true;
+                       hba->clk_scaling.window_start_t = 0;
+               }
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        if (suspend)
                devfreq_suspend_device(hba->devfreq);
 }
 
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 {
-       unsigned long flags;
        bool resume = false;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->clk_scaling.is_suspended) {
-               resume = true;
-               hba->clk_scaling.is_suspended = false;
+       scoped_guard(spinlock_irqsave, &hba->clk_scaling.lock)
+       {
+               if (hba->clk_scaling.is_suspended) {
+                       resume = true;
+                       hba->clk_scaling.is_suspended = false;
+               }
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        if (resume)
                devfreq_resume_device(hba->devfreq);
 }
 
        INIT_WORK(&hba->clk_scaling.resume_work,
                  ufshcd_clk_scaling_resume_work);
 
+       spin_lock_init(&hba->clk_scaling.lock);
+
        hba->clk_scaling.workq = alloc_ordered_workqueue(
                "ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);
 
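Note the ordering in the init path: the new lock is initialized before the
ordered workqueue that runs the suspend/resume work is allocated, so no work
item can ever observe an uninitialized lock. A minimal userspace sketch of
that ordering, with a thread standing in for the workqueue (illustrative
names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scaling_lock;
static int is_suspended = 1;

static void *resume_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&scaling_lock);	/* safe: lock init'ed first */
	is_suspended = 0;
	pthread_mutex_unlock(&scaling_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Mirror the patch: initialize the lock first... */
	pthread_mutex_init(&scaling_lock, NULL);
	/* ...then create the execution context that may take it. */
	pthread_create(&t, NULL, resume_worker, NULL);
	pthread_join(&t, NULL);
	printf("is_suspended=%d\n", is_suspended);
	return 0;
}
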
 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
        bool queue_resume_work = false;
        ktime_t curr_t = ktime_get();
-       unsigned long flags;
 
        if (!ufshcd_is_clkscaling_supported(hba))
                return;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
+       guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
        if (!hba->clk_scaling.active_reqs++)
                queue_resume_work = true;
 
-       if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
-               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
                return;
-       }
 
        if (queue_resume_work)
                queue_work(hba->clk_scaling.workq,
                           &hba->clk_scaling.resume_work);
 
        if (!hba->clk_scaling.is_busy_started) {
                hba->clk_scaling.busy_start_t = curr_t;
                hba->clk_scaling.is_busy_started = true;
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 {
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-       unsigned long flags;
 
        if (!ufshcd_is_clkscaling_supported(hba))
                return;
 
-       spin_lock_irqsave(hba->host->host_lock, flags);
+       guard(spinlock_irqsave)(&hba->clk_scaling.lock);
+
        hba->clk_scaling.active_reqs--;
        if (!scaling->active_reqs && scaling->is_busy_started) {
                scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
                                        scaling->busy_start_t));
                scaling->busy_start_t = 0;
                scaling->is_busy_started = false;
        }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
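
Taken together, ufshcd_clk_scaling_start_busy() and
ufshcd_clk_scaling_update_busy() keep the devfreq busy-time window consistent
under the new clk_scaling.lock: the 0->1 transition of active_reqs stamps the
window start, and the 1->0 transition accumulates it. A compact userspace
model of that accounting, with a plain mutex in place of the guard and
illustrative names:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active_reqs;
static unsigned long long busy_start, tot_busy;

/* 0 -> 1 transition opens the busy window. */
static void start_busy(unsigned long long now)
{
	pthread_mutex_lock(&lock);
	if (!active_reqs++)
		busy_start = now;
	pthread_mutex_unlock(&lock);
}

/* 1 -> 0 transition closes it and accumulates. */
static void update_busy(unsigned long long now)
{
	pthread_mutex_lock(&lock);
	if (!--active_reqs && busy_start) {
		tot_busy += now - busy_start;
		busy_start = 0;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	start_busy(100);
	start_busy(150);	/* window already open: no restamp */
	update_busy(200);
	update_busy(300);	/* closes window: tot_busy += 200 */
	printf("tot_busy=%llu\n", tot_busy);
	return tot_busy != 200;
}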
 
 static inline int ufshcd_monitor_opcode2dir(u8 opcode)