const u8 pkvm_hyp_id = 1;
 
+/*
+ * Serialize access to the host's stage-2 state behind host_kvm.lock.
+ * NOTE(review): lock-ordering elsewhere in this patch takes the host
+ * component lock before the hyp component lock — keep that order.
+ */
+static void host_lock_component(void)
+{
+       hyp_spin_lock(&host_kvm.lock);
+}
+
+/* Release host_kvm.lock taken by host_lock_component(). */
+static void host_unlock_component(void)
+{
+       hyp_spin_unlock(&host_kvm.lock);
+}
+
+/*
+ * Serialize access to the hypervisor's own page-table behind
+ * pkvm_pgd_lock. Must be taken after the host component lock when
+ * both are needed (see the paired call sites in this patch).
+ */
+static void hyp_lock_component(void)
+{
+       hyp_spin_lock(&pkvm_pgd_lock);
+}
+
+/* Release pkvm_pgd_lock taken by hyp_lock_component(). */
+static void hyp_unlock_component(void)
+{
+       hyp_spin_unlock(&pkvm_pgd_lock);
+}
+
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
        void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
 
        prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
 
-       hyp_spin_lock(&host_kvm.lock);
+       host_lock_component();
        ret = host_stage2_adjust_range(addr, &range);
        if (ret)
                goto unlock;
 
        ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
 unlock:
-       hyp_spin_unlock(&host_kvm.lock);
+       host_unlock_component();
 
        return ret;
 }
        if (!addr_is_memory(addr))
                return -EINVAL;
 
-       hyp_spin_lock(&host_kvm.lock);
-       hyp_spin_lock(&pkvm_pgd_lock);
+       host_lock_component();
+       hyp_lock_component();
 
        ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL);
        if (ret)
        BUG_ON(ret);
 
 unlock:
-       hyp_spin_unlock(&pkvm_pgd_lock);
-       hyp_spin_unlock(&host_kvm.lock);
+       hyp_unlock_component();
+       host_unlock_component();
 
        return ret;
 }