int kvm_host_prepare_stage2(void *pgt_pool_base);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
+int hyp_pin_shared_mem(void *from, void *to);
+void hyp_unpin_shared_mem(void *from, void *to);
+
 static __always_inline void __load_host_stage2(void)
 {
        if (static_branch_likely(&kvm_protected_mode_initialized))
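
The two helpers are declared alongside the rest of the host stage-2 interface: hyp_pin_shared_mem() takes a hypervisor virtual address range, rounds it out to whole pages, and takes a reference on each page so the host cannot unshare them while they are in use; hyp_unpin_shared_mem() drops those references. A minimal usage sketch follows; struct my_desc and use_shared_desc() are hypothetical, for illustration only:

/* Hypothetical caller: keep host-shared pages alive while they are
 * dereferenced at EL2, and drop the pin on the exit path.
 */
struct my_desc {
	u64 val;
};

static int use_shared_desc(struct my_desc *desc)
{
	int ret = hyp_pin_shared_mem(desc, desc + 1);

	if (ret)
		return ret;

	/* Safe: the host cannot unshare these pages until the unpin. */

	hyp_unpin_shared_mem(desc, desc + 1);
	return 0;
}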
 
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ ... @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
 {
        u64 size = tx->nr_pages * PAGE_SIZE;
 
+       if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
+               return -EBUSY;
+
        if (__hyp_ack_skip_pgtable_check(tx))
                return 0;
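
The hunk above is where the pin takes effect: hyp_ack_unshare() now refuses a host-initiated unshare with -EBUSY while any page in the range still holds a reference, so pinned memory cannot be pulled out from under the hypervisor. A hedged sketch of the resulting ordering guarantee; demo_pin_blocks_unshare() is hypothetical and calls the unshare handler directly purely for illustration:

/* Hypothetical illustration: while the pin is held, the host's
 * unshare of the same page must fail with -EBUSY. @va is the hyp VA
 * of a page the host previously shared, @pfn its frame number.
 */
static int demo_pin_blocks_unshare(void *va, u64 pfn)
{
	int ret = hyp_pin_shared_mem(va, va + PAGE_SIZE);

	if (ret)
		return ret;

	ret = __pkvm_host_unshare_hyp(pfn);
	if (ret != -EBUSY)
		return -EINVAL;	/* the pin should have blocked it */

	hyp_unpin_shared_mem(va, va + PAGE_SIZE);
	return __pkvm_host_unshare_hyp(pfn);	/* now succeeds */
}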
 
 
@@ ... @@
 	return ret;
 }
+
+int hyp_pin_shared_mem(void *from, void *to)
+{
+       u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+       u64 end = PAGE_ALIGN((u64)to);
+       u64 size = end - start;
+       int ret;
+
+       host_lock_component();
+       hyp_lock_component();
+
+       ret = __host_check_page_state_range(__hyp_pa(start), size,
+                                           PKVM_PAGE_SHARED_OWNED);
+       if (ret)
+               goto unlock;
+
+       ret = __hyp_check_page_state_range(start, size,
+                                          PKVM_PAGE_SHARED_BORROWED);
+       if (ret)
+               goto unlock;
+
+       for (cur = start; cur < end; cur += PAGE_SIZE)
+               hyp_page_ref_inc(hyp_virt_to_page(cur));
+
+unlock:
+       hyp_unlock_component();
+       host_unlock_component();
+
+       return ret;
+}
+
+void hyp_unpin_shared_mem(void *from, void *to)
+{
+       u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+       u64 end = PAGE_ALIGN((u64)to);
+
+       host_lock_component();
+       hyp_lock_component();
+
+       for (cur = start; cur < end; cur += PAGE_SIZE)
+               hyp_page_ref_dec(hyp_virt_to_page(cur));
+
+       hyp_unlock_component();
+       host_unlock_component();
+}
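
Both helpers take the host and hyp component locks, which is what makes the scheme race-free: hyp_ack_unshare() reads the page refcount under the same locks, so a page cannot be unshared between hyp_pin_shared_mem()'s state checks and its reference increments, and the two page-state checks ensure references are only ever taken on pages that really are shared by the host and borrowed by the hypervisor. For context, the refcount read that the -EBUSY check relies on looks roughly like the sketch below (field names assumed from the hyp vmemmap as of this series, not part of the patch above):

/* Sketch of the accessor used by the new hyp_ack_unshare() check;
 * an assumption of this note, shown only for context.
 */
static inline int hyp_page_count(void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	return p->refcount;
}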