}
 }
 
-static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
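+/*
+ * Pages shared with the hypervisor are tracked in an rb-tree keyed by pfn.
+ * The per-pfn count records how many times a page has been shared, so the
+ * EL2 share hypercall is only issued the first time around.
+ */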
+struct hyp_shared_pfn {
+       u64 pfn;
+       int count;
+       struct rb_node node;
+};
+
+static DEFINE_MUTEX(hyp_shared_pfns_lock);
+static struct rb_root hyp_shared_pfns = RB_ROOT;
+
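+/*
+ * Look up @pfn in the shared-pfns tree. On a miss, return NULL with @node and
+ * @parent set up for rb_link_node() at the insertion point. The caller must
+ * hold hyp_shared_pfns_lock.
+ */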
+static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
+                                             struct rb_node **parent)
 {
-       phys_addr_t addr;
-       int ret;
+       struct hyp_shared_pfn *this;
+
+       *node = &hyp_shared_pfns.rb_node;
+       *parent = NULL;
+       while (**node) {
+               this = container_of(**node, struct hyp_shared_pfn, node);
+               *parent = **node;
+               if (this->pfn < pfn)
+                       *node = &((**node)->rb_left);
+               else if (this->pfn > pfn)
+                       *node = &((**node)->rb_right);
+               else
+                       return this;
+       }
 
-       for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
-               ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
-                                       __phys_to_pfn(addr));
-               if (ret)
-                       return ret;
+       return NULL;
+}
+
+static int share_pfn_hyp(u64 pfn)
+{
+       struct rb_node **node, *parent;
+       struct hyp_shared_pfn *this;
+       int ret = 0;
+
+       mutex_lock(&hyp_shared_pfns_lock);
+       this = find_shared_pfn(pfn, &node, &parent);
+       if (this) {
+               this->count++;
+               goto unlock;
        }
 
-       return 0;
+       this = kzalloc(sizeof(*this), GFP_KERNEL);
+       if (!this) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       this->pfn = pfn;
+       this->count = 1;
+       rb_link_node(&this->node, parent, node);
+       rb_insert_color(&this->node, &hyp_shared_pfns);
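+       /* First time this pfn is shared: issue the EL2 hypercall. */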
+       ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn);
+unlock:
+       mutex_unlock(&hyp_shared_pfns_lock);
+
+       return ret;
 }
 
 int kvm_share_hyp(void *from, void *to)
 {
+       phys_addr_t start, end, cur;
+       u64 pfn;
+       int ret;
+
        if (is_kernel_in_hyp_mode())
                return 0;
 
        if (kvm_host_owns_hyp_mappings())
                return create_hyp_mappings(from, to, PAGE_HYP);
 
-       return pkvm_share_hyp(__pa(from), __pa(to));
+       start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
+       end = PAGE_ALIGN(__pa(to));
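+       /*
+        * Share the range page by page; share_pfn_hyp() refcounts each pfn,
+        * so pages that are already shared just get their count bumped.
+        */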
+       for (cur = start; cur < end; cur += PAGE_SIZE) {
+               pfn = __phys_to_pfn(cur);
+               ret = share_pfn_hyp(pfn);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
 
 /**