return 0;
 }
 
+/**
+ * vgic_v3_rdist_overlap - check if a region overlaps with any
+ * existing redistributor region
+ *
+ * @kvm: kvm handle
+ * @base: base of the region
+ * @size: size of region
+ *
+ * Return: true if there is an overlap
+ */
+bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
+{
+       struct vgic_dist *d = &kvm->arch.vgic;
+       struct vgic_redist_region *rdreg;
+
+       list_for_each_entry(rdreg, &d->rd_regions, list) {
+               if ((base + size > rdreg->base) &&
+                       (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
+                       return true;
+       }
+       return false;
+}
+
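The overlap test above is the usual half-open interval intersection: [base, base + size) and a redistributor region intersect exactly when each range starts before the other one ends. A minimal standalone sketch of the same test follows; region_overlaps() and the sample addresses are made up for illustration and are not part of this patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;

/* Half-open interval test: true iff [a_base, a_base + a_size) and
 * [b_base, b_base + b_size) intersect. */
bool region_overlaps(gpa_t a_base, size_t a_size, gpa_t b_base, size_t b_size)
{
        return (a_base + a_size > b_base) && (a_base < b_base + b_size);
}

int main(void)
{
        /* Two adjacent 128K regions do not overlap... */
        printf("%d\n", region_overlaps(0x80000, 0x20000, 0xa0000, 0x20000)); /* 0 */
        /* ...but shifting the second one down by one 4K page does. */
        printf("%d\n", region_overlaps(0x80000, 0x20000, 0x9f000, 0x20000)); /* 1 */
        return 0;
}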
 /*
  * Check for overlapping regions and for regions crossing the end of memory
  * for base addresses which have already been set.
  */
 bool vgic_v3_check_base(struct kvm *kvm)
 {
        struct vgic_dist *d = &kvm->arch.vgic;
-       gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;
-       struct vgic_redist_region *rdreg =
-               list_first_entry(&d->rd_regions,
-                                struct vgic_redist_region, list);
-
-       redist_size *= atomic_read(&kvm->online_vcpus);
+       struct vgic_redist_region *rdreg;
 
        if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
            d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
                return false;
 
-       if (rdreg && (rdreg->base + redist_size < rdreg->base))
-               return false;
-
-       /* Both base addresses must be set to check if they overlap */
-       if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) || !rdreg)
-               return true;
-
-       if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= rdreg->base)
-               return true;
+       list_for_each_entry(rdreg, &d->rd_regions, list) {
+               if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
+                       rdreg->base)
+                       return false;
+       }
 
-       if (rdreg->base + redist_size <= d->vgic_dist_base)
+       if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
                return true;
 
-       return false;
+       return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
+                                     KVM_VGIC_V3_DIST_SIZE);
 }
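Note that the new loop only guards against wrap-around: unsigned addition is modulo 2^64, so a region that crosses the end of the guest physical address space ends up with base + size below its own base. The distributor/redistributor overlap itself is delegated to vgic_v3_rdist_overlap(). A small sketch of that wrap-around idiom, with an invented helper name and values, purely for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t gpa_t;

/* True iff [base, base + size) wraps past the top of the address space. */
bool region_wraps(gpa_t base, size_t size)
{
        return base + size < base;
}

/* region_wraps(0xffffffffffff0000ULL, 0x20000) is true:
 * the sum wraps around to 0x10000, which is below the base. */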
 
 /**
 
 
 struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rdregs);
 
+static inline size_t
+vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
+{
+       if (!rdreg->count)
+               return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
+       else
+               return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
+}
+bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
+
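The helper above encodes the sizing rule for a redistributor region: a region with count == 0 (the legacy, vcpu-count-sized case) is sized by the number of online vcpus, while a region with an explicit count covers exactly count redistributors, each taking KVM_VGIC_V3_REDIST_SIZE (two 64K frames). A simplified standalone illustration follows; struct fake_rdreg, rd_region_size() and REDIST_FRAME_SIZE are stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdint.h>

#define REDIST_FRAME_SIZE       (2 * 65536)     /* RD + SGI frame, 64K each */

struct fake_rdreg {
        uint32_t count;         /* 0 == legacy region sized by vcpu count */
};

size_t rd_region_size(unsigned int online_vcpus, const struct fake_rdreg *rdreg)
{
        unsigned int nr = rdreg->count ? rdreg->count : online_vcpus;

        return (size_t)nr * REDIST_FRAME_SIZE;
}

/* With 4 online vcpus:
 *   count == 0  ->  4 * 128K = 512K  (region grows with the vcpu count)
 *   count == 2  ->  2 * 128K = 256K  (explicitly sized region)          */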
 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
                         u32 devid, u32 eventid, struct vgic_irq **irq);
 struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);