*/
 static void virtio_mem_refresh_config(struct virtio_mem *vm)
 {
-       const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+       const struct range pluggable_range = mhp_get_pluggable_range(true);
        uint64_t new_plugged_size, usable_region_size, end_addr;
 
        /* the plugged_size is just a reflection of what _we_ did previously */
        virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
                        &new_plugged_size);
        if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
                vm->plugged_size = new_plugged_size;

        /* calculate the last usable memory block id */
        virtio_cread_le(vm->vdev, struct virtio_mem_config,
                        usable_region_size, &usable_region_size);
-       end_addr = vm->addr + usable_region_size;
-       end_addr = min(end_addr, phys_limit);
+       end_addr = min(vm->addr + usable_region_size - 1,
+                      pluggable_range.end);
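+       /* end_addr is an inclusive limit: the last usable, addressable byte. */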
 
-       if (vm->in_sbm)
-               vm->sbm.last_usable_mb_id =
-                                        virtio_mem_phys_to_mb_id(end_addr) - 1;
-       else
-               vm->bbm.last_usable_bb_id =
-                                    virtio_mem_phys_to_bb_id(vm, end_addr) - 1;
+       if (vm->in_sbm) {
+               vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
+               if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
+                       vm->sbm.last_usable_mb_id--;
+       } else {
+               vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
+                                                                    end_addr);
+               if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
+                       vm->bbm.last_usable_bb_id--;
+       }
+       /*
+        * If we cannot plug any of our device memory (e.g., nothing in the
+        * usable region is addressable), the last usable memory block id will
+        * be smaller than the first usable memory block id. We'll stop
+        * attempting to add memory with -ENOSPC from our main loop.
+        */
 
        /* see if there is a request to change the size */
        virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
                        &vm->requested_size);
 
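For illustration, here is a minimal user-space sketch of the calculation above
(not part of the patch; the 128 MiB block size, the addresses and the helper
name are invented for the example). When the usable region ends in the middle
of a memory block, the partially covered block is dropped, just like the
IS_ALIGNED() check above does:

	#include <stdint.h>
	#include <stdio.h>

	#define MB_SIZE (128ULL << 20)	/* assumed Linux memory block size */

	/* stand-in for virtio_mem_phys_to_mb_id() */
	static uint64_t phys_to_mb_id(uint64_t addr)
	{
		return addr / MB_SIZE;
	}

	int main(void)
	{
		uint64_t addr = 0x100000000ULL;		/* invented device start */
		uint64_t usable = 200ULL << 20;		/* invented usable_region_size */
		uint64_t range_end = (1ULL << 40) - 1;	/* invented pluggable_range.end */
		uint64_t end_addr = addr + usable - 1;	/* inclusive, as in the patch */
		uint64_t last;

		if (end_addr > range_end)
			end_addr = range_end;		/* min(), open-coded */

		last = phys_to_mb_id(end_addr);
		if ((end_addr + 1) % MB_SIZE)		/* !IS_ALIGNED(end_addr + 1, ...) */
			last--;				/* drop the partial block */

		/* prints 33 - 1 = 32: block 32 is the last fully usable one */
		printf("last usable mb id: %llu\n", (unsigned long long)last);
		return 0;
	}
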
 static int virtio_mem_init(struct virtio_mem *vm)
 {
-       const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+       const struct range pluggable_range = mhp_get_pluggable_range(true);
        uint64_t sb_size, addr;
        uint16_t node_id;
 
        if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
                dev_warn(&vm->vdev->dev,
                         "The alignment of the physical end address can make some memory unusable.\n");
-       if (vm->addr + vm->region_size > phys_limit)
+       if (vm->addr < pluggable_range.start ||
+           vm->addr + vm->region_size - 1 > pluggable_range.end)
                dev_warn(&vm->vdev->dev,
-                        "Some memory is not addressable. This can make some memory unusable.\n");
+                        "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
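+       /*
+        * Note: mhp_get_pluggable_range(true) is the range in which memory can
+        * actually be hotplugged and accessed: it is bounded by
+        * MAX_PHYSMEM_BITS and, where required, by the range the architecture
+        * can identity-map.
+        */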
 
        /*
         * We want subblocks to span at least MAX_ORDER_NR_PAGES and
         * pageblock_nr_pages pages. This:
         * - Simplifies our page onlining code (virtio_mem_online_page_cb)
         *   and fake page onlining code (virtio_mem_fake_online).
         * - Is required for now for alloc_contig_range() to work reliably -
         *   it doesn't properly handle smaller granularity on ZONE_NORMAL.
         */
        sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES * PAGE_SIZE,
                        pageblock_nr_pages * PAGE_SIZE);
        sb_size = max_t(uint64_t, vm->device_block_size, sb_size);

        if (sb_size < memory_block_size_bytes() && !force_bbm) {
                /* SBM: At least two subblocks per Linux memory block. */
                vm->in_sbm = true;
                vm->sbm.sb_size = sb_size;
                vm->sbm.sbs_per_mb = memory_block_size_bytes() /
                                     vm->sbm.sb_size;
 
                /* Round up to the next full memory block */
-               addr = vm->addr + memory_block_size_bytes() - 1;
+               addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
+                      memory_block_size_bytes() - 1;
                vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
                vm->sbm.next_mb_id = vm->sbm.first_mb_id;
        } else {
                /* BBM: At least one Linux memory block. */
                vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
                                        memory_block_size_bytes());

                if (bbm_block_size) {
                        if (!is_power_of_2(bbm_block_size)) {
                                dev_warn(&vm->vdev->dev,
                                         "bbm_block_size is not a power of 2");
                        } else if (bbm_block_size < vm->bbm.bb_size) {
                                dev_warn(&vm->vdev->dev,
                                         "bbm_block_size is too small");
                        } else {
                                vm->bbm.bb_size = bbm_block_size;
                        }
                }
 
                /* Round up to the next aligned big block */
-               addr = vm->addr + vm->bbm.bb_size - 1;
+               addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
+                      vm->bbm.bb_size - 1;
                vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
                vm->bbm.next_bb_id = vm->bbm.first_bb_id;
        }
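
The rounding at the lower end is the mirror image: the device's start address
is first clamped to the start of the pluggable range, then rounded up so that
the first block id names a fully usable block. A sketch under the same
invented 128 MiB block size:

	#include <stdint.h>
	#include <stdio.h>

	#define MB_SIZE (128ULL << 20)	/* assumed Linux memory block size */

	int main(void)
	{
		uint64_t dev_start = 0x103000000ULL;	/* invented, not block-aligned */
		uint64_t range_start = 0x100000000ULL;	/* invented pluggable_range.start */
		uint64_t start = dev_start > range_start ? dev_start : range_start;

		/* round up to the next full memory block, as the driver does */
		uint64_t first_mb_id = (start + MB_SIZE - 1) / MB_SIZE;

		/* prints 33: the first block lying fully above the clamped start */
		printf("first usable mb id: %llu\n",
		       (unsigned long long)first_mb_id);
		return 0;
	}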