struct HostMemoryBackendFile {
HostMemoryBackend parent_obj;
- bool share;
bool discard_data;
char *mem_path;
uint64_t align;
path = object_get_canonical_path(OBJECT(backend));
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend),
path,
- backend->size, fb->align, fb->share,
+ backend->size, fb->align, backend->share,
fb->mem_path, errp);
g_free(path);
}
fb->mem_path = g_strdup(str);
}
-static bool file_memory_backend_get_share(Object *o, Error **errp)
-{
- HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
-
- return fb->share;
-}
-
-static void file_memory_backend_set_share(Object *o, bool value, Error **errp)
-{
- HostMemoryBackend *backend = MEMORY_BACKEND(o);
- HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
-
- if (host_memory_backend_mr_inited(backend)) {
- error_setg(errp, "cannot change property value");
- return;
- }
- fb->share = value;
-}
-
static bool file_memory_backend_get_discard_data(Object *o, Error **errp)
{
return MEMORY_BACKEND_FILE(o)->discard_data;
bc->alloc = file_backend_memory_alloc;
oc->unparent = file_backend_unparent;
- object_class_property_add_bool(oc, "share",
- file_memory_backend_get_share, file_memory_backend_set_share,
- &error_abort);
object_class_property_add_bool(oc, "discard-data",
file_memory_backend_get_discard_data, file_memory_backend_set_discard_data,
&error_abort);
}
path = object_get_canonical_path_component(OBJECT(backend));
- memory_region_init_ram_nomigrate(&backend->mr, OBJECT(backend), path,
- backend->size, errp);
+ memory_region_init_ram_shared_nomigrate(&backend->mr, OBJECT(backend), path,
+ backend->size, backend->share, errp);
g_free(path);
}
backend->id = g_strdup(str);
}
+static bool host_memory_backend_get_share(Object *o, Error **errp)
+{
+ HostMemoryBackend *backend = MEMORY_BACKEND(o);
+
+ return backend->share;
+}
+
+static void host_memory_backend_set_share(Object *o, bool value, Error **errp)
+{
+ HostMemoryBackend *backend = MEMORY_BACKEND(o);
+
+ if (host_memory_backend_mr_inited(backend)) {
+ error_setg(errp, "cannot change property value");
+ return;
+ }
+ backend->share = value;
+}
+
static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
host_memory_backend_get_policy,
host_memory_backend_set_policy, &error_abort);
object_class_property_add_str(oc, "id", get_id, set_id, &error_abort);
+ object_class_property_add_bool(oc, "share",
+ host_memory_backend_get_share, host_memory_backend_set_share,
+ &error_abort);
}
static void host_memory_backend_finalize(Object *o)
uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);
-static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
+static void *(*phys_mem_alloc)(size_t size, uint64_t *align, bool shared) =
qemu_anon_ram_alloc;
/*
* Accelerators with unusual needs may need this. Hopefully, we can
* get rid of it eventually.
*/
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
+void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared))
{
phys_mem_alloc = alloc;
}
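+
+/* For instance, s390x KVM swaps in its own allocator (legacy_s390_alloc,
+ * shown further below) by calling phys_mem_set_alloc(legacy_s390_alloc)
+ * during accelerator init. */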
}
}
-static void ram_block_add(RAMBlock *new_block, Error **errp)
+static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
{
RAMBlock *block;
RAMBlock *last_block = NULL;
}
} else {
new_block->host = phys_mem_alloc(new_block->max_length,
- &new_block->mr->align);
+ &new_block->mr->align, shared);
if (!new_block->host) {
error_setg_errno(errp, errno,
"cannot set up guest memory '%s'",
return NULL;
}
- ram_block_add(new_block, &local_err);
+ ram_block_add(new_block, &local_err, share);
if (local_err) {
g_free(new_block);
error_propagate(errp, local_err);
void (*resized)(const char*,
uint64_t length,
void *host),
- void *host, bool resizeable,
+ void *host, bool resizeable, bool share,
MemoryRegion *mr, Error **errp)
{
RAMBlock *new_block;
if (resizeable) {
new_block->flags |= RAM_RESIZEABLE;
}
- ram_block_add(new_block, &local_err);
+ ram_block_add(new_block, &local_err, share);
if (local_err) {
g_free(new_block);
error_propagate(errp, local_err);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp)
{
- return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
+ return qemu_ram_alloc_internal(size, size, NULL, host, false,
+ false, mr, errp);
}
-RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
+RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share,
+ MemoryRegion *mr, Error **errp)
{
- return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
+ return qemu_ram_alloc_internal(size, size, NULL, NULL, false,
+ share, mr, errp);
}
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
void *host),
MemoryRegion *mr, Error **errp)
{
- return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
+ return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true,
+ false, mr, errp);
}
static void reclaim_ramblock(RAMBlock *block)
uint64_t size,
Error **errp);
+/**
+ * memory_region_init_ram_shared_nomigrate: Initialize RAM memory region.
+ * Accesses into the region will
+ * modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: Region name, becomes part of RAMBlock name used in migration stream;
+ *        must be unique within any device
+ * @size: size of the region.
+ * @share: allow remapping RAM to different addresses
+ * @errp: pointer to Error*, to store an error if it happens.
+ *
+ * Note that this function is similar to memory_region_init_ram_nomigrate.
+ * The only difference is that part of the RAM region can be remapped.
+ */
+void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
+ struct Object *owner,
+ const char *name,
+ uint64_t size,
+ bool share,
+ Error **errp);
+
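+/*
+ * A minimal usage sketch (illustrative only; "obj", the region name and
+ * the 1 GiB size are assumptions, and error handling is omitted):
+ *
+ *     MemoryRegion mr;
+ *     Error *err = NULL;
+ *
+ *     memory_region_init_ram_shared_nomigrate(&mr, OBJECT(obj), "demo-ram",
+ *                                             1024 * 1024 * 1024, true, &err);
+ *
+ * With share=true the backing RAM is allocated with a MAP_SHARED mapping
+ * on POSIX hosts, so parts of it can later be remapped (as needed by
+ * pvrdma).
+ */
+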
/**
* memory_region_init_resizeable_ram: Initialize memory region with resizeable
* RAM. Accesses into the region will
Error **errp);
RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
MemoryRegion *mr, Error **errp);
-RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc(ram_addr_t size, bool share, MemoryRegion *mr,
+ Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
void (*resized)(const char*,
uint64_t length,
int qemu_daemon(int nochdir, int noclose);
void *qemu_try_memalign(size_t alignment, size_t size);
void *qemu_memalign(size_t alignment, size_t size);
-void *qemu_anon_ram_alloc(size_t size, uint64_t *align);
+void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared);
void qemu_vfree(void *ptr);
void qemu_anon_ram_free(void *ptr, size_t size);
char *id;
uint64_t size;
bool merge, dump;
- bool prealloc, force_prealloc, is_mapped;
+ bool prealloc, force_prealloc, is_mapped, share;
DECLARE_BITMAP(host_nodes, MAX_NODES + 1);
HostMemPolicy policy;
/* interface with exec.c */
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
+void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align, bool shared));
/* internal API */
const char *name,
uint64_t size,
Error **errp)
+{
+ memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
+}
+
+void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
+ Object *owner,
+ const char *name,
+ uint64_t size,
+ bool share,
+ Error **errp)
{
memory_region_init(mr, owner, name, size);
mr->ram = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, share, mr, errp);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
mr->readonly = true;
mr->terminates = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
}
mr->terminates = true;
mr->rom_device = true;
mr->destructor = memory_region_destructor_ram;
- mr->ram_block = qemu_ram_alloc(size, mr, errp);
+ mr->ram_block = qemu_ram_alloc(size, false, mr, errp);
}
void memory_region_init_iommu(void *_iommu_mr,
region is marked as private to QEMU, or shared. The latter allows
a co-operating external process to access the QEMU memory region.
+The @option{share} option is also required for pvrdma devices due to
+limitations in the RDMA API provided by Linux.
+
+Setting the @option{share} option to @var{on} might affect the ability
+to configure NUMA bindings for the memory backend under some
+circumstances; see Documentation/vm/numa_memory_policy.txt in the
+Linux kernel source tree for additional details.
+
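+For example, a shared file-backed memory backend could be created as
+follows (the @option{id}, @option{size} and @option{mem-path} values
+are illustrative only):
+
+@example
+-object memory-backend-file,id=mem0,share=on,mem-path=/dev/shm/guestmem,size=4G
+@end example
+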
Setting the @option{discard-data} boolean option to @var{on}
indicates that file contents can be destroyed when QEMU exits,
to avoid unnecessarily flushing data to the backing file. Note
the device DAX /dev/dax0.0 requires 2M alignment rather than 4K. In
such cases, users can specify the required alignment via this option.
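+For example (illustrative values, assuming a DAX device of at least 4G):
+@example
+-object memory-backend-file,id=mem1,mem-path=/dev/dax0.0,size=4G,align=2M
+@end example
+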
-@item -object memory-backend-ram,id=@var{id},merge=@var{on|off},dump=@var{on|off},prealloc=@var{on|off},size=@var{size},host-nodes=@var{host-nodes},policy=@var{default|preferred|bind|interleave}
+@item -object memory-backend-ram,id=@var{id},merge=@var{on|off},dump=@var{on|off},share=@var{on|off},prealloc=@var{on|off},size=@var{size},host-nodes=@var{host-nodes},policy=@var{default|preferred|bind|interleave}
Creates a memory backend object, which can be used to back the guest RAM.
Memory backend objects offer more control than the @option{-m} option that is
static int active_cmma;
-static void *legacy_s390_alloc(size_t size, uint64_t *align);
+static void *legacy_s390_alloc(size_t size, uint64_t *align, bool shared);
static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
{
* to grow. We also have to use MAP parameters that avoid
* read-only mapping of guest pages.
*/
-static void *legacy_s390_alloc(size_t size, uint64_t *align)
+static void *legacy_s390_alloc(size_t size, uint64_t *align, bool shared)
{
void *mem;
}
/* alloc anonymous memory pages, optionally shared (MAP_SHARED) */
-void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment)
+void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared)
{
size_t align = QEMU_VMALLOC_ALIGN;
- void *ptr = qemu_ram_mmap(-1, size, align, false);
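+    /* shared=true makes qemu_ram_mmap() use MAP_SHARED rather than
+     * MAP_PRIVATE, allowing parts of the region to be remapped later. */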
+ void *ptr = qemu_ram_mmap(-1, size, align, shared);
if (ptr == MAP_FAILED) {
return NULL;
return qemu_oom_check(qemu_try_memalign(alignment, size));
}
-void *qemu_anon_ram_alloc(size_t size, uint64_t *align)
+void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared)
{
void *ptr;