protect the ramlist with a separate mutex
Add the new mutex that protects shared state between ram_save_live and the iothread. If the iothread mutex has to be taken together with the ramlist mutex, the iothread mutex shall always be taken _outside_, i.e. first; the ramlist mutex nests inside it.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Umesh Deshpande <udeshpan@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Orit Wasserman <owasserm@redhat.com>
parent f798b07f51
commit b2a8658ef5
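The rule stated in the commit message is a plain lock-ordering discipline: whenever both locks are held, the iothread (big QEMU) lock is always the outer lock and the ramlist mutex the inner one, so no two threads can ever acquire them in opposite orders and deadlock. A minimal, standalone sketch of that discipline in plain pthreads, with hypothetical names standing in for the QEMU locks (this is an analogy, not QEMU code):

/* Lock-ordering sketch: the outer lock is always taken before the
 * inner one, never the other way around. */
#include <pthread.h>

static pthread_mutex_t iothread_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t ramlist_lock  = PTHREAD_MUTEX_INITIALIZER; /* inner */

static void modify_block_list(void)
{
    pthread_mutex_lock(&iothread_lock);   /* outer lock first */
    pthread_mutex_lock(&ramlist_lock);    /* inner lock second */
    /* ... add or remove a block ... */
    pthread_mutex_unlock(&ramlist_lock);
    pthread_mutex_unlock(&iothread_lock);
}

static void walk_block_list(void)
{
    /* A reader that does not hold the outer lock takes only the inner
     * one, and must not try to grab the outer lock while holding it. */
    pthread_mutex_lock(&ramlist_lock);
    /* ... read-only traversal ... */
    pthread_mutex_unlock(&ramlist_lock);
}

int main(void)
{
    modify_block_list();
    walk_block_list();
    return 0;
}

The hunks that follow apply this rule, first to the RAM migration code, then to the RAM block management in exec.c, and finally to the header that declares RAMBlock and RAMList.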
@@ -528,7 +528,6 @@ static void ram_migration_cancel(void *opaque)
     migration_end();
 }
 
-
 static void reset_ram_globals(void)
 {
     last_block = NULL;
@@ -547,6 +546,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     bitmap_set(migration_bitmap, 0, ram_pages);
     migration_dirty_pages = ram_pages;
 
+    qemu_mutex_lock_ramlist();
     bytes_transferred = 0;
     reset_ram_globals();
 
@@ -574,6 +574,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
         qemu_put_be64(f, block->length);
     }
 
+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
     return 0;
@@ -588,6 +589,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     uint64_t expected_downtime;
     MigrationState *s = migrate_get_current();
 
+    qemu_mutex_lock_ramlist();
+
     if (ram_list.version != last_version) {
         reset_ram_globals();
     }
@@ -636,6 +639,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         bwidth = 0.000001;
     }
 
+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
     expected_downtime = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
@@ -656,6 +660,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
 {
     migration_bitmap_sync();
 
+    qemu_mutex_lock_ramlist();
+
     /* try transferring iterative blocks of memory */
 
     /* flush all remaining blocks regardless of rate limiting */
@@ -671,6 +677,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     }
     migration_end();
 
+    qemu_mutex_unlock_ramlist();
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
 
     return 0;
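These hunks are the RAM save handlers (the code lived in arch_init.c at the time, as far as I can tell from the function names): each handler now brackets its work on ram_list with the new mutex and drops it before writing the end-of-section marker. A condensed, illustrative sketch of the resulting shape of ram_save_setup(), omitting most of the real body; it assumes the surrounding QEMU code and is not a drop-in replacement:

/* Sketch only: a trimmed-down view of ram_save_setup() after this patch. */
static int ram_save_setup_sketch(QEMUFile *f, void *opaque)
{
    RAMBlock *block;

    (void)opaque;                           /* unused in this sketch */

    qemu_mutex_lock_ramlist();              /* added by this commit */
    bytes_transferred = 0;
    reset_ram_globals();

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* ... emit each block's name and block->length ... */
    }

    qemu_mutex_unlock_ramlist();            /* drop the lock before EOS */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    return 0;
}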
exec.c (29 changed lines)

@@ -213,6 +213,7 @@ bool memory_region_is_unassigned(MemoryRegion *mr)
 void cpu_exec_init_all(void)
 {
 #if !defined(CONFIG_USER_ONLY)
+    qemu_mutex_init(&ram_list.mutex);
     memory_map_init();
     io_mem_init();
 #endif
@@ -801,6 +802,16 @@ void qemu_flush_coalesced_mmio_buffer(void)
         kvm_flush_coalesced_mmio_buffer();
 }
 
+void qemu_mutex_lock_ramlist(void)
+{
+    qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_ramlist(void)
+{
+    qemu_mutex_unlock(&ram_list.mutex);
+}
+
 #if defined(__linux__) && !defined(TARGET_S390X)
 
 #include <sys/vfs.h>
@@ -982,6 +993,8 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
     }
     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
 
+    /* This assumes the iothread lock is taken here too. */
+    qemu_mutex_lock_ramlist();
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
@@ -989,6 +1002,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
             abort();
         }
     }
+    qemu_mutex_unlock_ramlist();
 }
 
 static int memory_try_enable_merging(void *addr, size_t len)
@@ -1012,6 +1026,8 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     size = TARGET_PAGE_ALIGN(size);
     new_block = g_malloc0(sizeof(*new_block));
 
+    /* This assumes the iothread lock is taken here too. */
+    qemu_mutex_lock_ramlist();
     new_block->mr = mr;
     new_block->offset = find_ram_offset(size);
     if (host) {
@@ -1057,6 +1073,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     ram_list.mru_block = NULL;
 
     ram_list.version++;
+    qemu_mutex_unlock_ramlist();
 
     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                        last_ram_offset() >> TARGET_PAGE_BITS);
@@ -1082,21 +1099,26 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
 {
     RAMBlock *block;
 
+    /* This assumes the iothread lock is taken here too. */
+    qemu_mutex_lock_ramlist();
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
             QTAILQ_REMOVE(&ram_list.blocks, block, next);
             ram_list.mru_block = NULL;
             ram_list.version++;
             g_free(block);
-            return;
+            break;
         }
     }
+    qemu_mutex_unlock_ramlist();
 }
 
 void qemu_ram_free(ram_addr_t addr)
 {
     RAMBlock *block;
 
+    /* This assumes the iothread lock is taken here too. */
+    qemu_mutex_lock_ramlist();
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
             QTAILQ_REMOVE(&ram_list.blocks, block, next);
@@ -1127,9 +1149,10 @@ void qemu_ram_free(ram_addr_t addr)
 #endif
             }
             g_free(block);
-            return;
+            break;
         }
     }
+    qemu_mutex_unlock_ramlist();
 
 }
 
@@ -1207,6 +1230,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
 {
     RAMBlock *block;
 
+    /* The list is protected by the iothread lock here. */
     block = ram_list.mru_block;
     if (block && addr - block->offset < block->length) {
         goto found;
@@ -1246,6 +1270,7 @@ static void *qemu_safe_ram_ptr(ram_addr_t addr)
 {
     RAMBlock *block;
 
+    /* The list is protected by the iothread lock here. */
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         if (addr - block->offset < block->length) {
             if (xen_enabled()) {
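With the wrappers above in place, a thread that does not hold the iothread lock (the migration thread is the motivating case) can still traverse the block list safely by taking the ramlist mutex alone. A hypothetical helper, using only identifiers touched by this commit, to show the intended usage; it assumes the surrounding QEMU code base and is not standalone:

/* Hypothetical reader that holds only the ramlist mutex. */
static uint64_t total_ram_bytes_sketch(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    qemu_mutex_lock_ramlist();        /* the ramlist mutex alone suffices for reads */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }
    qemu_mutex_unlock_ramlist();
    return total;
}

Writers such as qemu_ram_alloc_from_ptr() and qemu_ram_free() take the ramlist mutex while already holding the iothread lock, matching the ordering rule, which is why a reader holding only the ramlist mutex never sees a half-updated list. The remaining hunks below are from the header that declares RAMBlock and RAMList.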
@@ -22,6 +22,7 @@
 #include "qemu-common.h"
 #include "qemu/tls.h"
 #include "exec/cpu-common.h"
+#include "qemu/thread.h"
 
 /* some important defines:
  *
@@ -487,6 +488,9 @@ typedef struct RAMBlock {
     ram_addr_t length;
     uint32_t flags;
     char idstr[256];
+    /* Reads can take either the iothread or the ramlist lock.
+     * Writes must take both locks.
+     */
     QTAILQ_ENTRY(RAMBlock) next;
 #if defined(__linux__) && !defined(TARGET_S390X)
     int fd;
@@ -494,8 +498,11 @@ typedef struct RAMBlock {
 } RAMBlock;
 
 typedef struct RAMList {
+    QemuMutex mutex;
+    /* Protected by the iothread lock. */
     uint8_t *phys_dirty;
     RAMBlock *mru_block;
+    /* Protected by the ramlist lock. */
     QTAILQ_HEAD(, RAMBlock) blocks;
     uint32_t version;
 } RAMList;
@@ -516,6 +523,8 @@ extern int mem_prealloc;
 
 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
 ram_addr_t last_ram_offset(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
 #endif /* !CONFIG_USER_ONLY */
 
 int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
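The comments added to RAMBlock and RAMList encode an asymmetric rule: readers may hold either the iothread lock or the ramlist mutex, while writers must hold both and bump ram_list.version. That version field is what lets the migration loop notice that a block was added or removed while it was busy sending pages. A simplified sketch of that re-check idiom, adapted from the ram_save_iterate hunk above (illustrative only, not a drop-in function):

/* Illustrative: revalidate cached iteration state under the ramlist mutex. */
static void resync_if_blocks_changed(void)
{
    qemu_mutex_lock_ramlist();
    if (ram_list.version != last_version) {
        /* A writer held both locks, changed ram_list.blocks and
         * incremented ram_list.version; cached pointers into the
         * list may be stale, so start the walk over. */
        reset_ram_globals();
    }
    /* ... continue scanning pages while still holding the mutex ... */
    qemu_mutex_unlock_ramlist();
}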