All the mail mirrored from lore.kernel.org
* [PATCH RESEND 0/7] migration pending patches
@ 2023-10-11 18:45 Fabiano Rosas
  2023-10-11 18:45 ` [PATCH RESEND 1/7] tests/qtest: migration-test: Add tests for file-based migration Fabiano Rosas
                   ` (6 more replies)
  0 siblings, 7 replies; 13+ messages in thread
From: Fabiano Rosas @ 2023-10-11 18:45 UTC (permalink / raw)
  To: qemu-devel; +Cc: Juan Quintela, Peter Xu

Hi, this is a resend of a few patches that have lingered over the past
few months. They are all reviewed and tested.

patch 1 is the test for the file transport, which missed the last pull
request;

patches 2-6 are small refactorings to ram.c that are prerequisites for
the fixed-ram work;

patch 7 re-enables the multifd cancel test, which has been fixed for a
while now by commit 01ec0f3a92 ("migration/multifd: Protect accesses
to migration_threads").

CI run: https://gitlab.com/farosas/qemu/-/pipelines/1033679993

Thanks!

Fabiano Rosas (6):
  tests/qtest: migration-test: Add tests for file-based migration
  migration/ram: Remove RAMState from xbzrle_cache_zero_page
  migration/ram: Stop passing QEMUFile around in save_zero_page
  migration/ram: Move xbzrle zero page handling into save_zero_page
  migration/ram: Merge save_zero_page functions
  tests/qtest: Re-enable multifd cancel test

Nikolay Borisov (1):
  migration/ram: Refactor precopy ram loading code

 migration/ram.c              | 216 ++++++++++++++++++-----------------
 tests/qtest/migration-test.c | 157 +++++++++++++++++++++++--
 2 files changed, 258 insertions(+), 115 deletions(-)

-- 
2.35.3



^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH RESEND 1/7] tests/qtest: migration-test: Add tests for file-based migration
  2023-10-11 18:45 [PATCH RESEND 0/7] migration pending patches Fabiano Rosas
@ 2023-10-11 18:45 ` Fabiano Rosas
  2023-10-11 18:45 ` [PATCH RESEND 2/7] migration/ram: Refactor precopy ram loading code Fabiano Rosas
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Fabiano Rosas @ 2023-10-11 18:45 UTC (permalink / raw)
  To: qemu-devel
  Cc: Juan Quintela, Peter Xu, Leonardo Bras, Thomas Huth,
	Laurent Vivier, Paolo Bonzini

Add basic tests for file-based migration.

Note that we cannot use test_precopy_common because that routine
expects it to be possible to run the migration live. With the file
transport there is no live migration because we must wait for the
source to finish writing the migration data to the file before the
destination can start reading. Add a new test function specifically
to handle file migration.
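
For reference, the QMP sequence the new test drives is roughly the
following sketch (the file path and offset are illustrative, and the
destination is assumed to have been started with '-incoming defer'):

  (source)
  {"execute": "migrate",
   "arguments": {"uri": "file:/tmp/migfile,offset=0x1000"}}
  ... wait for query-migrate to report status "completed" ...

  (destination, only after the source has finished)
  {"execute": "migrate-incoming",
   "arguments": {"uri": "file:/tmp/migfile,offset=0x1000"}}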

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
 tests/qtest/migration-test.c | 147 +++++++++++++++++++++++++++++++++++
 1 file changed, 147 insertions(+)

diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index 8eb2053dbb..2dc6ac2f87 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -66,6 +66,10 @@ static bool got_dst_resume;
  */
 #define DIRTYLIMIT_TOLERANCE_RANGE  25  /* MB/s */
 
+#define QEMU_VM_FILE_MAGIC 0x5145564d
+#define FILE_TEST_FILENAME "migfile"
+#define FILE_TEST_OFFSET 0x1000
+
 #if defined(__linux__)
 #include <sys/syscall.h>
 #include <sys/vfs.h>
@@ -882,6 +886,7 @@ static void test_migrate_end(QTestState *from, QTestState *to, bool test_dest)
     cleanup("migsocket");
     cleanup("src_serial");
     cleanup("dest_serial");
+    cleanup(FILE_TEST_FILENAME);
 }
 
 #ifdef CONFIG_GNUTLS
@@ -1610,6 +1615,70 @@ finish:
     test_migrate_end(from, to, args->result == MIG_TEST_SUCCEED);
 }
 
+static void test_file_common(MigrateCommon *args, bool stop_src)
+{
+    QTestState *from, *to;
+    void *data_hook = NULL;
+    g_autofree char *connect_uri = g_strdup(args->connect_uri);
+
+    if (test_migrate_start(&from, &to, args->listen_uri, &args->start)) {
+        return;
+    }
+
+    /*
+     * File migration is never live. We can keep the source VM running
+     * during migration, but the destination will not be running
+     * concurrently.
+     */
+    g_assert_false(args->live);
+
+    if (args->start_hook) {
+        data_hook = args->start_hook(from, to);
+    }
+
+    migrate_ensure_converge(from);
+    wait_for_serial("src_serial");
+
+    if (stop_src) {
+        qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}");
+        if (!got_src_stop) {
+            qtest_qmp_eventwait(from, "STOP");
+        }
+    }
+
+    if (args->result == MIG_TEST_QMP_ERROR) {
+        migrate_qmp_fail(from, connect_uri, "{}");
+        goto finish;
+    }
+
+    migrate_qmp(from, connect_uri, "{}");
+    wait_for_migration_complete(from);
+
+    /*
+     * We need to wait for the source to finish before starting the
+     * destination.
+     */
+    migrate_incoming_qmp(to, connect_uri, "{}");
+    wait_for_migration_complete(to);
+
+    if (stop_src) {
+        qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}");
+    }
+
+    if (!got_dst_resume) {
+        qtest_qmp_eventwait(to, "RESUME");
+    }
+
+    wait_for_serial("dest_serial");
+
+finish:
+    if (args->finish_hook) {
+        args->finish_hook(from, to, data_hook);
+    }
+
+    test_migrate_end(from, to, args->result == MIG_TEST_SUCCEED);
+}
+
 static void test_precopy_unix_plain(void)
 {
     g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
@@ -1805,6 +1874,76 @@ static void test_precopy_unix_compress_nowait(void)
     test_precopy_common(&args);
 }
 
+static void test_precopy_file(void)
+{
+    g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs,
+                                           FILE_TEST_FILENAME);
+    MigrateCommon args = {
+        .connect_uri = uri,
+        .listen_uri = "defer",
+    };
+
+    test_file_common(&args, true);
+}
+
+static void file_offset_finish_hook(QTestState *from, QTestState *to,
+                                    void *opaque)
+{
+#if defined(__linux__)
+    g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, FILE_TEST_FILENAME);
+    size_t size = FILE_TEST_OFFSET + sizeof(QEMU_VM_FILE_MAGIC);
+    uintptr_t *addr, *p;
+    int fd;
+
+    fd = open(path, O_RDONLY);
+    g_assert(fd != -1);
+    addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+    g_assert(addr != MAP_FAILED);
+
+    /*
+     * Ensure the skipped offset contains zeros and the migration
+     * stream starts at the right place.
+     */
+    p = addr;
+    while (p < addr + FILE_TEST_OFFSET / sizeof(uintptr_t)) {
+        g_assert(*p == 0);
+        p++;
+    }
+    g_assert_cmpint(cpu_to_be32(*p), ==, QEMU_VM_FILE_MAGIC);
+
+    munmap(addr, size);
+    close(fd);
+#endif
+}
+
+static void test_precopy_file_offset(void)
+{
+    g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=%d", tmpfs,
+                                           FILE_TEST_FILENAME,
+                                           FILE_TEST_OFFSET);
+    MigrateCommon args = {
+        .connect_uri = uri,
+        .listen_uri = "defer",
+        .finish_hook = file_offset_finish_hook,
+    };
+
+    test_file_common(&args, false);
+}
+
+static void test_precopy_file_offset_bad(void)
+{
+    /* using a value not supported by qemu_strtosz() */
+    g_autofree char *uri = g_strdup_printf("file:%s/%s,offset=0x20M",
+                                           tmpfs, FILE_TEST_FILENAME);
+    MigrateCommon args = {
+        .connect_uri = uri,
+        .listen_uri = "defer",
+        .result = MIG_TEST_QMP_ERROR,
+    };
+
+    test_file_common(&args, false);
+}
+
 static void test_precopy_tcp_plain(void)
 {
     MigrateCommon args = {
@@ -2849,6 +2988,14 @@ int main(int argc, char **argv)
         qtest_add_func("/migration/precopy/unix/compress/nowait",
                        test_precopy_unix_compress_nowait);
     }
+
+    qtest_add_func("/migration/precopy/file",
+                   test_precopy_file);
+    qtest_add_func("/migration/precopy/file/offset",
+                   test_precopy_file_offset);
+    qtest_add_func("/migration/precopy/file/offset/bad",
+                   test_precopy_file_offset_bad);
+
 #ifdef CONFIG_GNUTLS
     qtest_add_func("/migration/precopy/unix/tls/psk",
                    test_precopy_unix_tls_psk);
-- 
2.35.3



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RESEND 2/7] migration/ram: Refactor precopy ram loading code
  2023-10-11 18:45 [PATCH RESEND 0/7] migration pending patches Fabiano Rosas
  2023-10-11 18:45 ` [PATCH RESEND 1/7] tests/qtest: migration-test: Add tests for file-based migration Fabiano Rosas
@ 2023-10-11 18:45 ` Fabiano Rosas
  2023-10-16  7:38   ` Juan Quintela
  2023-10-11 18:46 ` [PATCH RESEND 3/7] migration/ram: Remove RAMState from xbzrle_cache_zero_page Fabiano Rosas
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 13+ messages in thread
From: Fabiano Rosas @ 2023-10-11 18:45 UTC (permalink / raw)
  To: qemu-devel
  Cc: Juan Quintela, Peter Xu, Nikolay Borisov,
	Philippe Mathieu-Daudé, Leonardo Bras

From: Nikolay Borisov <nborisov@suse.com>

Extract the ramblock parsing code into a routine that operates on the
sequence of headers from the stream and another that parses the
individual ramblock. This makes ram_load_precopy() easier to
comprehend.
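
For orientation, each per-ramblock entry in the RAM_SAVE_FLAG_MEM_SIZE
record that parse_ramblocks()/parse_ramblock() walk has the following
layout (reconstructed from the code below; field names are informal):

  u8    len              length of the block idstr
  len   idstr            not NUL-terminated on the wire
  be64  used_length      size of the block
  [be64 page size]       only when postcopy was advised and the block's
                         page size differs from the host page size
  [be64 block GPA]       only when the ignore-shared capability is set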

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
 migration/ram.c | 141 +++++++++++++++++++++++++++---------------------
 1 file changed, 79 insertions(+), 62 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 2f5ce4d60b..1cc14e543e 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3861,6 +3861,82 @@ void colo_flush_ram_cache(void)
     trace_colo_flush_ram_cache_end();
 }
 
+static int parse_ramblock(QEMUFile *f, RAMBlock *block, ram_addr_t length)
+{
+    int ret = 0;
+    /* ADVISE is earlier, it shows the source has the postcopy capability on */
+    bool postcopy_advised = migration_incoming_postcopy_advised();
+
+    assert(block);
+
+    if (!qemu_ram_is_migratable(block)) {
+        error_report("block %s should not be migrated !", block->idstr);
+        return -EINVAL;
+    }
+
+    if (length != block->used_length) {
+        Error *local_err = NULL;
+
+        ret = qemu_ram_resize(block, length, &local_err);
+        if (local_err) {
+            error_report_err(local_err);
+        }
+    }
+    /* For postcopy we need to check hugepage sizes match */
+    if (postcopy_advised && migrate_postcopy_ram() &&
+        block->page_size != qemu_host_page_size) {
+        uint64_t remote_page_size = qemu_get_be64(f);
+        if (remote_page_size != block->page_size) {
+            error_report("Mismatched RAM page size %s "
+                         "(local) %zd != %" PRId64, block->idstr,
+                         block->page_size, remote_page_size);
+            ret = -EINVAL;
+        }
+    }
+    if (migrate_ignore_shared()) {
+        hwaddr addr = qemu_get_be64(f);
+        if (migrate_ram_is_ignored(block) &&
+            block->mr->addr != addr) {
+            error_report("Mismatched GPAs for block %s "
+                         "%" PRId64 "!= %" PRId64, block->idstr,
+                         (uint64_t)addr, (uint64_t)block->mr->addr);
+            ret = -EINVAL;
+        }
+    }
+    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG, block->idstr);
+
+    return ret;
+}
+
+static int parse_ramblocks(QEMUFile *f, ram_addr_t total_ram_bytes)
+{
+    int ret = 0;
+
+    /* Synchronize RAM block list */
+    while (!ret && total_ram_bytes) {
+        RAMBlock *block;
+        char id[256];
+        ram_addr_t length;
+        int len = qemu_get_byte(f);
+
+        qemu_get_buffer(f, (uint8_t *)id, len);
+        id[len] = 0;
+        length = qemu_get_be64(f);
+
+        block = qemu_ram_block_by_name(id);
+        if (block) {
+            ret = parse_ramblock(f, block, length);
+        } else {
+            error_report("Unknown ramblock \"%s\", cannot accept "
+                         "migration", id);
+            ret = -EINVAL;
+        }
+        total_ram_bytes -= length;
+    }
+
+    return ret;
+}
+
 /**
  * ram_load_precopy: load pages in precopy case
  *
@@ -3875,14 +3951,13 @@ static int ram_load_precopy(QEMUFile *f)
 {
     MigrationIncomingState *mis = migration_incoming_get_current();
     int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
-    /* ADVISE is earlier, it shows the source has the postcopy capability on */
-    bool postcopy_advised = migration_incoming_postcopy_advised();
+
     if (!migrate_compress()) {
         invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
     }
 
     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
-        ram_addr_t addr, total_ram_bytes;
+        ram_addr_t addr;
         void *host = NULL, *host_bak = NULL;
         uint8_t ch;
 
@@ -3953,65 +4028,7 @@ static int ram_load_precopy(QEMUFile *f)
 
         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
         case RAM_SAVE_FLAG_MEM_SIZE:
-            /* Synchronize RAM block list */
-            total_ram_bytes = addr;
-            while (!ret && total_ram_bytes) {
-                RAMBlock *block;
-                char id[256];
-                ram_addr_t length;
-
-                len = qemu_get_byte(f);
-                qemu_get_buffer(f, (uint8_t *)id, len);
-                id[len] = 0;
-                length = qemu_get_be64(f);
-
-                block = qemu_ram_block_by_name(id);
-                if (block && !qemu_ram_is_migratable(block)) {
-                    error_report("block %s should not be migrated !", id);
-                    ret = -EINVAL;
-                } else if (block) {
-                    if (length != block->used_length) {
-                        Error *local_err = NULL;
-
-                        ret = qemu_ram_resize(block, length,
-                                              &local_err);
-                        if (local_err) {
-                            error_report_err(local_err);
-                        }
-                    }
-                    /* For postcopy we need to check hugepage sizes match */
-                    if (postcopy_advised && migrate_postcopy_ram() &&
-                        block->page_size != qemu_host_page_size) {
-                        uint64_t remote_page_size = qemu_get_be64(f);
-                        if (remote_page_size != block->page_size) {
-                            error_report("Mismatched RAM page size %s "
-                                         "(local) %zd != %" PRId64,
-                                         id, block->page_size,
-                                         remote_page_size);
-                            ret = -EINVAL;
-                        }
-                    }
-                    if (migrate_ignore_shared()) {
-                        hwaddr addr2 = qemu_get_be64(f);
-                        if (migrate_ram_is_ignored(block) &&
-                            block->mr->addr != addr2) {
-                            error_report("Mismatched GPAs for block %s "
-                                         "%" PRId64 "!= %" PRId64,
-                                         id, (uint64_t)addr2,
-                                         (uint64_t)block->mr->addr);
-                            ret = -EINVAL;
-                        }
-                    }
-                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
-                                          block->idstr);
-                } else {
-                    error_report("Unknown ramblock \"%s\", cannot "
-                                 "accept migration", id);
-                    ret = -EINVAL;
-                }
-
-                total_ram_bytes -= length;
-            }
+            ret = parse_ramblocks(f, addr);
             break;
 
         case RAM_SAVE_FLAG_ZERO:
-- 
2.35.3



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RESEND 3/7] migration/ram: Remove RAMState from xbzrle_cache_zero_page
  2023-10-11 18:45 [PATCH RESEND 0/7] migration pending patches Fabiano Rosas
  2023-10-11 18:45 ` [PATCH RESEND 1/7] tests/qtest: migration-test: Add tests for file-based migration Fabiano Rosas
  2023-10-11 18:45 ` [PATCH RESEND 2/7] migration/ram: Refactor precopy ram loading code Fabiano Rosas
@ 2023-10-11 18:46 ` Fabiano Rosas
  2023-10-16  7:39   ` Juan Quintela
  2023-10-11 18:46 ` [PATCH RESEND 4/7] migration/ram: Stop passing QEMUFile around in save_zero_page Fabiano Rosas
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 13+ messages in thread
From: Fabiano Rosas @ 2023-10-11 18:46 UTC (permalink / raw)
  To: qemu-devel; +Cc: Juan Quintela, Peter Xu, Leonardo Bras

'rs' is not used in that function. It's a leftover from commit
9360447d34 ("ram: Use MigrationStats for statistics").

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
 migration/ram.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 1cc14e543e..cd765212af 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -569,7 +569,6 @@ void mig_throttle_counter_reset(void)
 /**
  * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
  *
- * @rs: current RAM state
  * @current_addr: address for the zero page
  *
  * Update the xbzrle cache to reflect a page that's been sent as all 0.
@@ -578,7 +577,7 @@ void mig_throttle_counter_reset(void)
  * As a bonus, if the page wasn't in the cache it gets added so that
  * when a small write is made into the 0'd page it gets XBZRLE sent.
  */
-static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
+static void xbzrle_cache_zero_page(ram_addr_t current_addr)
 {
     /* We don't care if this fails to allocate a new cache page
      * as long as it updated an old one */
@@ -2144,7 +2143,7 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
          */
         if (rs->xbzrle_started) {
             XBZRLE_cache_lock();
-            xbzrle_cache_zero_page(rs, block->offset + offset);
+            xbzrle_cache_zero_page(block->offset + offset);
             XBZRLE_cache_unlock();
         }
         return res;
-- 
2.35.3



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RESEND 4/7] migration/ram: Stop passing QEMUFile around in save_zero_page
  2023-10-11 18:45 [PATCH RESEND 0/7] migration pending patches Fabiano Rosas
                   ` (2 preceding siblings ...)
  2023-10-11 18:46 ` [PATCH RESEND 3/7] migration/ram: Remove RAMState from xbzrle_cache_zero_page Fabiano Rosas
@ 2023-10-11 18:46 ` Fabiano Rosas
  2023-10-16  7:40   ` Juan Quintela
  2023-10-11 18:46 ` [PATCH RESEND 5/7] migration/ram: Move xbzrle zero page handling into save_zero_page Fabiano Rosas
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 13+ messages in thread
From: Fabiano Rosas @ 2023-10-11 18:46 UTC (permalink / raw)
  To: qemu-devel; +Cc: Juan Quintela, Peter Xu, Leonardo Bras

We don't need the QEMUFile when we're already passing the
PageSearchStatus.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
 migration/ram.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index cd765212af..c8474f6fd8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1146,10 +1146,11 @@ void ram_release_page(const char *rbname, uint64_t offset)
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  */
-static int save_zero_page_to_file(PageSearchStatus *pss, QEMUFile *file,
-                                  RAMBlock *block, ram_addr_t offset)
+static int save_zero_page_to_file(PageSearchStatus *pss, RAMBlock *block,
+                                  ram_addr_t offset)
 {
     uint8_t *p = block->host + offset;
+    QEMUFile *file = pss->pss_channel;
     int len = 0;
 
     if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
@@ -1170,10 +1171,10 @@ static int save_zero_page_to_file(PageSearchStatus *pss, QEMUFile *file,
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  */
-static int save_zero_page(PageSearchStatus *pss, QEMUFile *f, RAMBlock *block,
+static int save_zero_page(PageSearchStatus *pss, RAMBlock *block,
                           ram_addr_t offset)
 {
-    int len = save_zero_page_to_file(pss, f, block, offset);
+    int len = save_zero_page_to_file(pss, block, offset);
 
     if (len) {
         stat64_add(&mig_stats.zero_pages, 1);
@@ -2136,7 +2137,7 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
         return 1;
     }
 
-    res = save_zero_page(pss, pss->pss_channel, block, offset);
+    res = save_zero_page(pss, block, offset);
     if (res > 0) {
         /* Must let xbzrle know, otherwise a previous (now 0'd) cached
          * page would be stale
-- 
2.35.3



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RESEND 5/7] migration/ram: Move xbzrle zero page handling into save_zero_page
  2023-10-11 18:45 [PATCH RESEND 0/7] migration pending patches Fabiano Rosas
                   ` (3 preceding siblings ...)
  2023-10-11 18:46 ` [PATCH RESEND 4/7] migration/ram: Stop passing QEMUFile around in save_zero_page Fabiano Rosas
@ 2023-10-11 18:46 ` Fabiano Rosas
  2023-10-11 18:46 ` [PATCH RESEND 6/7] migration/ram: Merge save_zero_page functions Fabiano Rosas
  2023-10-11 18:46 ` [PATCH RESEND 7/7] tests/qtest: Re-enable multifd cancel test Fabiano Rosas
  6 siblings, 0 replies; 13+ messages in thread
From: Fabiano Rosas @ 2023-10-11 18:46 UTC (permalink / raw)
  To: qemu-devel; +Cc: Juan Quintela, Peter Xu, Leonardo Bras

It makes a bit more sense to have the zero page handling of xbzrle
right where we save the zero page.

Also invert the exit condition to remove one level of indentation,
which makes the next patch easier to grasp.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
 migration/ram.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index c8474f6fd8..1177563523 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1167,21 +1167,34 @@ static int save_zero_page_to_file(PageSearchStatus *pss, RAMBlock *block,
  *
  * Returns the number of pages written.
  *
+ * @rs: current RAM state
  * @pss: current PSS channel
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  */
-static int save_zero_page(PageSearchStatus *pss, RAMBlock *block,
+static int save_zero_page(RAMState *rs, PageSearchStatus *pss, RAMBlock *block,
                           ram_addr_t offset)
 {
     int len = save_zero_page_to_file(pss, block, offset);
 
-    if (len) {
-        stat64_add(&mig_stats.zero_pages, 1);
-        ram_transferred_add(len);
-        return 1;
+    if (!len) {
+        return -1;
     }
-    return -1;
+
+    stat64_add(&mig_stats.zero_pages, 1);
+    ram_transferred_add(len);
+
+    /*
+     * Must let xbzrle know, otherwise a previous (now 0'd) cached
+     * page would be stale.
+     */
+    if (rs->xbzrle_started) {
+        XBZRLE_cache_lock();
+        xbzrle_cache_zero_page(block->offset + offset);
+        XBZRLE_cache_unlock();
+    }
+
+    return 1;
 }
 
 /*
@@ -2137,16 +2150,8 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
         return 1;
     }
 
-    res = save_zero_page(pss, block, offset);
+    res = save_zero_page(rs, pss, block, offset);
     if (res > 0) {
-        /* Must let xbzrle know, otherwise a previous (now 0'd) cached
-         * page would be stale
-         */
-        if (rs->xbzrle_started) {
-            XBZRLE_cache_lock();
-            xbzrle_cache_zero_page(block->offset + offset);
-            XBZRLE_cache_unlock();
-        }
         return res;
     }
 
-- 
2.35.3



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RESEND 6/7] migration/ram: Merge save_zero_page functions
  2023-10-11 18:45 [PATCH RESEND 0/7] migration pending patches Fabiano Rosas
                   ` (4 preceding siblings ...)
  2023-10-11 18:46 ` [PATCH RESEND 5/7] migration/ram: Move xbzrle zero page handling into save_zero_page Fabiano Rosas
@ 2023-10-11 18:46 ` Fabiano Rosas
  2023-10-11 18:46 ` [PATCH RESEND 7/7] tests/qtest: Re-enable multifd cancel test Fabiano Rosas
  6 siblings, 0 replies; 13+ messages in thread
From: Fabiano Rosas @ 2023-10-11 18:46 UTC (permalink / raw)
  To: qemu-devel; +Cc: Juan Quintela, Peter Xu, Leonardo Bras

We don't need to do this in two pieces. A single function makes it
easier to grasp, especially since it removes the indirection in the
return value handling.

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
 migration/ram.c | 46 +++++++++++++---------------------------------
 1 file changed, 13 insertions(+), 33 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 1177563523..24c249f944 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1136,32 +1136,6 @@ void ram_release_page(const char *rbname, uint64_t offset)
     ram_discard_range(rbname, offset, TARGET_PAGE_SIZE);
 }
 
-/**
- * save_zero_page_to_file: send the zero page to the file
- *
- * Returns the size of data written to the file, 0 means the page is not
- * a zero page
- *
- * @pss: current PSS channel
- * @block: block that contains the page we want to send
- * @offset: offset inside the block for the page
- */
-static int save_zero_page_to_file(PageSearchStatus *pss, RAMBlock *block,
-                                  ram_addr_t offset)
-{
-    uint8_t *p = block->host + offset;
-    QEMUFile *file = pss->pss_channel;
-    int len = 0;
-
-    if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
-        len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
-        qemu_put_byte(file, 0);
-        len += 1;
-        ram_release_page(block->idstr, offset);
-    }
-    return len;
-}
-
 /**
  * save_zero_page: send the zero page to the stream
  *
@@ -1175,12 +1149,19 @@ static int save_zero_page_to_file(PageSearchStatus *pss, RAMBlock *block,
 static int save_zero_page(RAMState *rs, PageSearchStatus *pss, RAMBlock *block,
                           ram_addr_t offset)
 {
-    int len = save_zero_page_to_file(pss, block, offset);
+    uint8_t *p = block->host + offset;
+    QEMUFile *file = pss->pss_channel;
+    int len = 0;
 
-    if (!len) {
-        return -1;
+    if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
+        return 0;
     }
 
+    len += save_page_header(pss, file, block, offset | RAM_SAVE_FLAG_ZERO);
+    qemu_put_byte(file, 0);
+    len += 1;
+    ram_release_page(block->idstr, offset);
+
     stat64_add(&mig_stats.zero_pages, 1);
     ram_transferred_add(len);
 
@@ -1194,7 +1175,7 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss, RAMBlock *block,
         XBZRLE_cache_unlock();
     }
 
-    return 1;
+    return len;
 }
 
 /*
@@ -2150,9 +2131,8 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
         return 1;
     }
 
-    res = save_zero_page(rs, pss, block, offset);
-    if (res > 0) {
-        return res;
+    if (save_zero_page(rs, pss, block, offset)) {
+        return 1;
     }
 
     /*
-- 
2.35.3



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH RESEND 7/7] tests/qtest: Re-enable multifd cancel test
  2023-10-11 18:45 [PATCH RESEND 0/7] migration pending patches Fabiano Rosas
                   ` (5 preceding siblings ...)
  2023-10-11 18:46 ` [PATCH RESEND 6/7] migration/ram: Merge save_zero_page functions Fabiano Rosas
@ 2023-10-11 18:46 ` Fabiano Rosas
  2023-10-12  7:42   ` Thomas Huth
  2023-10-16  7:46   ` Juan Quintela
  6 siblings, 2 replies; 13+ messages in thread
From: Fabiano Rosas @ 2023-10-11 18:46 UTC (permalink / raw)
  To: qemu-devel
  Cc: Juan Quintela, Peter Xu, Leonardo Bras, Thomas Huth,
	Laurent Vivier, Paolo Bonzini

We've found the source of flakiness in this test, so re-enable it.
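
To run just this test from a build tree, something along these lines
should work (the binary path is an example, adjust for your target):

  QTEST_QEMU_BINARY=./qemu-system-x86_64 \
      ./tests/qtest/migration-test -p /migration/multifd/tcp/plain/cancel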

Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
---
 tests/qtest/migration-test.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index 2dc6ac2f87..049463ee5e 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -3068,14 +3068,8 @@ int main(int argc, char **argv)
     }
     qtest_add_func("/migration/multifd/tcp/plain/none",
                    test_multifd_tcp_none);
-    /*
-     * This test is flaky and sometimes fails in CI and otherwise:
-     * don't run unless user opts in via environment variable.
-     */
-    if (getenv("QEMU_TEST_FLAKY_TESTS")) {
-        qtest_add_func("/migration/multifd/tcp/plain/cancel",
-                       test_multifd_tcp_cancel);
-    }
+    qtest_add_func("/migration/multifd/tcp/plain/cancel",
+                   test_multifd_tcp_cancel);
     qtest_add_func("/migration/multifd/tcp/plain/zlib",
                    test_multifd_tcp_zlib);
 #ifdef CONFIG_ZSTD
-- 
2.35.3



^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH RESEND 7/7] tests/qtest: Re-enable multifd cancel test
  2023-10-11 18:46 ` [PATCH RESEND 7/7] tests/qtest: Re-enable multifd cancel test Fabiano Rosas
@ 2023-10-12  7:42   ` Thomas Huth
  2023-10-16  7:46   ` Juan Quintela
  1 sibling, 0 replies; 13+ messages in thread
From: Thomas Huth @ 2023-10-12  7:42 UTC (permalink / raw)
  To: Fabiano Rosas, qemu-devel
  Cc: Juan Quintela, Peter Xu, Leonardo Bras, Laurent Vivier,
	Paolo Bonzini

On 11/10/2023 20.46, Fabiano Rosas wrote:
> We've found the source of flakiness in this test, so re-enable it.
> 
> Reviewed-by: Juan Quintela <quintela@redhat.com>
> Signed-off-by: Fabiano Rosas <farosas@suse.de>
> ---
>   tests/qtest/migration-test.c | 10 ++--------
>   1 file changed, 2 insertions(+), 8 deletions(-)
> 
> diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
> index 2dc6ac2f87..049463ee5e 100644
> --- a/tests/qtest/migration-test.c
> +++ b/tests/qtest/migration-test.c
> @@ -3068,14 +3068,8 @@ int main(int argc, char **argv)
>       }
>       qtest_add_func("/migration/multifd/tcp/plain/none",
>                      test_multifd_tcp_none);
> -    /*
> -     * This test is flaky and sometimes fails in CI and otherwise:
> -     * don't run unless user opts in via environment variable.
> -     */
> -    if (getenv("QEMU_TEST_FLAKY_TESTS")) {
> -        qtest_add_func("/migration/multifd/tcp/plain/cancel",
> -                       test_multifd_tcp_cancel);
> -    }
> +    qtest_add_func("/migration/multifd/tcp/plain/cancel",
> +                   test_multifd_tcp_cancel);
>       qtest_add_func("/migration/multifd/tcp/plain/zlib",
>                      test_multifd_tcp_zlib);
>   #ifdef CONFIG_ZSTD

Acked-by: Thomas Huth <thuth@redhat.com>



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH RESEND 2/7] migration/ram: Refactor precopy ram loading code
  2023-10-11 18:45 ` [PATCH RESEND 2/7] migration/ram: Refactor precopy ram loading code Fabiano Rosas
@ 2023-10-16  7:38   ` Juan Quintela
  0 siblings, 0 replies; 13+ messages in thread
From: Juan Quintela @ 2023-10-16  7:38 UTC (permalink / raw)
  To: Fabiano Rosas
  Cc: qemu-devel, Peter Xu, Nikolay Borisov,
	Philippe Mathieu-Daudé, Leonardo Bras

Fabiano Rosas <farosas@suse.de> wrote:
> From: Nikolay Borisov <nborisov@suse.com>
>
> Extract the ramblock parsing code into a routine that operates on the
> sequence of headers from the stream and another the parses the
> individual ramblock. This makes ram_load_precopy() easier to
> comprehend.
>
> Signed-off-by: Nikolay Borisov <nborisov@suse.com>
> Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> Reviewed-by: Peter Xu <peterx@redhat.com>
> Signed-off-by: Fabiano Rosas <farosas@suse.de>

Reviewed-by: Juan Quintela <quintela@redhat.com>
queued.



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH RESEND 3/7] migration/ram: Remove RAMState from xbzrle_cache_zero_page
  2023-10-11 18:46 ` [PATCH RESEND 3/7] migration/ram: Remove RAMState from xbzrle_cache_zero_page Fabiano Rosas
@ 2023-10-16  7:39   ` Juan Quintela
  0 siblings, 0 replies; 13+ messages in thread
From: Juan Quintela @ 2023-10-16  7:39 UTC (permalink / raw)
  To: Fabiano Rosas; +Cc: qemu-devel, Peter Xu, Leonardo Bras

Fabiano Rosas <farosas@suse.de> wrote:
> 'rs' is not used in that function. It's a leftover from commit
> 9360447d34 ("ram: Use MigrationStats for statistics").
>
> Reviewed-by: Peter Xu <peterx@redhat.com>
> Signed-off-by: Fabiano Rosas <farosas@suse.de>

Reviewed-by: Juan Quintela <quintela@redhat.com>

queued.



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH RESEND 4/7] migration/ram: Stop passing QEMUFile around in save_zero_page
  2023-10-11 18:46 ` [PATCH RESEND 4/7] migration/ram: Stop passing QEMUFile around in save_zero_page Fabiano Rosas
@ 2023-10-16  7:40   ` Juan Quintela
  0 siblings, 0 replies; 13+ messages in thread
From: Juan Quintela @ 2023-10-16  7:40 UTC (permalink / raw)
  To: Fabiano Rosas; +Cc: qemu-devel, Peter Xu, Leonardo Bras

Fabiano Rosas <farosas@suse.de> wrote:
> We don't need the QEMUFile when we're already passing the
> PageSearchStatus.
>
> Reviewed-by: Peter Xu <peterx@redhat.com>
> Signed-off-by: Fabiano Rosas <farosas@suse.de>

Reviewed-by: Juan Quintela <quintela@redhat.com>



^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH RESEND 7/7] tests/qtest: Re-enable multifd cancel test
  2023-10-11 18:46 ` [PATCH RESEND 7/7] tests/qtest: Re-enable multifd cancel test Fabiano Rosas
  2023-10-12  7:42   ` Thomas Huth
@ 2023-10-16  7:46   ` Juan Quintela
  1 sibling, 0 replies; 13+ messages in thread
From: Juan Quintela @ 2023-10-16  7:46 UTC (permalink / raw)
  To: Fabiano Rosas
  Cc: qemu-devel, Peter Xu, Leonardo Bras, Thomas Huth, Laurent Vivier,
	Paolo Bonzini

Fabiano Rosas <farosas@suse.de> wrote:
> We've found the source of flakiness in this test, so re-enable it.
>
> Reviewed-by: Juan Quintela <quintela@redhat.com>
> Signed-off-by: Fabiano Rosas <farosas@suse.de>

One test is still missing a cleanup of the serial file.

Will send it later and then we can re-enable it.

Later, Juan.



^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2023-10-16  7:47 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-10-11 18:45 [PATCH RESEND 0/7] migration pending patches Fabiano Rosas
2023-10-11 18:45 ` [PATCH RESEND 1/7] tests/qtest: migration-test: Add tests for file-based migration Fabiano Rosas
2023-10-11 18:45 ` [PATCH RESEND 2/7] migration/ram: Refactor precopy ram loading code Fabiano Rosas
2023-10-16  7:38   ` Juan Quintela
2023-10-11 18:46 ` [PATCH RESEND 3/7] migration/ram: Remove RAMState from xbzrle_cache_zero_page Fabiano Rosas
2023-10-16  7:39   ` Juan Quintela
2023-10-11 18:46 ` [PATCH RESEND 4/7] migration/ram: Stop passing QEMUFile around in save_zero_page Fabiano Rosas
2023-10-16  7:40   ` Juan Quintela
2023-10-11 18:46 ` [PATCH RESEND 5/7] migration/ram: Move xbzrle zero page handling into save_zero_page Fabiano Rosas
2023-10-11 18:46 ` [PATCH RESEND 6/7] migration/ram: Merge save_zero_page functions Fabiano Rosas
2023-10-11 18:46 ` [PATCH RESEND 7/7] tests/qtest: Re-enable multifd cancel test Fabiano Rosas
2023-10-12  7:42   ` Thomas Huth
2023-10-16  7:46   ` Juan Quintela
