From 04496ce926d77b6de1af2f3c4d050b8e1d411895 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:26 -0600 Subject: [PATCH 01/15] iotests: Drop execute permissions on vvfat.out MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Output files are not executables. Noticed while preparing another iotest addition. Fixes: c8f60bfb43 ("iotests: Add `vvfat` tests", v9.1.0) Signed-off-by: Eric Blake Reviewed-by: Daniel P. Berrangé Message-ID: <20251113011625.878876-16-eblake@redhat.com> --- tests/qemu-iotests/tests/vvfat.out | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 tests/qemu-iotests/tests/vvfat.out diff --git a/tests/qemu-iotests/tests/vvfat.out b/tests/qemu-iotests/tests/vvfat.out old mode 100755 new mode 100644 From 59506e59e0f0a773e892104b945d0f15623381a7 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:27 -0600 Subject: [PATCH 02/15] qio: Add trace points to net_listener MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Upcoming patches will adjust how net_listener watches for new client connections; adding trace points now makes it easier to debug that the changes work as intended. For example, adding --trace='qio_net_listener*' to the qemu-storage-daemon command line before --nbd-server will track when the server first starts listening for clients. Signed-off-by: Eric Blake Reviewed-by: Daniel P. 
Berrangé Message-ID: <20251113011625.878876-17-eblake@redhat.com> --- io/net-listener.c | 10 ++++++++++ io/trace-events | 5 +++++ 2 files changed, 15 insertions(+) diff --git a/io/net-listener.c b/io/net-listener.c index 47405965a6..007acbd5b1 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -23,6 +23,7 @@ #include "io/dns-resolver.h" #include "qapi/error.h" #include "qemu/module.h" +#include "trace.h" QIONetListener *qio_net_listener_new(void) { @@ -50,6 +51,7 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc, return TRUE; } + trace_qio_net_listener_callback(listener, listener->io_func); if (listener->io_func) { listener->io_func(listener, sioc, listener->io_data); } @@ -123,6 +125,7 @@ void qio_net_listener_add(QIONetListener *listener, object_ref(OBJECT(sioc)); listener->connected = true; + trace_qio_net_listener_watch(listener, listener->io_func, "add"); if (listener->io_func != NULL) { object_ref(OBJECT(listener)); listener->io_source[listener->nsioc] = qio_channel_add_watch_source( @@ -143,6 +146,8 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, { size_t i; + trace_qio_net_listener_unwatch(listener, listener->io_func, + "set_client_func"); if (listener->io_notify) { listener->io_notify(listener->io_data); } @@ -158,6 +163,8 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, } } + trace_qio_net_listener_watch(listener, listener->io_func, + "set_client_func"); if (listener->io_func != NULL) { for (i = 0; i < listener->nsioc; i++) { object_ref(OBJECT(listener)); @@ -218,6 +225,7 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener) }; size_t i; + trace_qio_net_listener_unwatch(listener, listener->io_func, "wait_client"); for (i = 0; i < listener->nsioc; i++) { if (listener->io_source[i]) { g_source_destroy(listener->io_source[i]); @@ -247,6 +255,7 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener) g_main_loop_unref(loop); 
g_main_context_unref(ctxt); + trace_qio_net_listener_watch(listener, listener->io_func, "wait_client"); if (listener->io_func != NULL) { for (i = 0; i < listener->nsioc; i++) { object_ref(OBJECT(listener)); @@ -268,6 +277,7 @@ void qio_net_listener_disconnect(QIONetListener *listener) return; } + trace_qio_net_listener_unwatch(listener, listener->io_func, "disconnect"); for (i = 0; i < listener->nsioc; i++) { if (listener->io_source[i]) { g_source_destroy(listener->io_source[i]); diff --git a/io/trace-events b/io/trace-events index dc3a63ba1f..10976eca5f 100644 --- a/io/trace-events +++ b/io/trace-events @@ -72,3 +72,8 @@ qio_channel_command_new_pid(void *ioc, int writefd, int readfd, int pid) "Comman qio_channel_command_new_spawn(void *ioc, const char *binary, int flags) "Command new spawn ioc=%p binary=%s flags=%d" qio_channel_command_abort(void *ioc, int pid) "Command abort ioc=%p pid=%d" qio_channel_command_wait(void *ioc, int pid, int ret, int status) "Command abort ioc=%p pid=%d ret=%d status=%d" + +# net-listener.c +qio_net_listener_watch(void *listener, void *func, const char *extra) "Net listener=%p watch enabled func=%p by %s" +qio_net_listener_unwatch(void *listener, void *func, const char *extra) "Net listener=%p watch disabled func=%p by %s" +qio_net_listener_callback(void *listener, void *func) "Net listener=%p callback forwarding to func=%p" From 6e03d5cdc991f5db86969fc6aeaca96234426263 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:28 -0600 Subject: [PATCH 03/15] qio: Unwatch before notify in QIONetListener MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When changing the callback registered with QIONetListener, the code was calling notify on the old opaque data prior to actually removing the old GSource objects still pointing to that data. Similarly, during finalize, it called notify before tearing down the various GSource objects tied to the data. 
In practice, a grep of the QEMU code base found that every existing client of QIONetListener passes in a NULL notifier (the opaque data, if non-NULL, outlives the NetListener and so does not need cleanup when the NetListener is torn down), so this patch has no impact. And even if a caller had passed in a reference-counted object with a notifier of object_unref but kept its own reference on the data, then the early notify would merely reduce a refcount from (say) 2 to 1, but not free the object. However, it is a latent bug waiting to bite any future caller that passes in data where the notifier actually frees the object, because the GSource could then trigger a use-after-free if it loses the race on a last-minute client connection resulting in the data being passed to one final use of the async callback. Better is to delay the notify call until after all GSource that have been given a copy of the opaque data are torn down. CC: qemu-stable@nongnu.org Fixes: 530473924d "io: introduce a network socket listener API", v2.12.0 Signed-off-by: Eric Blake Reviewed-by: Daniel P. 
Berrangé Message-ID: <20251113011625.878876-18-eblake@redhat.com> --- io/net-listener.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/io/net-listener.c b/io/net-listener.c index 007acbd5b1..d71b65270e 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -148,13 +148,6 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, trace_qio_net_listener_unwatch(listener, listener->io_func, "set_client_func"); - if (listener->io_notify) { - listener->io_notify(listener->io_data); - } - listener->io_func = func; - listener->io_data = data; - listener->io_notify = notify; - for (i = 0; i < listener->nsioc; i++) { if (listener->io_source[i]) { g_source_destroy(listener->io_source[i]); @@ -163,6 +156,13 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, } } + if (listener->io_notify) { + listener->io_notify(listener->io_data); + } + listener->io_func = func; + listener->io_data = data; + listener->io_notify = notify; + trace_qio_net_listener_watch(listener, listener->io_func, "set_client_func"); if (listener->io_func != NULL) { @@ -300,10 +300,10 @@ static void qio_net_listener_finalize(Object *obj) QIONetListener *listener = QIO_NET_LISTENER(obj); size_t i; + qio_net_listener_disconnect(listener); if (listener->io_notify) { listener->io_notify(listener->io_data); } - qio_net_listener_disconnect(listener); for (i = 0; i < listener->nsioc; i++) { object_unref(OBJECT(listener->sioc[i])); From b5676493a08b4ff80680aae7a1b1bfef8797c6e7 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:29 -0600 Subject: [PATCH 04/15] qio: Remember context of qio_net_listener_set_client_func_full MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit io/net-listener.c has two modes of use: asynchronous (the user calls qio_net_listener_set_client_func to wake up the callback via the global GMainContext, or qio_net_listener_set_client_func_full to wake up the callback 
via the caller's own alternative GMainContext), and synchronous (the user calls qio_net_listener_wait_client which creates its own GMainContext and waits for the first client connection before returning, with no need for a user's callback). But commit 938c8b79 has a latent logic flaw: when qio_net_listener_wait_client finishes on its temporary context, it reverts all of the siocs back to the global GMainContext rather than the potentially non-NULL context they might have been originally registered with. Similarly, if the user creates a net-listener, adds initial addresses, registers an async callback with a non-default context (which ties to all siocs for the initial addresses), then adds more addresses with qio_net_listener_add, the siocs for later addresses are blindly placed in the global context, rather than sharing the context of the earlier ones. In practice, I don't think this has caused issues. As pointed out by the original commit, all async callers prior to that commit were already okay with the NULL default context; and the typical usage pattern is to first add ALL the addresses the listener will pay attention to before ever setting the async callback. Likewise, if a file uses only qio_net_listener_set_client_func instead of qio_net_listener_set_client_func_full, then it is never using a custom context, so later assignments of async callbacks will still be to the same global context as earlier ones. Meanwhile, any callers that want to do the sync operation to grab the first client are unlikely to register an async callback; altogether bypassing the question of whether later assignments of a GSource are being tied to a different context over time. I do note that chardev/char-socket.c is the only file that calls both qio_net_listener_wait_client (sync for a single client in tcp_chr_accept_server_sync), and qio_net_listener_set_client_func_full (several places, all with chr->gcontext, but sometimes with a NULL callback function during teardown). 
But as far as I can tell, the two uses are mutually exclusive, based on the is_waitconnect parameter to qmp_chardev_open_socket_server. That said, it is more robust to remember when an async callback function is tied to a non-default context, and have both the sync wait and any late address additions honor that same context. That way, the code will be robust even if a later user performs a sync wait for a specific client in the middle of servicing a longer-lived QIONetListener that has an async callback for all other clients. CC: qemu-stable@nongnu.org Fixes: 938c8b79 ("qio: store gsources for net listeners", v2.12.0) Signed-off-by: Eric Blake Reviewed-by: Daniel P. Berrangé Message-ID: <20251113011625.878876-19-eblake@redhat.com> --- include/io/net-listener.h | 1 + io/net-listener.c | 25 ++++++++++++++++--------- io/trace-events | 6 +++--- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/include/io/net-listener.h b/include/io/net-listener.h index ab9f291ed6..42fbfab546 100644 --- a/include/io/net-listener.h +++ b/include/io/net-listener.h @@ -50,6 +50,7 @@ struct QIONetListener { QIOChannelSocket **sioc; GSource **io_source; size_t nsioc; + GMainContext *context; bool connected; diff --git a/io/net-listener.c b/io/net-listener.c index d71b65270e..0f16b78fbb 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -51,7 +51,8 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc, return TRUE; } - trace_qio_net_listener_callback(listener, listener->io_func); + trace_qio_net_listener_callback(listener, listener->io_func, + listener->context); if (listener->io_func) { listener->io_func(listener, sioc, listener->io_data); } @@ -125,13 +126,14 @@ void qio_net_listener_add(QIONetListener *listener, object_ref(OBJECT(sioc)); listener->connected = true; - trace_qio_net_listener_watch(listener, listener->io_func, "add"); + trace_qio_net_listener_watch(listener, listener->io_func, + listener->context, "add"); if (listener->io_func != NULL) { 
object_ref(OBJECT(listener)); listener->io_source[listener->nsioc] = qio_channel_add_watch_source( QIO_CHANNEL(listener->sioc[listener->nsioc]), G_IO_IN, qio_net_listener_channel_func, - listener, (GDestroyNotify)object_unref, NULL); + listener, (GDestroyNotify)object_unref, listener->context); } listener->nsioc++; @@ -147,7 +149,8 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, size_t i; trace_qio_net_listener_unwatch(listener, listener->io_func, - "set_client_func"); + listener->context, "set_client_func"); + for (i = 0; i < listener->nsioc; i++) { if (listener->io_source[i]) { g_source_destroy(listener->io_source[i]); @@ -162,9 +165,10 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, listener->io_func = func; listener->io_data = data; listener->io_notify = notify; + listener->context = context; trace_qio_net_listener_watch(listener, listener->io_func, - "set_client_func"); + listener->context, "set_client_func"); if (listener->io_func != NULL) { for (i = 0; i < listener->nsioc; i++) { object_ref(OBJECT(listener)); @@ -225,7 +229,8 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener) }; size_t i; - trace_qio_net_listener_unwatch(listener, listener->io_func, "wait_client"); + trace_qio_net_listener_unwatch(listener, listener->io_func, + listener->context, "wait_client"); for (i = 0; i < listener->nsioc; i++) { if (listener->io_source[i]) { g_source_destroy(listener->io_source[i]); @@ -255,14 +260,15 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener) g_main_loop_unref(loop); g_main_context_unref(ctxt); - trace_qio_net_listener_watch(listener, listener->io_func, "wait_client"); + trace_qio_net_listener_watch(listener, listener->io_func, + listener->context, "wait_client"); if (listener->io_func != NULL) { for (i = 0; i < listener->nsioc; i++) { object_ref(OBJECT(listener)); listener->io_source[i] = qio_channel_add_watch_source( QIO_CHANNEL(listener->sioc[i]), G_IO_IN, 
qio_net_listener_channel_func, - listener, (GDestroyNotify)object_unref, NULL); + listener, (GDestroyNotify)object_unref, listener->context); } } @@ -277,7 +283,8 @@ void qio_net_listener_disconnect(QIONetListener *listener) return; } - trace_qio_net_listener_unwatch(listener, listener->io_func, "disconnect"); + trace_qio_net_listener_unwatch(listener, listener->io_func, + listener->context, "disconnect"); for (i = 0; i < listener->nsioc; i++) { if (listener->io_source[i]) { g_source_destroy(listener->io_source[i]); diff --git a/io/trace-events b/io/trace-events index 10976eca5f..0cb77d579b 100644 --- a/io/trace-events +++ b/io/trace-events @@ -74,6 +74,6 @@ qio_channel_command_abort(void *ioc, int pid) "Command abort ioc=%p pid=%d" qio_channel_command_wait(void *ioc, int pid, int ret, int status) "Command abort ioc=%p pid=%d ret=%d status=%d" # net-listener.c -qio_net_listener_watch(void *listener, void *func, const char *extra) "Net listener=%p watch enabled func=%p by %s" -qio_net_listener_unwatch(void *listener, void *func, const char *extra) "Net listener=%p watch disabled func=%p by %s" -qio_net_listener_callback(void *listener, void *func) "Net listener=%p callback forwarding to func=%p" +qio_net_listener_watch(void *listener, void *func, void *ctx, const char *extra) "Net listener=%p watch enabled func=%p ctx=%p by %s" +qio_net_listener_unwatch(void *listener, void *func, void *ctx, const char *extra) "Net listener=%p watch disabled func=%p ctx=%p by %s" +qio_net_listener_callback(void *listener, void *func, void *ctx) "Net listener=%p callback forwarding to func=%p ctx=%p" From 9d86181874ab7b0e95ae988f6f80715943c618c6 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:30 -0600 Subject: [PATCH 05/15] qio: Protect NetListener callback with mutex MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Without a mutex, NetListener can run into this data race between a thread changing the async callback 
callback function to use when a client connects, and the thread servicing polling of the listening sockets: Thread 1: qio_net_listener_set_client_func(lstnr, f1, ...); => foreach sock: socket => object_ref(lstnr) => sock_src = qio_channel_socket_add_watch_source(sock, ...., lstnr, object_unref); Thread 2: poll() => event POLLIN on socket => ref(GSourceCallback) => if (lstnr->io_func) // while lstnr->io_func is f1 ...interrupt.. Thread 1: qio_net_listener_set_client_func(lstnr, f2, ...); => foreach sock: socket => g_source_unref(sock_src) => foreach sock: socket => object_ref(lstnr) => sock_src = qio_channel_socket_add_watch_source(sock, ...., lstnr, object_unref); Thread 2: => call lstnr->io_func(lstnr->io_data) // now sees f2 => return dispatch(sock) => unref(GSourceCallback) => destroy-notify => object_unref Found by inspection; I did not spend the time trying to add sleeps or execute under gdb to try and actually trigger the race in practice. This is a SEGFAULT waiting to happen if f2 can become NULL because thread 1 deregisters the user's callback while thread 2 is trying to service the callback. Other messes are also theoretically possible, such as running callback f1 with an opaque pointer that should only be passed to f2 (if the client code were to use more than just a binary choice between a single async function or NULL). Mitigating factor: if the code that modifies the QIONetListener can only be reached by the same thread that is executing the polling and async callbacks, then we are not in a two-thread race documented above (even though poll can see two clients trying to connect in the same window of time, any changes made to the listener by the first async callback will be completed before the thread moves on to the second client). However, QEMU is complex enough that this is hard to generically analyze. If QMP commands (like nbd-server-stop) are run in the main loop and the listener uses the main loop, things should be okay. 
But when a client uses an alternative GMainContext, or if servicing a QMP command hands off to a coroutine to avoid blocking, I am unable to state with certainty whether a given net listener can be modified by a thread different from the polling thread running callbacks. At any rate, it is worth having the API be robust. To ensure that modifying a NetListener can be safely done from any thread, add a mutex that guarantees atomicity to all members of a listener object related to callbacks. This problem has been present since QIONetListener was introduced. Note that this does NOT prevent the case of a second round of the user's old async callback being invoked with the old opaque data, even when the user has already tried to change the async callback during the first async callback; it is only about ensuring that there is no sharding (the eventual io_func(io_data) call that does get made will correspond to a particular combination that the user had requested at some point in time, and not be sharded to a combination that never existed in practice). In other words, this patch maintains the status quo that a user's async callback function already needs to be robust to parallel clients landing in the same window of poll servicing, even when only one client is desired, if that particular listener can be amended in a thread other than the one doing the polling. CC: qemu-stable@nongnu.org Fixes: 53047392 ("io: introduce a network socket listener API", v2.12.0) Signed-off-by: Eric Blake Message-ID: <20251113011625.878876-20-eblake@redhat.com> Reviewed-by: Daniel P. 
Berrangé [eblake: minor commit message wording improvements] Signed-off-by: Eric Blake --- include/io/net-listener.h | 1 + io/net-listener.c | 58 +++++++++++++++++++++++++++++---------- 2 files changed, 44 insertions(+), 15 deletions(-) diff --git a/include/io/net-listener.h b/include/io/net-listener.h index 42fbfab546..c2165dc166 100644 --- a/include/io/net-listener.h +++ b/include/io/net-listener.h @@ -54,6 +54,7 @@ struct QIONetListener { bool connected; + QemuMutex lock; /* Protects remaining fields */ QIONetListenerClientFunc io_func; gpointer io_data; GDestroyNotify io_notify; diff --git a/io/net-listener.c b/io/net-listener.c index 0f16b78fbb..f70acdfc5c 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -23,11 +23,16 @@ #include "io/dns-resolver.h" #include "qapi/error.h" #include "qemu/module.h" +#include "qemu/lockable.h" #include "trace.h" QIONetListener *qio_net_listener_new(void) { - return QIO_NET_LISTENER(object_new(TYPE_QIO_NET_LISTENER)); + QIONetListener *listener; + + listener = QIO_NET_LISTENER(object_new(TYPE_QIO_NET_LISTENER)); + qemu_mutex_init(&listener->lock); + return listener; } void qio_net_listener_set_name(QIONetListener *listener, @@ -44,6 +49,9 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc, { QIONetListener *listener = QIO_NET_LISTENER(opaque); QIOChannelSocket *sioc; + QIONetListenerClientFunc io_func; + gpointer io_data; + GMainContext *context; sioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc), NULL); @@ -51,10 +59,15 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc, return TRUE; } - trace_qio_net_listener_callback(listener, listener->io_func, - listener->context); - if (listener->io_func) { - listener->io_func(listener, sioc, listener->io_data); + WITH_QEMU_LOCK_GUARD(&listener->lock) { + io_func = listener->io_func; + io_data = listener->io_data; + context = listener->context; + } + + trace_qio_net_listener_callback(listener, io_func, context); + if (io_func) { + io_func(listener, 
sioc, io_data); } object_unref(OBJECT(sioc)); @@ -111,6 +124,9 @@ int qio_net_listener_open_sync(QIONetListener *listener, void qio_net_listener_add(QIONetListener *listener, QIOChannelSocket *sioc) { + QIONetListenerClientFunc io_func; + GMainContext *context; + if (listener->name) { qio_channel_set_name(QIO_CHANNEL(sioc), listener->name); } @@ -126,14 +142,18 @@ void qio_net_listener_add(QIONetListener *listener, object_ref(OBJECT(sioc)); listener->connected = true; - trace_qio_net_listener_watch(listener, listener->io_func, - listener->context, "add"); - if (listener->io_func != NULL) { + WITH_QEMU_LOCK_GUARD(&listener->lock) { + io_func = listener->io_func; + context = listener->context; + } + + trace_qio_net_listener_watch(listener, io_func, context, "add"); + if (io_func) { object_ref(OBJECT(listener)); listener->io_source[listener->nsioc] = qio_channel_add_watch_source( QIO_CHANNEL(listener->sioc[listener->nsioc]), G_IO_IN, qio_net_listener_channel_func, - listener, (GDestroyNotify)object_unref, listener->context); + listener, (GDestroyNotify)object_unref, context); } listener->nsioc++; @@ -148,6 +168,7 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, { size_t i; + QEMU_LOCK_GUARD(&listener->lock); trace_qio_net_listener_unwatch(listener, listener->io_func, listener->context, "set_client_func"); @@ -228,9 +249,15 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener) .loop = loop }; size_t i; + QIONetListenerClientFunc io_func; + GMainContext *context; - trace_qio_net_listener_unwatch(listener, listener->io_func, - listener->context, "wait_client"); + WITH_QEMU_LOCK_GUARD(&listener->lock) { + io_func = listener->io_func; + context = listener->context; + } + + trace_qio_net_listener_unwatch(listener, io_func, context, "wait_client"); for (i = 0; i < listener->nsioc; i++) { if (listener->io_source[i]) { g_source_destroy(listener->io_source[i]); @@ -260,15 +287,14 @@ QIOChannelSocket 
*qio_net_listener_wait_client(QIONetListener *listener) g_main_loop_unref(loop); g_main_context_unref(ctxt); - trace_qio_net_listener_watch(listener, listener->io_func, - listener->context, "wait_client"); - if (listener->io_func != NULL) { + trace_qio_net_listener_watch(listener, io_func, context, "wait_client"); + if (io_func != NULL) { for (i = 0; i < listener->nsioc; i++) { object_ref(OBJECT(listener)); listener->io_source[i] = qio_channel_add_watch_source( QIO_CHANNEL(listener->sioc[i]), G_IO_IN, qio_net_listener_channel_func, - listener, (GDestroyNotify)object_unref, listener->context); + listener, (GDestroyNotify)object_unref, context); } } @@ -283,6 +309,7 @@ void qio_net_listener_disconnect(QIONetListener *listener) return; } + QEMU_LOCK_GUARD(&listener->lock); trace_qio_net_listener_unwatch(listener, listener->io_func, listener->context, "disconnect"); for (i = 0; i < listener->nsioc; i++) { @@ -318,6 +345,7 @@ static void qio_net_listener_finalize(Object *obj) g_free(listener->io_source); g_free(listener->sioc); g_free(listener->name); + qemu_mutex_destroy(&listener->lock); } static const TypeInfo qio_net_listener_info = { From 36769aeedf6dbd56eda16f0c6e3032d937896fdc Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:31 -0600 Subject: [PATCH 06/15] qio: Minor optimization when callback function is unchanged MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In qemu-nbd and other NBD server setups where parallel clients are supported, it is common that the caller will re-register the same callback function as long as it has not reached its limit on simultaneous clients. In that case, there is no need to tear down and reinstall GSource watches in the GMainContext. 
In practice, all existing callers currently pass NULL for notify, and no caller ever changes context across calls (for async uses, either the caller consistently uses qio_net_listener_set_client_func_full with the same context, or the caller consistently uses only qio_net_listener_set_client_func which always uses the global context); but the time spent checking these two fields in addition to the more important func and data is still less than the savings of not churning through extra GSource manipulations when the result will be unchanged. Signed-off-by: Eric Blake Reviewed-by: Daniel P. Berrangé Message-ID: <20251113011625.878876-21-eblake@redhat.com> --- io/net-listener.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/io/net-listener.c b/io/net-listener.c index f70acdfc5c..93100a2d25 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -169,6 +169,11 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, size_t i; QEMU_LOCK_GUARD(&listener->lock); + if (listener->io_func == func && listener->io_data == data && + listener->io_notify == notify && listener->context == context) { + return; + } + trace_qio_net_listener_unwatch(listener, listener->io_func, listener->context, "set_client_func"); From e685dd26c7afbf2461dfb4d51a699b9a10824114 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:32 -0600 Subject: [PATCH 07/15] qio: Factor out helpers qio_net_listener_[un]watch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The code had three similar repetitions of an iteration over one or all of nsiocs to set up a GSource, and likewise for teardown. Since an upcoming patch wants to tweak whether GSource or AioContext is used, it's better to consolidate that into one helper function for fewer places to edit later. Signed-off-by: Eric Blake Message-ID: <20251113011625.878876-22-eblake@redhat.com> Reviewed-by: Daniel P. 
Berrangé --- io/net-listener.c | 122 ++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 70 deletions(-) diff --git a/io/net-listener.c b/io/net-listener.c index 93100a2d25..9a94b15327 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -120,13 +120,54 @@ int qio_net_listener_open_sync(QIONetListener *listener, } } +/* + * i == 0 to set watch on entire array, non-zero to only set watch on + * recent additions when earlier entries are already watched. + * + * listener->lock must be held by caller. + */ +static void +qio_net_listener_watch(QIONetListener *listener, size_t i, const char *caller) +{ + if (!listener->io_func) { + return; + } + + trace_qio_net_listener_watch(listener, listener->io_func, + listener->context, caller); + for ( ; i < listener->nsioc; i++) { + object_ref(OBJECT(listener)); + listener->io_source[i] = qio_channel_add_watch_source( + QIO_CHANNEL(listener->sioc[i]), G_IO_IN, + qio_net_listener_channel_func, + listener, (GDestroyNotify)object_unref, listener->context); + } +} + +/* listener->lock must be held by caller. 
*/ +static void +qio_net_listener_unwatch(QIONetListener *listener, const char *caller) +{ + size_t i; + + if (!listener->io_func) { + return; + } + + trace_qio_net_listener_unwatch(listener, listener->io_func, + listener->context, caller); + for (i = 0; i < listener->nsioc; i++) { + if (listener->io_source[i]) { + g_source_destroy(listener->io_source[i]); + g_source_unref(listener->io_source[i]); + listener->io_source[i] = NULL; + } + } +} void qio_net_listener_add(QIONetListener *listener, QIOChannelSocket *sioc) { - QIONetListenerClientFunc io_func; - GMainContext *context; - if (listener->name) { qio_channel_set_name(QIO_CHANNEL(sioc), listener->name); } @@ -142,21 +183,9 @@ void qio_net_listener_add(QIONetListener *listener, object_ref(OBJECT(sioc)); listener->connected = true; - WITH_QEMU_LOCK_GUARD(&listener->lock) { - io_func = listener->io_func; - context = listener->context; - } - - trace_qio_net_listener_watch(listener, io_func, context, "add"); - if (io_func) { - object_ref(OBJECT(listener)); - listener->io_source[listener->nsioc] = qio_channel_add_watch_source( - QIO_CHANNEL(listener->sioc[listener->nsioc]), G_IO_IN, - qio_net_listener_channel_func, - listener, (GDestroyNotify)object_unref, context); - } - + QEMU_LOCK_GUARD(&listener->lock); listener->nsioc++; + qio_net_listener_watch(listener, listener->nsioc - 1, "add"); } @@ -166,25 +195,13 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, GDestroyNotify notify, GMainContext *context) { - size_t i; - QEMU_LOCK_GUARD(&listener->lock); if (listener->io_func == func && listener->io_data == data && listener->io_notify == notify && listener->context == context) { return; } - trace_qio_net_listener_unwatch(listener, listener->io_func, - listener->context, "set_client_func"); - - for (i = 0; i < listener->nsioc; i++) { - if (listener->io_source[i]) { - g_source_destroy(listener->io_source[i]); - g_source_unref(listener->io_source[i]); - listener->io_source[i] = NULL; - } - } - + 
qio_net_listener_unwatch(listener, "set_client_func"); if (listener->io_notify) { listener->io_notify(listener->io_data); } @@ -193,17 +210,7 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, listener->io_notify = notify; listener->context = context; - trace_qio_net_listener_watch(listener, listener->io_func, - listener->context, "set_client_func"); - if (listener->io_func != NULL) { - for (i = 0; i < listener->nsioc; i++) { - object_ref(OBJECT(listener)); - listener->io_source[i] = qio_channel_add_watch_source( - QIO_CHANNEL(listener->sioc[i]), G_IO_IN, - qio_net_listener_channel_func, - listener, (GDestroyNotify)object_unref, context); - } - } + qio_net_listener_watch(listener, 0, "set_client_func"); } void qio_net_listener_set_client_func(QIONetListener *listener, @@ -254,21 +261,9 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener) .loop = loop }; size_t i; - QIONetListenerClientFunc io_func; - GMainContext *context; WITH_QEMU_LOCK_GUARD(&listener->lock) { - io_func = listener->io_func; - context = listener->context; - } - - trace_qio_net_listener_unwatch(listener, io_func, context, "wait_client"); - for (i = 0; i < listener->nsioc; i++) { - if (listener->io_source[i]) { - g_source_destroy(listener->io_source[i]); - g_source_unref(listener->io_source[i]); - listener->io_source[i] = NULL; - } + qio_net_listener_unwatch(listener, "wait_client"); } sources = g_new0(GSource *, listener->nsioc); @@ -292,15 +287,8 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener) g_main_loop_unref(loop); g_main_context_unref(ctxt); - trace_qio_net_listener_watch(listener, io_func, context, "wait_client"); - if (io_func != NULL) { - for (i = 0; i < listener->nsioc; i++) { - object_ref(OBJECT(listener)); - listener->io_source[i] = qio_channel_add_watch_source( - QIO_CHANNEL(listener->sioc[i]), G_IO_IN, - qio_net_listener_channel_func, - listener, (GDestroyNotify)object_unref, context); - } + 
WITH_QEMU_LOCK_GUARD(&listener->lock) { + qio_net_listener_watch(listener, 0, "wait_client"); } return data.sioc; @@ -315,14 +303,8 @@ void qio_net_listener_disconnect(QIONetListener *listener) } QEMU_LOCK_GUARD(&listener->lock); - trace_qio_net_listener_unwatch(listener, listener->io_func, - listener->context, "disconnect"); + qio_net_listener_unwatch(listener, "disconnect"); for (i = 0; i < listener->nsioc; i++) { - if (listener->io_source[i]) { - g_source_destroy(listener->io_source[i]); - g_source_unref(listener->io_source[i]); - listener->io_source[i] = NULL; - } qio_channel_close(QIO_CHANNEL(listener->sioc[i]), NULL); } listener->connected = false; From dfeadf82c2b428d1c7f59d4b573588095630d99e Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:33 -0600 Subject: [PATCH 08/15] chardev: Reuse channel's cached local address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Directly accessing the fd member of a QIOChannelSocket is an undesirable leaky abstraction. What's more, grabbing that fd merely to force an eventual call to getsockname() can be wasteful, since the channel is often able to return its cached local name. Reported-by: Daniel P. Berrangé Signed-off-by: Eric Blake Message-ID: <20251113011625.878876-23-eblake@redhat.com> Reviewed-by: Daniel P. 
Berrangé --- chardev/char-socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 62852e3caf..ec4116ade4 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -1255,7 +1255,7 @@ static int qmp_chardev_open_socket_server(Chardev *chr, } qapi_free_SocketAddress(s->addr); - s->addr = socket_local_address(s->listener->sioc[0]->fd, errp); + s->addr = qio_channel_socket_get_local_address(s->listener->sioc[0], errp); skip_listen: update_disconnected_filename(s); From ec59a65a4db79297ca155738a2a7358204696f12 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:34 -0600 Subject: [PATCH 09/15] qio: Provide accessor around QIONetListener->sioc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An upcoming patch needs to pass more than just sioc as the opaque pointer to an AioContext; but since our AioContext code in general (and its QIO Channel wrapper code) lacks a notify callback present with GSource, we do not have the trivial option of just g_malloc'ing a small struct to hold all that data coupled with a notify of g_free. Instead, the data pointer must outlive the registered handler; in fact, having the data pointer have the same lifetime as QIONetListener is adequate. But the cleanest way to stick such a helper struct in QIONetListener will be to rearrange internal struct members. And that in turn means that all existing code that currently directly accesses listener->nsioc and listener->sioc[] should instead go through accessor functions, to be immune to the upcoming struct layout changes. So this patch adds accessor methods qio_net_listener_nsioc() and qio_net_listener_sioc(), and puts them to use. 
While at it, notice that the pattern of grabbing an sioc from the listener only to turn around and call qio_channel_socket_get_local_address is common enough to also warrant the helper of qio_net_listener_get_local_address, and fix a copy-paste error in the corresponding documentation. Signed-off-by: Eric Blake Message-ID: <20251113011625.878876-24-eblake@redhat.com> Reviewed-by: Daniel P. Berrangé --- chardev/char-socket.c | 2 +- include/io/channel-socket.h | 2 +- include/io/net-listener.h | 42 +++++++++++++++++++++++++++++++++++++ io/net-listener.c | 27 ++++++++++++++++++++++++ migration/socket.c | 4 ++-- ui/vnc.c | 34 ++++++++++++++++++------------ 6 files changed, 94 insertions(+), 17 deletions(-) diff --git a/chardev/char-socket.c b/chardev/char-socket.c index ec4116ade4..26d2f11202 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -1255,7 +1255,7 @@ static int qmp_chardev_open_socket_server(Chardev *chr, } qapi_free_SocketAddress(s->addr); - s->addr = qio_channel_socket_get_local_address(s->listener->sioc[0], errp); + s->addr = qio_net_listener_get_local_address(s->listener, 0, errp); skip_listen: update_disconnected_filename(s); diff --git a/include/io/channel-socket.h b/include/io/channel-socket.h index fcfd489c6c..a1ef3136ea 100644 --- a/include/io/channel-socket.h +++ b/include/io/channel-socket.h @@ -228,7 +228,7 @@ void qio_channel_socket_dgram_async(QIOChannelSocket *ioc, * release with a call qapi_free_SocketAddress() when no * longer required. 
* - * Returns: 0 on success, -1 on error + * Returns: the socket address struct, or NULL on error */ SocketAddress * qio_channel_socket_get_local_address(QIOChannelSocket *ioc, diff --git a/include/io/net-listener.h b/include/io/net-listener.h index c2165dc166..95bc7407d6 100644 --- a/include/io/net-listener.h +++ b/include/io/net-listener.h @@ -185,4 +185,46 @@ void qio_net_listener_disconnect(QIONetListener *listener); */ bool qio_net_listener_is_connected(QIONetListener *listener); + +/** + * qio_net_listener_nsioc: + * @listener: the network listener object + * + * Determine the number of listener channels currently owned by the + * given listener. + * + * Returns: number of channels, or 0 if not listening + */ +size_t qio_net_listener_nsioc(QIONetListener *listener); + + +/** + * qio_net_listener_sioc: + * @listener: the network listener object + * @n: index of the sioc to grab + * + * Accessor for the nth sioc owned by the listener. + * + * Returns: the requested sioc, or NULL if not in bounds + */ +QIOChannelSocket *qio_net_listener_sioc(QIONetListener *listener, size_t n); + +/** + * qio_net_listener_get_local_address: + * @listener: the network listener object + * @n: index of the sioc to grab + * @errp: pointer to a NULL-initialized error object + * + * Get the string representation of the local socket + * address. A pointer to the allocated address information + * struct will be returned, which the caller is required to + * release with a call to qapi_free_SocketAddress() when no + * longer required. 
+ * + * Returns: the socket address struct, or NULL on error + */ +SocketAddress * +qio_net_listener_get_local_address(QIONetListener *listener, size_t n, + Error **errp); + #endif /* QIO_NET_LISTENER_H */ diff --git a/io/net-listener.c b/io/net-listener.c index 9a94b15327..9ffbc141a7 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -316,6 +316,33 @@ bool qio_net_listener_is_connected(QIONetListener *listener) return listener->connected; } +size_t qio_net_listener_nsioc(QIONetListener *listener) +{ + return listener->nsioc; +} + +QIOChannelSocket *qio_net_listener_sioc(QIONetListener *listener, size_t n) +{ + if (n >= listener->nsioc) { + return NULL; + } + return listener->sioc[n]; +} + +SocketAddress * +qio_net_listener_get_local_address(QIONetListener *listener, size_t n, + Error **errp) +{ + QIOChannelSocket *sioc = qio_net_listener_sioc(listener, n); + + if (!sioc) { + error_setg(errp, "Listener index out of range"); + return NULL; + } + + return qio_channel_socket_get_local_address(sioc, errp); +} + static void qio_net_listener_finalize(Object *obj) { QIONetListener *listener = QIO_NET_LISTENER(obj); diff --git a/migration/socket.c b/migration/socket.c index 5ec65b8c03..9e379bf56f 100644 --- a/migration/socket.c +++ b/migration/socket.c @@ -170,9 +170,9 @@ void socket_start_incoming_migration(SocketAddress *saddr, NULL, NULL, g_main_context_get_thread_default()); - for (i = 0; i < listener->nsioc; i++) { + for (i = 0; i < qio_net_listener_nsioc(listener); i++) { SocketAddress *address = - qio_channel_socket_get_local_address(listener->sioc[i], errp); + qio_net_listener_get_local_address(listener, i, errp); if (!address) { return; } diff --git a/ui/vnc.c b/ui/vnc.c index 50016ff7ab..0d499b208b 100644 --- a/ui/vnc.c +++ b/ui/vnc.c @@ -235,12 +235,12 @@ static VncServerInfo *vnc_server_info_get(VncDisplay *vd) VncServerInfo *info; Error *err = NULL; - if (!vd->listener || !vd->listener->nsioc) { + if (!vd->listener || 
!qio_net_listener_nsioc(vd->listener)) { return NULL; } info = g_malloc0(sizeof(*info)); - vnc_init_basic_info_from_server_addr(vd->listener->sioc[0], + vnc_init_basic_info_from_server_addr(qio_net_listener_sioc(vd->listener, 0), qapi_VncServerInfo_base(info), &err); info->auth = g_strdup(vnc_auth_name(vd)); if (err) { @@ -377,7 +377,7 @@ VncInfo *qmp_query_vnc(Error **errp) VncDisplay *vd = vnc_display_find(NULL); SocketAddress *addr = NULL; - if (vd == NULL || !vd->listener || !vd->listener->nsioc) { + if (vd == NULL || !vd->listener || !qio_net_listener_nsioc(vd->listener)) { info->enabled = false; } else { info->enabled = true; @@ -386,8 +386,7 @@ VncInfo *qmp_query_vnc(Error **errp) info->has_clients = true; info->clients = qmp_query_client_list(vd); - addr = qio_channel_socket_get_local_address(vd->listener->sioc[0], - errp); + addr = qio_net_listener_get_local_address(vd->listener, 0, errp); if (!addr) { goto out_error; } @@ -549,6 +548,8 @@ VncInfo2List *qmp_query_vnc_servers(Error **errp) size_t i; QTAILQ_FOREACH(vd, &vnc_displays, next) { + size_t nsioc = 0; + info = g_new0(VncInfo2, 1); info->id = g_strdup(vd->id); info->clients = qmp_query_client_list(vd); @@ -559,14 +560,21 @@ VncInfo2List *qmp_query_vnc_servers(Error **errp) "device", &error_abort)); info->display = g_strdup(dev->id); } - for (i = 0; vd->listener != NULL && i < vd->listener->nsioc; i++) { - info->server = qmp_query_server_entry( - vd->listener->sioc[i], false, vd->auth, vd->subauth, - info->server); + if (vd->listener != NULL) { + nsioc = qio_net_listener_nsioc(vd->listener); } - for (i = 0; vd->wslistener != NULL && i < vd->wslistener->nsioc; i++) { + for (i = 0; i < nsioc; i++) { info->server = qmp_query_server_entry( - vd->wslistener->sioc[i], true, vd->ws_auth, + qio_net_listener_sioc(vd->listener, i), false, vd->auth, + vd->subauth, info->server); + } + nsioc = 0; + if (vd->wslistener) { + nsioc = qio_net_listener_nsioc(vd->wslistener); + } + for (i = 0; i < nsioc; i++) { + 
info->server = qmp_query_server_entry( + qio_net_listener_sioc(vd->wslistener, i), true, vd->ws_auth, vd->ws_subauth, info->server); } @@ -3550,11 +3558,11 @@ static void vnc_display_print_local_addr(VncDisplay *vd) { SocketAddress *addr; - if (!vd->listener || !vd->listener->nsioc) { + if (!vd->listener || !qio_net_listener_nsioc(vd->listener)) { return; } - addr = qio_channel_socket_get_local_address(vd->listener->sioc[0], NULL); + addr = qio_net_listener_get_local_address(vd->listener, 0, NULL); if (!addr) { return; } From cc0faf8273f090a44429be0365cd476efc2d63a2 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:35 -0600 Subject: [PATCH 10/15] qio: Prepare NetListener to use AioContext MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For ease of review, this patch adds an AioContext pointer to the QIONetListener struct, the code to trace it, and refactors listener->io_source to instead be an array of utility structs; but the aio_context pointer is always NULL until the next patch adds an API to set it. There should be no semantic change in this patch. Signed-off-by: Eric Blake Reviewed-by: Daniel P. 
Berrangé Message-ID: <20251113011625.878876-25-eblake@redhat.com> --- include/io/net-listener.h | 6 ++- io/net-listener.c | 105 ++++++++++++++++++++++++++------------ io/trace-events | 6 +-- 3 files changed, 78 insertions(+), 39 deletions(-) diff --git a/include/io/net-listener.h b/include/io/net-listener.h index 95bc7407d6..93608f7fe8 100644 --- a/include/io/net-listener.h +++ b/include/io/net-listener.h @@ -28,6 +28,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(QIONetListener, QIO_NET_LISTENER) +typedef struct QIONetListenerSource QIONetListenerSource; typedef void (*QIONetListenerClientFunc)(QIONetListener *listener, QIOChannelSocket *sioc, @@ -47,10 +48,11 @@ struct QIONetListener { Object parent; char *name; - QIOChannelSocket **sioc; - GSource **io_source; + QIONetListenerSource **source; size_t nsioc; + /* At most one of context or aio_context will be set */ GMainContext *context; + AioContext *aio_context; bool connected; diff --git a/io/net-listener.c b/io/net-listener.c index 9ffbc141a7..49399ec926 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -24,8 +24,15 @@ #include "qapi/error.h" #include "qemu/module.h" #include "qemu/lockable.h" +#include "qemu/main-loop.h" #include "trace.h" +struct QIONetListenerSource { + QIOChannelSocket *sioc; + GSource *io_source; + QIONetListener *listener; +}; + QIONetListener *qio_net_listener_new(void) { QIONetListener *listener; @@ -52,6 +59,7 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc, QIONetListenerClientFunc io_func; gpointer io_data; GMainContext *context; + AioContext *aio_context; sioc = qio_channel_socket_accept(QIO_CHANNEL_SOCKET(ioc), NULL); @@ -63,9 +71,10 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc, io_func = listener->io_func; io_data = listener->io_data; context = listener->context; + aio_context = listener->aio_context; } - trace_qio_net_listener_callback(listener, io_func, context); + trace_qio_net_listener_callback(listener, io_func, context, aio_context); if 
(io_func) { io_func(listener, sioc, io_data); } @@ -134,13 +143,23 @@ qio_net_listener_watch(QIONetListener *listener, size_t i, const char *caller) } trace_qio_net_listener_watch(listener, listener->io_func, - listener->context, caller); + listener->context, listener->aio_context, + caller); for ( ; i < listener->nsioc; i++) { - object_ref(OBJECT(listener)); - listener->io_source[i] = qio_channel_add_watch_source( - QIO_CHANNEL(listener->sioc[i]), G_IO_IN, - qio_net_listener_channel_func, - listener, (GDestroyNotify)object_unref, listener->context); + if (!listener->aio_context) { + /* + * The user passed a GMainContext with the async callback; + * they plan on running the default or their own g_main_loop. + */ + object_ref(OBJECT(listener)); + listener->source[i]->io_source = qio_channel_add_watch_source( + QIO_CHANNEL(listener->source[i]->sioc), G_IO_IN, + qio_net_listener_channel_func, + listener, (GDestroyNotify)object_unref, listener->context); + } else { + /* The user passed an AioContext. Not supported yet. 
*/ + g_assert_not_reached(); + } } } @@ -155,12 +174,17 @@ qio_net_listener_unwatch(QIONetListener *listener, const char *caller) } trace_qio_net_listener_unwatch(listener, listener->io_func, - listener->context, caller); + listener->context, listener->aio_context, + caller); for (i = 0; i < listener->nsioc; i++) { - if (listener->io_source[i]) { - g_source_destroy(listener->io_source[i]); - g_source_unref(listener->io_source[i]); - listener->io_source[i] = NULL; + if (!listener->aio_context) { + if (listener->source[i]->io_source) { + g_source_destroy(listener->source[i]->io_source); + g_source_unref(listener->source[i]->io_source); + listener->source[i]->io_source = NULL; + } + } else { + g_assert_not_reached(); } } } @@ -172,13 +196,12 @@ void qio_net_listener_add(QIONetListener *listener, qio_channel_set_name(QIO_CHANNEL(sioc), listener->name); } - listener->sioc = g_renew(QIOChannelSocket *, listener->sioc, - listener->nsioc + 1); - listener->io_source = g_renew(typeof(listener->io_source[0]), - listener->io_source, - listener->nsioc + 1); - listener->sioc[listener->nsioc] = sioc; - listener->io_source[listener->nsioc] = NULL; + listener->source = g_renew(typeof(listener->source[0]), + listener->source, + listener->nsioc + 1); + listener->source[listener->nsioc] = g_new0(QIONetListenerSource, 1); + listener->source[listener->nsioc]->sioc = sioc; + listener->source[listener->nsioc]->listener = listener; object_ref(OBJECT(sioc)); listener->connected = true; @@ -189,15 +212,18 @@ void qio_net_listener_add(QIONetListener *listener, } -void qio_net_listener_set_client_func_full(QIONetListener *listener, - QIONetListenerClientFunc func, - gpointer data, - GDestroyNotify notify, - GMainContext *context) +static void +qio_net_listener_set_client_func_internal(QIONetListener *listener, + QIONetListenerClientFunc func, + gpointer data, + GDestroyNotify notify, + GMainContext *context, + AioContext *aio_context) { QEMU_LOCK_GUARD(&listener->lock); if (listener->io_func 
== func && listener->io_data == data && - listener->io_notify == notify && listener->context == context) { + listener->io_notify == notify && listener->context == context && + listener->aio_context == aio_context) { return; } @@ -209,17 +235,28 @@ void qio_net_listener_set_client_func_full(QIONetListener *listener, listener->io_data = data; listener->io_notify = notify; listener->context = context; + listener->aio_context = aio_context; qio_net_listener_watch(listener, 0, "set_client_func"); } +void qio_net_listener_set_client_func_full(QIONetListener *listener, + QIONetListenerClientFunc func, + gpointer data, + GDestroyNotify notify, + GMainContext *context) +{ + qio_net_listener_set_client_func_internal(listener, func, data, + notify, context, NULL); +} + void qio_net_listener_set_client_func(QIONetListener *listener, QIONetListenerClientFunc func, gpointer data, GDestroyNotify notify) { - qio_net_listener_set_client_func_full(listener, func, data, - notify, NULL); + qio_net_listener_set_client_func_internal(listener, func, data, + notify, NULL, NULL); } struct QIONetListenerClientWaitData { @@ -268,8 +305,8 @@ QIOChannelSocket *qio_net_listener_wait_client(QIONetListener *listener) sources = g_new0(GSource *, listener->nsioc); for (i = 0; i < listener->nsioc; i++) { - sources[i] = qio_channel_create_watch(QIO_CHANNEL(listener->sioc[i]), - G_IO_IN); + sources[i] = qio_channel_create_watch( + QIO_CHANNEL(listener->source[i]->sioc), G_IO_IN); g_source_set_callback(sources[i], (GSourceFunc)qio_net_listener_wait_client_func, @@ -305,7 +342,7 @@ void qio_net_listener_disconnect(QIONetListener *listener) QEMU_LOCK_GUARD(&listener->lock); qio_net_listener_unwatch(listener, "disconnect"); for (i = 0; i < listener->nsioc; i++) { - qio_channel_close(QIO_CHANNEL(listener->sioc[i]), NULL); + qio_channel_close(QIO_CHANNEL(listener->source[i]->sioc), NULL); } listener->connected = false; } @@ -326,7 +363,7 @@ QIOChannelSocket *qio_net_listener_sioc(QIONetListener *listener, 
size_t n) if (n >= listener->nsioc) { return NULL; } - return listener->sioc[n]; + return listener->source[n]->sioc; } SocketAddress * @@ -354,10 +391,10 @@ static void qio_net_listener_finalize(Object *obj) } for (i = 0; i < listener->nsioc; i++) { - object_unref(OBJECT(listener->sioc[i])); + object_unref(OBJECT(listener->source[i]->sioc)); + g_free(listener->source[i]); } - g_free(listener->io_source); - g_free(listener->sioc); + g_free(listener->source); g_free(listener->name); qemu_mutex_destroy(&listener->lock); } diff --git a/io/trace-events b/io/trace-events index 0cb77d579b..ec91453335 100644 --- a/io/trace-events +++ b/io/trace-events @@ -74,6 +74,6 @@ qio_channel_command_abort(void *ioc, int pid) "Command abort ioc=%p pid=%d" qio_channel_command_wait(void *ioc, int pid, int ret, int status) "Command abort ioc=%p pid=%d ret=%d status=%d" # net-listener.c -qio_net_listener_watch(void *listener, void *func, void *ctx, const char *extra) "Net listener=%p watch enabled func=%p ctx=%p by %s" -qio_net_listener_unwatch(void *listener, void *func, void *ctx, const char *extra) "Net listener=%p watch disabled func=%p ctx=%p by %s" -qio_net_listener_callback(void *listener, void *func, void *ctx) "Net listener=%p callback forwarding to func=%p ctx=%p" +qio_net_listener_watch(void *listener, void *func, void *gctx, void *actx, const char *extra) "Net listener=%p watch enabled func=%p ctx=%p/%p by %s" +qio_net_listener_unwatch(void *listener, void *func, void *gctx, void *actx, const char *extra) "Net listener=%p watch disabled func=%p ctx=%p/%p by %s" +qio_net_listener_callback(void *listener, void *func, void *gctx, void *actx) "Net listener=%p callback forwarding to func=%p ctx=%p/%p" From de252f7993bb0a258537c7d3359bb524c6122ae3 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:36 -0600 Subject: [PATCH 11/15] qio: Add QIONetListener API for using AioContext MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit The user calling himself "John Doe" reported a deadlock when attempting to use qemu-storage-daemon to serve both a base file over NBD, and a qcow2 file with that NBD export as its backing file, from the same process, even though it worked just fine when there were two q-s-d processes. The bulk of the NBD server code properly uses coroutines to make progress in an event-driven manner, but the code for spawning a new coroutine at the point when listen(2) detects a new client was hard-coded to use the global GMainContext; in other words, the callback that triggers nbd_client_new to let the server start the negotiation sequence with the client requires the main loop to be making progress. However, the code for bdrv_open of a qcow2 image with an NBD backing file uses an AIO_WAIT_WHILE nested event loop to ensure that the entire qcow2 backing chain is either fully loaded or rejected, without any side effects from the main loop causing unwanted changes to the disk being loaded (in short, an AioContext represents the set of actions that are known to be safe while handling block layer I/O, while excluding any other pending actions in the global main loop with potentially larger risk of unwanted side effects). This creates a classic case of deadlock: the server can't progress to the point of accept(2)ing the client to write to the NBD socket because the main loop is being starved until the AIO_WAIT_WHILE completes the bdrv_open, but the AIO_WAIT_WHILE can't progress because it is blocked on the client coroutine stuck in a read() of the expected magic number from the server side of the socket. This patch adds a new API to allow clients to opt in to listening via an AioContext rather than a GMainContext. This will allow NBD to fix the deadlock by performing all actions during bdrv_open in the main loop AioContext. 
Technical debt warning: I would have loved to utilize a notify function with AioContext to guarantee that we don't finalize listener due to an object_unref if there is any callback still running (the way GSource does), but wiring up notify functions into AioContext is a bigger task that will be deferred to a later QEMU release. But for solving the NBD deadlock, it is sufficient to note that the QMP commands for enabling and disabling the NBD server are really the only points where we want to change the listener's callback. Furthermore, those commands are serviced in the main loop, which is the same AioContext that is also listening for connections. Since a thread cannot interrupt itself, we are ensured that at the point where we are changing the watch, there are no callbacks active. This is NOT as powerful as the GSource cross-thread safety, but sufficient for the needs of today. An upcoming patch will then add a unit test (kept separate to make it easier to rearrange the series to demonstrate the deadlock without this patch). Fixes: https://gitlab.com/qemu-project/qemu/-/issues/3169 Signed-off-by: Eric Blake Message-ID: <20251113011625.878876-26-eblake@redhat.com> Reviewed-by: Daniel P. Berrangé --- include/io/net-listener.h | 21 +++++++++++++ io/net-listener.c | 66 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 84 insertions(+), 3 deletions(-) diff --git a/include/io/net-listener.h b/include/io/net-listener.h index 93608f7fe8..fdc5f1c81a 100644 --- a/include/io/net-listener.h +++ b/include/io/net-listener.h @@ -152,6 +152,27 @@ void qio_net_listener_set_client_func(QIONetListener *listener, gpointer data, GDestroyNotify notify); +/** + * qio_net_listener_set_client_aio_func: + * @listener: the network listener object + * @func: the callback function + * @data: opaque data to pass to @func + * @context: AioContext that @func will be bound to; if NULL, this will + * use qemu_get_aio_context(). 
+ * + * Similar to qio_net_listener_set_client_func_full(), except that the polling + * will be done by an AioContext rather than a GMainContext. + * + * Because AioContext does not (yet) support a clean way to deregister + * a callback from one thread while another thread might be in that + * callback, this function is only safe to call from the thread + * currently associated with @context. + */ +void qio_net_listener_set_client_aio_func(QIONetListener *listener, + QIONetListenerClientFunc func, + void *data, + AioContext *context); + /** * qio_net_listener_wait_client: * @listener: the network listener object diff --git a/io/net-listener.c b/io/net-listener.c index 49399ec926..9410d72da9 100644 --- a/io/net-listener.c +++ b/io/net-listener.c @@ -85,6 +85,17 @@ static gboolean qio_net_listener_channel_func(QIOChannel *ioc, } +static void qio_net_listener_aio_func(void *opaque) +{ + QIONetListenerSource *data = opaque; + + assert(data->io_source == NULL); + assert(data->listener->aio_context != NULL); + qio_net_listener_channel_func(QIO_CHANNEL(data->sioc), G_IO_IN, + data->listener); +} + + int qio_net_listener_open_sync(QIONetListener *listener, SocketAddress *addr, int num, @@ -157,8 +168,26 @@ qio_net_listener_watch(QIONetListener *listener, size_t i, const char *caller) qio_net_listener_channel_func, listener, (GDestroyNotify)object_unref, listener->context); } else { - /* The user passed an AioContext. Not supported yet. */ - g_assert_not_reached(); + /* + * The user passed an AioContext. At this point, + * AioContext lacks a clean way to call a notify function + * to release a final reference after any callback is + * complete. But we asserted earlier that the async + * callback is changed only from the thread associated + * with aio_context, which means no other thread is in the + * middle of running the callback when we are changing the + * refcount on listener here. 
Therefore, a single + * reference here is sufficient to ensure listener is not + * finalized during the callback. + */ + assert(listener->context == NULL); + if (i == 0) { + object_ref(OBJECT(listener)); + } + qio_channel_set_aio_fd_handler( + QIO_CHANNEL(listener->source[i]->sioc), + listener->aio_context, qio_net_listener_aio_func, + NULL, NULL, listener->source[i]); } } } @@ -184,7 +213,13 @@ qio_net_listener_unwatch(QIONetListener *listener, const char *caller) listener->source[i]->io_source = NULL; } } else { - g_assert_not_reached(); + assert(listener->context == NULL); + qio_channel_set_aio_fd_handler( + QIO_CHANNEL(listener->source[i]->sioc), + listener->aio_context, NULL, NULL, NULL, NULL); + if (i == listener->nsioc - 1) { + object_unref(OBJECT(listener)); + } } } } @@ -259,6 +294,31 @@ void qio_net_listener_set_client_func(QIONetListener *listener, notify, NULL, NULL); } +void qio_net_listener_set_client_aio_func(QIONetListener *listener, + QIONetListenerClientFunc func, + void *data, + AioContext *context) +{ + if (!context) { + assert(qemu_in_main_thread()); + context = qemu_get_aio_context(); + } else { + /* + * TODO: The API was intentionally designed to allow a caller + * to pass an alternative AioContext for future expansion; + * however, actually implementing that is not possible + * without notify callbacks wired into AioContext similar to + * how they work in GSource. So for now, this code hard-codes + * the knowledge that the only client needing AioContext is + * the NBD server, which uses the global context and does not + * suffer from cross-thread safety issues. 
+ */ + g_assert_not_reached(); + } + qio_net_listener_set_client_func_internal(listener, func, data, + NULL, NULL, context); +} + struct QIONetListenerClientWaitData { QIOChannelSocket *sioc; GMainLoop *loop; From 89179bb4d91cc8d816e762c2c92f7c2120dda663 Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:37 -0600 Subject: [PATCH 12/15] nbd: Avoid deadlock in client connecting to same-process server MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See the previous patch for a longer description of the deadlock. Now that QIONetListener supports waiting for clients in the main loop AioContext, NBD can use that to ensure that the server can make progress even when a client is intentionally starving the GMainContext from any activity not tied to an AioContext. Note that command-line arguments and QMP commands like nbd-server-start or nbd-server-stop that manipulate whether the NBD server exists are serviced in the main loop; and therefore, this patch does not fall foul of the restrictions in the previous patch about the inherent unsafe race possible if a QIONetListener can have its async callback modified by a different thread than the one servicing polls. Fixes: https://gitlab.com/qemu-project/qemu/-/issues/3169 Signed-off-by: Eric Blake Reviewed-by: Daniel P. 
Berrangé Message-ID: <20251113011625.878876-27-eblake@redhat.com> --- blockdev-nbd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockdev-nbd.c b/blockdev-nbd.c index 1e3e634b87..696474aea9 100644 --- a/blockdev-nbd.c +++ b/blockdev-nbd.c @@ -94,10 +94,10 @@ static void nbd_update_server_watch(NBDServerData *s) { if (s->listener) { if (!s->max_connections || s->connections < s->max_connections) { - qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, + qio_net_listener_set_client_aio_func(s->listener, nbd_accept, NULL, NULL); } else { - qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL); + qio_net_listener_set_client_aio_func(s->listener, NULL, NULL, NULL); } } } From 24fd6d75b3e73105a95b93e54c0a17bae8f79c0c Mon Sep 17 00:00:00 2001 From: Eric Blake Date: Wed, 12 Nov 2025 19:11:38 -0600 Subject: [PATCH 13/15] iotests: Add coverage of recent NBD qio deadlock fix MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Test that all images in a qcow2 chain using an NBD backing file can be served by the same process. Prior to the recent QIONetListener fixes, this test would demonstrate deadlock. The test borrows heavily from the original formula by "John Doe" in the gitlab bug, but uses a Unix socket rather than TCP to avoid port contention, and uses a full-blown QEMU rather than qemu-storage-daemon since both programs were impacted. The test starts out with the even simpler task of directly adding an NBD client without qcow2 chain ('client'), which also provokes the deadlock; but commenting out the 'Adding explicit NBD client' section will still show deadlock when reaching the 'Adding wrapper image...'. Fixes: https://gitlab.com/qemu-project/qemu/-/issues/3169 Signed-off-by: Eric Blake Reviewed-by: Daniel P. 
Berrangé Reviewed-by: Vladimir Sementsov-Ogievskiy Message-ID: <20251113011625.878876-28-eblake@redhat.com> --- tests/qemu-iotests/tests/nbd-in-qcow2-chain | 94 +++++++++++++++++++ .../qemu-iotests/tests/nbd-in-qcow2-chain.out | 75 +++++++++++++++ 2 files changed, 169 insertions(+) create mode 100755 tests/qemu-iotests/tests/nbd-in-qcow2-chain create mode 100644 tests/qemu-iotests/tests/nbd-in-qcow2-chain.out diff --git a/tests/qemu-iotests/tests/nbd-in-qcow2-chain b/tests/qemu-iotests/tests/nbd-in-qcow2-chain new file mode 100755 index 0000000000..455ddfa86f --- /dev/null +++ b/tests/qemu-iotests/tests/nbd-in-qcow2-chain @@ -0,0 +1,94 @@ +#!/usr/bin/env bash +# group: rw quick +# +# Test of opening both server and client NBD in a qcow2 backing chain +# +# Copyright (C) Red Hat, Inc. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +# creator +owner=eblake@redhat.com + +seq=`basename $0` +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_qemu + _cleanup_test_img + rm -f "$SOCK_DIR/nbd" +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +cd .. +. ./common.rc +. ./common.filter +. ./common.qemu +. 
./common.nbd + +_supported_fmt qcow2 # Hardcoded to qcow2 command line and QMP below +_supported_proto file + +size=100M + +echo +echo "=== Preparing base image ===" + +TEST_IMG="$TEST_IMG.base" _make_test_img $size + +echo +echo "=== Starting QEMU and exposing base image ===" + +_launch_qemu -machine q35 +h1=$QEMU_HANDLE +_send_qemu_cmd $QEMU_HANDLE '{"execute": "qmp_capabilities"}' 'return' +_send_qemu_cmd $QEMU_HANDLE '{"execute": "blockdev-add", + "arguments": {"node-name":"base", "driver":"qcow2", + "file":{"driver":"file", "filename":"'"$TEST_IMG.base"'"}}}' 'return' +_send_qemu_cmd $QEMU_HANDLE '{"execute":"nbd-server-start", + "arguments": {"addr":{"type":"unix", + "data":{"path":"'"$SOCK_DIR/nbd"'"}}}}' 'return' +_send_qemu_cmd $QEMU_HANDLE '{"execute":"nbd-server-add", + "arguments": {"device":"base","name":"base"}}' 'return' + +echo +echo "=== Adding explicit NBD client ===" + +_send_qemu_cmd $QEMU_HANDLE '{"execute": "blockdev-add", + "arguments": {"node-name":"client", "driver":"nbd", + "read-only":true, "export":"base", + "server":{"type":"unix", "path":"'"$SOCK_DIR/nbd"'"}}}' 'return' +_send_qemu_cmd $QEMU_HANDLE '{"execute":"nbd-server-add", + "arguments": {"device":"client","name":"client"}}' 'return' + +echo +echo "=== Creating wrapper image ===" + +_make_test_img -F raw -b "nbd+unix:///base?socket=$SOCK_DIR/nbd" $size + +echo +echo "=== Adding wrapper image with implicit client from qcow2 file ===" + +_send_qemu_cmd $QEMU_HANDLE '{"execute": "blockdev-add", + "arguments": {"node-name":"wrap", "driver":"qcow2", + "file":{"driver":"file", "filename":"'"$TEST_IMG"'"}}}' 'return' +_send_qemu_cmd $QEMU_HANDLE '{"execute":"nbd-server-add", + "arguments": {"device":"wrap","name":"wrap"}}' 'return' + +echo +echo "=== Checking NBD server ===" + +$QEMU_NBD --list -k $SOCK_DIR/nbd + +echo +echo "=== Cleaning up ===" + +_send_qemu_cmd $QEMU_HANDLE '{"execute":"quit"}' '' + +echo "*** done" +rm -f $seq.full +status=0 diff --git 
a/tests/qemu-iotests/tests/nbd-in-qcow2-chain.out b/tests/qemu-iotests/tests/nbd-in-qcow2-chain.out new file mode 100644 index 0000000000..b65cdaa4f2 --- /dev/null +++ b/tests/qemu-iotests/tests/nbd-in-qcow2-chain.out @@ -0,0 +1,75 @@ +QA output created by nbd-in-qcow2-chain + +=== Preparing base image === +Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=104857600 + +=== Starting QEMU and exposing base image === +{"execute": "qmp_capabilities"} +{"return": {}} +{"execute": "blockdev-add", + "arguments": {"node-name":"base", "driver":"IMGFMT", + "file":{"driver":"file", "filename":"TEST_DIR/t.IMGFMT.base"}}} +{"return": {}} +{"execute":"nbd-server-start", + "arguments": {"addr":{"type":"unix", + "data":{"path":"SOCK_DIR/nbd"}}}} +{"return": {}} +{"execute":"nbd-server-add", + "arguments": {"device":"base","name":"base"}} +{"return": {}} + +=== Adding explicit NBD client === +{"execute": "blockdev-add", + "arguments": {"node-name":"client", "driver":"nbd", + "read-only":true, "export":"base", + "server":{"type":"unix", "path":"SOCK_DIR/nbd"}}} +{"return": {}} +{"execute":"nbd-server-add", + "arguments": {"device":"client","name":"client"}} +{"return": {}} + +=== Creating wrapper image === +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=104857600 backing_file=nbd+unix:///base?socket=SOCK_DIR/nbd backing_fmt=raw + +=== Adding wrapper image with implicit client from qcow2 file === +{"execute": "blockdev-add", + "arguments": {"node-name":"wrap", "driver":"IMGFMT", + "file":{"driver":"file", "filename":"TEST_DIR/t.IMGFMT"}}} +{"return": {}} +{"execute":"nbd-server-add", + "arguments": {"device":"wrap","name":"wrap"}} +{"return": {}} + +=== Checking NBD server === +exports available: 3 + export: 'base' + size: 104857600 + flags: 0x158f ( readonly flush fua df multi cache block-status-payload ) + min block: 1 + opt block: 4096 + max block: 33554432 + transaction size: 64-bit + available meta contexts: 1 + base:allocation + export: 'client' + size: 104857600 + flags: 
0x158f ( readonly flush fua df multi cache block-status-payload ) + min block: 1 + opt block: 4096 + max block: 33554432 + transaction size: 64-bit + available meta contexts: 1 + base:allocation + export: 'wrap' + size: 104857600 + flags: 0x158f ( readonly flush fua df multi cache block-status-payload ) + min block: 1 + opt block: 4096 + max block: 33554432 + transaction size: 64-bit + available meta contexts: 1 + base:allocation + +=== Cleaning up === +{"execute":"quit"} +*** done From 1c34de6d1f929af447a131ee97ab70c06f98d4e3 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 13 Nov 2025 09:05:25 +0100 Subject: [PATCH 14/15] tests/qemu-iotests: Fix broken grep command in iotest 207 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Running "./check -ssh 207" fails for me with lots of lines like this in the output: +base64: invalid input While looking closer at it, I noticed that the grep -v "\\^#" command in this test is not working as expected - it is likely meant to filter out the comment lines that are starting with a "#", but at least my version of grep (GNU grep 3.11) does not work with the backslashes here. There does not seem to be a compelling reason for these backslashes, so let's simply drop them to fix this issue. Signed-off-by: Thomas Huth Message-ID: <20251113080525.444826-1-thuth@redhat.com> Reviewed-by: Daniel P. 
Berrangé Signed-off-by: Eric Blake --- tests/qemu-iotests/207 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/qemu-iotests/207 b/tests/qemu-iotests/207 index 41dcf3ff55..ceab990e64 100755 --- a/tests/qemu-iotests/207 +++ b/tests/qemu-iotests/207 @@ -119,7 +119,7 @@ with iotests.FilePath('t.img') as disk_path, \ iotests.img_info_log(remote_path) keys = subprocess.check_output( - 'ssh-keyscan 127.0.0.1 2>/dev/null | grep -v "\\^#" | ' + + 'ssh-keyscan 127.0.0.1 2>/dev/null | grep -v "^#" | ' + 'cut -d" " -f3', shell=True).rstrip().decode('ascii').split('\n') From 4c91719a6a78a1c24d8bb854f7594e767962d0d9 Mon Sep 17 00:00:00 2001 From: Alberto Garcia Date: Wed, 12 Nov 2025 18:09:57 +0100 Subject: [PATCH 15/15] tests/qemu-iotest: fix iotest 024 with qed images Use 'qemu-io -c map' instead of 'qemu-img map' to get an output that works with both image types. Cc: qemu-stable Fixes: 909852ba6b4a ("qemu-img rebase: don't exceed IO_BUF_SIZE in one operation") Signed-off-by: Alberto Garcia Message-ID: <20251112170959.700840-1-berto@igalia.com> Reviewed-by: Eric Blake Tested-by: Thomas Huth Signed-off-by: Eric Blake --- tests/qemu-iotests/024 | 2 +- tests/qemu-iotests/024.out | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/qemu-iotests/024 b/tests/qemu-iotests/024 index 021169b4a1..10be2bd845 100755 --- a/tests/qemu-iotests/024 +++ b/tests/qemu-iotests/024 @@ -359,7 +359,7 @@ $QEMU_IO "$OVERLAY" -c "read -P 0x00 0 1M" | _filter_qemu_io $QEMU_IO "$OVERLAY" -c "read -P 0xff 1M 2M" | _filter_qemu_io $QEMU_IO "$OVERLAY" -c "read -P 0x00 3M 1M" | _filter_qemu_io -$QEMU_IMG map "$OVERLAY" | _filter_qemu_img_map +$QEMU_IO -c map "$OVERLAY" | _filter_qemu_io echo diff --git a/tests/qemu-iotests/024.out b/tests/qemu-iotests/024.out index 1b7522ba71..da8fedc08b 100644 --- a/tests/qemu-iotests/024.out +++ b/tests/qemu-iotests/024.out @@ -266,7 +266,6 @@ read 2097152/2097152 bytes at offset 1048576 2 MiB, X ops; XX:XX:XX.X (XXX YYY/sec 
and XXX ops/sec) read 1048576/1048576 bytes at offset 3145728 1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -Offset Length File -0 0x400000 TEST_DIR/subdir/t.IMGFMT +4 MiB (0x400000) bytes allocated at offset 0 bytes (0x0) *** done