nfs: Run co BH CB in the coroutine’s AioContext
Like in “rbd: Run co BH CB in the coroutine’s AioContext”, drop the completion flag, yield exactly once, and run the BH in the coroutine’s AioContext. (Can be reproduced with multiqueue by adding a usleep(100000) before the `while (!task.complete)` loops.)

Like in “iscsi: Run co BH CB in the coroutine’s AioContext”, this makes nfs_co_generic_bh_cb() trivial, so we can drop it in favor of just calling aio_co_wake() directly.

Cc: qemu-stable@nongnu.org
Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
Message-ID: <20251110154854.151484-5-hreitz@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent
a9500527db
commit
deb35c129b
1 changed file with 16 additions and 25 deletions
33
block/nfs.c
33
block/nfs.c
|
|
@@ -69,7 +69,6 @@ typedef struct NFSClient {
|
|||
typedef struct NFSRPC {
|
||||
BlockDriverState *bs;
|
||||
int ret;
|
||||
int complete;
|
||||
QEMUIOVector *iov;
|
||||
struct stat *st;
|
||||
Coroutine *co;
|
||||
|
|
@@ -230,14 +229,6 @@ static void coroutine_fn nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)
|
|||
};
|
||||
}
|
||||
|
||||
static void nfs_co_generic_bh_cb(void *opaque)
|
||||
{
|
||||
NFSRPC *task = opaque;
|
||||
|
||||
task->complete = 1;
|
||||
aio_co_wake(task->co);
|
||||
}
|
||||
|
||||
/* Called (via nfs_service) with QemuMutex held. */
|
||||
static void
|
||||
nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
|
||||
|
|
@@ -256,8 +247,16 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
|
|||
if (task->ret < 0) {
|
||||
error_report("NFS Error: %s", nfs_get_error(nfs));
|
||||
}
|
||||
replay_bh_schedule_oneshot_event(task->client->aio_context,
|
||||
nfs_co_generic_bh_cb, task);
|
||||
|
||||
/*
|
||||
* Safe to call: nfs_service(), which called us, is only run from the FD
|
||||
* handlers, never from the request coroutine. The request coroutine in
|
||||
* turn will yield unconditionally.
|
||||
* No need to release the lock, even if we directly enter the coroutine, as
|
||||
* the lock is never re-taken after yielding. (Note: If we do enter the
|
||||
* coroutine, @task will probably be dangling once aio_co_wake() returns.)
|
||||
*/
|
||||
aio_co_wake(task->co);
|
||||
}
|
||||
|
||||
static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, int64_t offset,
|
||||
|
|
@@ -278,9 +277,7 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, int64_t offset,
|
|||
|
||||
nfs_set_events(client);
|
||||
}
|
||||
while (!task.complete) {
|
||||
qemu_coroutine_yield();
|
||||
}
|
||||
|
||||
if (task.ret < 0) {
|
||||
return task.ret;
|
||||
|
|
@@ -328,9 +325,7 @@ static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, int64_t offset,
|
|||
|
||||
nfs_set_events(client);
|
||||
}
|
||||
while (!task.complete) {
|
||||
qemu_coroutine_yield();
|
||||
}
|
||||
|
||||
if (my_buffer) {
|
||||
g_free(buf);
|
||||
|
|
@@ -358,9 +353,7 @@ static int coroutine_fn nfs_co_flush(BlockDriverState *bs)
|
|||
|
||||
nfs_set_events(client);
|
||||
}
|
||||
while (!task.complete) {
|
||||
qemu_coroutine_yield();
|
||||
}
|
||||
|
||||
return task.ret;
|
||||
}
|
||||
|
|
@@ -723,8 +716,8 @@ nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data,
|
|||
if (task->ret < 0) {
|
||||
error_report("NFS Error: %s", nfs_get_error(nfs));
|
||||
}
|
||||
replay_bh_schedule_oneshot_event(task->client->aio_context,
|
||||
nfs_co_generic_bh_cb, task);
|
||||
/* Safe to call, see nfs_co_generic_cb() */
|
||||
aio_co_wake(task->co);
|
||||
}
|
||||
|
||||
static int64_t coroutine_fn nfs_co_get_allocated_file_size(BlockDriverState *bs)
|
||||
|
|
@@ -748,9 +741,7 @@ static int64_t coroutine_fn nfs_co_get_allocated_file_size(BlockDriverState *bs)
|
|||
|
||||
nfs_set_events(client);
|
||||
}
|
||||
while (!task.complete) {
|
||||
qemu_coroutine_yield();
|
||||
}
|
||||
|
||||
return (task.ret < 0 ? task.ret : st.st_blocks * 512);
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue