diff --git a/block/nvme.c b/block/nvme.c
index 7ed5f570bc..b8262ebfd9 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -1176,25 +1176,35 @@ fail:
 
 typedef struct {
     Coroutine *co;
+    bool skip_yield;
     int ret;
-    AioContext *ctx;
 } NVMeCoData;
 
-static void nvme_rw_cb_bh(void *opaque)
-{
-    NVMeCoData *data = opaque;
-    qemu_coroutine_enter(data->co);
-}
-
 static void nvme_rw_cb(void *opaque, int ret)
 {
     NVMeCoData *data = opaque;
+
     data->ret = ret;
-    if (!data->co) {
-        /* The rw coroutine hasn't yielded, don't try to enter. */
-        return;
+
+    if (data->co == qemu_coroutine_self()) {
+        /*
+         * Fast path: We are inside of the request coroutine (through
+         * nvme_submit_command, nvme_deferred_fn, nvme_process_completion).
+         * We can set data->skip_yield here to keep the coroutine from
+         * yielding, and then we don't need to schedule a BH to wake it.
+         */
+        data->skip_yield = true;
+    } else {
+        /*
+         * Safe to call: The case where we run in the request coroutine is
+         * handled above, so we must be independent of it; and without
+         * skip_yield set, the coroutine will yield.
+         * No need to release NVMeQueuePair.lock (we are called without it
+         * held). (Note: If we enter the coroutine here, @data will
+         * probably be dangling once aio_co_wake() returns.)
+         */
+        aio_co_wake(data->co);
     }
-    replay_bh_schedule_oneshot_event(data->ctx, nvme_rw_cb_bh, data);
 }
 
 static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
@@ -1218,7 +1228,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
         .cdw12 = cpu_to_le32(cdw12),
     };
     NVMeCoData data = {
-        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self(),
         .ret = -EINPROGRESS,
     };
 
@@ -1235,9 +1245,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
         return r;
     }
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
-
-    data.co = qemu_coroutine_self();
-    while (data.ret == -EINPROGRESS) {
+    if (!data.skip_yield) {
         qemu_coroutine_yield();
     }
 
@@ -1333,7 +1341,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
         .nsid = cpu_to_le32(s->nsid),
     };
     NVMeCoData data = {
-        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self(),
         .ret = -EINPROGRESS,
     };
 
@@ -1341,9 +1349,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
     req = nvme_get_free_req(ioq);
     assert(req);
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
-
-    data.co = qemu_coroutine_self();
-    if (data.ret == -EINPROGRESS) {
+    if (!data.skip_yield) {
         qemu_coroutine_yield();
     }
 
@@ -1384,7 +1390,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
     };
 
     NVMeCoData data = {
-        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self(),
         .ret = -EINPROGRESS,
     };
 
@@ -1404,9 +1410,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
     assert(req);
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
-
-    data.co = qemu_coroutine_self();
-    while (data.ret == -EINPROGRESS) {
+    if (!data.skip_yield) {
         qemu_coroutine_yield();
     }
 
@@ -1434,7 +1438,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     };
 
     NVMeCoData data = {
-        .ctx = bdrv_get_aio_context(bs),
+        .co = qemu_coroutine_self(),
         .ret = -EINPROGRESS,
     };
 
@@ -1479,9 +1483,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
     trace_nvme_dsm(s, offset, bytes);
     nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
-
-    data.co = qemu_coroutine_self();
-    while (data.ret == -EINPROGRESS) {
+    if (!data.skip_yield) {
         qemu_coroutine_yield();
     }