async: access bottom half flags with qatomic_read
Running test-aio-multithread under TSAN reveals data races on bh->flags.
Because bottom halves may be scheduled or canceled asynchronously, without
taking a lock, adjust aio_compute_bh_timeout() and aio_ctx_check() to use
a relaxed read to access the flags. Use an acquire load to ensure that
anything that was written prior to qemu_bh_schedule() is visible.

Closes: https://gitlab.com/qemu-project/qemu/-/issues/2749
Closes: https://gitlab.com/qemu-project/qemu/-/issues/851
Cc: qemu-stable@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
0d22b621b7
commit
5142397c79
1 changed file with 7 additions and 4 deletions
11
util/async.c
11
util/async.c
|
|
@@ -256,8 +256,9 @@ static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
|
|||
QEMUBH *bh;
|
||||
|
||||
QSLIST_FOREACH_RCU(bh, head, next) {
|
||||
if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
|
||||
if (bh->flags & BH_IDLE) {
|
||||
int flags = qatomic_load_acquire(&bh->flags);
|
||||
if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
|
||||
if (flags & BH_IDLE) {
|
||||
/* idle bottom halves will be polled at least
|
||||
* every 10ms */
|
||||
timeout = 10000000;
|
||||
|
|
@@ -335,14 +336,16 @@ aio_ctx_check(GSource *source)
|
|||
aio_notify_accept(ctx);
|
||||
|
||||
QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
|
||||
if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
|
||||
int flags = qatomic_load_acquire(&bh->flags);
|
||||
if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
|
||||
QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
|
||||
if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
|
||||
int flags = qatomic_load_acquire(&bh->flags);
|
||||
if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue