bdrv_replace_child_noperm(s->child, NULL);
if (bdrv_get_aio_context(bs) != s->old_child_ctx) {
- bdrv_try_set_aio_context(bs, s->old_child_ctx, &error_abort);
+ bdrv_try_change_aio_context(bs, s->old_child_ctx, NULL, &error_abort);
}
if (bdrv_child_get_parent_aio_context(s->child) != s->old_parent_ctx) {
parent_ctx = bdrv_child_get_parent_aio_context(new_child);
if (child_ctx != parent_ctx) {
Error *local_err = NULL;
- int ret = bdrv_try_set_aio_context(child_bs, parent_ctx, &local_err);
+ int ret = bdrv_try_change_aio_context(child_bs, parent_ctx, NULL,
+ &local_err);
if (ret < 0 && child_class->change_aio_ctx) {
Transaction *tran = tran_new();
* When the parent requiring a non-default AioContext is removed, the
* node moves back to the main AioContext
*/
- bdrv_try_set_aio_context(old_bs, qemu_get_aio_context(), NULL);
+ bdrv_try_change_aio_context(old_bs, qemu_get_aio_context(), NULL, NULL);
}
}
return 0;
}
-int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
- Error **errp)
-{
- GLOBAL_STATE_CODE();
- return bdrv_try_change_aio_context(bs, ctx, NULL, errp);
-}
-
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
void (*attached_aio_context)(AioContext *new_context, void *opaque),
void (*detach_aio_context)(void *opaque), void *opaque)
/* Ignore errors with fixed-iothread=false */
set_context_errp = fixed_iothread ? errp : NULL;
- ret = bdrv_try_set_aio_context(bs, new_ctx, set_context_errp);
+ ret = bdrv_try_change_aio_context(bs, new_ctx, NULL, set_context_errp);
if (ret == 0) {
aio_context_release(ctx);
aio_context_acquire(new_ctx);
aio_context_release(aio_context);
aio_context_acquire(tmp_context);
- ret = bdrv_try_set_aio_context(state->old_bs,
- aio_context, NULL);
+ ret = bdrv_try_change_aio_context(state->old_bs,
+ aio_context, NULL, NULL);
assert(ret == 0);
aio_context_release(tmp_context);
goto out;
}
- /* Honor bdrv_try_set_aio_context() context acquisition requirements. */
+ /* Honor bdrv_try_change_aio_context() context acquisition requirements. */
old_context = bdrv_get_aio_context(target_bs);
aio_context_release(aio_context);
aio_context_acquire(old_context);
- ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
+ ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
if (ret < 0) {
bdrv_unref(target_bs);
aio_context_release(old_context);
return;
}
- /* Honor bdrv_try_set_aio_context() context acquisition requirements. */
+ /* Honor bdrv_try_change_aio_context() context acquisition requirements. */
aio_context = bdrv_get_aio_context(bs);
old_context = bdrv_get_aio_context(target_bs);
aio_context_acquire(old_context);
- ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
+ ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
if (ret < 0) {
aio_context_release(old_context);
return;
!bdrv_has_zero_init(target_bs)));
- /* Honor bdrv_try_set_aio_context() context acquisition requirements. */
+ /* Honor bdrv_try_change_aio_context() context acquisition requirements. */
old_context = bdrv_get_aio_context(target_bs);
aio_context_release(aio_context);
aio_context_acquire(old_context);
- ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
+ ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
if (ret < 0) {
bdrv_unref(target_bs);
aio_context_release(old_context);
zero_target = (sync == MIRROR_SYNC_MODE_FULL);
- /* Honor bdrv_try_set_aio_context() context acquisition requirements. */
+ /* Honor bdrv_try_change_aio_context() context acquisition requirements. */
old_context = bdrv_get_aio_context(target_bs);
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(old_context);
- ret = bdrv_try_set_aio_context(target_bs, aio_context, errp);
+ ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
aio_context_release(old_context);
aio_context_acquire(aio_context);
old_context = bdrv_get_aio_context(bs);
aio_context_acquire(old_context);
- bdrv_try_set_aio_context(bs, new_context, errp);
+ bdrv_try_change_aio_context(bs, new_context, NULL, errp);
aio_context_release(old_context);
}
AioContext is a generic event loop that can be used by any QEMU subsystem.
The block layer has support for AioContext integrated. Each BlockDriverState
-is associated with an AioContext using bdrv_try_set_aio_context() and
+is associated with an AioContext using bdrv_try_change_aio_context() and
bdrv_get_aio_context(). This allows block layer code to process I/O inside the
right AioContext. Other subsystems may wish to follow a similar approach.
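As a minimal sketch (not part of the patch; bs, iothread_ctx and errp stand for whatever node, target AioContext and error pointer the caller already has), moving a node with the new API could look like:

    AioContext *old_ctx = bdrv_get_aio_context(bs);
    int ret;

    if (old_ctx != iothread_ctx) {
        /* The extra NULL is the same third argument that the removed
         * bdrv_try_set_aio_context() wrapper used to pass. */
        ret = bdrv_try_change_aio_context(bs, iothread_ctx, NULL, errp);
        if (ret < 0) {
            return ret; /* a parent or child node refused the move */
        }
    }
    assert(bdrv_get_aio_context(bs) == iothread_ctx);

On success, bdrv_get_aio_context() reports the new context and I/O for the node is processed there.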
the BlockDriverState's AioContext to avoid the need to acquire/release around
each bdrv_*() call. The functions bdrv_add/remove_aio_context_notifier,
or alternatively blk_add/remove_aio_context_notifier if you use BlockBackends,
-can be used to get a notification whenever bdrv_try_set_aio_context() moves a
+can be used to get a notification whenever bdrv_try_change_aio_context() moves a
BlockDriverState to a different AioContext.
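As an illustration of the notifier API (a sketch only; MyState, my_attach_cb and my_detach_cb are made-up names, not existing QEMU symbols), a subsystem holding per-node state could register callbacks like this:

    static void my_attach_cb(AioContext *new_context, void *opaque)
    {
        MyState *s = opaque;
        s->ctx = new_context;  /* resume submitting work in the new context */
    }

    static void my_detach_cb(void *opaque)
    {
        MyState *s = opaque;
        s->ctx = NULL;         /* the node is about to change AioContext */
    }

    /* register when attaching to the node ... */
    bdrv_add_aio_context_notifier(bs, my_attach_cb, my_detach_cb, s);
    /* ... and unregister with the matching call on teardown */
    bdrv_remove_aio_context_notifier(bs, my_attach_cb, my_detach_cb, s);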
*/
void coroutine_fn bdrv_co_unlock(BlockDriverState *bs);
-int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
- Error **errp);
AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c);
bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx,
GHashTable *visited, Transaction *tran,
next_aio_context = job->aio_context;
/*
* Coroutine has resumed, but in the meanwhile the job AioContext
- * might have changed via bdrv_try_set_aio_context(), so we need to move
- * the coroutine too in the new aiocontext.
+ * might have changed via bdrv_try_change_aio_context(), so we need to
+ * move the coroutine to the new AioContext as well.
*/
while (qemu_get_current_aio_context() != next_aio_context) {
&error_abort);
bdrv_drained_begin(bs);
- bdrv_try_set_aio_context(bs, ctx_a, &error_abort);
+ bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort);
aio_context_acquire(ctx_a);
bdrv_drained_end(bs);
bdrv_drained_begin(bs);
- bdrv_try_set_aio_context(bs, ctx_b, &error_abort);
+ bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort);
aio_context_release(ctx_a);
aio_context_acquire(ctx_b);
- bdrv_try_set_aio_context(bs, qemu_get_aio_context(), &error_abort);
+ bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort);
aio_context_release(ctx_b);
bdrv_drained_end(bs);
filter = bdrv_find_node("filter_node");
/* Change the AioContext of src */
- bdrv_try_set_aio_context(src, ctx, &error_abort);
+ bdrv_try_change_aio_context(src, ctx, NULL, &error_abort);
g_assert(bdrv_get_aio_context(src) == ctx);
g_assert(bdrv_get_aio_context(target) == ctx);
g_assert(bdrv_get_aio_context(filter) == ctx);
/* Change the AioContext of target */
aio_context_acquire(ctx);
- bdrv_try_set_aio_context(target, main_ctx, &error_abort);
+ bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
aio_context_release(ctx);
g_assert(bdrv_get_aio_context(src) == main_ctx);
g_assert(bdrv_get_aio_context(target) == main_ctx);
blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
blk_insert_bs(blk, src, &error_abort);
- bdrv_try_set_aio_context(target, ctx, &local_err);
+ bdrv_try_change_aio_context(target, ctx, NULL, &local_err);
error_free_or_abort(&local_err);
g_assert(blk_get_aio_context(blk) == main_ctx);
/* ...unless we explicitly allow it */
aio_context_acquire(ctx);
blk_set_allow_aio_context_change(blk, true);
- bdrv_try_set_aio_context(target, ctx, &error_abort);
+ bdrv_try_change_aio_context(target, ctx, NULL, &error_abort);
aio_context_release(ctx);
g_assert(blk_get_aio_context(blk) == ctx);
aio_context_acquire(ctx);
blk_set_aio_context(blk, main_ctx, &error_abort);
- bdrv_try_set_aio_context(target, main_ctx, &error_abort);
+ bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort);
aio_context_release(ctx);
blk_unref(blk);