/* Page size or 4 KiB (HDD sector size) should be on the safe side */
return MAX(4096, qemu_real_host_page_size);
}
+ IO_CODE();
return bs->bl.opt_mem_alignment;
}
/* Page size or 4 KiB (HDD sector size) should be on the safe side */
return MAX(4096, qemu_real_host_page_size);
}
+ IO_CODE();
return bs->bl.min_mem_alignment;
}
* image is inactivated. */
bool bdrv_is_read_only(BlockDriverState *bs)
{
+ IO_CODE();
return !(bs->open_flags & BDRV_O_RDWR);
}
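
For reference, these markers come from the block-layer API split into global-state and I/O categories: IO_CODE() tags functions that are thread-safe and may run in any AioContext, while IO_OR_GS_CODE() tags functions callable from either I/O or global-state (main-loop) code. A minimal sketch of the marker macros, assuming definitions along the lines of QEMU's include/qemu/main-loop.h (illustrative, not authoritative):

    #include <assert.h>

    /* Global-state API: must run in the main (BQL) thread.
     * qemu_in_main_thread() is assumed to be the QEMU helper for that check. */
    #define GLOBAL_STATE_CODE()                                 \
        do {                                                    \
            assert(qemu_in_main_thread());                      \
        } while (0)

    /* I/O API: safe to call from any thread. */
    #define IO_CODE()                                           \
        do {                                                    \
            /* nop: documentation marker */                     \
        } while (0)

    /* Callable from both I/O and global-state code. */
    #define IO_OR_GS_CODE()                                     \
        do {                                                    \
            /* nop: documentation marker */                     \
        } while (0)
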
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
bool ignore_allow_rdw, Error **errp)
{
+ IO_CODE();
+
/* Do not set read_only if copy_on_read is enabled */
if (bs->copy_on_read && read_only) {
error_setg(errp, "Can't set node '%s' to r/o with copy-on-read enabled",
Error **errp)
{
int ret = 0;
+ IO_CODE();
if (!(bs->open_flags & BDRV_O_RDWR)) {
return 0;
Error *local_err = NULL;
int ret;
+ IO_CODE();
assert(bs != NULL);
if (!bs->drv) {
{
Error *local_err = NULL;
int ret;
+ IO_CODE();
if (!bs) {
return;
AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c)
{
BlockDriverState *bs = c->opaque;
+ IO_CODE();
return bdrv_get_aio_context(bs);
}
AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c)
{
+ IO_CODE();
return c->klass->get_parent_aio_context(c);
}
*/
bool bdrv_is_writable(BlockDriverState *bs)
{
+ IO_CODE();
return bdrv_is_writable_after_reopen(bs, NULL);
}
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
+ IO_CODE();
+
if (!drv) {
return -ENOMEDIUM;
}
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
BlockDriverState *in_bs, Error **errp)
{
+ IO_CODE();
if (!drv->bdrv_measure) {
error_setg(errp, "Block driver '%s' does not support size measurement",
drv->format_name);
int64_t bdrv_nb_sectors(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
+ IO_CODE();
if (!drv)
return -ENOMEDIUM;
int64_t bdrv_getlength(BlockDriverState *bs)
{
int64_t ret = bdrv_nb_sectors(bs);
+ IO_CODE();
if (ret < 0) {
return ret;
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
{
int64_t nb_sectors = bdrv_nb_sectors(bs);
+ IO_CODE();
*nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
}
bool bdrv_is_sg(BlockDriverState *bs)
{
+ IO_CODE();
return bs->sg;
}
bool bdrv_supports_compressed_writes(BlockDriverState *bs)
{
BlockDriverState *filtered;
+ IO_CODE();
if (!bs->drv || !block_driver_can_compress(bs->drv)) {
return false;
const char *bdrv_get_format_name(BlockDriverState *bs)
{
+ IO_CODE();
return bs->drv ? bs->drv->format_name : NULL;
}
const char *bdrv_get_node_name(const BlockDriverState *bs)
{
+ IO_CODE();
return bs->node_name;
}
/* TODO check what callers really want: bs->node_name or blk_name() */
const char *bdrv_get_device_name(const BlockDriverState *bs)
{
+ IO_CODE();
return bdrv_get_parent_name(bs) ?: "";
}
* absent, then this returns an empty (non-null) string. */
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs)
{
+ IO_CODE();
return bdrv_get_parent_name(bs) ?: bs->node_name;
}
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs)
{
+ IO_CODE();
if (!(bs->open_flags & BDRV_O_UNMAP)) {
return false;
}
void bdrv_get_backing_filename(BlockDriverState *bs,
char *filename, int filename_size)
{
+ IO_CODE();
pstrcpy(filename, filename_size, bs->backing_file);
}
{
int ret;
BlockDriver *drv = bs->drv;
+ IO_CODE();
/* if bs->drv == NULL, bs is closed, so there's nothing to do here */
if (!drv) {
return -ENOMEDIUM;
Error **errp)
{
BlockDriver *drv = bs->drv;
+ IO_CODE();
if (drv && drv->bdrv_get_specific_info) {
return drv->bdrv_get_specific_info(bs, errp);
}
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
+ IO_CODE();
if (!drv || !drv->bdrv_get_specific_stats) {
return NULL;
}
void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event)
{
+ IO_CODE();
if (!bs || !bs->drv || !bs->drv->bdrv_debug_event) {
return;
}
{
BlockDriver *drv = bs->drv;
BdrvChild *child;
+ IO_CODE();
if (!drv) {
return false;
void bdrv_eject(BlockDriverState *bs, bool eject_flag)
{
BlockDriver *drv = bs->drv;
+ IO_CODE();
if (drv && drv->bdrv_eject) {
drv->bdrv_eject(bs, eject_flag);
void bdrv_lock_medium(BlockDriverState *bs, bool locked)
{
BlockDriver *drv = bs->drv;
-
+ IO_CODE();
trace_bdrv_lock_medium(bs, locked);
if (drv && drv->bdrv_lock_medium) {
AioContext *bdrv_get_aio_context(BlockDriverState *bs)
{
+ IO_CODE();
return bs ? bs->aio_context : qemu_get_aio_context();
}
Coroutine *self = qemu_coroutine_self();
AioContext *old_ctx = qemu_coroutine_get_aio_context(self);
AioContext *new_ctx;
+ IO_CODE();
/*
* Increase bs->in_flight to ensure that this operation is completed before
void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx)
{
+ IO_CODE();
aio_co_reschedule_self(old_ctx);
bdrv_dec_in_flight(bs);
}
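
The coroutine enter/leave pair above moves a coroutine into a node's home AioContext and back, bumping bs->in_flight so that drain sees the operation. A hedged usage sketch (variable names illustrative):

    /* Run some I/O against bs from its home AioContext, then return. */
    AioContext *old_ctx = bdrv_co_enter(bs);
    /* ... coroutine I/O on bs here ... */
    bdrv_co_leave(bs, old_ctx);
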
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co)
{
+ IO_CODE();
aio_co_enter(bdrv_get_aio_context(bs), co);
}
void bdrv_parent_drained_end_single(BdrvChild *c)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
+ IO_OR_GS_CODE();
c->parent_quiesce_counter++;
if (c->klass->drained_begin) {
c->klass->drained_begin(c);
*/
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
+ IO_CODE();
qatomic_inc(&bs->copy_on_read);
}
void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
int old = qatomic_fetch_dec(&bs->copy_on_read);
+ IO_CODE();
assert(old >= 1);
}
BdrvChild *ignore_parent, bool ignore_bds_parents)
{
BdrvChild *child, *next;
+ IO_OR_GS_CODE();
if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
return true;
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
BdrvChild *parent, bool ignore_bds_parents)
{
+ IO_OR_GS_CODE();
assert(!qemu_in_coroutine());
/* Stop things in parent-to-child order */
void bdrv_drained_begin(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_do_drained_begin(bs, false, NULL, false, true);
}
void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_do_drained_begin(bs, true, NULL, false, true);
}
void bdrv_drained_end(BlockDriverState *bs)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
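
The drain entry points are tagged IO_OR_GS_CODE() rather than IO_CODE() because they poll (BDRV_POLL_WHILE): they may run from the main loop or from the thread owning bs's AioContext, but not from an arbitrary thread. A hedged sketch of the usual pairing:

    /* Quiesce all I/O on bs for the duration of the section. */
    bdrv_drained_begin(bs);
    /* ... work that must not race with requests on bs ... */
    bdrv_drained_end(bs);
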
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
+ IO_CODE();
bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}
void bdrv_subtree_drained_end(BlockDriverState *bs)
{
int drained_end_counter = 0;
+ IO_OR_GS_CODE();
bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}
*/
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
assert(qemu_in_coroutine());
bdrv_drained_begin(bs);
bdrv_drained_end(bs);
void bdrv_drain(BlockDriverState *bs)
{
+ IO_OR_GS_CODE();
bdrv_drained_begin(bs);
bdrv_drained_end(bs);
}
int64_t *cluster_bytes)
{
BlockDriverInfo bdi;
-
+ IO_CODE();
if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
*cluster_offset = offset;
*cluster_bytes = bytes;
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
+ IO_CODE();
return bdrv_pwritev(child, offset, bytes, NULL,
BDRV_REQ_ZERO_WRITE | flags);
}
int ret;
int64_t target_size, bytes, offset = 0;
BlockDriverState *bs = child->bs;
+ IO_CODE();
target_size = bdrv_getlength(bs);
if (target_size < 0) {
{
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
if (bytes < 0) {
return -EINVAL;
{
int ret;
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
+ IO_CODE();
if (bytes < 0) {
return -EINVAL;
const void *buf, int64_t count)
{
int ret;
+ IO_CODE();
ret = bdrv_pwrite(child, offset, buf, count);
if (ret < 0) {
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
int64_t bytes, BdrvRequestFlags flags)
{
+ IO_CODE();
trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
{
+ IO_CODE();
return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
pnum, map, file, NULL);
}
int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
+ IO_CODE();
return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
offset, bytes, pnum, map, file);
}
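
A hedged sketch of consuming the block-status result (names illustrative): on success the return value carries BDRV_BLOCK_* flags and *pnum says for how many bytes, starting at offset, the answer holds.

    int64_t pnum, map;
    BlockDriverState *file;
    int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);
    if (ret >= 0 && (ret & BDRV_BLOCK_ZERO)) {
        /* the first pnum bytes at offset read as zeroes */
    }
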
{
int ret;
int64_t pnum = bytes;
+ IO_CODE();
if (!bytes) {
return 1;
{
int ret;
int64_t dummy;
+ IO_CODE();
ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
bytes, pnum ? pnum : &dummy, NULL,
int ret = bdrv_common_block_status_above(top, base, include_base, false,
offset, bytes, pnum, NULL, NULL,
&depth);
+ IO_CODE();
if (ret < 0) {
return ret;
}
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
int ret = bdrv_writev_vmstate(bs, &qiov, pos);
+ IO_CODE();
return ret < 0 ? ret : size;
}
{
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
int ret = bdrv_readv_vmstate(bs, &qiov, pos);
+ IO_CODE();
return ret < 0 ? ret : size;
}
void bdrv_aio_cancel(BlockAIOCB *acb)
{
+ IO_CODE();
qemu_aio_ref(acb);
bdrv_aio_cancel_async(acb);
while (acb->refcnt > 1) {
* In either case the completion callback must be called. */
void bdrv_aio_cancel_async(BlockAIOCB *acb)
{
+ IO_CODE();
if (acb->aiocb_info->cancel_async) {
acb->aiocb_info->cancel_async(acb);
}
BdrvChild *child;
int current_gen;
int ret = 0;
+ IO_CODE();
bdrv_inc_in_flight(bs);
int64_t max_pdiscard;
int head, tail, align;
BlockDriverState *bs = child->bs;
+ IO_CODE();
if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
return -ENOMEDIUM;
.coroutine = qemu_coroutine_self(),
};
BlockAIOCB *acb;
+ IO_CODE();
bdrv_inc_in_flight(bs);
if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
+ IO_CODE();
return qemu_memalign(bdrv_opt_mem_align(bs), size);
}
void *qemu_blockalign0(BlockDriverState *bs, size_t size)
{
+ IO_CODE();
return memset(qemu_blockalign(bs, size), 0, size);
}
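
Buffers from qemu_blockalign() honour bs's optimal memory alignment (needed e.g. for O_DIRECT) and must be released with qemu_vfree(). A hedged usage sketch (buf, len, offset illustrative):

    void *buf = qemu_blockalign(bs, len);          /* aborts on OOM */
    memset(buf, 0, len);
    ret = bdrv_pwrite(child, offset, buf, len);    /* aligned bounce buffer */
    qemu_vfree(buf);                               /* not free()! */
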
void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
{
size_t align = bdrv_opt_mem_align(bs);
+ IO_CODE();
/* Ensure that NULL is never returned on success */
assert(align > 0);
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
{
void *mem = qemu_try_blockalign(bs, size);
+ IO_CODE();
if (mem) {
memset(mem, 0, size);
{
int i;
size_t alignment = bdrv_min_mem_align(bs);
+ IO_CODE();
for (i = 0; i < qiov->niov; i++) {
if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
void bdrv_io_plug(BlockDriverState *bs)
{
BdrvChild *child;
+ IO_CODE();
QLIST_FOREACH(child, &bs->children, next) {
bdrv_io_plug(child->bs);
void bdrv_io_unplug(BlockDriverState *bs)
{
BdrvChild *child;
+ IO_CODE();
assert(bs->io_plugged);
if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
int64_t bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
+ IO_CODE();
return bdrv_co_copy_range_from(src, src_offset,
dst, dst_offset,
bytes, read_flags, write_flags);
BdrvTrackedRequest req;
int64_t old_size, new_bytes;
int ret;
-
+ IO_CODE();
/* if bs->drv == NULL, bs is closed, so there's nothing to do here */
if (!drv) {