#define XEN_QDISK_BACKEND(obj) \
OBJECT_CHECK(XenQdiskBackend, (obj), TYPE_XEN_QDISK_BACKEND)
-typedef struct XenQdiskBackend {
+typedef struct XenQdiskBackend XenQdiskBackend;
+
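+/* Per-request state: the guest request, the bounce buffer and AIO bookkeeping. */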
+struct ioreq {
+ XenQdiskBackend *d;
+ blkif_request_t req;
+ int16_t status;
+ off_t start;
+ void *buf;
+ size_t size;
+ int presync;
+ QEMUIOVector v;
+ int aio_inflight;
+ int aio_errors;
+ QLIST_ENTRY(ioreq) list;
+ BlockAcctCookie acct;
+};
+
+struct XenQdiskBackend {
XenBackend xendev;
XenVdevType type;
int disk;
unsigned int nr_ring_ref;
uint32_t *ring_ref;
unsigned int protocol;
- unsigned int max_requests;
xenevtchn_handle *xeh;
xenevtchn_port_or_error_t event_channel;
xengnttab_handle *xgth;
/* data path */
AioContext *ctx;
QEMUBH *bh;
-} XenQdiskBackend;
+ QLIST_HEAD(inflight_head, ioreq) inflight;
+ QLIST_HEAD(finished_head, ioreq) finished;
+ QLIST_HEAD(freelist_head, ioreq) freelist;
+ int requests_total;
+ int requests_inflight;
+ int requests_finished;
+ unsigned int max_requests;
+ int more_work;
+};
+
+static void ioreq_reset(struct ioreq *ioreq)
+{
+ memset(&ioreq->req, 0, sizeof(ioreq->req));
+ ioreq->status = 0;
+ ioreq->start = 0;
+ ioreq->buf = NULL;
+ ioreq->size = 0;
+ ioreq->presync = 0;
+
+ qemu_iovec_reset(&ioreq->v);
+ ioreq->aio_inflight = 0;
+ ioreq->aio_errors = 0;
+
+ ioreq->d = NULL;
+ memset(&ioreq->list, 0, sizeof(ioreq->list));
+ memset(&ioreq->acct, 0, sizeof(ioreq->acct));
+}
+
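+/* Get an ioreq from the freelist, or allocate a new one (up to max_requests). */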
+static struct ioreq *ioreq_start(XenQdiskBackend *d)
+{
+ struct ioreq *ioreq = NULL;
+
+ if (QLIST_EMPTY(&d->freelist)) {
+ if (d->requests_total >= d->max_requests) {
+ goto out;
+ }
+ /* allocate new struct */
+ ioreq = g_malloc0(sizeof(*ioreq));
+ ioreq->d = d;
+ d->requests_total++;
+ qemu_iovec_init(&ioreq->v, 1);
+ } else {
+ /* get one from freelist */
+ ioreq = QLIST_FIRST(&d->freelist);
+ QLIST_REMOVE(ioreq, list);
+ }
+ QLIST_INSERT_HEAD(&d->inflight, ioreq, list);
+ d->requests_inflight++;
+
+out:
+ return ioreq;
+}
+
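+/* Move a completed ioreq from the inflight list to the finished list. */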
+static void ioreq_finish(struct ioreq *ioreq)
+{
+ XenQdiskBackend *d = ioreq->d;
+
+ QLIST_REMOVE(ioreq, list);
+ QLIST_INSERT_HEAD(&d->finished, ioreq, list);
+ d->requests_inflight--;
+ d->requests_finished++;
+}
+
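+/* Return an ioreq to the freelist; 'finish' indicates whether it was on the finished or the inflight list. */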
+static void ioreq_release(struct ioreq *ioreq, bool finish)
+{
+ XenQdiskBackend *d = ioreq->d;
+
+ QLIST_REMOVE(ioreq, list);
+ ioreq_reset(ioreq);
+ ioreq->d = d;
+ QLIST_INSERT_HEAD(&d->freelist, ioreq, list);
+ if (finish) {
+ d->requests_finished--;
+ } else {
+ d->requests_inflight--;
+ }
+}
+
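+/* Sanity-check the guest request and work out the byte offset and length. */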
+static int ioreq_parse(struct ioreq *ioreq)
+{
+ XenQdiskBackend *d = ioreq->d;
+ size_t len;
+ int i;
+
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_READ:
+ break;
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ ioreq->presync = 1;
+ if (!ioreq->req.nr_segments) {
+ return 0;
+ }
+ /* fall through */
+ case BLKIF_OP_WRITE:
+ break;
+ case BLKIF_OP_DISCARD:
+ return 0;
+ default:
+ goto err;
+ }
+
+ if (ioreq->req.operation != BLKIF_OP_READ && blk_is_read_only(d->blk)) {
+ goto err;
+ }
+
+ ioreq->start = ioreq->req.sector_number * d->conf.logical_block_size;
+ for (i = 0; i < ioreq->req.nr_segments; i++) {
+ if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ goto err;
+ }
+ if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
+ goto err;
+ }
+ if (ioreq->req.seg[i].last_sect * d->conf.logical_block_size >=
+ XC_PAGE_SIZE) {
+ goto err;
+ }
+
+ len = (ioreq->req.seg[i].last_sect -
+ ioreq->req.seg[i].first_sect +
+ 1) * d->conf.logical_block_size;
+ ioreq->size += len;
+ }
+
+ if (ioreq->start + ioreq->size > blk_getlength(d->blk)) {
+ goto err;
+ }
+ return 0;
+
+err:
+ ioreq->status = BLKIF_RSP_ERROR;
+ return -1;
+}
+
+static void ioreq_free_copy_buffers(struct ioreq *ioreq)
+{
+ qemu_vfree(ioreq->buf);
+}
+
+static int ioreq_init_copy_buffers(struct ioreq *ioreq)
+{
+ ioreq->buf = qemu_memalign(XC_PAGE_SIZE, ioreq->size);
+
+ return 0;
+}
+
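+/* Grant-copy between the bounce buffer and the guest's granted pages: reads copy towards the guest, writes copy from it. */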
+static int ioreq_grant_copy(struct ioreq *ioreq)
+{
+ XenQdiskBackend *d = ioreq->d;
+ XenBackend *dev = XEN_BACKEND(d);
+ void *virt = ioreq->buf;
+ xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ int i, rc;
+
+ for (i = 0; i < ioreq->req.nr_segments; i++) {
+ if (ioreq->req.operation == BLKIF_OP_READ) {
+ segs[i].flags = GNTCOPY_dest_gref;
+ segs[i].dest.foreign.ref = ioreq->req.seg[i].gref;
+ segs[i].dest.foreign.domid = dev->frontend_id;
+ segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect *
+ d->conf.logical_block_size;
+ segs[i].source.virt = virt;
+ } else {
+ segs[i].flags = GNTCOPY_source_gref;
+ segs[i].source.foreign.ref = ioreq->req.seg[i].gref;
+ segs[i].source.foreign.domid = dev->frontend_id;
+ segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect *
+ d->conf.logical_block_size;
+ segs[i].dest.virt = virt;
+ }
+ segs[i].len = (ioreq->req.seg[i].last_sect
+ - ioreq->req.seg[i].first_sect +
+ 1) * d->conf.logical_block_size;
+ virt += segs[i].len;
+ }
+
+ rc = xengnttab_grant_copy(d->xgth, ioreq->req.nr_segments, segs);
+
+ if (rc) {
+ ioreq->aio_errors++;
+ return -1;
+ }
+
+ for (i = 0; i < ioreq->req.nr_segments; i++) {
+ if (segs[i].status != GNTST_okay) {
+ ioreq->aio_errors++;
+ rc = -1;
+ }
+ }
+
+ return rc;
+}
+
+static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
+
+static void qemu_aio_complete(void *opaque, int ret)
+{
+ struct ioreq *ioreq = opaque;
+ XenQdiskBackend *d = ioreq->d;
+
+ aio_context_acquire(d->ctx);
+
+ if (ret != 0) {
+ ioreq->aio_errors++;
+ }
+
+ ioreq->aio_inflight--;
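+ /* The flush issued for presync has completed; resubmit to handle any write payload. */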
+ if (ioreq->presync) {
+ ioreq->presync = 0;
+ ioreq_runio_qemu_aio(ioreq);
+ goto done;
+ }
+ if (ioreq->aio_inflight > 0) {
+ goto done;
+ }
+
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_READ:
+ /* in case of failure ioreq->aio_errors is increased */
+ if (ret == 0) {
+ ioreq_grant_copy(ioreq);
+ }
+ ioreq_free_copy_buffers(ioreq);
+ break;
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ if (!ioreq->req.nr_segments) {
+ break;
+ }
+ ioreq_free_copy_buffers(ioreq);
+ break;
+ default:
+ break;
+ }
+
+ ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
+ ioreq_finish(ioreq);
+
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ if (!ioreq->req.nr_segments) {
+ break;
+ }
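+ /* fall through */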
+ case BLKIF_OP_READ:
+ if (ioreq->status == BLKIF_RSP_OKAY) {
+ block_acct_done(blk_get_stats(d->blk), &ioreq->acct);
+ } else {
+ block_acct_failed(blk_get_stats(d->blk), &ioreq->acct);
+ }
+ break;
+ case BLKIF_OP_DISCARD:
+ default:
+ break;
+ }
+ qemu_bh_schedule(d->bh);
+
+done:
+ aio_context_release(d->ctx);
+}
+
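+/* Issue a guest discard as one or more pdiscards, each no larger than the block layer's per-request limit. */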
+static bool blk_split_discard(struct ioreq *ioreq,
+ blkif_sector_t sector_number,
+ uint64_t nr_sectors)
+{
+ XenQdiskBackend *d = ioreq->d;
+ int64_t byte_offset;
+ int byte_chunk;
+ uint64_t byte_remaining, limit;
+ uint64_t sec_start = sector_number;
+ uint64_t sec_count = nr_sectors;
+
+ /* Wrap around, or overflowing byte limit? */
+ if (sec_start + sec_count < sec_count ||
+ sec_start + sec_count > INT64_MAX / d->conf.logical_block_size) {
+ return false;
+ }
+
+ limit = BDRV_REQUEST_MAX_SECTORS * d->conf.logical_block_size;
+ byte_offset = sec_start * d->conf.logical_block_size;
+ byte_remaining = sec_count * d->conf.logical_block_size;
+
+ do {
+ byte_chunk = byte_remaining > limit ? limit : byte_remaining;
+ ioreq->aio_inflight++;
+ blk_aio_pdiscard(d->blk, byte_offset, byte_chunk,
+ qemu_aio_complete, ioreq);
+ byte_remaining -= byte_chunk;
+ byte_offset += byte_chunk;
+ } while (byte_remaining > 0);
+
+ return true;
+}
+
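+/* Submit a parsed request to the block layer as asynchronous I/O. */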
+static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
+{
+ XenQdiskBackend *d = ioreq->d;
+
+ ioreq_init_copy_buffers(ioreq);
+ if (ioreq->req.nr_segments &&
+ (ioreq->req.operation == BLKIF_OP_WRITE ||
+ ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
+ ioreq_grant_copy(ioreq)) {
+ ioreq_free_copy_buffers(ioreq);
+ goto err;
+ }
+
+ ioreq->aio_inflight++;
+ if (ioreq->presync) {
+ blk_aio_flush(d->blk, qemu_aio_complete, ioreq);
+ return 0;
+ }
+
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_READ:
+ block_acct_start(blk_get_stats(d->blk), &ioreq->acct,
+ ioreq->size, BLOCK_ACCT_READ);
+ qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
+ ioreq->aio_inflight++;
+ blk_aio_preadv(d->blk, ioreq->start, &ioreq->v, 0,
+ qemu_aio_complete, ioreq);
+ break;
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ if (!ioreq->req.nr_segments) {
+ break;
+ }
+
+ block_acct_start(blk_get_stats(d->blk), &ioreq->acct,
+ ioreq->size,
+ ioreq->req.operation == BLKIF_OP_WRITE ?
+ BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
+ qemu_iovec_add(&ioreq->v, ioreq->buf, ioreq->size);
+ ioreq->aio_inflight++;
+ blk_aio_pwritev(d->blk, ioreq->start, &ioreq->v, 0,
+ qemu_aio_complete, ioreq);
+ break;
+ case BLKIF_OP_DISCARD:
+ {
+ struct blkif_request_discard *req = (void *)&ioreq->req;
+ if (!blk_split_discard(ioreq, req->sector_number,
+ req->nr_sectors)) {
+ goto err;
+ }
+ break;
+ }
+ default:
+ goto err;
+ }
+
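+ /* Drop the reference taken above; this completes the request now if no AIO is still in flight. */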
+ qemu_aio_complete(ioreq, 0);
+
+ return 0;
+
+err:
+ ioreq_finish(ioreq);
+ ioreq->status = BLKIF_RSP_ERROR;
+ return -1;
+}
+
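+/* Put a response on the shared ring; returns whether the frontend needs to be notified. */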
+static int blk_send_response_one(struct ioreq *ioreq)
+{
+ XenQdiskBackend *d = ioreq->d;
+ int send_notify = 0;
+ int have_requests = 0;
+ blkif_response_t *resp;
+
+ switch (d->protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ resp = (blkif_response_t *)RING_GET_RESPONSE(
+ &d->rings.native, d->rings.native.rsp_prod_pvt);
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ resp = (blkif_response_t *)RING_GET_RESPONSE(
+ &d->rings.x86_32_part, d->rings.x86_32_part.rsp_prod_pvt);
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ resp = (blkif_response_t *)RING_GET_RESPONSE(
+ &d->rings.x86_64_part, d->rings.x86_64_part.rsp_prod_pvt);
+ break;
+ default:
+ return 0;
+ }
+
+ resp->id = ioreq->req.id;
+ resp->operation = ioreq->req.operation;
+ resp->status = ioreq->status;
+
+ d->rings.common.rsp_prod_pvt++;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&d->rings.common, send_notify);
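+ /* Re-check so that a request queued by the frontend while the ring looked empty is not missed. */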
+ if (d->rings.common.rsp_prod_pvt == d->rings.common.req_cons) {
+ RING_FINAL_CHECK_FOR_REQUESTS(&d->rings.common, have_requests);
+ } else if (RING_HAS_UNCONSUMED_REQUESTS(&d->rings.common)) {
+ have_requests = 1;
+ }
+
+ if (have_requests) {
+ d->more_work++;
+ }
+
+ return send_notify;
+}
+
+static void blk_send_response_all(XenQdiskBackend *d)
+{
+ struct ioreq *ioreq;
+ int send_notify = 0;
+
+ while (!QLIST_EMPTY(&d->finished)) {
+ ioreq = QLIST_FIRST(&d->finished);
+ send_notify += blk_send_response_one(ioreq);
+ ioreq_release(ioreq, true);
+ }
+ if (send_notify) {
+ xenevtchn_notify(d->xeh, d->event_channel);
+ }
+}
+
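+/* Copy a request off the shared ring, translating the 32-bit and 64-bit layouts as needed. */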
+static int blk_get_request(XenQdiskBackend *d, struct ioreq *ioreq,
+ RING_IDX rc)
+{
+ switch (d->protocol) {
+ case BLKIF_PROTOCOL_NATIVE:
+ memcpy(&ioreq->req, RING_GET_REQUEST(&d->rings.native, rc),
+ sizeof(ioreq->req));
+ break;
+ case BLKIF_PROTOCOL_X86_32:
+ blkif_get_x86_32_req(&ioreq->req,
+ RING_GET_REQUEST(&d->rings.x86_32_part, rc));
+ break;
+ case BLKIF_PROTOCOL_X86_64:
+ blkif_get_x86_64_req(&ioreq->req,
+ RING_GET_REQUEST(&d->rings.x86_64_part, rc));
+ break;
+ }
+
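+ /* Make sure our copy of the request is complete before it is parsed. */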
+ barrier();
+ return 0;
+}
+
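+/* Ring consumer: push out finished responses, then pull in and start new requests. */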
+static void blk_handle_requests(XenQdiskBackend *d)
+{
+ RING_IDX rc, rp;
+ struct ioreq *ioreq;
+
+ d->more_work = 0;
+
+ rc = d->rings.common.req_cons;
+ rp = d->rings.common.sring->req_prod;
+ xen_rmb();
+
+ blk_send_response_all(d);
+ while (rc != rp) {
+ if (RING_REQUEST_CONS_OVERFLOW(&d->rings.common, rc)) {
+ break;
+ }
+ ioreq = ioreq_start(d);
+ if (ioreq == NULL) {
+ d->more_work++;
+ break;
+ }
+ blk_get_request(d, ioreq, rc);
+ d->rings.common.req_cons = ++rc;
+
+ if (ioreq_parse(ioreq) != 0) {
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_READ:
+ block_acct_invalid(blk_get_stats(d->blk),
+ BLOCK_ACCT_READ);
+ break;
+ case BLKIF_OP_WRITE:
+ block_acct_invalid(blk_get_stats(d->blk),
+ BLOCK_ACCT_WRITE);
+ break;
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ block_acct_invalid(blk_get_stats(d->blk),
+ BLOCK_ACCT_FLUSH);
+ break;
+ default:
+ break;
+ }
+
+ if (blk_send_response_one(ioreq)) {
+ xenevtchn_notify(d->xeh, d->event_channel);
+ }
+ ioreq_release(ioreq, false);
+ continue;
+ }
+
+ ioreq_runio_qemu_aio(ioreq);
+ }
+
+ if (d->more_work && d->requests_inflight < d->max_requests) {
+ qemu_bh_schedule(d->bh);
+ }
+}
static char *xen_qdisk_get_name(XenBackend *dev, Error **errp)
{
static void xen_qdisk_bh(void *opaque)
{
+ XenQdiskBackend *d = opaque;
+
+ aio_context_acquire(d->ctx);
+ blk_handle_requests(d);
+ aio_context_release(d->ctx);
}
static void xen_qdisk_realize(XenBackend *dev, Error **errp)
{
XenQdiskBackend *d = XEN_QDISK_BACKEND(dev);
+ bool readonly;
DriveInfo *dinfo;
+ bool is_cdrom = false;
unsigned int info;
int64_t size;
+ trace_xen_qdisk_realize(d->disk, d->partition);
+
if (!d->conf.blk) {
error_setg(errp, "drive property not set");
return;
}
+ readonly = blk_is_read_only(d->conf.blk);
+
+ if (!blkconf_apply_backend_options(&d->conf, readonly, false, errp)) {
+ return;
+ }
+
+ if (!blkconf_geometry(&d->conf, NULL, 65535, 255, 255, errp)) {
+ return;
+ }
+
+ dinfo = blk_legacy_dinfo(d->conf.blk);
+ if (dinfo && dinfo->media_cd) {
+ is_cdrom = true;
+ }
+
blkconf_blocksizes(&d->conf);
if (d->conf.logical_block_size >
xenstore_backend_printf(dev, "max-ring-page-order", "%u",
d->max_ring_page_order);
- info = blk_is_read_only(d->blk) ? VDISK_READONLY : 0;
-
- dinfo = blk_legacy_dinfo(d->blk);
- info |= dinfo->media_cd ? VDISK_CDROM : 0;
+ info = readonly ? VDISK_READONLY : 0;
+ info |= is_cdrom ? VDISK_CDROM : 0;
xenstore_backend_printf(dev, "info", "%u", info);
+ xenstore_frontend_printf(dev, "device-type", "%s",
+ is_cdrom ? "cdrom" : "disk");
+
size = blk_getlength(d->blk);
xenstore_backend_printf(dev, "sector-size", "%u",
d->conf.logical_block_size);
xenstore_backend_printf(dev, "sectors", "%lu",
size / d->conf.logical_block_size);
+ QLIST_INIT(&d->inflight);
+ QLIST_INIT(&d->finished);
+ QLIST_INIT(&d->freelist);
+
d->ctx = qemu_get_aio_context();
d->bh = aio_bh_new(d->ctx, xen_qdisk_bh, d);
}
unsigned int ring_size;
uint32_t *domids;
+ trace_xen_qdisk_initialize(d->disk, d->partition);
+
if (xenstore_frontend_scanf(dev, "ring-page-order", "%u",
&order) != 1) {
d->nr_ring_ref = 1u;
return;
}
- domids = (uint32_t *)g_new0(domid_t, d->nr_ring_ref);
+ domids = g_new0(uint32_t, d->nr_ring_ref);
for (i = 0; i < d->nr_ring_ref; i++) {
domids[i] = dev->frontend_id;
}
{
XenQdiskBackend *d = XEN_QDISK_BACKEND(dev);
- aio_context_acquire(d->ctx);
- xenevtchn_unbind(d->xeh, d->event_channel);
- qemu_set_fd_handler(xenevtchn_fd(d->xeh), NULL, NULL, NULL);
- aio_context_release(d->ctx);
- xenevtchn_close(d->xeh);
- xengnttab_unmap(d->xgth, d->sring, d->nr_ring_ref);
- xengnttab_close(d->xgth);
+ trace_xen_qdisk_disconnect(d->disk, d->partition);
+
+ if (d->xeh) {
+ aio_context_acquire(d->ctx);
+ xenevtchn_unbind(d->xeh, d->event_channel);
+ qemu_set_fd_handler(xenevtchn_fd(d->xeh), NULL, NULL, NULL);
+
+ aio_context_release(d->ctx);
+ xenevtchn_close(d->xeh);
+ d->xeh = NULL;
+ }
+
+ if (d->xgth) {
+ xengnttab_unmap(d->xgth, d->sring, d->nr_ring_ref);
+ xengnttab_close(d->xgth);
+ d->xgth = NULL;
+ }
}
static void xen_qdisk_unrealize(XenBackend *dev, Error **errp)
{
XenQdiskBackend *d = XEN_QDISK_BACKEND(dev);
+ trace_xen_qdisk_unrealize(d->disk, d->partition);
+
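+ /* Tear down any ioreqs still sitting on the freelist. */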
+ while (!QLIST_EMPTY(&d->freelist)) {
+ struct ioreq *ioreq = QLIST_FIRST(&d->freelist);
+
+ QLIST_REMOVE(ioreq, list);
+
+ qemu_iovec_destroy(&ioreq->v);
+ g_free(ioreq);
+ }
+
qemu_bh_delete(d->bh);
}