/* ------------------------------------------------------------- */
-static int batch_maps = 0;
-
-/* ------------------------------------------------------------- */
-
#define BLOCK_SIZE 512
#define IOCB_COUNT (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
off_t start;
QEMUIOVector v;
int presync;
- uint8_t mapped;
- /* grant mapping */
uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- int prot;
void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
void *pages;
int protocol;
blkif_back_rings_t rings;
int more_work;
- int cnt_map;
/* request lists */
QLIST_HEAD(inflight_head, ioreq) inflight;
ioreq->status = 0;
ioreq->start = 0;
ioreq->presync = 0;
- ioreq->mapped = 0;
memset(ioreq->domids, 0, sizeof(ioreq->domids));
memset(ioreq->refs, 0, sizeof(ioreq->refs));
- ioreq->prot = 0;
memset(ioreq->page, 0, sizeof(ioreq->page));
ioreq->pages = NULL;
ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
switch (ioreq->req.operation) {
case BLKIF_OP_READ:
- ioreq->prot = PROT_WRITE; /* to memory */
break;
case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
case BLKIF_OP_WRITE:
- ioreq->prot = PROT_READ; /* from memory */
break;
case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_pv_printf(&ioreq->blkdev->xendev, 0,
                      "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
-static void ioreq_unmap(struct ioreq *ioreq)
-{
- xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
- int i;
-
- if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
- return;
- }
- if (batch_maps) {
- if (!ioreq->pages) {
- return;
- }
- if (xengnttab_unmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
- xen_pv_printf(&ioreq->blkdev->xendev, 0,
- "xengnttab_unmap failed: %s\n",
- strerror(errno));
- }
- ioreq->blkdev->cnt_map -= ioreq->v.niov;
- ioreq->pages = NULL;
- } else {
- for (i = 0; i < ioreq->v.niov; i++) {
- if (!ioreq->page[i]) {
- continue;
- }
- if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
- xen_pv_printf(&ioreq->blkdev->xendev, 0,
- "xengnttab_unmap failed: %s\n",
- strerror(errno));
- }
- ioreq->blkdev->cnt_map--;
- ioreq->page[i] = NULL;
- }
- }
- ioreq->mapped = 0;
-}
-
-static int ioreq_map(struct ioreq *ioreq)
-{
- xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
- int i;
-
- if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
- return 0;
- }
- if (batch_maps) {
- ioreq->pages = xengnttab_map_grant_refs
- (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
- if (ioreq->pages == NULL) {
- xen_pv_printf(&ioreq->blkdev->xendev, 0,
- "can't map %d grant refs (%s, %d maps)\n",
- ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
- return -1;
- }
- for (i = 0; i < ioreq->v.niov; i++) {
- ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
- (uintptr_t)ioreq->v.iov[i].iov_base;
- }
- ioreq->blkdev->cnt_map += ioreq->v.niov;
- } else {
- for (i = 0; i < ioreq->v.niov; i++) {
- ioreq->page[i] = xengnttab_map_grant_ref
- (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
- if (ioreq->page[i] == NULL) {
- xen_pv_printf(&ioreq->blkdev->xendev, 0,
- "can't map grant ref %d (%s, %d maps)\n",
- ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
- ioreq->mapped = 1;
- ioreq_unmap(ioreq);
- return -1;
- }
- ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
- }
- }
- ioreq->mapped = 1;
- return 0;
-}
-
-#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 40800
-
static void ioreq_free_copy_buffers(struct ioreq *ioreq)
{
int i;

    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->page[i] = NULL;
    }

    qemu_vfree(ioreq->pages);
}
-#else
-static void ioreq_free_copy_buffers(struct ioreq *ioreq)
-{
- abort();
-}
-
-static int ioreq_init_copy_buffers(struct ioreq *ioreq)
-{
- abort();
-}
-
-static int ioreq_grant_copy(struct ioreq *ioreq)
-{
- abort();
-}
-#endif
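/* ------------------------------------------------------------- */

/*
 * Illustrative sketch, not part of the patch: the bodies of the copy
 * helpers are elided above. ioreq_init_copy_buffers() sets up local,
 * page-aligned bounce buffers and points ioreq->v.iov at them;
 * ioreq_grant_copy() then presumably drives xengnttab_grant_copy()
 * (available from Xen 4.8, which is why the version guard can go) to
 * move data between those buffers and the guest's granted pages
 * without ever mapping them. Something along these lines, assuming
 * the ioreq fields shown above; the function name is hypothetical:
 */
static int ioreq_grant_copy_sketch(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, rc;

    if (ioreq->v.niov == 0) {
        return 0;
    }

    for (i = 0; i < ioreq->v.niov; i++) {
        if (ioreq->req.operation == BLKIF_OP_READ) {
            /* read completion: copy out of the bounce buffer into the grant */
            segs[i].flags = GNTCOPY_dest_gref;
            segs[i].dest.foreign.ref = ioreq->refs[i];
            segs[i].dest.foreign.domid = ioreq->domids[i];
            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * BLOCK_SIZE;
            segs[i].source.virt = ioreq->v.iov[i].iov_base;
        } else {
            /* write submission: copy out of the grant into the bounce buffer */
            segs[i].flags = GNTCOPY_source_gref;
            segs[i].source.foreign.ref = ioreq->refs[i];
            segs[i].source.foreign.domid = ioreq->domids[i];
            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * BLOCK_SIZE;
            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
        }
        segs[i].len = ioreq->v.iov[i].iov_len;
    }

    rc = xengnttab_grant_copy(gnt, ioreq->v.niov, segs);
    if (rc) {
        return -1;
    }

    /* the call can succeed overall while individual segments still fail */
    for (i = 0; i < ioreq->v.niov; i++) {
        if (segs[i].status != GNTST_okay) {
            ioreq->aio_errors++;
            rc = -1;
        }
    }

    return rc;
}

/* ------------------------------------------------------------- */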
static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
goto done;
}
- if (xen_feature_grant_copy) {
- switch (ioreq->req.operation) {
- case BLKIF_OP_READ:
- /* in case of failure ioreq->aio_errors is increased */
- if (ret == 0) {
- ioreq_grant_copy(ioreq);
- }
- ioreq_free_copy_buffers(ioreq);
- break;
- case BLKIF_OP_WRITE:
- case BLKIF_OP_FLUSH_DISKCACHE:
- if (!ioreq->req.nr_segments) {
- break;
- }
- ioreq_free_copy_buffers(ioreq);
- break;
- default:
+ switch (ioreq->req.operation) {
+ case BLKIF_OP_READ:
+ /* in case of failure ioreq->aio_errors is increased */
+ if (ret == 0) {
+ ioreq_grant_copy(ioreq);
+ }
+ ioreq_free_copy_buffers(ioreq);
+ break;
+ case BLKIF_OP_WRITE:
+ case BLKIF_OP_FLUSH_DISKCACHE:
+ if (!ioreq->req.nr_segments) {
break;
}
+ ioreq_free_copy_buffers(ioreq);
+ break;
+ default:
+ break;
}
ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
- if (!xen_feature_grant_copy) {
- ioreq_unmap(ioreq);
- }
ioreq_finish(ioreq);
+
switch (ioreq->req.operation) {
case BLKIF_OP_WRITE:
case BLKIF_OP_FLUSH_DISKCACHE:
{
struct XenBlkDev *blkdev = ioreq->blkdev;
- if (xen_feature_grant_copy) {
- ioreq_init_copy_buffers(ioreq);
- if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
- ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
- ioreq_grant_copy(ioreq)) {
- ioreq_free_copy_buffers(ioreq);
- goto err;
- }
- } else {
- if (ioreq->req.nr_segments && ioreq_map(ioreq)) {
- goto err;
- }
+ ioreq_init_copy_buffers(ioreq);
+ if (ioreq->req.nr_segments &&
+ (ioreq->req.operation == BLKIF_OP_WRITE ||
+ ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
+ ioreq_grant_copy(ioreq)) {
+ ioreq_free_copy_buffers(ioreq);
+ goto err;
}
ioreq->aio_inflight++;
}
default:
/* unknown operation (shouldn't happen -- parse catches this) */
- if (!xen_feature_grant_copy) {
- ioreq_unmap(ioreq);
- }
goto err;
}
blkdev->ctx = iothread_get_aio_context(blkdev->iothread);
blkdev->bh = aio_bh_new(blkdev->ctx, blk_bh, blkdev);
-
- if (xen_mode != XEN_EMULATE) {
- batch_maps = 1;
- }
}
static void blk_parse_discard(struct XenBlkDev *blkdev)
blkdev->file_blk = BLOCK_SIZE;
- xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
- xen_feature_grant_copy ? "enabled" : "disabled");
+    if (!xen_feature_grant_copy) {
+        xen_pv_printf(&blkdev->xendev, 0,
+                      "grant copy operation not supported\n");
+        goto out_error;
+    }
/* fill info
 * blk_connect supplies sector-size and sectors
 */
return -1;
}
-/*
- * We need to account for the grant allocations requiring contiguous
- * chunks; the worst case number would be
- * max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
- * but in order to keep things simple just use
- * 2 * max_req * max_seg.
- */
-#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
-
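/*
 * Worked example for the worst-case bound above (illustrative; assumes
 * the usual defaults of max_seg = BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
 * and max_req = 32 for a single-page ring): the exact worst case is
 * 32 * 11 + 31 * 10 + 1 = 663 grants, while the simplified bound
 * reserves 2 * 32 * 11 = 704.
 */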
static int blk_connect(struct XenDevice *xendev)
{
struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
return -1;
}
- /* Calculate the maximum number of grants needed by ioreqs */
- max_grants = MAX_GRANTS(blkdev->max_requests,
- BLKIF_MAX_SEGMENTS_PER_REQUEST);
-    /* Add on the number needed for the ring pages */
-    max_grants += blkdev->nr_ring_ref;
+    /* Grants are now only needed for the ring pages */
+    max_grants = blkdev->nr_ring_ref;
blkdev->xendev.gnttabdev = xengnttab_open(NULL, 0);
if (blkdev->xendev.gnttabdev == NULL) {
return -1;
}
- blkdev->cnt_map++;
-
switch (blkdev->protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
if (blkdev->sring) {
xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring,
blkdev->nr_ring_ref);
- blkdev->cnt_map--;
blkdev->sring = NULL;
}
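/* ------------------------------------------------------------- */

/*
 * Illustrative sketch, not part of the patch: with request data now
 * moved by grant copy, the shared ring is the only thing that is still
 * grant-mapped, and the unmap above tears down a mapping presumably
 * established in blk_connect along these lines (ring_ref being the
 * grant references read from the frontend's xenstore nodes; the
 * function name is hypothetical):
 */
static void *blk_map_ring_sketch(struct XenBlkDev *blkdev, uint32_t *ring_ref)
{
    /* map all ring pages from the frontend domain as one contiguous range */
    return xengnttab_map_domain_grant_refs(blkdev->xendev.gnttabdev,
                                           blkdev->nr_ring_ref,
                                           blkdev->xendev.dom,
                                           ring_ref,
                                           PROT_READ | PROT_WRITE);
}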