ia64/linux-2.6.18-xen.hg

annotate drivers/xen/scsiback/scsiback.c @ 794:be85b1d7a52b

pvSCSI: add new device assignment mode

Add a new device assignment mode, which assigns whole HBA
(SCSI host) to guest domain. Current implementation requires SCSI
command emulation on backend driver, and it causes limitations for
some SCSI commands. (Please see
"http://www.xen.org/files/xensummit_tokyo/24_Hitoshi%20Matsumoto_en.pdf"
for detail about why we need the new assignment mode.)

SCSI command emulation on backend driver is bypassed when "host" mode
is specified.

Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Jun Kamada <kama@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Feb 17 11:17:11 2009 +0000 (2009-02-17)
parents 2fa1d9446f2f
children
rev   line source
keir@562 1 /*
keir@562 2 * Xen SCSI backend driver
keir@562 3 *
keir@562 4 * Copyright (c) 2008, FUJITSU Limited
keir@562 5 *
keir@562 6 * Based on the blkback driver code.
keir@562 7 *
keir@562 8 * This program is free software; you can redistribute it and/or
keir@562 9 * modify it under the terms of the GNU General Public License version 2
keir@562 10 * as published by the Free Software Foundation; or, when distributed
keir@562 11 * separately from the Linux kernel or incorporated into other
keir@562 12 * software packages, subject to the following license:
keir@562 13 *
keir@562 14 * Permission is hereby granted, free of charge, to any person obtaining a copy
keir@562 15 * of this source file (the "Software"), to deal in the Software without
keir@562 16 * restriction, including without limitation the rights to use, copy, modify,
keir@562 17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
keir@562 18 * and to permit persons to whom the Software is furnished to do so, subject to
keir@562 19 * the following conditions:
keir@562 20 *
keir@562 21 * The above copyright notice and this permission notice shall be included in
keir@562 22 * all copies or substantial portions of the Software.
keir@562 23 *
keir@562 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
keir@562 25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
keir@562 26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
keir@562 27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
keir@562 28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
keir@562 29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
keir@562 30 * IN THE SOFTWARE.
keir@562 31 */
keir@562 32
keir@562 33 #include <linux/spinlock.h>
keir@562 34 #include <linux/kthread.h>
keir@562 35 #include <linux/list.h>
keir@562 36 #include <linux/delay.h>
keir@562 37 #include <xen/balloon.h>
keir@562 38 #include <asm/hypervisor.h>
keir@562 39 #include <scsi/scsi.h>
keir@562 40 #include <scsi/scsi_cmnd.h>
keir@562 41 #include <scsi/scsi_host.h>
keir@562 42 #include <scsi/scsi_device.h>
keir@562 43 #include <scsi/scsi_dbg.h>
keir@562 44 #include <scsi/scsi_eh.h>
keir@562 45
keir@562 46 #include "common.h"
keir@562 47
keir@562 48
/*
 * Pool of pre-allocated pending_req_t structures.  One entry is taken
 * from pending_free for each in-flight frontend command and returned
 * when the response has been queued.
 */
struct list_head pending_free;
DEFINE_SPINLOCK(pending_free_lock);		/* protects pending_free */
DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);	/* woken when a request is freed */

/* Size of the pending-request pool; tunable via the "reqs" module parameter. */
int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
module_param_named(reqs, vscsiif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");

/* When non-zero, log status/sense details of failed commands (see
 * scsiback_cmd_done / scsiback_print_status). */
static unsigned int log_print_stat = 0;
module_param(log_print_stat, int, 0644);

#define SCSIBACK_INVALID_HANDLE (~0)

/* Arrays sized in scsiback_init(): vscsiif_reqs requests, and
 * VSCSIIF_SG_TABLESIZE pages + grant handles per request. */
static pending_req_t *pending_reqs;
static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;
keir@562 66 static int vaddr_pagenr(pending_req_t *req, int seg)
keir@562 67 {
keir@562 68 return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
keir@562 69 }
keir@562 70
keir@562 71 static unsigned long vaddr(pending_req_t *req, int seg)
keir@562 72 {
keir@562 73 unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
keir@562 74 return (unsigned long)pfn_to_kaddr(pfn);
keir@562 75 }
keir@562 76
/* Grant handle slot for segment _seg of request _req (parallel to
 * pending_pages; SCSIBACK_INVALID_HANDLE when not mapped). */
#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
keir@562 80
keir@569 81 void scsiback_fast_flush_area(pending_req_t *req)
keir@562 82 {
keir@562 83 struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
keir@562 84 unsigned int i, invcount = 0;
keir@562 85 grant_handle_t handle;
keir@562 86 int err;
keir@562 87
keir@562 88 if (req->nr_segments) {
keir@562 89 for (i = 0; i < req->nr_segments; i++) {
keir@562 90 handle = pending_handle(req, i);
keir@562 91 if (handle == SCSIBACK_INVALID_HANDLE)
keir@562 92 continue;
keir@562 93 gnttab_set_unmap_op(&unmap[i], vaddr(req, i),
keir@562 94 GNTMAP_host_map, handle);
keir@562 95 pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
keir@562 96 invcount++;
keir@562 97 }
keir@562 98
keir@562 99 err = HYPERVISOR_grant_table_op(
keir@562 100 GNTTABOP_unmap_grant_ref, unmap, invcount);
keir@562 101 BUG_ON(err);
keir@562 102 kfree(req->sgl);
keir@562 103 }
keir@562 104
keir@562 105 return;
keir@562 106 }
keir@562 107
keir@562 108
keir@562 109 static pending_req_t * alloc_req(struct vscsibk_info *info)
keir@562 110 {
keir@562 111 pending_req_t *req = NULL;
keir@562 112 unsigned long flags;
keir@562 113
keir@562 114 spin_lock_irqsave(&pending_free_lock, flags);
keir@562 115 if (!list_empty(&pending_free)) {
keir@562 116 req = list_entry(pending_free.next, pending_req_t, free_list);
keir@562 117 list_del(&req->free_list);
keir@562 118 }
keir@562 119 spin_unlock_irqrestore(&pending_free_lock, flags);
keir@562 120 return req;
keir@562 121 }
keir@562 122
keir@562 123
keir@562 124 static void free_req(pending_req_t *req)
keir@562 125 {
keir@562 126 unsigned long flags;
keir@562 127 int was_empty;
keir@562 128
keir@562 129 spin_lock_irqsave(&pending_free_lock, flags);
keir@562 130 was_empty = list_empty(&pending_free);
keir@562 131 list_add(&req->free_list, &pending_free);
keir@562 132 spin_unlock_irqrestore(&pending_free_lock, flags);
keir@562 133 if (was_empty)
keir@562 134 wake_up(&pending_free_wq);
keir@562 135 }
keir@562 136
keir@562 137
/* Flag that ring work is pending and wake the per-instance backend
 * thread (scsiback_schedule). */
static void scsiback_notify_work(struct vscsibk_info *info)
{
	info->waiting_reqs = 1;
	wake_up(&info->wq);
}
keir@562 143
keir@569 144 void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
keir@603 145 uint32_t resid, pending_req_t *pending_req)
keir@562 146 {
keir@562 147 vscsiif_response_t *ring_res;
keir@562 148 struct vscsibk_info *info = pending_req->info;
keir@562 149 int notify;
keir@562 150 int more_to_do = 1;
keir@755 151 struct scsi_sense_hdr sshdr;
keir@562 152 unsigned long flags;
keir@562 153
keir@562 154 DPRINTK("%s\n",__FUNCTION__);
keir@562 155
keir@562 156 spin_lock_irqsave(&info->ring_lock, flags);
keir@562 157
keir@562 158 ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
keir@562 159 info->ring.rsp_prod_pvt++;
keir@562 160
keir@562 161 ring_res->rslt = result;
keir@562 162 ring_res->rqid = pending_req->rqid;
keir@562 163
keir@562 164 if (sense_buffer != NULL) {
keir@755 165 if (scsi_normalize_sense(sense_buffer,
keir@755 166 sizeof(sense_buffer), &sshdr)) {
keir@755 167
keir@755 168 int len = 8 + sense_buffer[7];
keir@755 169
keir@755 170 if (len > VSCSIIF_SENSE_BUFFERSIZE)
keir@755 171 len = VSCSIIF_SENSE_BUFFERSIZE;
keir@755 172
keir@755 173 memcpy(ring_res->sense_buffer, sense_buffer, len);
keir@755 174 ring_res->sense_len = len;
keir@755 175 }
keir@562 176 } else {
keir@562 177 ring_res->sense_len = 0;
keir@562 178 }
keir@562 179
keir@603 180 ring_res->residual_len = resid;
keir@603 181
keir@562 182 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
keir@562 183 if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
keir@562 184 RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
keir@562 185 } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
keir@562 186 more_to_do = 1;
keir@562 187 }
keir@562 188
keir@562 189 spin_unlock_irqrestore(&info->ring_lock, flags);
keir@562 190
keir@562 191 if (more_to_do)
keir@562 192 scsiback_notify_work(info);
keir@562 193
keir@562 194 if (notify)
keir@562 195 notify_remote_via_irq(info->irq);
keir@562 196
keir@562 197 free_req(pending_req);
keir@562 198 }
keir@562 199
/*
 * Log device address, decoded result bytes and CDB opcode of a failed
 * command; dumps the sense buffer on CHECK CONDITION.  Only called when
 * the log_print_stat module parameter is set.
 */
static void scsiback_print_status(char *sense_buffer, int errors,
					pending_req_t *pending_req)
{
	struct scsi_device *sdev = pending_req->sdev;

	printk(KERN_ERR "scsiback: %d:%d:%d:%d ",sdev->host->host_no,
			sdev->channel, sdev->id, sdev->lun);
	printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
			status_byte(errors), msg_byte(errors),
			host_byte(errors), driver_byte(errors));

	printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
			pending_req->cmnd[0]);

	if (CHECK_CONDITION & status_byte(errors))
		__scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
}
keir@589 217
keir@589 218
/*
 * Block-layer completion callback for commands submitted by
 * scsiback_cmd_exec().  Runs response emulation (skipped when the
 * instance is in VSCSI_TYPE_HOST passthrough mode), unmaps the granted
 * data pages, queues the frontend response, and drops the references
 * taken at submission time.
 */
static void scsiback_cmd_done(struct request *req, int uptodate)
{
	pending_req_t *pending_req = req->end_io_data;
	unsigned char *sense_buffer;
	unsigned int resid;
	int errors;

	sense_buffer = req->sense;
	resid = req->data_len;	/* residual byte count for the response */
	errors = req->errors;

	if (errors != 0) {
		if (log_print_stat)
			scsiback_print_status(sense_buffer, errors, pending_req);
	}

	/* The Host mode is through as for Emulation. */
	if (pending_req->info->feature != VSCSI_TYPE_HOST)
		scsiback_rsp_emulation(pending_req);

	scsiback_fast_flush_area(pending_req);
	/* NOTE: also frees pending_req via free_req(). */
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
	scsiback_put(pending_req->info);

	__blk_put_request(req->q, req);
}
keir@562 245
keir@562 246
/*
 * Map the frontend's granted data pages for this request and build the
 * scatterlist pending_req->sgl.  Returns 0 on success or -ENOMEM on
 * allocation/mapping failure (already-mapped pages are unmapped again
 * via scsiback_fast_flush_area()).  Also computes the total transfer
 * length into pending_req->request_bufflen.
 */
static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
					pending_req_t *pending_req)
{
	u32 flags;
	int write;
	int i, err = 0;
	unsigned int data_len = 0;
	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
	struct vscsibk_info *info = pending_req->info;

	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;

	write = (data_dir == DMA_TO_DEVICE);

	if (nr_segments) {
		/* free of (sgl) in fast_flush_area()*/
		pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
						GFP_KERNEL);
		if (!pending_req->sgl) {
			printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
			return -ENOMEM;
		}

		/* Pages the guest WRITES to the device are mapped read-only. */
		for (i = 0; i < nr_segments; i++) {
			flags = GNTMAP_host_map;
			if (write)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
						ring_req->seg[i].gref,
						info->domid);
		}

		err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
		BUG_ON(err);

		/* On any per-entry failure, record handles anyway so the
		 * successful ones can still be unmapped, then bail out. */
		for (i = 0; i < nr_segments; i++) {
			if (unlikely(map[i].status != 0)) {
				printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
				map[i].handle = SCSIBACK_INVALID_HANDLE;
				err |= 1;
			}

			pending_handle(pending_req, i) = map[i].handle;

			if (err)
				continue;

			set_phys_to_machine(__pa(vaddr(
				pending_req, i)) >> PAGE_SHIFT,
				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));

			pending_req->sgl[i].page = virt_to_page(vaddr(pending_req, i));
			pending_req->sgl[i].offset = ring_req->seg[i].offset;
			pending_req->sgl[i].length = ring_req->seg[i].length;
			data_len += pending_req->sgl[i].length;

			/* Validate the private copy after the barrier so the
			 * frontend cannot change it under us. */
			barrier();
			if (pending_req->sgl[i].offset >= PAGE_SIZE ||
			    pending_req->sgl[i].length > PAGE_SIZE ||
			    pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE)
				err |= 1;

		}

		if (err)
			goto fail_flush;
	}

	pending_req->request_bufflen = data_len;

	return 0;

fail_flush:
	scsiback_fast_flush_area(pending_req);
	return -ENOMEM;
}
keir@562 324
/* quoted scsi_lib.c/scsi_merge_bio
 * Attach 'bio' to request 'rq': bounce-buffer it if the queue requires,
 * then either start the request's bio chain or merge onto its tail.
 * Returns 0 on success, -EINVAL when the queue refuses the back-merge. */
static int scsiback_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_queue_bounce(q, &bio);

	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!q->back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->hard_nr_sectors += bio_sectors(bio);
		rq->nr_sectors = rq->hard_nr_sectors;
	}

	return 0;
}
keir@562 349
keir@562 350
/* quoted scsi_lib.c/scsi_bi_endio
 * bio completion: drop our reference once the bio has fully completed
 * (bi_size reaches 0); partial completions are ignored. */
static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	return 0;
}
keir@562 360
keir@562 361
keir@562 362
/* quoted scsi_lib.c/scsi_req_map_sg .
 * Build bios covering pending_req->sgl[0..count) and merge them into
 * 'rq'.  Returns 0 on success or a negative errno; on failure every bio
 * already attached to the request is completed and released. */
static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
{
	struct request_queue *q = rq->q;
	int nr_pages;
	unsigned int nsegs = count;

	unsigned int data_len = 0, len, bytes, off;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for (i = 0; i < nsegs; i++) {
		page = pending_req->sgl[i].page;
		off = (unsigned int)pending_req->sgl[i].offset;
		len = (unsigned int)pending_req->sgl[i].length;
		data_len += len;

		nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			/* Start a new bio when none is open. */
			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;
				bio = bio_alloc(GFP_KERNEL, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsiback_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
						bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			/* bio full: merge it into the request and start over. */
			if (bio->bi_vcnt >= nr_vecs) {
				err = scsiback_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, bio->bi_size, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = data_len;

	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put incase it was bounced
		 */
		bio_endio(bio, bio->bi_size, 0);
	}

	return err;
}
keir@562 434
keir@562 435
keir@562 436 void scsiback_cmd_exec(pending_req_t *pending_req)
keir@562 437 {
keir@562 438 int cmd_len = (int)pending_req->cmd_len;
keir@562 439 int data_dir = (int)pending_req->sc_data_direction;
keir@562 440 unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
keir@562 441 unsigned int timeout;
keir@562 442 struct request *rq;
keir@562 443 int write;
keir@562 444
keir@562 445 DPRINTK("%s\n",__FUNCTION__);
keir@562 446
keir@562 447 /* because it doesn't timeout backend earlier than frontend.*/
keir@562 448 if (pending_req->timeout_per_command)
keir@589 449 timeout = pending_req->timeout_per_command * HZ;
keir@562 450 else
keir@562 451 timeout = VSCSIIF_TIMEOUT;
keir@562 452
keir@562 453 write = (data_dir == DMA_TO_DEVICE);
keir@562 454 rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);
keir@562 455
keir@562 456 rq->flags |= REQ_BLOCK_PC;
keir@562 457 rq->cmd_len = cmd_len;
keir@562 458 memcpy(rq->cmd, pending_req->cmnd, cmd_len);
keir@562 459
keir@562 460 memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
keir@562 461 rq->sense = pending_req->sense_buffer;
keir@562 462 rq->sense_len = 0;
keir@562 463
keir@589 464 /* not allowed to retry in backend. */
keir@562 465 rq->retries = 0;
keir@562 466 rq->timeout = timeout;
keir@562 467 rq->end_io_data = pending_req;
keir@562 468
keir@562 469 if (nr_segments) {
keir@562 470
keir@589 471 if (request_map_sg(rq, pending_req, nr_segments)) {
keir@562 472 printk(KERN_ERR "scsiback: SG Request Map Error\n");
keir@562 473 return;
keir@562 474 }
keir@562 475 }
keir@562 476
keir@589 477 scsiback_get(pending_req->info);
keir@562 478 blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
keir@562 479
keir@562 480 return ;
keir@562 481 }
keir@562 482
keir@562 483
/*
 * Handle a VSCSIIF_ACT_SCSI_RESET request: issue a device reset through
 * the SCSI error-handling provider and reply with its result.  An extra
 * info reference is held across the (potentially blocking) reset.
 */
static void scsiback_device_reset_exec(pending_req_t *pending_req)
{
	struct vscsibk_info *info = pending_req->info;
	int err;
	struct scsi_device *sdev = pending_req->sdev;

	scsiback_get(info);
	err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);

	/* NOTE: also frees pending_req via free_req(). */
	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
	scsiback_put(info);

	return;
}
keir@562 498
keir@562 499
/* Event-channel interrupt handler: just kick the backend thread; all
 * real work happens in scsiback_do_cmd_fn(). */
irqreturn_t scsiback_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	scsiback_notify_work((struct vscsibk_info *)dev_id);
	return IRQ_HANDLED;
}
keir@562 505
/*
 * Copy one request off the shared ring into 'pending_req' and validate
 * it.
 *
 * Returns 0 on success; -ENODEV when the virtual (channel, id, lun)
 * triple has no local scsi_device translation; -EINVAL for an
 * out-of-range data direction / segment count / CDB length or a failed
 * grant mapping.
 *
 * Each frontend-supplied value is first copied into pending_req and
 * only then validated, with a barrier() in between, so the check cannot
 * be defeated by the frontend rewriting the shared ring entry while we
 * validate.
 */
static int prepare_pending_reqs(struct vscsibk_info *info,
		vscsiif_request_t *ring_req, pending_req_t *pending_req)
{
	struct scsi_device *sdev;
	struct ids_tuple vir;
	int err = -EINVAL;

	DPRINTK("%s\n",__FUNCTION__);

	pending_req->rqid = ring_req->rqid;
	pending_req->act = ring_req->act;

	pending_req->info = info;

	/* Device address as the frontend sees it. */
	pending_req->v_chn = vir.chn = ring_req->channel;
	pending_req->v_tgt = vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	rmb();
	sdev = scsiback_do_translation(info, &vir);
	if (!sdev) {
		pending_req->sdev = NULL;
		DPRINTK("scsiback: doesn't exist.\n");
		err = -ENODEV;
		goto invalid_value;
	}
	pending_req->sdev = sdev;

	/* request range check from frontend */
	pending_req->sc_data_direction = ring_req->sc_data_direction;
	barrier();
	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
		(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
		(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
		(pending_req->sc_data_direction != DMA_NONE)) {
		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
			pending_req->sc_data_direction);
		err = -EINVAL;
		goto invalid_value;
	}

	pending_req->nr_segments = ring_req->nr_segments;
	barrier();
	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
			pending_req->nr_segments);
		err = -EINVAL;
		goto invalid_value;
	}

	pending_req->cmd_len = ring_req->cmd_len;
	barrier();
	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
			pending_req->cmd_len);
		err = -EINVAL;
		goto invalid_value;
	}
	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	pending_req->timeout_per_command = ring_req->timeout_per_command;

	if(scsiback_gnttab_data_map(ring_req, pending_req)) {
		DPRINTK("scsiback: invalid buffer\n");
		err = -EINVAL;
		goto invalid_value;
	}

	return 0;

invalid_value:
	return err;
}
keir@562 579
keir@562 580
/*
 * Drain the request ring: validate each request and dispatch it -- SCSI
 * CDBs to direct execution (VSCSI_TYPE_HOST) or command emulation,
 * reset requests to scsiback_device_reset_exec().  Invalid requests are
 * answered immediately with an error response.
 *
 * Returns non-zero when work remains (ring not drained, or the
 * pending_req pool was exhausted) so scsiback_schedule() re-arms
 * waiting_reqs.
 */
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
{
	struct vscsiif_back_ring *ring = &info->ring;
	vscsiif_request_t *ring_req;

	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int err, more_to_do = 0;

	DPRINTK("%s\n",__FUNCTION__);

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	rmb();	/* read req_prod before the request bodies */

	while ((rc != rp)) {
		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;
		pending_req = alloc_req(info);
		if (NULL == pending_req) {
			/* pool exhausted -- retry once a request frees up */
			more_to_do = 1;
			break;
		}

		ring_req = RING_GET_REQUEST(ring, rc);
		ring->req_cons = ++rc;

		err = prepare_pending_reqs(info, ring_req,
						pending_req);
		if (err == -EINVAL) {
			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
				0, pending_req);
			continue;
		} else if (err == -ENODEV) {
			scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
				0, pending_req);
			continue;
		}

		if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {

			/* The Host mode is through as for Emulation. */
			if (info->feature == VSCSI_TYPE_HOST)
				scsiback_cmd_exec(pending_req);
			else
				scsiback_req_emulation_or_cmdexec(pending_req);

		} else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
			scsiback_device_reset_exec(pending_req);
		} else {
			printk(KERN_ERR "scsiback: invalid parameter for request\n");
			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
				0, pending_req);
			continue;
		}
	}

	if (RING_HAS_UNCONSUMED_REQUESTS(ring))
		more_to_do = 1;

	/* Yield point for this unbounded loop. */
	cond_resched();

	return more_to_do;
}
keir@562 646
keir@562 647
/*
 * Per-instance backend kthread.  Sleeps until the instance has ring
 * work (waiting_reqs) AND the pending_req pool is non-empty, then
 * processes the ring; scsiback_do_cmd_fn()'s return value re-arms
 * waiting_reqs when the ring was not fully drained.
 */
int scsiback_schedule(void *data)
{
	struct vscsibk_info *info = (struct vscsibk_info *)data;

	DPRINTK("%s\n",__FUNCTION__);

	while (!kthread_should_stop()) {
		wait_event_interruptible(
			info->wq,
			info->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());

		/* Clear the flag before processing so a wakeup raced with
		 * scsiback_do_cmd_fn() is not lost. */
		info->waiting_reqs = 0;
		smp_mb();

		if (scsiback_do_cmd_fn(info))
			info->waiting_reqs = 1;
	}

	return 0;
}
keir@562 671
keir@562 672
keir@562 673 static int __init scsiback_init(void)
keir@562 674 {
keir@562 675 int i, mmap_pages;
keir@562 676
keir@562 677 if (!is_running_on_xen())
keir@562 678 return -ENODEV;
keir@562 679
keir@562 680 mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
keir@562 681
keir@562 682 pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
keir@562 683 vscsiif_reqs, GFP_KERNEL);
keir@562 684 pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
keir@562 685 mmap_pages, GFP_KERNEL);
keir@562 686 pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
keir@562 687
keir@562 688 if (!pending_reqs || !pending_grant_handles || !pending_pages)
keir@562 689 goto out_of_memory;
keir@562 690
keir@562 691 for (i = 0; i < mmap_pages; i++)
keir@562 692 pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;
keir@562 693
keir@562 694 if (scsiback_interface_init() < 0)
keir@597 695 goto out_of_kmem;
keir@562 696
keir@562 697 memset(pending_reqs, 0, sizeof(pending_reqs));
keir@562 698 INIT_LIST_HEAD(&pending_free);
keir@562 699
keir@562 700 for (i = 0; i < vscsiif_reqs; i++)
keir@562 701 list_add_tail(&pending_reqs[i].free_list, &pending_free);
keir@562 702
keir@562 703 if (scsiback_xenbus_init())
keir@597 704 goto out_of_xenbus;
keir@562 705
keir@569 706 scsiback_emulation_init();
keir@569 707
keir@562 708 return 0;
keir@562 709
keir@597 710 out_of_xenbus:
keir@597 711 scsiback_xenbus_unregister();
keir@597 712 out_of_kmem:
keir@597 713 scsiback_interface_exit();
keir@562 714 out_of_memory:
keir@562 715 kfree(pending_reqs);
keir@562 716 kfree(pending_grant_handles);
keir@562 717 free_empty_pages_and_pagevec(pending_pages, mmap_pages);
keir@562 718 printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
keir@562 719 return -ENOMEM;
keir@562 720 }
keir@562 721
/*
 * Module unload is compiled out (#if 0): the exit path below exists but
 * no module_exit() is registered, so the module cannot currently be
 * removed.  Kept for reference.
 */
#if 0
static void __exit scsiback_exit(void)
{
	scsiback_xenbus_unregister();
	scsiback_interface_exit();
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));

}
#endif

module_init(scsiback_init);

#if 0
module_exit(scsiback_exit);
#endif

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");