ia64/linux-2.6.18-xen.hg

drivers/xen/scsiback/scsiback.c @ 755:3669e92f24cd

[PVSCSI] Fix sense_len

Signed-off-by: James Harper <james.harper@bendigoit.com.au>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Jun Kamada <kama@jp.fujitsu.com>
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Tue Dec 09 13:02:20 2008 +0000
parents:  bd4b58143713
children: eccc622d03af

/*
 * Xen SCSI backend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * Based on the blkback driver code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <xen/balloon.h>
#include <asm/hypervisor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "common.h"
struct list_head pending_free;
DEFINE_SPINLOCK(pending_free_lock);
DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
module_param_named(reqs, vscsiif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");

/* declared int to match the module_param() type below (was unsigned int) */
static int log_print_stat = 0;
module_param(log_print_stat, int, 0644);

#define SCSIBACK_INVALID_HANDLE (~0)
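
/*
 * Pre-allocated pool of pending requests.  Each pending_req_t owns
 * VSCSIIF_SG_TABLESIZE consecutive pages in pending_pages[] and the
 * matching slots in pending_grant_handles[]; vaddr_pagenr() below maps
 * a (request, segment) pair onto that flat index, e.g. segment 3 of
 * request 2 lives at 2 * VSCSIIF_SG_TABLESIZE + 3.
 */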
static pending_req_t *pending_reqs;
static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;

static int vaddr_pagenr(pending_req_t *req, int seg)
{
        return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
}

static unsigned long vaddr(pending_req_t *req, int seg)
{
        unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (pending_grant_handles[vaddr_pagenr(_req, _seg)])
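
/*
 * Tear down the grant mappings of a completed request: unmap every
 * segment that was actually mapped (entries still holding
 * SCSIBACK_INVALID_HANDLE are skipped) and free the scatterlist
 * allocated in scsiback_gnttab_data_map().
 */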
void scsiback_fast_flush_area(pending_req_t *req)
{
        struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int err;

        if (req->nr_segments) {
                for (i = 0; i < req->nr_segments; i++) {
                        handle = pending_handle(req, i);
                        if (handle == SCSIBACK_INVALID_HANDLE)
                                continue;
                        /* pack the ops densely (index invcount, not i):
                         * the hypercall below only processes the first
                         * invcount entries, so skipped handles must not
                         * leave holes in unmap[]. */
                        gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                            GNTMAP_host_map, handle);
                        pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
                        invcount++;
                }

                err = HYPERVISOR_grant_table_op(
                        GNTTABOP_unmap_grant_ref, unmap, invcount);
                BUG_ON(err);
                kfree(req->sgl);
        }

        return;
}
static pending_req_t *alloc_req(struct vscsibk_info *info)
{
        pending_req_t *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pending_free_lock, flags);
        if (!list_empty(&pending_free)) {
                req = list_entry(pending_free.next, pending_req_t, free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&pending_free_lock, flags);
        return req;
}

static void free_req(pending_req_t *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&pending_free_lock, flags);
        was_empty = list_empty(&pending_free);
        list_add(&req->free_list, &pending_free);
        spin_unlock_irqrestore(&pending_free_lock, flags);
        if (was_empty)
                wake_up(&pending_free_wq);
}

static void scsiback_notify_work(struct vscsibk_info *info)
{
        info->waiting_reqs = 1;
        wake_up(&info->wq);
}
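
/*
 * Put one response on the shared ring and notify the frontend.  When
 * sense data is present, the valid length is taken from the
 * fixed-format sense header (8 header bytes plus the "additional sense
 * length" in byte 7), clamped to VSCSIIF_SENSE_BUFFERSIZE; this is the
 * sense_len handling that this changeset fixes.
 */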
void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
                        uint32_t resid, pending_req_t *pending_req)
{
        vscsiif_response_t *ring_res;
        struct vscsibk_info *info = pending_req->info;
        int notify;
        int more_to_do = 1;
        struct scsi_sense_hdr sshdr;
        unsigned long flags;

        DPRINTK("%s\n", __FUNCTION__);

        spin_lock_irqsave(&info->ring_lock, flags);

        ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
        info->ring.rsp_prod_pvt++;

        ring_res->rslt = result;
        ring_res->rqid = pending_req->rqid;

        /* default to no sense data; previously the field could be left
         * uninitialized when normalization failed */
        ring_res->sense_len = 0;

        if (sense_buffer != NULL) {
                /* sense_buffer is a pointer, so the original
                 * sizeof(sense_buffer) only measured the pointer; pass
                 * the real buffer size instead */
                if (scsi_normalize_sense(sense_buffer,
                        VSCSIIF_SENSE_BUFFERSIZE, &sshdr)) {

                        /* fixed-format sense data: 8-byte header plus
                         * the "additional sense length" field in byte 7 */
                        int len = 8 + sense_buffer[7];

                        if (len > VSCSIIF_SENSE_BUFFERSIZE)
                                len = VSCSIIF_SENSE_BUFFERSIZE;

                        memcpy(ring_res->sense_buffer, sense_buffer, len);
                        ring_res->sense_len = len;
                }
        }

        ring_res->residual_len = resid;

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
        if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
                RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
        } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
                more_to_do = 1;
        }

        spin_unlock_irqrestore(&info->ring_lock, flags);

        if (more_to_do)
                scsiback_notify_work(info);

        if (notify)
                notify_remote_via_irq(info->irq);

        free_req(pending_req);
}
static void scsiback_print_status(char *sense_buffer, int errors,
                                        pending_req_t *pending_req)
{
        struct scsi_device *sdev = pending_req->sdev;

        /* one printk per output line: the original split this line
         * across two KERN_ERR calls, breaking it in the log */
        printk(KERN_ERR "scsiback: %d:%d:%d:%d "
               "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
               sdev->host->host_no, sdev->channel, sdev->id, sdev->lun,
               status_byte(errors), msg_byte(errors),
               host_byte(errors), driver_byte(errors));

        printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
               pending_req->cmnd[0]);

        if (CHECK_CONDITION & status_byte(errors))
                __scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
}
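
/*
 * Completion callback passed to blk_execute_rq_nowait(): collect sense
 * data and the residual byte count, unmap the granted pages, send the
 * response to the frontend, and release the block-layer request.
 */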
static void scsiback_cmd_done(struct request *req, int errors)
{
        pending_req_t *pending_req = req->end_io_data;
        unsigned char *sense_buffer;
        unsigned int resid;

        sense_buffer = req->sense;
        resid = req->data_len;

        if (errors != 0) {
                if (log_print_stat)
                        scsiback_print_status(sense_buffer, errors, pending_req);
        }

        scsiback_rsp_emulation(pending_req);

        scsiback_fast_flush_area(pending_req);
        scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
        scsiback_put(pending_req->info);

        __blk_put_request(req->q, req);
}
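
/*
 * Map the frontend's granted pages into the backend and build the
 * scatterlist for the command.  The segment descriptors come straight
 * from the shared ring and are untrusted, so each offset/length pair
 * is checked against PAGE_SIZE after being copied.
 */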
static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
                                        pending_req_t *pending_req)
{
        u32 flags;
        int write;
        int i, err = 0;
        unsigned int data_len = 0;
        struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
        struct vscsibk_info *info = pending_req->info;

        int data_dir = (int)pending_req->sc_data_direction;
        unsigned int nr_segments = (unsigned int)pending_req->nr_segments;

        write = (data_dir == DMA_TO_DEVICE);

        if (nr_segments) {
                /* sgl is freed in scsiback_fast_flush_area() */
                pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
                                           GFP_KERNEL);
                if (!pending_req->sgl) {
                        printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
                        return -ENOMEM;
                }

                for (i = 0; i < nr_segments; i++) {
                        flags = GNTMAP_host_map;
                        /* on a write the backend only reads the guest's
                         * pages, so they can be mapped read-only */
                        if (write)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                          ring_req->seg[i].gref,
                                          info->domid);
                }

                err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
                BUG_ON(err);

                for (i = 0; i < nr_segments; i++) {
                        if (unlikely(map[i].status != 0)) {
                                printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
                                map[i].handle = SCSIBACK_INVALID_HANDLE;
                                err |= 1;
                        }

                        pending_handle(pending_req, i) = map[i].handle;

                        if (err)
                                continue;

                        set_phys_to_machine(__pa(vaddr(
                                pending_req, i)) >> PAGE_SHIFT,
                                FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));

                        pending_req->sgl[i].page   = virt_to_page(vaddr(pending_req, i));
                        pending_req->sgl[i].offset = ring_req->seg[i].offset;
                        pending_req->sgl[i].length = ring_req->seg[i].length;
                        data_len += pending_req->sgl[i].length;

                        barrier();
                        if (pending_req->sgl[i].offset >= PAGE_SIZE ||
                            pending_req->sgl[i].length > PAGE_SIZE ||
                            pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE)
                                err |= 1;
                }

                if (err)
                        goto fail_flush;
        }

        pending_req->request_bufflen = data_len;

        return 0;

fail_flush:
        scsiback_fast_flush_area(pending_req);
        return -ENOMEM;
}
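
/*
 * The next three helpers are local copies of static scsi_lib.c
 * routines (scsi_merge_bio, scsi_bi_endio and scsi_req_map_sg), which
 * this kernel does not export, adapted to build the request from the
 * backend's own scatterlist.
 */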
/* quoted scsi_lib.c/scsi_merge_bio */
static int scsiback_merge_bio(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;

        bio->bi_flags &= ~(1 << BIO_SEG_VALID);
        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        blk_queue_bounce(q, &bio);

        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!q->back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->hard_nr_sectors += bio_sectors(bio);
                rq->nr_sectors = rq->hard_nr_sectors;
        }

        return 0;
}

/* quoted scsi_lib.c/scsi_bi_endio */
static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
        if (bio->bi_size)
                return 1;

        bio_put(bio);
        return 0;
}
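
/*
 * Build bios from pending_req->sgl and merge them into rq, letting
 * blk_queue_bounce() interpose bounce buffers where required.  On
 * failure, every bio already attached to the request is completed and
 * released via the free_bios path.
 */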
/* quoted scsi_lib.c/scsi_req_map_sg */
static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
{
        struct request_queue *q = rq->q;
        int nr_pages;
        unsigned int nsegs = count;

        unsigned int data_len = 0, len, bytes, off;
        struct page *page;
        struct bio *bio = NULL;
        int i, err, nr_vecs = 0;

        for (i = 0; i < nsegs; i++) {
                page = pending_req->sgl[i].page;
                off = (unsigned int)pending_req->sgl[i].offset;
                len = (unsigned int)pending_req->sgl[i].length;
                data_len += len;

                nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
                while (len > 0) {
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);

                        if (!bio) {
                                nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
                                nr_pages -= nr_vecs;
                                bio = bio_alloc(GFP_KERNEL, nr_vecs);
                                if (!bio) {
                                        err = -ENOMEM;
                                        goto free_bios;
                                }
                                bio->bi_end_io = scsiback_bi_endio;
                        }

                        if (bio_add_pc_page(q, bio, page, bytes, off) !=
                                                bytes) {
                                bio_put(bio);
                                err = -EINVAL;
                                goto free_bios;
                        }

                        if (bio->bi_vcnt >= nr_vecs) {
                                err = scsiback_merge_bio(rq, bio);
                                if (err) {
                                        bio_endio(bio, bio->bi_size, 0);
                                        goto free_bios;
                                }
                                bio = NULL;
                        }

                        page++;
                        len -= bytes;
                        off = 0;
                }
        }

        rq->buffer = rq->data = NULL;
        rq->data_len = data_len;

        return 0;

free_bios:
        while ((bio = rq->bio) != NULL) {
                rq->bio = bio->bi_next;
                /*
                 * call endio instead of bio_put in case it was bounced
                 */
                bio_endio(bio, bio->bi_size, 0);
        }

        return err;
}
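
/*
 * Dispatch a validated request: wrap it in a REQ_BLOCK_PC block-layer
 * request on the translated device's queue and execute it
 * asynchronously; scsiback_cmd_done() finishes the job on completion.
 */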
void scsiback_cmd_exec(pending_req_t *pending_req)
{
        int cmd_len = (int)pending_req->cmd_len;
        int data_dir = (int)pending_req->sc_data_direction;
        unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
        unsigned int timeout;
        struct request *rq;
        int write;

        DPRINTK("%s\n", __FUNCTION__);

        /* do not let the backend time out earlier than the frontend would */
        if (pending_req->timeout_per_command)
                timeout = pending_req->timeout_per_command * HZ;
        else
                timeout = VSCSIIF_TIMEOUT;

        write = (data_dir == DMA_TO_DEVICE);
        rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);

        rq->flags |= REQ_BLOCK_PC;
        rq->cmd_len = cmd_len;
        memcpy(rq->cmd, pending_req->cmnd, cmd_len);

        memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
        rq->sense = pending_req->sense_buffer;
        rq->sense_len = 0;

        /* retrying is not allowed in the backend */
        rq->retries = 0;
        rq->timeout = timeout;
        rq->end_io_data = pending_req;

        if (nr_segments) {
                if (request_map_sg(rq, pending_req, nr_segments)) {
                        printk(KERN_ERR "scsiback: SG Request Map Error\n");
                        return;
                }
        }

        scsiback_get(pending_req->info);
        blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);

        return;
}
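
/*
 * Handle a VSCSIIF_ACT_SCSI_RESET request from the frontend: ask the
 * SCSI midlayer to reset the device and report the result with no
 * sense data.
 */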
static void scsiback_device_reset_exec(pending_req_t *pending_req)
{
        struct vscsibk_info *info = pending_req->info;
        int err;
        struct scsi_device *sdev = pending_req->sdev;

        scsiback_get(info);
        err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);

        scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
        scsiback_put(info);

        return;
}

irqreturn_t scsiback_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        scsiback_notify_work((struct vscsibk_info *)dev_id);
        return IRQ_HANDLED;
}
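
/*
 * Copy one request off the shared ring and validate it.  Every field
 * is copied into pending_req before being checked, with barrier()
 * between copy and check, so a malicious frontend cannot change a
 * value after it has passed validation (the ring is writable by the
 * guest).
 */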
static int prepare_pending_reqs(struct vscsibk_info *info,
                vscsiif_request_t *ring_req, pending_req_t *pending_req)
{
        struct scsi_device *sdev;
        struct ids_tuple vir;
        int err = -EINVAL;

        DPRINTK("%s\n", __FUNCTION__);

        pending_req->rqid = ring_req->rqid;
        pending_req->act = ring_req->act;

        pending_req->info = info;

        vir.chn = ring_req->channel;
        vir.tgt = ring_req->id;
        vir.lun = ring_req->lun;

        rmb();
        sdev = scsiback_do_translation(info, &vir);
        if (!sdev) {
                pending_req->sdev = NULL;
                DPRINTK("scsiback: the requested device doesn't exist.\n");
                err = -ENODEV;
                goto invalid_value;
        }
        pending_req->sdev = sdev;

        /* range-check the values supplied by the frontend */
        pending_req->sc_data_direction = ring_req->sc_data_direction;
        barrier();
        if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
            (pending_req->sc_data_direction != DMA_TO_DEVICE) &&
            (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
            (pending_req->sc_data_direction != DMA_NONE)) {
                DPRINTK("scsiback: invalid parameter data_dir = %d\n",
                        pending_req->sc_data_direction);
                err = -EINVAL;
                goto invalid_value;
        }

        pending_req->nr_segments = ring_req->nr_segments;
        barrier();
        if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
                DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
                        pending_req->nr_segments);
                err = -EINVAL;
                goto invalid_value;
        }

        pending_req->cmd_len = ring_req->cmd_len;
        barrier();
        if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
                DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
                        pending_req->cmd_len);
                err = -EINVAL;
                goto invalid_value;
        }
        memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

        pending_req->timeout_per_command = ring_req->timeout_per_command;

        if (scsiback_gnttab_data_map(ring_req, pending_req)) {
                DPRINTK("scsiback: invalid buffer\n");
                err = -EINVAL;
                goto invalid_value;
        }

        return 0;

invalid_value:
        return err;
}
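
/*
 * Drain the request ring.  Returns nonzero when more work remains
 * (unconsumed requests, or no free pending_req was available), which
 * makes the scsiback_schedule() loop run again.
 */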
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
{
        struct vscsiif_back_ring *ring = &info->ring;
        vscsiif_request_t *ring_req;

        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int err, more_to_do = 0;

        DPRINTK("%s\n", __FUNCTION__);

        rc = ring->req_cons;
        rp = ring->sring->req_prod;
        rmb();

        while (rc != rp) {
                if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
                        break;
                pending_req = alloc_req(info);
                if (NULL == pending_req) {
                        more_to_do = 1;
                        break;
                }

                ring_req = RING_GET_REQUEST(ring, rc);
                ring->req_cons = ++rc;

                err = prepare_pending_reqs(info, ring_req,
                                           pending_req);
                if (err == -EINVAL) {
                        scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
                                                    0, pending_req);
                        continue;
                } else if (err == -ENODEV) {
                        scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
                                                    0, pending_req);
                        continue;
                }

                if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
                        scsiback_req_emulation_or_cmdexec(pending_req);
                } else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
                        scsiback_device_reset_exec(pending_req);
                } else {
                        printk(KERN_ERR "scsiback: invalid parameter for request\n");
                        scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
                                                    0, pending_req);
                        continue;
                }
        }

        if (RING_HAS_UNCONSUMED_REQUESTS(ring))
                more_to_do = 1;

        /* Yield point for this unbounded loop. */
        cond_resched();

        return more_to_do;
}
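
/*
 * Main loop of the per-frontend kernel thread: sleep until new
 * requests are signalled and a free pending_req exists, then process
 * the ring until it is drained.
 */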
int scsiback_schedule(void *data)
{
        struct vscsibk_info *info = (struct vscsibk_info *)data;

        DPRINTK("%s\n", __FUNCTION__);

        while (!kthread_should_stop()) {
                wait_event_interruptible(
                        info->wq,
                        info->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        pending_free_wq,
                        !list_empty(&pending_free) || kthread_should_stop());

                info->waiting_reqs = 0;
                smp_mb();

                if (scsiback_do_cmd_fn(info))
                        info->waiting_reqs = 1;
        }

        return 0;
}
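
/*
 * Module init: allocate vscsiif_reqs pending requests plus one empty
 * page and one grant-handle slot per possible segment, then register
 * the backend interface and xenbus handlers.
 */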
static int __init scsiback_init(void)
{
        int i, mmap_pages;

        if (!is_running_on_xen())
                return -ENODEV;

        mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;

        pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
                                        vscsiif_reqs, GFP_KERNEL);
        pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);

        if (!pending_reqs || !pending_grant_handles || !pending_pages)
                goto out_of_memory;

        for (i = 0; i < mmap_pages; i++)
                pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;

        if (scsiback_interface_init() < 0)
                goto out_of_kmem;

        /* pending_reqs is a pointer, so the original sizeof(pending_reqs)
         * only cleared the first few bytes; clear the whole array */
        memset(pending_reqs, 0, sizeof(pending_reqs[0]) * vscsiif_reqs);
        INIT_LIST_HEAD(&pending_free);

        for (i = 0; i < vscsiif_reqs; i++)
                list_add_tail(&pending_reqs[i].free_list, &pending_free);

        if (scsiback_xenbus_init())
                goto out_of_xenbus;

        scsiback_emulation_init();

        return 0;

out_of_xenbus:
        scsiback_xenbus_unregister();
out_of_kmem:
        scsiback_interface_exit();
out_of_memory:
        kfree(pending_reqs);
        kfree(pending_grant_handles);
        free_empty_pages_and_pagevec(pending_pages, mmap_pages);
        printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
        return -ENOMEM;
}
static void __exit scsiback_exit(void)
{
        scsiback_xenbus_unregister();
        scsiback_interface_exit();
        kfree(pending_reqs);
        kfree(pending_grant_handles);
        free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
}

module_init(scsiback_init);
module_exit(scsiback_exit);

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");