ia64/linux-2.6.18-xen.hg

drivers/xen/scsiback/scsiback.c @ 759:2fa1d9446f2f

pvSCSI: fix slight issue

Signed-off-by: James Harper <james.harper@bendigoit.com.au>
Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Jun Kamada <kama@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Dec 10 13:21:23 2008 +0000 (2008-12-10)
parents 8e3025f94790
children be85b1d7a52b
/*
 * Xen SCSI backend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * Based on the blkback driver code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <xen/balloon.h>
#include <asm/hypervisor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "common.h"
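/*
 * Pool of pre-allocated pending request descriptors, shared by all
 * frontends.  Free entries sit on pending_free; scheduler threads that
 * find the list empty sleep on pending_free_wq until free_req()
 * returns an entry.
 */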
struct list_head pending_free;
DEFINE_SPINLOCK(pending_free_lock);
DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
module_param_named(reqs, vscsiif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");

/* declared as int to match the module_param() type */
static int log_print_stat = 0;
module_param(log_print_stat, int, 0644);

#define SCSIBACK_INVALID_HANDLE (~0)

static pending_req_t *pending_reqs;
static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;
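/*
 * Each pending request owns VSCSIIF_SG_TABLESIZE slots in the flat
 * pending_pages/pending_grant_handles arrays; vaddr_pagenr() computes
 * the slot index for a given (request, segment) pair.
 */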
static int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
}

static unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
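/*
 * Tear down the grant mappings of a completed request: unmap every
 * still-valid handle in a single hypercall and free the scatterlist
 * that scsiback_gnttab_data_map() allocated.
 */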
void scsiback_fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int err;

	if (req->nr_segments) {
		for (i = 0; i < req->nr_segments; i++) {
			handle = pending_handle(req, i);
			if (handle == SCSIBACK_INVALID_HANDLE)
				continue;
			/* pack the ops at unmap[invcount], not unmap[i], so
			   that skipped handles leave no uninitialized gaps */
			gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
					    GNTMAP_host_map, handle);
			pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
			invcount++;
		}

		err = HYPERVISOR_grant_table_op(
			GNTTABOP_unmap_grant_ref, unmap, invcount);
		BUG_ON(err);
		kfree(req->sgl);
	}

	return;
}
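/*
 * Pending pool management: alloc_req() hands out a free descriptor (or
 * NULL if the pool is exhausted); free_req() returns one and wakes any
 * thread waiting on pending_free_wq.
 */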
static pending_req_t *alloc_req(struct vscsibk_info *info)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pending_free_lock, flags);
	if (!list_empty(&pending_free)) {
		req = list_entry(pending_free.next, pending_req_t, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&pending_free_lock, flags);
	return req;
}

static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&pending_free_lock, flags);
	was_empty = list_empty(&pending_free);
	list_add(&req->free_list, &pending_free);
	spin_unlock_irqrestore(&pending_free_lock, flags);
	if (was_empty)
		wake_up(&pending_free_wq);
}
static void scsiback_notify_work(struct vscsibk_info *info)
{
	info->waiting_reqs = 1;
	wake_up(&info->wq);
}
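/*
 * Queue a response on the shared ring.  Sense data returned by the
 * device is normalized and copied into the response (truncated to
 * VSCSIIF_SENSE_BUFFERSIZE), the frontend is notified via its event
 * channel if required, and the pending descriptor is recycled.
 */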
void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
			uint32_t resid, pending_req_t *pending_req)
{
	vscsiif_response_t *ring_res;
	struct vscsibk_info *info = pending_req->info;
	int notify;
	int more_to_do = 1;
	struct scsi_sense_hdr sshdr;
	unsigned long flags;

	DPRINTK("%s\n",__FUNCTION__);

	spin_lock_irqsave(&info->ring_lock, flags);

	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
	info->ring.rsp_prod_pvt++;

	ring_res->rslt = result;
	ring_res->rqid = pending_req->rqid;

	if (sense_buffer != NULL) {
		/* sense_buffer is a pointer, so sizeof(sense_buffer) would
		   only cover the pointer itself; pass the real buffer size */
		if (scsi_normalize_sense(sense_buffer,
			VSCSIIF_SENSE_BUFFERSIZE, &sshdr)) {

			int len = 8 + sense_buffer[7];

			if (len > VSCSIIF_SENSE_BUFFERSIZE)
				len = VSCSIIF_SENSE_BUFFERSIZE;

			memcpy(ring_res->sense_buffer, sense_buffer, len);
			ring_res->sense_len = len;
		}
	} else {
		ring_res->sense_len = 0;
	}

	ring_res->residual_len = resid;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
	if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
		RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&info->ring_lock, flags);

	if (more_to_do)
		scsiback_notify_work(info);

	if (notify)
		notify_remote_via_irq(info->irq);

	free_req(pending_req);
}
static void scsiback_print_status(char *sense_buffer, int errors,
					pending_req_t *pending_req)
{
	struct scsi_device *sdev = pending_req->sdev;

	printk(KERN_ERR "scsiback: %d:%d:%d:%d ", sdev->host->host_no,
			sdev->channel, sdev->id, sdev->lun);
	printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
			status_byte(errors), msg_byte(errors),
			host_byte(errors), driver_byte(errors));

	printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
			pending_req->cmnd[0]);

	if (CHECK_CONDITION & status_byte(errors))
		__scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
}
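/*
 * Block-layer completion callback for a submitted request: run the
 * response emulation hook, unmap the granted data pages, push the
 * result (including any sense data) back to the frontend, and drop the
 * references taken at submission time.
 */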
static void scsiback_cmd_done(struct request *req, int uptodate)
{
	pending_req_t *pending_req = req->end_io_data;
	unsigned char *sense_buffer;
	unsigned int resid;
	int errors;

	sense_buffer = req->sense;
	resid = req->data_len;
	errors = req->errors;

	if (errors != 0) {
		if (log_print_stat)
			scsiback_print_status(sense_buffer, errors, pending_req);
	}

	scsiback_rsp_emulation(pending_req);

	scsiback_fast_flush_area(pending_req);
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);

	scsiback_put(pending_req->info);

	__blk_put_request(req->q, req);
}
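/*
 * Map the frontend's granted segments into the backend's address space
 * with a single GNTTABOP_map_grant_ref batch and build a scatterlist
 * over the mapped pages.  Segment offsets and lengths supplied by the
 * frontend are validated; any bad entry fails the whole request.
 */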
static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
					pending_req_t *pending_req)
{
	u32 flags;
	int write;
	int i, err = 0;
	unsigned int data_len = 0;
	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
	struct vscsibk_info *info = pending_req->info;

	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;

	write = (data_dir == DMA_TO_DEVICE);

	if (nr_segments) {
		/* the sgl is freed in scsiback_fast_flush_area() */
		pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
						GFP_KERNEL);
		if (!pending_req->sgl) {
			printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
			return -ENOMEM;
		}

		for (i = 0; i < nr_segments; i++) {
			flags = GNTMAP_host_map;
			if (write)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
						ring_req->seg[i].gref,
						info->domid);
		}

		err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
		BUG_ON(err);

		for (i = 0; i < nr_segments; i++) {
			if (unlikely(map[i].status != 0)) {
				printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
				map[i].handle = SCSIBACK_INVALID_HANDLE;
				err |= 1;
			}

			pending_handle(pending_req, i) = map[i].handle;

			if (err)
				continue;

			set_phys_to_machine(__pa(vaddr(
				pending_req, i)) >> PAGE_SHIFT,
				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));

			pending_req->sgl[i].page   = virt_to_page(vaddr(pending_req, i));
			pending_req->sgl[i].offset = ring_req->seg[i].offset;
			pending_req->sgl[i].length = ring_req->seg[i].length;
			data_len += pending_req->sgl[i].length;

			barrier();
			if (pending_req->sgl[i].offset >= PAGE_SIZE ||
			    pending_req->sgl[i].length > PAGE_SIZE ||
			    pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE)
				err |= 1;
		}

		if (err)
			goto fail_flush;
	}

	pending_req->request_bufflen = data_len;

	return 0;

fail_flush:
	scsiback_fast_flush_area(pending_req);
	return -ENOMEM;
}
/* quoted scsi_lib.c/scsi_merge_bio */
static int scsiback_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_queue_bounce(q, &bio);

	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!q->back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail          = bio;
		rq->hard_nr_sectors += bio_sectors(bio);
		rq->nr_sectors       = rq->hard_nr_sectors;
	}

	return 0;
}
/* quoted scsi_lib.c/scsi_bi_endio */
static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	return 0;
}

/* quoted scsi_lib.c/scsi_req_map_sg */
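/*
 * Build bios covering the mapped scatterlist, a page at a time, and
 * merge them into the request; a segment that needs more pages than
 * one bio can hold is split across several bios.
 */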
static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
{
	struct request_queue *q = rq->q;
	int nr_pages;
	unsigned int nsegs = count;

	unsigned int data_len = 0, len, bytes, off;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for (i = 0; i < nsegs; i++) {
		page = pending_req->sgl[i].page;
		off = (unsigned int)pending_req->sgl[i].offset;
		len = (unsigned int)pending_req->sgl[i].length;
		data_len += len;

		nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;
				bio = bio_alloc(GFP_KERNEL, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsiback_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
						bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsiback_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, bio->bi_size, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = data_len;

	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, bio->bi_size, 0);
	}

	return err;
}
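/*
 * Turn a validated pending request into a REQ_BLOCK_PC block-layer
 * request and submit it asynchronously; scsiback_cmd_done() runs on
 * completion.
 */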
void scsiback_cmd_exec(pending_req_t *pending_req)
{
	int cmd_len = (int)pending_req->cmd_len;
	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
	unsigned int timeout;
	struct request *rq;
	int write;

	DPRINTK("%s\n",__FUNCTION__);

	/* so that the backend does not time out earlier than the frontend */
	if (pending_req->timeout_per_command)
		timeout = pending_req->timeout_per_command * HZ;
	else
		timeout = VSCSIIF_TIMEOUT;

	write = (data_dir == DMA_TO_DEVICE);
	rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);

	rq->flags |= REQ_BLOCK_PC;
	rq->cmd_len = cmd_len;
	memcpy(rq->cmd, pending_req->cmnd, cmd_len);

	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
	rq->sense = pending_req->sense_buffer;
	rq->sense_len = 0;

	/* retries are not allowed in the backend */
	rq->retries = 0;
	rq->timeout = timeout;
	rq->end_io_data = pending_req;

	if (nr_segments) {
		if (request_map_sg(rq, pending_req, nr_segments)) {
			printk(KERN_ERR "scsiback: SG Request Map Error\n");
			return;
		}
	}

	scsiback_get(pending_req->info);
	blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);

	return;
}
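/*
 * Handle a VSCSIIF_ACT_SCSI_RESET request from the frontend by asking
 * the midlayer to reset the translated device.
 */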
static void scsiback_device_reset_exec(pending_req_t *pending_req)
{
	struct vscsibk_info *info = pending_req->info;
	int err;
	struct scsi_device *sdev = pending_req->sdev;

	scsiback_get(info);
	err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);

	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
	scsiback_put(info);

	return;
}
irqreturn_t scsiback_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	scsiback_notify_work((struct vscsibk_info *)dev_id);
	return IRQ_HANDLED;
}
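/*
 * Copy one ring request into a pending descriptor and validate it.
 * Everything coming from the frontend is untrusted: each field is
 * copied out of the shared ring first, a barrier() keeps the compiler
 * from re-reading it, and only the copied value is range-checked.
 */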
static int prepare_pending_reqs(struct vscsibk_info *info,
		vscsiif_request_t *ring_req, pending_req_t *pending_req)
{
	struct scsi_device *sdev;
	struct ids_tuple vir;
	int err = -EINVAL;

	DPRINTK("%s\n",__FUNCTION__);

	pending_req->rqid = ring_req->rqid;
	pending_req->act  = ring_req->act;

	pending_req->info = info;

	pending_req->v_chn = vir.chn = ring_req->channel;
	pending_req->v_tgt = vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	rmb();
	sdev = scsiback_do_translation(info, &vir);
	if (!sdev) {
		pending_req->sdev = NULL;
		DPRINTK("scsiback: the v2p mapping doesn't exist.\n");
		err = -ENODEV;
		goto invalid_value;
	}
	pending_req->sdev = sdev;

	/* request range check from frontend */
	pending_req->sc_data_direction = ring_req->sc_data_direction;
	barrier();
	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
	    (pending_req->sc_data_direction != DMA_TO_DEVICE) &&
	    (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
	    (pending_req->sc_data_direction != DMA_NONE)) {
		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
			pending_req->sc_data_direction);
		err = -EINVAL;
		goto invalid_value;
	}

	pending_req->nr_segments = ring_req->nr_segments;
	barrier();
	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
			pending_req->nr_segments);
		err = -EINVAL;
		goto invalid_value;
	}

	pending_req->cmd_len = ring_req->cmd_len;
	barrier();
	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
			pending_req->cmd_len);
		err = -EINVAL;
		goto invalid_value;
	}
	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	pending_req->timeout_per_command = ring_req->timeout_per_command;

	if (scsiback_gnttab_data_map(ring_req, pending_req)) {
		DPRINTK("scsiback: invalid buffer\n");
		err = -EINVAL;
		goto invalid_value;
	}

	return 0;

invalid_value:
	return err;
}
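/*
 * Consume requests from the shared ring until it is empty, a pending
 * descriptor cannot be allocated, or the frontend has overflowed the
 * ring.  Returns nonzero when work is left to do.
 */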
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
{
	struct vscsiif_back_ring *ring = &info->ring;
	vscsiif_request_t *ring_req;

	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int err, more_to_do = 0;

	DPRINTK("%s\n",__FUNCTION__);

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	rmb();

	while (rc != rp) {
		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;
		pending_req = alloc_req(info);
		if (NULL == pending_req) {
			more_to_do = 1;
			break;
		}

		ring_req = RING_GET_REQUEST(ring, rc);
		ring->req_cons = ++rc;

		err = prepare_pending_reqs(info, ring_req, pending_req);
		if (err == -EINVAL) {
			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
				0, pending_req);
			continue;
		} else if (err == -ENODEV) {
			scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
				0, pending_req);
			continue;
		}

		if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
			scsiback_req_emulation_or_cmdexec(pending_req);
		} else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
			scsiback_device_reset_exec(pending_req);
		} else {
			printk(KERN_ERR "scsiback: invalid parameter for request\n");
			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
				0, pending_req);
			continue;
		}
	}

	if (RING_HAS_UNCONSUMED_REQUESTS(ring))
		more_to_do = 1;

	/* Yield point for this unbounded loop. */
	cond_resched();

	return more_to_do;
}
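/*
 * Per-frontend kernel thread: sleep until the frontend signals work
 * and a pending descriptor is available, then drain the ring.
 */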
int scsiback_schedule(void *data)
{
	struct vscsibk_info *info = (struct vscsibk_info *)data;

	DPRINTK("%s\n",__FUNCTION__);

	while (!kthread_should_stop()) {
		wait_event_interruptible(
			info->wq,
			info->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());

		info->waiting_reqs = 0;
		smp_mb();

		if (scsiback_do_cmd_fn(info))
			info->waiting_reqs = 1;
	}

	return 0;
}
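/*
 * Module init: allocate the pending-request pool plus one page and one
 * grant handle per possible segment, mark all handles invalid, and
 * register the interface, xenbus, and emulation layers.
 */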
static int __init scsiback_init(void)
{
	int i, mmap_pages;

	if (!is_running_on_xen())
		return -ENODEV;

	mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;

	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
					vscsiif_reqs, GFP_KERNEL);
	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);

	if (!pending_reqs || !pending_grant_handles || !pending_pages)
		goto out_of_memory;

	for (i = 0; i < mmap_pages; i++)
		pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;

	if (scsiback_interface_init() < 0)
		goto out_of_kmem;

	/* pending_reqs is a pointer, so sizeof(pending_reqs) would only
	   cover the pointer; zero the whole array */
	memset(pending_reqs, 0, sizeof(pending_reqs[0]) * vscsiif_reqs);
	INIT_LIST_HEAD(&pending_free);

	for (i = 0; i < vscsiif_reqs; i++)
		list_add_tail(&pending_reqs[i].free_list, &pending_free);

	if (scsiback_xenbus_init())
		goto out_of_xenbus;

	scsiback_emulation_init();

	return 0;

out_of_xenbus:
	scsiback_xenbus_unregister();
out_of_kmem:
	scsiback_interface_exit();
out_of_memory:
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
	printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
	return -ENOMEM;
}
#if 0
static void __exit scsiback_exit(void)
{
	scsiback_xenbus_unregister();
	scsiback_interface_exit();
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
}
#endif

module_init(scsiback_init);

#if 0
module_exit(scsiback_exit);
#endif

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");