ia64/linux-2.6.18-xen.hg

drivers/xen/scsiback/scsiback.c @ 794:be85b1d7a52b

pvSCSI: add new device assignment mode

Add a new device assignment mode that assigns a whole HBA
(SCSI host) to a guest domain. The current implementation requires SCSI
command emulation in the backend driver, which imposes limitations on
some SCSI commands. (Please see
"http://www.xen.org/files/xensummit_tokyo/24_Hitoshi%20Matsumoto_en.pdf"
for details on why the new assignment mode is needed.)

SCSI command emulation in the backend driver is bypassed when "host" mode
is specified (a short sketch follows the changeset metadata below).

Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Jun Kamada <kama@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Feb 17 11:17:11 2009 +0000 (2009-02-17)
parents 2fa1d9446f2f
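
In the request path below, the bypass amounts to a single feature check at
dispatch time; in outline (identifiers taken from scsiback_do_cmd_fn() in
this file):

	if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
		/* "host" mode sends the CDB straight to the device; any
		 * other mode routes it through the emulation layer first. */
		if (info->feature == VSCSI_TYPE_HOST)
			scsiback_cmd_exec(pending_req);
		else
			scsiback_req_emulation_or_cmdexec(pending_req);
	}
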
/*
 * Xen SCSI backend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * Based on the blkback driver code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <xen/balloon.h>
#include <asm/hypervisor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "common.h"

struct list_head pending_free;
DEFINE_SPINLOCK(pending_free_lock);
DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
module_param_named(reqs, vscsiif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");

static unsigned int log_print_stat = 0;
module_param(log_print_stat, int, 0644);

#define SCSIBACK_INVALID_HANDLE (~0)

static pending_req_t *pending_reqs;
static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;

static int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
}

static unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
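
/*
 * Unmap the frontend pages granted for this request and free the
 * scatterlist allocated in scsiback_gnttab_data_map().
 */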
void scsiback_fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int err;

	if (req->nr_segments) {
		for (i = 0; i < req->nr_segments; i++) {
			handle = pending_handle(req, i);
			if (handle == SCSIBACK_INVALID_HANDLE)
				continue;
			/* index by invcount, not i, so that skipped
			   (invalid) handles do not leave gaps in unmap[] */
			gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
					    GNTMAP_host_map, handle);
			pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
			invcount++;
		}

		err = HYPERVISOR_grant_table_op(
			GNTTABOP_unmap_grant_ref, unmap, invcount);
		BUG_ON(err);
		kfree(req->sgl);
	}

	return;
}
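
/* Take a request descriptor from the global free pool (NULL if exhausted). */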
static pending_req_t *alloc_req(struct vscsibk_info *info)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pending_free_lock, flags);
	if (!list_empty(&pending_free)) {
		req = list_entry(pending_free.next, pending_req_t, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&pending_free_lock, flags);
	return req;
}

static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&pending_free_lock, flags);
	was_empty = list_empty(&pending_free);
	list_add(&req->free_list, &pending_free);
	spin_unlock_irqrestore(&pending_free_lock, flags);
	if (was_empty)
		wake_up(&pending_free_wq);
}

static void scsiback_notify_work(struct vscsibk_info *info)
{
	info->waiting_reqs = 1;
	wake_up(&info->wq);
}
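
/*
 * Put a response (result, residual length and any sense data) on the shared
 * ring, kick the frontend if necessary, and recycle the request descriptor.
 */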
void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
		uint32_t resid, pending_req_t *pending_req)
{
	vscsiif_response_t *ring_res;
	struct vscsibk_info *info = pending_req->info;
	int notify;
	int more_to_do = 1;
	struct scsi_sense_hdr sshdr;
	unsigned long flags;

	DPRINTK("%s\n", __FUNCTION__);

	spin_lock_irqsave(&info->ring_lock, flags);

	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
	info->ring.rsp_prod_pvt++;

	ring_res->rslt = result;
	ring_res->rqid = pending_req->rqid;

	if (sense_buffer != NULL) {
		/* sizeof(sense_buffer) would only be the size of the pointer;
		   pass the real sense buffer size instead. */
		if (scsi_normalize_sense(sense_buffer,
					VSCSIIF_SENSE_BUFFERSIZE, &sshdr)) {

			int len = 8 + sense_buffer[7];

			if (len > VSCSIIF_SENSE_BUFFERSIZE)
				len = VSCSIIF_SENSE_BUFFERSIZE;

			memcpy(ring_res->sense_buffer, sense_buffer, len);
			ring_res->sense_len = len;
		}
	} else {
		ring_res->sense_len = 0;
	}

	ring_res->residual_len = resid;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
	if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
		RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&info->ring_lock, flags);

	if (more_to_do)
		scsiback_notify_work(info);

	if (notify)
		notify_remote_via_irq(info->irq);

	free_req(pending_req);
}

static void scsiback_print_status(char *sense_buffer, int errors,
		pending_req_t *pending_req)
{
	struct scsi_device *sdev = pending_req->sdev;

	printk(KERN_ERR "scsiback: %d:%d:%d:%d ", sdev->host->host_no,
	       sdev->channel, sdev->id, sdev->lun);
	printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
	       status_byte(errors), msg_byte(errors),
	       host_byte(errors), driver_byte(errors));

	printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
	       pending_req->cmnd[0]);

	if (CHECK_CONDITION & status_byte(errors))
		__scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
}

static void scsiback_cmd_done(struct request *req, int uptodate)
{
	pending_req_t *pending_req = req->end_io_data;
	unsigned char *sense_buffer;
	unsigned int resid;
	int errors;

	sense_buffer = req->sense;
	resid = req->data_len;
	errors = req->errors;

	if (errors != 0) {
		if (log_print_stat)
			scsiback_print_status(sense_buffer, errors, pending_req);
	}

	/* "host" mode bypasses the response emulation. */
	if (pending_req->info->feature != VSCSI_TYPE_HOST)
		scsiback_rsp_emulation(pending_req);

	scsiback_fast_flush_area(pending_req);
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
	scsiback_put(pending_req->info);

	__blk_put_request(req->q, req);
}
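
/*
 * Map the grant references supplied by the frontend into this domain and
 * build the scatterlist that describes the data buffer of the request.
 */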
static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
		pending_req_t *pending_req)
{
	u32 flags;
	int write;
	int i, err = 0;
	unsigned int data_len = 0;
	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
	struct vscsibk_info *info = pending_req->info;

	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;

	write = (data_dir == DMA_TO_DEVICE);

	if (nr_segments) {
		/* pending_req->sgl is freed in scsiback_fast_flush_area() */
		pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
					GFP_KERNEL);
		if (!pending_req->sgl) {
			printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
			return -ENOMEM;
		}

		for (i = 0; i < nr_segments; i++) {
			flags = GNTMAP_host_map;
			if (write)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
					ring_req->seg[i].gref,
					info->domid);
		}

		err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
		BUG_ON(err);

		for (i = 0; i < nr_segments; i++) {
			if (unlikely(map[i].status != 0)) {
				printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
				map[i].handle = SCSIBACK_INVALID_HANDLE;
				err |= 1;
			}

			pending_handle(pending_req, i) = map[i].handle;

			if (err)
				continue;

			set_phys_to_machine(__pa(vaddr(
				pending_req, i)) >> PAGE_SHIFT,
				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));

			pending_req->sgl[i].page   = virt_to_page(vaddr(pending_req, i));
			pending_req->sgl[i].offset = ring_req->seg[i].offset;
			pending_req->sgl[i].length = ring_req->seg[i].length;
			data_len += pending_req->sgl[i].length;

			barrier();
			if (pending_req->sgl[i].offset >= PAGE_SIZE ||
			    pending_req->sgl[i].length > PAGE_SIZE ||
			    pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE)
				err |= 1;
		}

		if (err)
			goto fail_flush;
	}

	pending_req->request_bufflen = data_len;

	return 0;

fail_flush:
	scsiback_fast_flush_area(pending_req);
	return -ENOMEM;
}

/* adapted from scsi_lib.c:scsi_merge_bio() */
static int scsiback_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	blk_queue_bounce(q, &bio);

	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!q->back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->hard_nr_sectors += bio_sectors(bio);
		rq->nr_sectors = rq->hard_nr_sectors;
	}

	return 0;
}

/* adapted from scsi_lib.c:scsi_bi_endio() */
static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;

	bio_put(bio);
	return 0;
}

/* adapted from scsi_lib.c:scsi_req_map_sg() */
static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
{
	struct request_queue *q = rq->q;
	int nr_pages;
	unsigned int nsegs = count;

	unsigned int data_len = 0, len, bytes, off;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for (i = 0; i < nsegs; i++) {
		page = pending_req->sgl[i].page;
		off = (unsigned int)pending_req->sgl[i].offset;
		len = (unsigned int)pending_req->sgl[i].length;
		data_len += len;

		nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;
				bio = bio_alloc(GFP_KERNEL, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsiback_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsiback_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, bio->bi_size, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = data_len;

	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, bio->bi_size, 0);
	}

	return err;
}
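
/*
 * Build a REQ_BLOCK_PC block-layer request for the translated SCSI device
 * and submit it; scsiback_cmd_done() runs on completion.
 */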
void scsiback_cmd_exec(pending_req_t *pending_req)
{
	int cmd_len = (int)pending_req->cmd_len;
	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
	unsigned int timeout;
	struct request *rq;
	int write;

	DPRINTK("%s\n", __FUNCTION__);

	/* do not let the backend time out earlier than the frontend */
	if (pending_req->timeout_per_command)
		timeout = pending_req->timeout_per_command * HZ;
	else
		timeout = VSCSIIF_TIMEOUT;

	write = (data_dir == DMA_TO_DEVICE);
	rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);

	rq->flags |= REQ_BLOCK_PC;
	rq->cmd_len = cmd_len;
	memcpy(rq->cmd, pending_req->cmnd, cmd_len);

	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
	rq->sense = pending_req->sense_buffer;
	rq->sense_len = 0;

	/* retrying is not allowed in the backend */
	rq->retries = 0;
	rq->timeout = timeout;
	rq->end_io_data = pending_req;

	if (nr_segments) {
		if (request_map_sg(rq, pending_req, nr_segments)) {
			printk(KERN_ERR "scsiback: SG Request Map Error\n");
			return;
		}
	}

	scsiback_get(pending_req->info);
	blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);

	return;
}

static void scsiback_device_reset_exec(pending_req_t *pending_req)
{
	struct vscsibk_info *info = pending_req->info;
	int err;
	struct scsi_device *sdev = pending_req->sdev;

	scsiback_get(info);
	err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);

	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
	scsiback_put(info);

	return;
}

irqreturn_t scsiback_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	scsiback_notify_work((struct vscsibk_info *)dev_id);
	return IRQ_HANDLED;
}
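
/*
 * Copy one ring request into the pending_req, translate the virtual
 * (channel, target, LUN) tuple to a scsi_device, and range-check the
 * values received from the frontend before mapping its data pages.
 */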
static int prepare_pending_reqs(struct vscsibk_info *info,
		vscsiif_request_t *ring_req, pending_req_t *pending_req)
{
	struct scsi_device *sdev;
	struct ids_tuple vir;
	int err = -EINVAL;

	DPRINTK("%s\n", __FUNCTION__);

	pending_req->rqid = ring_req->rqid;
	pending_req->act = ring_req->act;

	pending_req->info = info;

	pending_req->v_chn = vir.chn = ring_req->channel;
	pending_req->v_tgt = vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	rmb();
	sdev = scsiback_do_translation(info, &vir);
	if (!sdev) {
		pending_req->sdev = NULL;
		DPRINTK("scsiback: doesn't exist.\n");
		err = -ENODEV;
		goto invalid_value;
	}
	pending_req->sdev = sdev;

	/* range check of the values received from the frontend */
	pending_req->sc_data_direction = ring_req->sc_data_direction;
	barrier();
	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
	    (pending_req->sc_data_direction != DMA_TO_DEVICE) &&
	    (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
	    (pending_req->sc_data_direction != DMA_NONE)) {
		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
			pending_req->sc_data_direction);
		err = -EINVAL;
		goto invalid_value;
	}

	pending_req->nr_segments = ring_req->nr_segments;
	barrier();
	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
			pending_req->nr_segments);
		err = -EINVAL;
		goto invalid_value;
	}

	pending_req->cmd_len = ring_req->cmd_len;
	barrier();
	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
			pending_req->cmd_len);
		err = -EINVAL;
		goto invalid_value;
	}
	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	pending_req->timeout_per_command = ring_req->timeout_per_command;

	if (scsiback_gnttab_data_map(ring_req, pending_req)) {
		DPRINTK("scsiback: invalid buffer\n");
		err = -EINVAL;
		goto invalid_value;
	}

	return 0;

invalid_value:
	return err;
}
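
/*
 * Consume requests from the shared ring and dispatch them.  Returns nonzero
 * if more work remains so that scsiback_schedule() loops again.
 */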
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
{
	struct vscsiif_back_ring *ring = &info->ring;
	vscsiif_request_t *ring_req;

	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int err, more_to_do = 0;

	DPRINTK("%s\n", __FUNCTION__);

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	rmb();

	while (rc != rp) {
		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;
		pending_req = alloc_req(info);
		if (NULL == pending_req) {
			more_to_do = 1;
			break;
		}

		ring_req = RING_GET_REQUEST(ring, rc);
		ring->req_cons = ++rc;

		err = prepare_pending_reqs(info, ring_req,
					   pending_req);
		if (err == -EINVAL) {
			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
				0, pending_req);
			continue;
		} else if (err == -ENODEV) {
			scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
				0, pending_req);
			continue;
		}

		if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {

			/* "host" mode bypasses the command emulation. */
			if (info->feature == VSCSI_TYPE_HOST)
				scsiback_cmd_exec(pending_req);
			else
				scsiback_req_emulation_or_cmdexec(pending_req);

		} else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
			scsiback_device_reset_exec(pending_req);
		} else {
			printk(KERN_ERR "scsiback: invalid parameter for request\n");
			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
				0, pending_req);
			continue;
		}
	}

	if (RING_HAS_UNCONSUMED_REQUESTS(ring))
		more_to_do = 1;

	/* Yield point for this unbounded loop. */
	cond_resched();

	return more_to_do;
}
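
/* Per-frontend kernel thread: sleep until work arrives, then process it. */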
int scsiback_schedule(void *data)
{
	struct vscsibk_info *info = (struct vscsibk_info *)data;

	DPRINTK("%s\n", __FUNCTION__);

	while (!kthread_should_stop()) {
		wait_event_interruptible(
			info->wq,
			info->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());

		info->waiting_reqs = 0;
		smp_mb();

		if (scsiback_do_cmd_fn(info))
			info->waiting_reqs = 1;
	}

	return 0;
}

static int __init scsiback_init(void)
{
	int i, mmap_pages;

	if (!is_running_on_xen())
		return -ENODEV;

	mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;

	pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
					vscsiif_reqs, GFP_KERNEL);
	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);

	if (!pending_reqs || !pending_grant_handles || !pending_pages)
		goto out_of_memory;

	for (i = 0; i < mmap_pages; i++)
		pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;

	if (scsiback_interface_init() < 0)
		goto out_of_kmem;

	/* sizeof(pending_reqs) is only the size of the pointer;
	   zero the whole array. */
	memset(pending_reqs, 0, sizeof(pending_reqs[0]) * vscsiif_reqs);
	INIT_LIST_HEAD(&pending_free);

	for (i = 0; i < vscsiif_reqs; i++)
		list_add_tail(&pending_reqs[i].free_list, &pending_free);

	if (scsiback_xenbus_init())
		goto out_of_xenbus;

	scsiback_emulation_init();

	return 0;

out_of_xenbus:
	scsiback_xenbus_unregister();
out_of_kmem:
	scsiback_interface_exit();
out_of_memory:
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
	printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
	return -ENOMEM;
}

#if 0
static void __exit scsiback_exit(void)
{
	scsiback_xenbus_unregister();
	scsiback_interface_exit();
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
}
#endif

module_init(scsiback_init);

#if 0
module_exit(scsiback_exit);
#endif

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");