ia64/linux-2.6.18-xen.hg

drivers/xen/scsiback/scsiback.c @ 562:66faefe721eb

pvSCSI backend driver

Signed-off-by: Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
Signed-off-by: Jun Kamada <kama@jp.fujitsu.com>

Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Mon Jun 02 09:58:27 2008 +0100 (2008-06-02)
Children: 3b045d92c4c0
/*
 * Xen SCSI backend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * Based on the blkback driver code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <xen/balloon.h>
#include <asm/hypervisor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "common.h"
#define NO_ASYNC 1 /* !async */

struct list_head pending_free;
DEFINE_SPINLOCK(pending_free_lock);
DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
module_param_named(reqs, vscsiif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");

#define SCSIBACK_INVALID_HANDLE (~0)

static pending_req_t *pending_reqs;
static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;
static int vaddr_pagenr(pending_req_t *req, int seg)
{
        return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
}

static unsigned long vaddr(pending_req_t *req, int seg)
{
        unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (pending_grant_handles[vaddr_pagenr(_req, _seg)])
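
/*
 * Unmap any granted pages still attached to a request, reset the
 * corresponding grant handles to SCSIBACK_INVALID_HANDLE, and free the
 * scatterlist allocated in scsiback_gnttab_data_map().
 */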
static void fast_flush_area(pending_req_t *req)
{
        struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int err;

        if (req->nr_segments) {
                for (i = 0; i < req->nr_segments; i++) {
                        handle = pending_handle(req, i);
                        if (handle == SCSIBACK_INVALID_HANDLE)
                                continue;
                        gnttab_set_unmap_op(&unmap[i], vaddr(req, i),
                                            GNTMAP_host_map, handle);
                        pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
                        invcount++;
                }

                err = HYPERVISOR_grant_table_op(
                        GNTTABOP_unmap_grant_ref, unmap, invcount);
                BUG_ON(err);
                kfree(req->sgl);
        }

        return;
}
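
/*
 * Pending request pool: requests are taken from and returned to the
 * pending_free list under pending_free_lock; scsiback_schedule() sleeps
 * on pending_free_wq until an entry becomes available again.
 */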
static pending_req_t *alloc_req(struct vscsibk_info *info)
{
        pending_req_t *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pending_free_lock, flags);
        if (!list_empty(&pending_free)) {
                req = list_entry(pending_free.next, pending_req_t, free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&pending_free_lock, flags);
        return req;
}
static void free_req(pending_req_t *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&pending_free_lock, flags);
        was_empty = list_empty(&pending_free);
        list_add(&req->free_list, &pending_free);
        spin_unlock_irqrestore(&pending_free_lock, flags);
        if (was_empty)
                wake_up(&pending_free_wq);
}
static void scsiback_notify_work(struct vscsibk_info *info)
{
        info->waiting_reqs = 1;
        wake_up(&info->wq);
}
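
/*
 * Place a response (result code plus optional sense data) on the shared
 * ring, notify the frontend if required, and release the pending request.
 */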
static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
                                        pending_req_t *pending_req)
{
        vscsiif_response_t *ring_res;
        struct vscsibk_info *info = pending_req->info;
        int notify;
        int more_to_do = 1;
        unsigned long flags;

        DPRINTK("%s\n", __FUNCTION__);

        spin_lock_irqsave(&info->ring_lock, flags);

        rmb();
        ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
        info->ring.rsp_prod_pvt++;

        ring_res->rslt = result;
        ring_res->rqid = pending_req->rqid;

        if (sense_buffer != NULL) {
                memcpy(ring_res->sense_buffer, sense_buffer,
                       VSCSIIF_SENSE_BUFFERSIZE);
                ring_res->sense_len = VSCSIIF_SENSE_BUFFERSIZE;
        } else {
                ring_res->sense_len = 0;
        }

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
        if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
                RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
        } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
                more_to_do = 1;
        }

        spin_unlock_irqrestore(&info->ring_lock, flags);

        if (more_to_do)
                scsiback_notify_work(info);

        if (notify)
                notify_remote_via_irq(info->irq);

        scsiback_put(pending_req->info);
        free_req(pending_req);
}
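
/*
 * Completion callback for a command issued by scsiback_cmd_exec(). Logs
 * failures (other than TEST UNIT READY), unmaps the granted data pages
 * and sends the result and sense data back to the frontend.
 */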
#ifdef NO_ASYNC /* !async */
static void scsiback_cmd_done(struct request *req, int errors)
{
        pending_req_t *pending_req = req->end_io_data;
        struct scsi_device *sdev = pending_req->sdev;
        unsigned char *sense_buffer;

        sense_buffer = req->sense;

#else
static void scsiback_cmd_done(void *data, char *sense_buffer,
                              int errors, int resid)
{
        pending_req_t *pending_req = data;
        struct scsi_device *sdev = pending_req->sdev;

        DPRINTK("%s\n", __FUNCTION__);
#endif

        if ((errors != 0) && (pending_req->cmnd[0] != TEST_UNIT_READY)) {

                printk(KERN_ERR "scsiback: %d:%d:%d:%d ", sdev->host->host_no,
                       sdev->channel, sdev->id, sdev->lun);
                printk(KERN_ERR "status = 0x%02x, message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
                       status_byte(errors), msg_byte(errors),
                       host_byte(errors), driver_byte(errors));

                printk(KERN_ERR "scsiback: cmnd[0]=0x%02X nr_segments=%d\n",
                       pending_req->cmnd[0],
                       pending_req->nr_segments);

                if (CHECK_CONDITION & status_byte(errors))
                        __scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
        }

#if 0 /* SAMPLE CODING (tentative) */ /* emulation */
        scsiback_rsp_emulation(pending_req);
#endif
        fast_flush_area(pending_req);
        scsiback_do_resp_with_sense(sense_buffer, errors, pending_req);

#ifdef NO_ASYNC /* !async */
        __blk_put_request(req->q, req);
#endif
}
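
/*
 * Map the data pages granted by the frontend into this domain and build
 * the scatterlist for the request. On any mapping failure the pages that
 * were mapped are torn down again via fast_flush_area().
 */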
static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
                                    pending_req_t *pending_req)
{
        u32 flags;
        int write;
        int i, err = 0;
        unsigned int data_len = 0;
        struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
        struct vscsibk_info *info = pending_req->info;

        int data_dir = (int)pending_req->sc_data_direction;
        unsigned int nr_segments = (unsigned int)pending_req->nr_segments;

        write = (data_dir == DMA_TO_DEVICE);

        if (nr_segments) {
                /* sgl is freed in fast_flush_area() */
                pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
                                           GFP_KERNEL);
                if (!pending_req->sgl) {
                        printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
                        return -ENOMEM;
                }

                for (i = 0; i < nr_segments; i++) {
                        flags = GNTMAP_host_map;
                        if (write)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                          ring_req->seg[i].gref,
                                          info->domid);
                }

                err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
                BUG_ON(err);

                for (i = 0; i < nr_segments; i++) {
                        if (unlikely(map[i].status != 0)) {
                                printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
                                map[i].handle = SCSIBACK_INVALID_HANDLE;
                                err |= 1;
                        }

                        pending_handle(pending_req, i) = map[i].handle;

                        if (err)
                                continue;

                        set_phys_to_machine(__pa(vaddr(
                                pending_req, i)) >> PAGE_SHIFT,
                                FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));

                        pending_req->sgl[i].page = virt_to_page(vaddr(pending_req, i));
                        pending_req->sgl[i].offset = ring_req->seg[i].offset;
                        pending_req->sgl[i].length = ring_req->seg[i].length;
                        data_len += pending_req->sgl[i].length;
                }

                if (err)
                        goto fail_flush;
        }

        pending_req->request_bufflen = data_len;

        return 0;

fail_flush:
        fast_flush_area(pending_req);
        return -ENOMEM;
}
#ifdef NO_ASYNC /* !async */

/* quoted from scsi_lib.c:scsi_merge_bio() */
static int scsiback_merge_bio(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;

        bio->bi_flags &= ~(1 << BIO_SEG_VALID);
        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        blk_queue_bounce(q, &bio);

        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!q->back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->hard_nr_sectors += bio_sectors(bio);
                rq->nr_sectors = rq->hard_nr_sectors;
        }

        return 0;
}

/* quoted from scsi_lib.c:scsi_bi_endio() */
static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
{
        if (bio->bi_size)
                return 1;

        bio_put(bio);
        return 0;
}
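
/*
 * Build bios for the mapped scatterlist and attach them to the block
 * layer request, mirroring scsi_lib.c:scsi_req_map_sg(). Used only in
 * the NO_ASYNC path, before blk_execute_rq_nowait().
 */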
/* quoted from scsi_lib.c:scsi_req_map_sg() */
static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
{
        struct request_queue *q = rq->q;
        int nr_pages;
        unsigned int nsegs = count;

        unsigned int data_len = 0, len, bytes, off;
        struct page *page;
        struct bio *bio = NULL;
        int i, err, nr_vecs = 0;

        for (i = 0; i < nsegs; i++) {
                page = pending_req->sgl[i].page;
                off = (unsigned int)pending_req->sgl[i].offset;
                len = (unsigned int)pending_req->sgl[i].length;
                data_len += len;

                nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
                while (len > 0) {
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);

                        if (!bio) {
                                nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
                                nr_pages -= nr_vecs;
                                bio = bio_alloc(GFP_KERNEL, nr_vecs);
                                if (!bio) {
                                        err = -ENOMEM;
                                        goto free_bios;
                                }
                                bio->bi_end_io = scsiback_bi_endio;
                        }

                        if (bio_add_pc_page(q, bio, page, bytes, off) !=
                            bytes) {
                                bio_put(bio);
                                err = -EINVAL;
                                goto free_bios;
                        }

                        if (bio->bi_vcnt >= nr_vecs) {
                                err = scsiback_merge_bio(rq, bio);
                                if (err) {
                                        bio_endio(bio, bio->bi_size, 0);
                                        goto free_bios;
                                }
                                bio = NULL;
                        }

                        page++;
                        len -= bytes;
                        off = 0;
                }
        }

        rq->buffer = rq->data = NULL;
        rq->data_len = data_len;

        return 0;

free_bios:
        while ((bio = rq->bio) != NULL) {
                rq->bio = bio->bi_next;
                /*
                 * call endio instead of bio_put in case it was bounced
                 */
                bio_endio(bio, bio->bi_size, 0);
        }

        return err;
}

#endif
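
/*
 * Issue the SCSI command carried by a pending request to the physical
 * device, either as a REQ_BLOCK_PC block layer request (NO_ASYNC) or via
 * scsi_execute_async(), with scsiback_cmd_done() as completion handler.
 */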
void scsiback_cmd_exec(pending_req_t *pending_req)
{
        int err;
        int cmd_len = (int)pending_req->cmd_len;
        int data_dir = (int)pending_req->sc_data_direction;
        unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
        unsigned int timeout;
#ifdef NO_ASYNC /* !async */
        struct request *rq;
        int write;
#else
        unsigned int data_len = pending_req->request_bufflen;
#endif

        DPRINTK("%s\n", __FUNCTION__);

        /* so that the backend does not time out earlier than the frontend */
        if (pending_req->timeout_per_command)
                timeout = (pending_req->timeout_per_command * HZ * 2);
        else
                timeout = VSCSIIF_TIMEOUT;

#ifdef NO_ASYNC /* !async */
        err = 0;
        write = (data_dir == DMA_TO_DEVICE);
        rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);

        rq->flags |= REQ_BLOCK_PC;
        rq->cmd_len = cmd_len;
        memcpy(rq->cmd, pending_req->cmnd, cmd_len);

        memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
        rq->sense = pending_req->sense_buffer;
        rq->sense_len = 0;

        rq->retries = 0;
        rq->timeout = timeout;
        rq->end_io_data = pending_req;

        if (nr_segments) {
                if (request_map_sg(rq, pending_req, nr_segments)) {
                        printk(KERN_ERR "scsiback: SG Request Map Error\n");
                        return;
                }
        }

        blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);

#else /* async */
        /* retries are not allowed in the backend. */
        /* the backend timeout is longer than that of the frontend. */
        err = scsi_execute_async(pending_req->sdev, &(pending_req->cmnd[0]),
                cmd_len, data_dir, &(pending_req->sgl[0]), data_len, nr_segments,
                timeout, 0, pending_req, scsiback_cmd_done, GFP_ATOMIC);
#endif /* !async */

        if (err)
                scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24), pending_req);

        return;
}
static void scsiback_device_reset_exec(pending_req_t *pending_req)
{
        struct vscsibk_info *info = pending_req->info;
        int err;
        struct scsi_device *sdev = pending_req->sdev;

        scsiback_get(info);

        err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);

        scsiback_do_resp_with_sense(NULL, err, pending_req);
        notify_remote_via_irq(info->irq);
        return;
}
irqreturn_t scsiback_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        scsiback_notify_work((struct vscsibk_info *)dev_id);
        return IRQ_HANDLED;
}
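
/*
 * Copy and sanity-check a ring request from the frontend: translate the
 * virtual (channel, id, lun) tuple to a scsi_device, validate the data
 * direction, segment count and command length, then map the granted data
 * pages. Returns 0 on success, -ENODEV or -EINVAL otherwise.
 */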
static int prepare_pending_reqs(struct vscsibk_info *info,
                vscsiif_request_t *ring_req, pending_req_t *pending_req)
{
        struct scsi_device *sdev;
        struct ids_tuple vir;
        int err = -EINVAL;

        DPRINTK("%s\n", __FUNCTION__);

        pending_req->rqid = ring_req->rqid;
        pending_req->act = ring_req->act;

        pending_req->info = info;

        vir.chn = ring_req->channel;
        vir.tgt = ring_req->id;
        vir.lun = ring_req->lun;

        sdev = scsiback_do_translation(info, &vir);
        if (!sdev) {
                pending_req->sdev = NULL;
                printk(KERN_ERR "scsiback: the device doesn't exist.\n");
                err = -ENODEV;
                goto invalid_value;
        }
        pending_req->sdev = sdev;

        /* request range check from frontend */
        if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
            (ring_req->sc_data_direction != DMA_TO_DEVICE) &&
            (ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
            (ring_req->sc_data_direction != DMA_NONE)) {
                printk(KERN_ERR "scsiback: invalid parameter data_dir = %d\n",
                       ring_req->sc_data_direction);
                err = -EINVAL;
                goto invalid_value;
        }

        if (ring_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
                printk(KERN_ERR "scsiback: invalid parameter nr_seg = %d\n",
                       ring_req->nr_segments);
                err = -EINVAL;
                goto invalid_value;
        }
        pending_req->nr_segments = ring_req->nr_segments;

        if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
                printk(KERN_ERR "scsiback: invalid parameter cmd_len = %d\n",
                       ring_req->cmd_len);
                err = -EINVAL;
                goto invalid_value;
        }
        memcpy(pending_req->cmnd, ring_req->cmnd, ring_req->cmd_len);
        pending_req->cmd_len = ring_req->cmd_len;

        pending_req->sc_data_direction = ring_req->sc_data_direction;
        pending_req->timeout_per_command = ring_req->timeout_per_command;

        if (scsiback_gnttab_data_map(ring_req, pending_req)) {
                printk(KERN_ERR "scsiback: invalid buffer\n");
                err = -EINVAL;
                goto invalid_value;
        }

        return 0;

invalid_value:
        return err;
}
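
/*
 * Drain the request ring: for each new request, take a pending_req from
 * the free pool, validate and map it, then dispatch it as either a CDB
 * execution or a device reset. Returns nonzero if more work remains.
 */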
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
{
        struct vscsiif_back_ring *ring = &info->ring;
        vscsiif_request_t *ring_req;

        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int err, more_to_do = 0;

        DPRINTK("%s\n", __FUNCTION__);

        rc = ring->req_cons;
        rp = ring->sring->req_prod;
        rmb();

        while ((rc != rp)) {
                if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
                        break;
                pending_req = alloc_req(info);
                if (NULL == pending_req) {
                        more_to_do = 1;
                        break;
                }

                ring_req = RING_GET_REQUEST(ring, rc);
                ring->req_cons = ++rc;

                scsiback_get(info);
                err = prepare_pending_reqs(info, ring_req,
                                           pending_req);
                if (err == -EINVAL) {
                        scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
                                                    pending_req);
                        continue;
                } else if (err == -ENODEV) {
                        scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
                                                    pending_req);
                        /* the response has been sent and pending_req freed */
                        continue;
                }

                if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
#if 0 /* SAMPLE CODING (tentative) */ /* emulation */
                        scsiback_req_emulation_or_through(pending_req);
#else
                        scsiback_cmd_exec(pending_req);
#endif
                } else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
                        scsiback_device_reset_exec(pending_req);
                } else {
                        printk(KERN_ERR "scsiback: invalid parameter for request\n");
                        scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
                                                    pending_req);
                        continue;
                }
        }

        if (RING_HAS_UNCONSUMED_REQUESTS(ring))
                more_to_do = 1;

        return more_to_do;
}
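
/*
 * Per-interface kernel thread: sleeps until the frontend signals work and
 * a free pending_req is available, then processes the ring via
 * scsiback_do_cmd_fn(). Drops its reference to the interface on exit.
 */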
int scsiback_schedule(void *data)
{
        struct vscsibk_info *info = (struct vscsibk_info *)data;

        DPRINTK("%s\n", __FUNCTION__);

        scsiback_get(info);

        while (!kthread_should_stop()) {
                wait_event_interruptible(
                        info->wq,
                        info->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        pending_free_wq,
                        !list_empty(&pending_free) || kthread_should_stop());

                info->waiting_reqs = 0;
                smp_mb();

                if (scsiback_do_cmd_fn(info))
                        info->waiting_reqs = 1;
        }

        info->kthread = NULL;
        scsiback_put(info);

        return 0;
}
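
/*
 * Module init: allocate the pending request pool, the grant handle array
 * and the empty pages used as grant mapping targets, then register the
 * interface and xenbus backends.
 */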
static int __init scsiback_init(void)
{
        int i, mmap_pages;

        if (!is_running_on_xen())
                return -ENODEV;

        mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;

        pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
                               vscsiif_reqs, GFP_KERNEL);
        pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);

        if (!pending_reqs || !pending_grant_handles || !pending_pages)
                goto out_of_memory;

        for (i = 0; i < mmap_pages; i++)
                pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;

        if (scsiback_interface_init() < 0)
                goto out_of_memory;

        memset(pending_reqs, 0, sizeof(pending_reqs[0]) * vscsiif_reqs);
        INIT_LIST_HEAD(&pending_free);

        for (i = 0; i < vscsiif_reqs; i++)
                list_add_tail(&pending_reqs[i].free_list, &pending_free);

        if (scsiback_xenbus_init())
                goto out_of_memory;

        return 0;

out_of_memory:
        kfree(pending_reqs);
        kfree(pending_grant_handles);
        free_empty_pages_and_pagevec(pending_pages, mmap_pages);
        printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
        return -ENOMEM;
}
static void __exit scsiback_exit(void)
{
        scsiback_xenbus_unregister();
        scsiback_interface_exit();
        kfree(pending_reqs);
        kfree(pending_grant_handles);
        free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
}
module_init(scsiback_init);
module_exit(scsiback_exit);

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");