ia64/linux-2.6.18-xen.hg: drivers/xen/blkback/blkback.c @ 878:eba6fe6d8d53

blktap2: a completely rewritten blktap implementation

Benefits to blktap2 over the old version of blktap:

* Isolation from xenstore - Blktap devices are now created directly on
the linux dom0 command line, rather than being spawned in response
to XenStore events. This is handy for debugging, makes blktap
generally easier to work with, and is a step toward a generic
user-level block device implementation that is not Xen-specific.

* Improved tapdisk infrastructure: simpler request forwarding, new
request scheduler, request merging, more efficient use of AIO.

* Improved tapdisk error handling and memory management. No
  allocations on the block data path, IO retry logic to protect guests
  from transient block device failures. This has been tested and is
  known to work in environments such as NFS soft mounts.

* Pause and snapshot of live virtual disks (see xmsnap script).

* VHD support. The VHD code in this release has been rigorously
  tested, and represents a very mature implementation of the VHD
  image format.

* No more duplication of mechanism with blkback. The blktap kernel
module has changed dramatically from the original blktap. Blkback
is now always used to talk to Xen guests, blktap just presents a
Linux gendisk that blkback can export. This is done while
preserving the zero-copy data path from domU to physical device.

These patches deprecate the old blktap code, which can hopefully be
removed from the tree completely at some point in the future.

Signed-off-by: Jake Wires <jake.wires@citrix.com>
Signed-off-by: Dutch Meyer <dmeyer@cs.ubc.ca>
author:  Keir Fraser <keir.fraser@citrix.com>
date:    Tue May 26 11:23:16 2009 +0100
parents: 5012c470f875
/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <xen/balloon.h>
#include <asm/hypervisor.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats = 0;
static unsigned int debug_lvl = 0;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);
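
/*
 * Note: 'reqs' has permission 0, so it can only be set at module load time
 * (for example "modprobe blkback reqs=128", assuming blkback is built as a
 * module).  log_stats and debug_lvl are 0644 and can be toggled at runtime
 * through the sysfs path above.
 */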

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
        blkif_t       *blkif;
        u64            id;
        int            nr_pages;
        atomic_t       pendcnt;
        unsigned short operation;
        int            status;
        struct list_head free_list;
} pending_req_t;

static pending_req_t *pending_reqs;
static struct list_head pending_free;
static DEFINE_SPINLOCK(pending_free_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

#define BLKBACK_INVALID_HANDLE (~0)

static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;
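
/*
 * pending_pages and pending_grant_handles are flat arrays holding
 * blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST entries; each pending_req
 * owns a contiguous block of slots, and vaddr_pagenr() below computes the
 * slot index for a given (request, segment) pair.
 */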
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
        return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
        unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
        return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
        (pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
                          unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
static pending_req_t* alloc_req(void)
{
        pending_req_t *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pending_free_lock, flags);
        if (!list_empty(&pending_free)) {
                req = list_entry(pending_free.next, pending_req_t, free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&pending_free_lock, flags);
        return req;
}

static void free_req(pending_req_t *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&pending_free_lock, flags);
        was_empty = list_empty(&pending_free);
        list_add(&req->free_list, &pending_free);
        spin_unlock_irqrestore(&pending_free_lock, flags);
        if (was_empty)
                wake_up(&pending_free_wq);
}
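
/*
 * alloc_req() returns NULL when the pool is exhausted; free_req() wakes
 * pending_free_wq only on the empty -> non-empty transition, which is what
 * blkif_schedule() sleeps on when it runs out of pending_reqs.
 */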

static void unplug_queue(blkif_t *blkif)
{
        if (blkif->plug == NULL)
                return;
        if (blkif->plug->unplug_fn)
                blkif->plug->unplug_fn(blkif->plug);
        blk_put_queue(blkif->plug);
        blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
        request_queue_t *q = bdev_get_queue(bdev);

        if (q == blkif->plug)
                return;
        unplug_queue(blkif);
        blk_get_queue(q);
        blkif->plug = q;
}
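
/*
 * fast_flush_area() tears down the grant mappings of a completed request:
 * it collects every segment that was actually mapped and unmaps them all
 * with a single GNTTABOP_unmap_grant_ref hypercall, skipping slots whose
 * handle is still BLKBACK_INVALID_HANDLE.
 */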
static void fast_flush_area(pending_req_t *req)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        grant_handle_t handle;
        int ret;

        for (i = 0; i < req->nr_pages; i++) {
                handle = pending_handle(req, i);
                if (handle == BLKBACK_INVALID_HANDLE)
                        continue;
                blkback_pagemap_clear(virt_to_page(vaddr(req, i)));
                gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
                                    GNTMAP_host_map, handle);
                pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        ret = HYPERVISOR_grant_table_op(
                GNTTABOP_unmap_grant_ref, unmap, invcount);
        BUG_ON(ret);
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
        printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
               current->comm, blkif->st_oo_req,
               blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
}
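
/*
 * blkif_schedule() is the per-device kernel thread.  It sleeps until the
 * frontend signals work (blkif->wq) and a free pending_req is available
 * (pending_free_wq), then drains the shared ring via do_block_io_op(),
 * unplugs the request queue, and prints the statistics above roughly every
 * ten seconds when log_stats is set.
 */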
int blkif_schedule(void *arg)
{
        blkif_t *blkif = arg;

        blkif_get(blkif);

        if (debug_lvl)
                printk(KERN_DEBUG "%s: started\n", current->comm);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;

                wait_event_interruptible(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop());
                wait_event_interruptible(
                        pending_free_wq,
                        !list_empty(&pending_free) || kthread_should_stop());

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                if (do_block_io_op(blkif))
                        blkif->waiting_reqs = 1;
                unplug_queue(blkif);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        if (log_stats)
                print_stats(blkif);
        if (debug_lvl)
                printk(KERN_DEBUG "%s: exiting\n", current->comm);

        blkif->xenblkd = NULL;
        blkif_put(blkif);

        return 0;
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as bh->b_end_io()
 */

static void __end_block_io_op(pending_req_t *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
            (error == -EOPNOTSUPP)) {
                DPRINTK("blkback: write barrier op failed, not supported\n");
                blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
                        "error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                fast_flush_area(pending_req);
                make_response(pending_req->blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                blkif_put(pending_req->blkif);
                free_req(pending_req);
        }
}

static int end_block_io_op(struct bio *bio, unsigned int done, int error)
{
        if (bio->bi_size != 0)
                return 1;
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
        return error;
}
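
/*
 * Note on the 2.6.18 bio completion contract: bi_end_io can be invoked for
 * partial completions, so end_block_io_op() returns 1 (keep the bio alive)
 * until bi_size has dropped to zero, and only then finishes the request and
 * drops its bio reference.
 */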

/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */
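
/*
 * do_block_io_op() consumes requests from the shared ring until the ring is
 * empty, the pool of pending_reqs runs dry, or the thread is asked to stop.
 * It returns nonzero when requests remain unconsumed so that blkif_schedule()
 * will loop around and call it again.
 */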
static int do_block_io_op(blkif_t *blkif)
{
        blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        blkif_request_t req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req();
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                case BLKIF_OP_WRITE_BARRIER:
                        blkif->st_br_req++;
                        /* fall through */
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
                default:
                        /* A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest. */
                        msleep(1);
                        DPRINTK("error: unknown block io operation [%d]\n",
                                req.operation);
                        make_response(blkif, req.id, req.operation,
                                      BLKIF_RSP_ERROR);
                        free_req(pending_req);
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }

        return more_to_do;
}
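
/*
 * dispatch_rw_block_io() does the heavy lifting for a single read, write or
 * write-barrier request: it maps the guest's grant references into dom0 with
 * one GNTTABOP_map_grant_ref batch, validates the offsets against the virtual
 * block device via vbd_translate(), and then builds and submits one or more
 * bios.  pending_req->pendcnt counts the bios still in flight; the response
 * is sent from __end_block_io_op() once the last bio completes.
 */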
static void dispatch_rw_block_io(blkif_t *blkif,
                                 blkif_request_t *req,
                                 pending_req_t *pending_req)
{
        extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct phys_req preq;
        struct {
                unsigned long buf; unsigned int nsec;
        } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int nseg;
        struct bio *bio = NULL;
        int ret, i;
        int operation;

        switch (req->operation) {
        case BLKIF_OP_READ:
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                operation = WRITE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                operation = WRITE_BARRIER;
                break;
        default:
                operation = 0; /* make gcc happy */
                BUG();
        }

        /* Check that number of segments is sane. */
        nseg = req->nr_segments;
        if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                goto fail_response;
        }

        preq.dev           = req->handle;
        preq.sector_number = req->sector_number;
        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->id;
        pending_req->operation = req->operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        for (i = 0; i < nseg; i++) {
                uint32_t flags;

                seg[i].nsec = req->seg[i].last_sect -
                        req->seg[i].first_sect + 1;

                if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (req->seg[i].last_sect < req->seg[i].first_sect))
                        goto fail_response;
                preq.nr_sects += seg[i].nsec;

                flags = GNTMAP_host_map;
                if (operation != READ)
                        flags |= GNTMAP_readonly;
                gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
                                  req->seg[i].gref, blkif->domid);
        }

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
        BUG_ON(ret);

        for (i = 0; i < nseg; i++) {
                if (unlikely(map[i].status != 0)) {
                        DPRINTK("invalid buffer -- could not remap it\n");
                        map[i].handle = BLKBACK_INVALID_HANDLE;
                        ret |= 1;
                }

                pending_handle(pending_req, i) = map[i].handle;

                if (ret)
                        continue;

                set_phys_to_machine(__pa(vaddr(
                        pending_req, i)) >> PAGE_SHIFT,
                        FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
                seg[i].buf  = map[i].dev_bus_addr |
                        (req->seg[i].first_sect << 9);
                blkback_pagemap_set(vaddr_pagenr(pending_req, i),
                                    virt_to_page(vaddr(pending_req, i)),
                                    blkif->domid, req->handle,
                                    req->seg[i].gref);
        }

        if (ret)
                goto fail_flush;

        if (vbd_translate(&preq, blkif, operation) != 0) {
                DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
                        operation == READ ? "read" : "write",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, preq.dev);
                goto fail_flush;
        }

        plug_queue(blkif, preq.bdev);
        atomic_set(&pending_req->pendcnt, 1);
        blkif_get(blkif);
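
        /*
         * Build bios covering the mapped segments.  bio_add_page() may refuse
         * to grow a bio past the queue's limits; when that happens the current
         * bio is submitted, pendcnt is bumped, and a fresh bio is allocated
         * for the remaining segments.
         */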
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
                        DPRINTK("Misaligned I/O request from domain %d",
                                blkif->domid);
                        goto fail_put_bio;
                }

                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     virt_to_page(vaddr(pending_req, i)),
                                     seg[i].nsec << 9,
                                     seg[i].buf & ~PAGE_MASK) == 0)) {
                        if (bio) {
                                atomic_inc(&pending_req->pendcnt);
                                submit_bio(operation, bio);
                        }

                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }
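
        /*
         * A write barrier may arrive with no data segments at all; in that
         * case an empty bio is still submitted so the barrier reaches the
         * underlying device (bi_sector is irrelevant and set to -1).
         */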
        if (!bio) {
                BUG_ON(operation != WRITE_BARRIER);
                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
                bio->bi_sector  = -1;
        }

        submit_bio(operation, bio);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation == WRITE || operation == WRITE_BARRIER)
                blkif->st_wr_sect += preq.nr_sects;

        return;

 fail_flush:
        fast_flush_area(pending_req);
 fail_response:
        make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
        free_req(pending_req);
        msleep(1); /* back off a bit */
        return;

 fail_put_bio:
        __end_block_io_op(pending_req, -EINVAL);
        if (bio)
                bio_put(bio);
        unplug_queue(blkif);
        msleep(1); /* back off a bit */
        return;
}

/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */
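
/*
 * make_response() queues a blkif_response_t on the shared ring using the
 * ABI negotiated with the frontend (native, x86_32 or x86_64), and only
 * kicks the frontend's event channel when
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY says a notification is needed.  It
 * also re-arms the backend thread if further requests are already waiting.
 */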
static void make_response(blkif_t *blkif, u64 id,
                          unsigned short op, int st)
{
        blkif_response_t  resp;
        unsigned long     flags;
        blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

        } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
        }

        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

        if (more_to_do)
                blkif_notify_work(blkif);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}
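
/*
 * blkif_init() sizes the pending_req pool from the 'reqs' module parameter,
 * reserves blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST empty (ballooned)
 * pages plus matching grant-handle slots, and then registers the ring
 * interface and the xenbus backend driver.
 */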
static int __init blkif_init(void)
{
        int i, mmap_pages;

        if (!is_running_on_xen())
                return -ENODEV;

        mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

        pending_reqs          = kmalloc(sizeof(pending_reqs[0]) *
                                        blkif_reqs, GFP_KERNEL);
        pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
                                        mmap_pages, GFP_KERNEL);
        pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);

        if (blkback_pagemap_init(mmap_pages))
                goto out_of_memory;

        if (!pending_reqs || !pending_grant_handles || !pending_pages)
                goto out_of_memory;

        for (i = 0; i < mmap_pages; i++)
                pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;

        blkif_interface_init();

        memset(pending_reqs, 0, sizeof(pending_reqs[0]) * blkif_reqs);
        INIT_LIST_HEAD(&pending_free);

        for (i = 0; i < blkif_reqs; i++)
                list_add_tail(&pending_reqs[i].free_list, &pending_free);

        blkif_xenbus_init();

        return 0;

 out_of_memory:
        kfree(pending_reqs);
        kfree(pending_grant_handles);
        free_empty_pages_and_pagevec(pending_pages, mmap_pages);
        printk("%s: out of memory\n", __FUNCTION__);
        return -ENOMEM;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");