ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c @ 6742:ac6605bceb9d

remove pointless NULL check before calling kfree

Signed-off-by: Vincent Hanquez <vincent@xensource.com>
author vh249@arcadians.cl.cam.ac.uk
date Sat Sep 10 14:41:16 2005 +0000 (2005-09-10)
parents cdfa7dd00c44
children 9ead08216805
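Note: kfree() in the Linux kernel accepts a NULL pointer and simply returns, so guarding the call is redundant. A minimal sketch of the pattern this changeset removes (illustrative only, not the literal diff hunks; the variable name is borrowed from blkif_recover() below):

	/* Before: redundant guard -- kfree(NULL) is already a no-op. */
	if (copy != NULL)
		kfree(copy);

	/* After: call kfree() unconditionally, as the code below now does. */
	kfree(copy);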
line source
/******************************************************************************
 * blkfront.c
 *
 * XenLinux virtual block-device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#if 1
#define ASSERT(p) \
	if (!(p)) { printk("Assertion '%s' failed, line %d, file %s", #p , \
	__LINE__, __FILE__); *(int*)0=0; }
#else
#define ASSERT(_p)
#endif

#include <linux/version.h>
#include "block.h"
#include <linux/cdrom.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <asm-xen/evtchn.h>
#include <asm-xen/xenbus.h>
#include <asm-xen/xen-public/grant_table.h>
#include <asm-xen/gnttab.h>

#define BLKIF_STATE_DISCONNECTED 0
#define BLKIF_STATE_CONNECTED    1

static unsigned int blkif_state = BLKIF_STATE_DISCONNECTED;

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
#define GRANTREF_INVALID (1<<15)
#define GRANT_INVALID_REF (0xFFFF)

static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */

static void kick_pending_request_queues(struct blkfront_info *info);

static void blkif_completion(struct blk_shadow *s);

static inline int GET_ID_FROM_FREELIST(
	struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free > BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static inline void ADD_ID_TO_FREELIST(
	struct blkfront_info *info, unsigned long id)
{
	info->shadow[id].req.id = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

static inline void pickle_request(struct blk_shadow *s, blkif_request_t *r)
{

	s->req = *r;
}

static inline void unpickle_request(blkif_request_t *r, struct blk_shadow *s)
{

	*r = s->req;
}

static inline void flush_requests(struct blkfront_info *info)
{
	RING_PUSH_REQUESTS(&info->ring);
	notify_via_evtchn(info->evtchn);
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	spin_lock_irq(&blkif_io_lock);
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

int blkif_open(struct inode *inode, struct file *filep)
{
	return 0;
}


int blkif_release(struct inode *inode, struct file *filep)
{
	return 0;
}


int blkif_ioctl(struct inode *inode, struct file *filep,
		unsigned command, unsigned long argument)
{
	int i;

	DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
		      command, (long)argument, inode->i_rdev);

	switch ( command )
	{
	case HDIO_GETGEO:
		/* return ENOSYS to use defaults */
		return -ENOSYS;

	case CDROMMULTISESSION:
		DPRINTK("FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char *)(argument + i)))
				return -EFAULT;
		return 0;

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}


/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	blkif_request_t *ring_req;
	struct bio *bio;
	struct bio_vec *bvec;
	int idx;
	unsigned long id;
	unsigned int fsect, lsect;
	int ref;
	grant_ref_t gref_head;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = GET_ID_FROM_FREELIST(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	ring_req->sector_number = (blkif_sector_t)req->sector;
	ring_req->handle = info->handle;

	ring_req->nr_segments = 0;
	rq_for_each_bio (bio, req) {
		bio_for_each_segment (bvec, bio, idx) {
			BUG_ON(ring_req->nr_segments
			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
			fsect = bvec->bv_offset >> 9;
			lsect = fsect + (bvec->bv_len >> 9) - 1;
			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&gref_head);
			ASSERT(ref != -ENOSPC);

			gnttab_grant_foreign_access_ref(
				ref,
				info->backend_id,
				buffer_mfn,
				rq_data_dir(req) );

			info->shadow[id].frame[ring_req->nr_segments] =
				buffer_mfn;

			ring_req->frame_and_sects[ring_req->nr_segments] =
				blkif_fas_from_gref(ref, fsect, lsect);

			ring_req->nr_segments++;
		}
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	pickle_request(&info->shadow[id], ring_req);

	gnttab_free_grant_references(gref_head);

	return 0;
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
void do_blkif_request(request_queue_t *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	DPRINTK("Entered do_blkif_request\n");

	queued = 0;

	while ((req = elv_next_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (!blk_fs_request(req)) {
			end_request(req, 0);
			continue;
		}

		if (RING_FULL(&info->ring))
			goto wait;

		DPRINTK("do_blk_req %p: cmd %p, sec %lx, "
			"(%u/%li) buffer:%p [%s]\n",
			req, req->cmd, req->sector, req->current_nr_sectors,
			req->nr_sectors, req->buffer,
			rq_data_dir(req) ? "write" : "read");

		blkdev_dequeue_request(req);
		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
		wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}


static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct request *req;
	blkif_response_t *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED || recovery)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		req = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		ADD_ID_TO_FREELIST(info, id);

		switch (bret->operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				DPRINTK("Bad return from blkdev data "
					"request: %x\n", bret->status);

			BUG_ON(end_that_request_first(
				req, (bret->status == BLKIF_RSP_OKAY),
				req->hard_nr_sectors));
			end_that_request_last(req);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}

static void blkif_free(struct blkfront_info *info)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_DISCONNECTED;
	spin_unlock_irq(&blkif_io_lock);

	/* Free resources associated with old device channel. */
	if (info->ring.sring != NULL) {
		free_page((unsigned long)info->ring.sring);
		info->ring.sring = NULL;
	}
	if (info->ring_ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(info->ring_ref, 0);
	info->ring_ref = GRANT_INVALID_REF;
	unbind_evtchn_from_irqhandler(info->evtchn, info);
	info->evtchn = 0;
}

static void blkif_recover(struct blkfront_info *info)
{
	int i;
	blkif_request_t *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = (struct blk_shadow *)kmalloc(sizeof(info->shadow), GFP_KERNEL);
	BUG_ON(copy == NULL);
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and unpickle shadow state into it. */
		req = RING_GET_REQUEST(
			&info->ring, info->ring.req_prod_pvt);
		unpickle_request(req, &copy[i]);

		/* We get a new request id, and must reset the shadow state. */
		req->id = GET_ID_FROM_FREELIST(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++) {
			if ( req->frame_and_sects[j] & GRANTREF_INVALID )
				gnttab_grant_foreign_access_ref(
					blkif_gref_from_fas(
						req->frame_and_sects[j]),
					info->backend_id,
					info->shadow[req->id].frame[j],
					rq_data_dir(
						(struct request *)
						info->shadow[req->id].request));
			req->frame_and_sects[j] &= ~GRANTREF_INVALID;
		}
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	recovery = 0;

	/* info->ring->req_prod will be set when we flush_requests(). */
	wmb();

	/* Kick things back into life. */
	flush_requests(info);

	/* Now safe to let other people use the interface. */
	info->connected = BLKIF_STATE_CONNECTED;
}

static void blkif_connect(struct blkfront_info *info, u16 evtchn)
{
	int err = 0;

	info->evtchn = evtchn;

	err = bind_evtchn_to_irqhandler(
		info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
	if (err != 0) {
		WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
		return;
	}
}


static struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static void watch_for_status(struct xenbus_watch *watch, const char *node)
{
	struct blkfront_info *info;
	unsigned int binfo;
	unsigned long sectors, sector_size;
	int err;

	info = container_of(watch, struct blkfront_info, watch);
	node += strlen(watch->node);

	/* FIXME: clean up when error on the other end. */
	if (info->connected == BLKIF_STATE_CONNECTED)
		return;

	err = xenbus_gather(watch->node,
			    "sectors", "%lu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_error(info->xbdev, err,
				 "reading backend fields at %s", watch->node);
		return;
	}

	xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
	info->connected = BLKIF_STATE_CONNECTED;

	blkif_state = BLKIF_STATE_CONNECTED;

	xenbus_dev_ok(info->xbdev);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static int setup_blkring(struct xenbus_device *dev, struct blkfront_info *info)
{
	blkif_sring_t *sring;
	evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (void *)__get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_error(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = gnttab_grant_foreign_access(info->backend_id,
					  virt_to_mfn(info->ring.sring), 0);
	if (err == -ENOSPC) {
		free_page((unsigned long)info->ring.sring);
		info->ring.sring = 0;
		xenbus_dev_error(dev, err, "granting access to ring page");
		return err;
	}
	info->ring_ref = err;

	op.u.alloc_unbound.dom = info->backend_id;
	err = HYPERVISOR_event_channel_op(&op);
	if (err) {
		gnttab_end_foreign_access(info->ring_ref, 0);
		info->ring_ref = GRANT_INVALID_REF;
		free_page((unsigned long)info->ring.sring);
		info->ring.sring = 0;
		xenbus_dev_error(dev, err, "allocating event channel");
		return err;
	}
	blkif_connect(info, op.u.alloc_unbound.port);
	return 0;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	char *backend;
	const char *message;
	int err;

	backend = NULL;
	err = xenbus_gather(dev->nodename,
			    "backend-id", "%i", &info->backend_id,
			    "backend", NULL, &backend,
			    NULL);
	if (XENBUS_EXIST_ERR(err))
		goto out;
	if (backend && strlen(backend) == 0) {
		err = -ENOENT;
		goto out;
	}
	if (err < 0) {
		xenbus_dev_error(dev, err, "reading %s/backend or backend-id",
				 dev->nodename);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err) {
		xenbus_dev_error(dev, err, "setting up block ring");
		goto out;
	}

	err = xenbus_transaction_start(dev->nodename);
	if (err) {
		xenbus_dev_error(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(dev->nodename, "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	info->backend = backend;
	backend = NULL;

	info->watch.node = info->backend;
	info->watch.callback = watch_for_status;
	err = register_xenbus_watch(&info->watch);
	if (err) {
		message = "registering watch on backend";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(0);
	if (err) {
		xenbus_dev_error(dev, err, "completing transaction");
		goto destroy_blkring;
	}

 out:
	kfree(backend);
	return err;

 abort_transaction:
	xenbus_transaction_end(1);
	/* Have to do this *outside* transaction. */
	xenbus_dev_error(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info);
	goto out;
}

/* Setup supplies the backend dir and virtual device.

   We place an event channel and shared frame entries.
   We watch the backend to wait until it is ready. */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(dev->nodename, "virtual-device", "%i", &vdevice);
	if (XENBUS_EXIST_ERR(err))
		return err;
	if (err < 0) {
		xenbus_dev_error(dev, err, "reading virtual-device");
		return err;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_error(dev, err, "allocating info structure");
		return err;
	}
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	info->mi = NULL;
	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);

	info->shadow_free = 0;
	memset(info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
	dev->data = info;

	err = talk_to_backend(dev, info);
	if (err) {
		kfree(info);
		dev->data = NULL;
		return err;
	}

	/* Call once in case entries already there. */
	watch_for_status(&info->watch, info->watch.node);
	return 0;
}

static int blkfront_remove(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->data;

	if (info->backend)
		unregister_xenbus_watch(&info->watch);

	if (info->mi)
		xlvbd_del(info);

	blkif_free(info);

	kfree(info->backend);
	kfree(info);

	return 0;
}

static int blkfront_suspend(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->data;

	unregister_xenbus_watch(&info->watch);
	kfree(info->backend);
	info->backend = NULL;

	recovery = 1;
	blkif_free(info);

	return 0;
}

static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->data;
	int err;

	/* FIXME: Check geometry hasn't changed here... */
	err = talk_to_backend(dev, info);
	if (!err) {
		blkif_recover(info);
	}
	return err;
}

static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.suspend = blkfront_suspend,
};

static void __init init_blk_xenbus(void)
{
	xenbus_register_device(&blkfront);
}

static int wait_for_blkif(void)
{
	int err = 0;
	int i;

	/*
	 * We should figure out how many and which devices we need to
	 * proceed and only wait for those. For now, continue once the
	 * first device is around.
	 */
	for (i = 0; blkif_state != BLKIF_STATE_CONNECTED && (i < 10*HZ); i++) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	if (blkif_state != BLKIF_STATE_CONNECTED) {
		WPRINTK("Timeout connecting to device!\n");
		err = -ENOSYS;
	}
	return err;
}

static int __init xlblk_init(void)
{
	if ((xen_start_info->flags & SIF_INITDOMAIN) ||
	    (xen_start_info->flags & SIF_BLK_BE_DOMAIN) )
		return 0;

	IPRINTK("Initialising virtual block device driver\n");

	init_blk_xenbus();

	wait_for_blkif();

	return 0;
}

module_init(xlblk_init);

static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(
			blkif_gref_from_fas(s->req.frame_and_sects[i]), 0);
}

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */