linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c @ 8440:7c1f2e20123a (direct-io.hg)

Prevent vbd frontend from oopsing if the underlying device doesn't exist.

Signed-off-by: Horms <horms@verge.net.au>
Signed-off-by: Vincent Hanquez <vincent@xensource.com>
author   vhanquez@kneesa.uk.xensource.com
date     Tue Dec 27 10:40:33 2005 +0000
parents  3c88c4d68fc1
children 991ccc24bf2e

/******************************************************************************
 * blkfront.c
 *
 * XenLinux virtual block-device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
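
/*
 * Debug build: ASSERT() reports the failed condition, the source line and
 * file, then forces an immediate crash by writing through a NULL pointer.
 */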
#if 1
#define ASSERT(p) \
	if (!(p)) { printk("Assertion '%s' failed, line %d, file %s", #p , \
	__LINE__, __FILE__); *(int*)0=0; }
#else
#define ASSERT(_p)
#endif

#include <linux/version.h>
#include "block.h"
#include <linux/cdrom.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <asm-xen/evtchn.h>
#include <asm-xen/xenbus.h>
#include <asm-xen/xen-public/grant_table.h>
#include <asm-xen/gnttab.h>
#include <asm/hypervisor.h>

#define BLKIF_STATE_DISCONNECTED 0
#define BLKIF_STATE_CONNECTED    1
#define BLKIF_STATE_SUSPENDED    2

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF 0

static void connect(struct blkfront_info *);
static void blkfront_closing(struct xenbus_device *);
static int blkfront_remove(struct xenbus_device *);
static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
static int setup_blkring(struct xenbus_device *, struct blkfront_info *);

static void kick_pending_request_queues(struct blkfront_info *);

static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
static void blkif_restart_queue(void *arg);
static void blkif_recover(struct blkfront_info *);
static void blkif_completion(struct blk_shadow *);
static void blkif_free(struct blkfront_info *, int);

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(NULL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading virtual-device");
		return err;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}
	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	info->mi = NULL;
	info->gd = NULL;
	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);

	info->shadow_free = 0;
	memset(info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	info->users = 0;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
	dev->data = info;

	err = talk_to_backend(dev, info);
	if (err) {
		kfree(info);
		dev->data = NULL;
		return err;
	}

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->data;
	int err;

	DPRINTK("blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, 1);

	err = talk_to_backend(dev, info);
	if (!err)
		blkif_recover(info);

	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction *xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	xbt = xenbus_transaction_start();
	if (IS_ERR(xbt)) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref","%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_switch_state(dev, xbt, XenbusStateInitialised);
	if (err) {
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
destroy_blkring:
	blkif_free(info, 0);
out:
	return err;
}

static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	blkif_sring_t *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(
		info->evtchn, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    XenbusState backend_state)
{
	struct blkfront_info *info = dev->data;
	struct block_device *bd;

	DPRINTK("blkfront:backend_changed.\n");

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		connect(info);
		break;

	case XenbusStateClosing:
		bd = bdget(info->dev);
		if (bd == NULL)
			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

		down(&bd->bd_sem);
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		up(&bd->bd_sem);
		bdput(bd);
		break;
	}
}


/* ** Connection ** */

/*
** Invoked when the backend is finally 'ready' (and has produced the
** details about the physical device - #sectors, size, etc).
*/
static void connect(struct blkfront_info *info)
{
	unsigned long sectors, sector_size;
	unsigned int binfo;
	int err;

	if( (info->connected == BLKIF_STATE_CONNECTED) ||
	    (info->connected == BLKIF_STATE_SUSPENDED) )
		return;

	DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);

	err = xenbus_gather(NULL, info->xbdev->otherend,
			    "sectors", "%lu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	(void)xenbus_switch_state(info->xbdev, NULL, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->data;

	DPRINTK("blkfront_closing: %s removed\n", dev->nodename);

	if (info->mi) {
		DPRINTK("Calling xlvbd_del\n");
		xlvbd_del(info);
		info->mi = NULL;
	}

	xenbus_switch_state(dev, NULL, XenbusStateClosed);
}


static int blkfront_remove(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->data;

	DPRINTK("blkfront_remove: %s removed\n", dev->nodename);

	blkif_free(info, 0);

	kfree(info);

	return 0;
}
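
/*
 * The shadow array doubles as a free list: an unused slot's req.id holds the
 * index of the next free slot, 0x0fffffff marks the end of the list, and
 * 0x0fffffee poisons a slot while its request is in flight.
 */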
static inline int GET_ID_FROM_FREELIST(
	struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free > BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static inline void ADD_ID_TO_FREELIST(
	struct blkfront_info *info, unsigned long id)
{
	info->shadow[id].req.id = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}
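
/*
 * Push queued requests onto the shared ring and send an event-channel
 * notification only if the backend actually needs one.
 */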
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	spin_lock_irq(&blkif_io_lock);
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}
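
/*
 * Open/release track how many users hold the device; a backend request to
 * close is deferred until the last user drops it (see blkif_release).
 */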
int blkif_open(struct inode *inode, struct file *filep)
{
	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
	info->users++;
	return 0;
}


int blkif_release(struct inode *inode, struct file *filep)
{
	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
	info->users--;
	if (info->users == 0) {
		/* Check whether we have been instructed to close.  We will
		   have ignored this request initially, as the device was
		   still mounted. */
		struct xenbus_device * dev = info->xbdev;
		XenbusState state = xenbus_read_driver_state(dev->otherend);

		if (state == XenbusStateClosing)
			blkfront_closing(dev);
	}
	return 0;
}

int blkif_ioctl(struct inode *inode, struct file *filep,
		unsigned command, unsigned long argument)
{
	int i;

	DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
		      command, (long)argument, inode->i_rdev);

	switch ( command )
	{
	case HDIO_GETGEO:
		/* return ENOSYS to use defaults */
		return -ENOSYS;

	case CDROMMULTISESSION:
		DPRINTK("FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *         virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	blkif_request_t *ring_req;
	struct bio *bio;
	struct bio_vec *bvec;
	int idx;
	unsigned long id;
	unsigned int fsect, lsect;
	int ref;
	grant_ref_t gref_head;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = GET_ID_FROM_FREELIST(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	ring_req->sector_number = (blkif_sector_t)req->sector;
	ring_req->handle = info->handle;

	ring_req->nr_segments = 0;
	rq_for_each_bio (bio, req) {
		bio_for_each_segment (bvec, bio, idx) {
			BUG_ON(ring_req->nr_segments
			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
			fsect = bvec->bv_offset >> 9;
			lsect = fsect + (bvec->bv_len >> 9) - 1;
			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&gref_head);
			ASSERT(ref != -ENOSPC);

			gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req) );

			info->shadow[id].frame[ring_req->nr_segments] =
				mfn_to_pfn(buffer_mfn);

			ring_req->seg[ring_req->nr_segments] =
				(struct blkif_request_segment) {
					.gref = ref,
					.first_sect = fsect,
					.last_sect = lsect };

			ring_req->nr_segments++;
		}
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}

/*
 * do_blkif_request
 *
 * Pull requests off the block-layer queue and place them on the shared ring.
 */
void do_blkif_request(request_queue_t *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	DPRINTK("Entered do_blkif_request\n");

	queued = 0;

	while ((req = elv_next_request(rq)) != NULL) {
		info = req->rq_disk->private_data;
		if (!blk_fs_request(req)) {
			end_request(req, 0);
			continue;
		}

		if (RING_FULL(&info->ring))
			goto wait;

		DPRINTK("do_blk_req %p: cmd %p, sec %lx, "
			"(%u/%li) buffer:%p [%s]\n",
			req, req->cmd, req->sector, req->current_nr_sectors,
			req->nr_sectors, req->buffer,
			rq_data_dir(req) ? "write" : "read");

		blkdev_dequeue_request(req);
		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
		wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
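
/*
 * Interrupt handler: consume completed responses from the shared ring, finish
 * the corresponding block-layer requests, and restart the request queue.
 */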
static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct request *req;
	blkif_response_t *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;
		int ret;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		req = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		ADD_ID_TO_FREELIST(info, id);

		switch (bret->operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				DPRINTK("Bad return from blkdev data "
					"request: %x\n", bret->status);

			ret = end_that_request_first(
				req, (bret->status == BLKIF_RSP_OKAY),
				req->hard_nr_sectors);
			BUG_ON(ret);
			end_that_request_last(req);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else {
		info->ring.sring->rsp_event = i + 1;
	}

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}
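
/*
 * Release everything associated with the device channel: the shared ring page
 * and its grant reference, the event channel and the bound IRQ handler.
 */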
static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	spin_unlock_irq(&blkif_io_lock);

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;

}
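
/* A response has arrived for this request: drop its per-segment grant refs. */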
static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}
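
/*
 * After a suspend/resume, replay every request that was still in flight on
 * the old ring, re-granting the backend access to the data pages.
 */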
static void blkif_recover(struct blkfront_info *info)
{
	int i;
	blkif_request_t *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
	BUG_ON(copy == NULL);
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(
			&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = GET_ID_FROM_FREELIST(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir(
					(struct request *)
					info->shadow[req->id].request));
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	(void)xenbus_switch_state(info->xbdev, NULL, XenbusStateConnected);

	/* Now safe for us to use the shared ring */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	spin_unlock_irq(&blkif_io_lock);

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	spin_lock_irq(&blkif_io_lock);
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

/* ** Driver Registration ** */


static struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};


static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = backend_changed,
};
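
/*
 * Module entry point: bail out unless we are running on Xen, then register
 * the frontend driver with xenbus.
 */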
static int __init xlblk_init(void)
{
	if (xen_init() < 0)
		return -ENODEV;

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);

static void xlblk_exit(void)
{
	return xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_LICENSE("Dual BSD/GPL");

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */