ia64/linux-2.6.18-xen.hg

view drivers/xen/blkfront/blkfront.c @ 470:5baef0e18e36

xenbus: prevent warnings on unhandled enumeration values

XenbusStateReconfiguring/XenbusStateReconfigured were introduced by
c/s 437, but aren't handled in many switch statements. That c/s also
introduced a potentially unreferenced label, which is likewise
eliminated here.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 05 17:28:41 2008 +0000 (2008-03-05)
parents e8b49cfbdac0
children ba72914de93a
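
The warnings addressed here come from GCC's -Wswitch (enabled by -Wall): a
switch over an enum that omits enumerators and has no default case draws
"enumeration value ... not handled in switch". A minimal sketch of the
pattern, using hypothetical enum and function names (the real fix is visible
in backend_changed() below, which now names every XenbusState value):

/* Hypothetical names, for illustration only. */
enum example_state { StateOld, StateNew };

static void example_changed(enum example_state s)
{
	switch (s) {
	case StateOld:
		break;
	case StateNew:	/* without this case (and no default), -Wswitch warns */
		break;
	}
}

Listing the new states explicitly, rather than adding a default case, keeps
the warning useful the next time an enumerator is added.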
line source
/******************************************************************************
 * blkfront.c
 *
 * XenLinux virtual block-device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/version.h>
#include "block.h"
#include <linux/cdrom.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <scsi/scsi.h>
#include <xen/evtchn.h>
#include <xen/xenbus.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/protocols.h>
#include <xen/gnttab.h>
#include <asm/hypervisor.h>
#include <asm/maddr.h>

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif

#define BLKIF_STATE_DISCONNECTED 0
#define BLKIF_STATE_CONNECTED    1
#define BLKIF_STATE_SUSPENDED    2

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF 0

static void connect(struct blkfront_info *);
static void blkfront_closing(struct xenbus_device *);
static int blkfront_remove(struct xenbus_device *);
static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
static int setup_blkring(struct xenbus_device *, struct blkfront_info *);

static void kick_pending_request_queues(struct blkfront_info *);

static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
static void blkif_restart_queue(void *arg);
static void blkif_recover(struct blkfront_info *);
static void blkif_completion(struct blk_shadow *);
static void blkif_free(struct blkfront_info *, int);

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading virtual-device");
		return err;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue, (void *)info);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
	dev->dev.driver_data = info;

	err = talk_to_backend(dev, info);
	if (err) {
		kfree(info);
		dev->dev.driver_data = NULL;
		return err;
	}

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;
	int err;

	DPRINTK("blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_backend(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		blkif_recover(info);

	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref","%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    irq_to_evtchn_port(info->irq));
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	blkif_sring_t *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL|__GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = bind_listening_port_to_irqhandler(
		dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_listening_port_to_irqhandler");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev->dev.driver_data;
	struct block_device *bd;

	DPRINTK("blkfront:backend_changed.\n");

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		connect(info);
		break;

	case XenbusStateClosing:
		bd = bdget(info->dev);
		if (bd == NULL)
			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
		down(&bd->bd_sem);
#else
		mutex_lock(&bd->bd_mutex);
#endif
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
		up(&bd->bd_sem);
#else
		mutex_unlock(&bd->bd_mutex);
#endif
		bdput(bd);
		break;
	}
}

/* ** Connection ** */


/*
 * Invoked when the backend is finally 'ready' (and has produced the details
 * about the physical device - #sectors, size, etc).
 */
static void connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;

	if ((info->connected == BLKIF_STATE_CONNECTED) ||
	    (info->connected == BLKIF_STATE_SUSPENDED) )
		return;

	DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%Lu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%lu", &info->feature_barrier,
			    NULL);
	if (err)
		info->feature_barrier = 0;

	err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	(void)xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;
	unsigned long flags;

	DPRINTK("blkfront_closing: %s removed\n", dev->nodename);

	if (info->rq == NULL)
		goto out;

	spin_lock_irqsave(&blkif_io_lock, flags);
	/* No more blkif_request(). */
	blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	xlvbd_del(info);

 out:
	xenbus_frontend_closed(dev);
}

static int blkfront_remove(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;

	DPRINTK("blkfront_remove: %s removed\n", dev->nodename);

	blkif_free(info, 0);

	kfree(info);

	return 0;
}

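/*
 * The shadow entries double as a free list: an unused slot's req.id holds
 * the index of the next free slot, and 0x0fffffff marks the end of the
 * chain (see blkfront_probe() and blkif_recover(), which build the list).
 */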
static inline int GET_ID_FROM_FREELIST(
	struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free > BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static inline void ADD_ID_TO_FREELIST(
	struct blkfront_info *info, unsigned long id)
{
	info->shadow[id].req.id = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

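/*
 * Publish privately queued requests on the shared ring, and notify the
 * backend over the event channel only if it is actually waiting
 * (RING_PUSH_REQUESTS_AND_CHECK_NOTIFY sets 'notify' accordingly).
 */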
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

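/*
 * info->users counts opens of the block device; backend_changed() refuses
 * to close while it is non-zero, and a deferred Close request is honoured
 * in blkif_release() once the count drops back to zero.
 */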
int blkif_open(struct inode *inode, struct file *filep)
{
	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
	info->users++;
	return 0;
}


int blkif_release(struct inode *inode, struct file *filep)
{
	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
	info->users--;
	if (info->users == 0) {
		/* Check whether we have been instructed to close.  We will
		   have ignored this request initially, as the device was
		   still mounted. */
		struct xenbus_device *dev = info->xbdev;
		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);

		if (state == XenbusStateClosing && info->is_ready)
			blkfront_closing(dev);
	}
	return 0;
}

int blkif_ioctl(struct inode *inode, struct file *filep,
		unsigned command, unsigned long argument)
{
	int i;

	DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
		      command, (long)argument, inode->i_rdev);

	switch (command) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
	case HDIO_GETGEO: {
		struct block_device *bd = inode->i_bdev;
		struct hd_geometry geo;
		int ret;

		if (!argument)
			return -EINVAL;

		geo.start = get_start_sect(bd);
		ret = blkif_getgeo(bd, &geo);
		if (ret)
			return ret;

		if (copy_to_user((struct hd_geometry __user *)argument, &geo,
				 sizeof(geo)))
			return -EFAULT;

		return 0;
	}
#endif
	case CDROMMULTISESSION:
		DPRINTK("FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct blkfront_info *info =
			inode->i_bdev->bd_disk->private_data;
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}
	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	blkif_request_t *ring_req;
	struct bio *bio;
	struct bio_vec *bvec;
	int idx;
	unsigned long id;
	unsigned int fsect, lsect;
	int ref;
	grant_ref_t gref_head;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = GET_ID_FROM_FREELIST(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)req->sector;
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (blk_barrier_rq(req))
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = 0;
	rq_for_each_bio (bio, req) {
		bio_for_each_segment (bvec, bio, idx) {
			BUG_ON(ring_req->nr_segments
			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
			buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
			fsect = bvec->bv_offset >> 9;
			lsect = fsect + (bvec->bv_len >> 9) - 1;
			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(
				ref,
				info->xbdev->otherend_id,
				buffer_mfn,
				rq_data_dir(req) ? GTF_readonly : 0 );

			info->shadow[id].frame[ring_req->nr_segments] =
				mfn_to_pfn(buffer_mfn);

			ring_req->seg[ring_req->nr_segments] =
				(struct blkif_request_segment) {
					.gref       = ref,
					.first_sect = fsect,
					.last_sect  = lsect };

			ring_req->nr_segments++;
		}
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
void do_blkif_request(request_queue_t *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	DPRINTK("Entered do_blkif_request\n");

	queued = 0;

	while ((req = elv_next_request(rq)) != NULL) {
		info = req->rq_disk->private_data;
		if (!blk_fs_request(req)) {
			end_request(req, 0);
			continue;
		}

		if (RING_FULL(&info->ring))
			goto wait;

		DPRINTK("do_blk_req %p: cmd %p, sec %llx, "
			"(%u/%li) buffer:%p [%s]\n",
			req, req->cmd, (long long)req->sector,
			req->current_nr_sectors,
			req->nr_sectors, req->buffer,
			rq_data_dir(req) ? "write" : "read");


		blkdev_dequeue_request(req);
		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
		wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

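/*
 * Interrupt handler: consume responses from the shared ring, complete the
 * corresponding block-layer requests, recycle their shadow slots, and
 * restart the request queue if the ring has room again.
 */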
static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct request *req;
	blkif_response_t *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int uptodate;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;
		int ret;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		req = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		ADD_ID_TO_FREELIST(info, id);

		uptodate = (bret->status == BLKIF_RSP_OKAY);
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk("blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				uptodate = -EOPNOTSUPP;
				info->feature_barrier = 0;
				xlvbd_barrier(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				DPRINTK("Bad return from blkdev data "
					"request: %x\n", bret->status);

			ret = end_that_request_first(req, uptodate,
						     req->hard_nr_sectors);
			BUG_ON(ret);
			end_that_request_last(req, uptodate);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}

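/*
 * Tear down the device channel: quiesce the request queue, cancel pending
 * gnttab callbacks, revoke the ring's grant reference and release the IRQ.
 * 'suspend' selects whether we end up SUSPENDED (state kept for recovery)
 * or DISCONNECTED.
 */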
static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
}

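/* Revoke the grant references covering a completed request's segments. */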
static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0UL);
}

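/*
 * After suspend/resume the old ring is gone; replay every request that was
 * still in flight (recorded in the shadow copies) onto the new ring, with
 * freshly re-established grant references.
 */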
static void blkif_recover(struct blkfront_info *info)
{
	int i;
	blkif_request_t *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL | __GFP_NOFAIL);
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(
			&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = GET_ID_FROM_FREELIST(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir((struct request *)
					    info->shadow[req->id].request) ?
				GTF_readonly : 0);
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	(void)xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&blkif_io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&blkif_io_lock);
}

int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;

	return info->is_ready;
}


/* ** Driver Registration ** */


static struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};
MODULE_ALIAS("xen:vbd");

static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = backend_changed,
	.is_ready = blkfront_is_ready,
};


static int __init xlblk_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
	return xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_LICENSE("Dual BSD/GPL");