ia64/linux-2.6.18-xen.hg

view drivers/xen/fbfront/xenfb.c @ 470:5baef0e18e36

xenbus: prevent warnings on unhandled enumeration values

XenbusStateReconfiguring/XenbusStateReconfigured were introduced by
c/s 437, but aren't handled in many switch statements. This c/s also
introduced a possibly unreferenced label, which also gets eliminated
here.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 05 17:28:41 2008 +0000 (2008-03-05)
parents ca05cf1a9bdc
children ba72914de93a
line source
/*
 * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
 *
 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
 *
 * Based on linux/drivers/video/q40fb.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 */

/*
 * TODO:
 *
 * Switch to grant tables when they become capable of dealing with the
 * frame buffer.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <asm/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>
#include <linux/kthread.h>
struct xenfb_mapping
{
	struct list_head	link;
	struct vm_area_struct	*vma;
	atomic_t		map_refs;
	int			faults;
	struct xenfb_info	*info;
};

struct xenfb_info
{
	struct task_struct	*kthread;
	wait_queue_head_t	wq;

	unsigned char		*fb;
	struct fb_info		*fb_info;
	struct timer_list	refresh;
	int			dirty;
	int			x1, y1, x2, y2;	/* dirty rectangle,
						   protected by dirty_lock */
	spinlock_t		dirty_lock;
	struct mutex		mm_lock;
	int			nr_pages;
	struct page		**pages;
	struct list_head	mappings;	/* protected by mm_lock */

	int			irq;
	struct xenfb_page	*page;
	unsigned long		*mfns;
	int			update_wanted;	/* XENFB_TYPE_UPDATE wanted */

	struct xenbus_device	*xbdev;
};
/*
 * How the locks work together
 *
 * There are two locks: spinlock dirty_lock protecting the dirty
 * rectangle, and mutex mm_lock protecting mappings.
 *
 * The problem is that dirty rectangle and mappings aren't
 * independent: the dirty rectangle must cover all faulted pages in
 * mappings.  We need to prove that our locking maintains this
 * invariant.
 *
 * There are several kinds of critical regions:
 *
 * 1. Holding only dirty_lock: xenfb_refresh().  May run in
 *    interrupts.  Extends the dirty rectangle.  Trivially preserves
 *    invariant.
 *
 * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close().  Touch
 *    only mappings.  The former creates unfaulted pages.  Preserves
 *    invariant.  The latter removes pages.  Preserves invariant.
 *
 * 3. Holding both locks: xenfb_vm_nopage().  Extends the dirty
 *    rectangle and updates mappings consistently.  Preserves
 *    invariant.
 *
 * 4. The ugliest one: xenfb_update_screen().  Clear the dirty
 *    rectangle and update mappings consistently.
 *
 *    We can't simply hold both locks, because zap_page_range() cannot
 *    be called with a spinlock held.
 *
 *    Therefore, we first clear the dirty rectangle with both locks
 *    held.  Then we unlock dirty_lock and update the mappings.
 *    Critical regions that hold only dirty_lock may interfere with
 *    that.  This can only be region 1: xenfb_refresh().  But that
 *    just extends the dirty rectangle, which can't harm the
 *    invariant.
 *
 * But FIXME: the invariant is too weak.  It misses that the fault
 * record in mappings must be consistent with the mapping of pages in
 * the associated address space!  do_no_page() updates the PTE after
 * xenfb_vm_nopage() returns, i.e. outside the critical region.  This
 * allows the following race:
 *
 * X writes to some address in the Xen frame buffer
 * Fault - call do_no_page()
 *     call xenfb_vm_nopage()
 *         grab mm_lock
 *         map->faults++;
 *         release mm_lock
 *     return back to do_no_page()
 * (preempted, or SMP)
 * Xen worker thread runs.
 *     grab mm_lock
 *     look at mappings
 *     find this mapping, zaps its pages (but page not in pte yet)
 *     clear map->faults
 *     releases mm_lock
 * (back to X process)
 * put page in X's pte
 *
 * Oh well, we won't be updating the writes to this page anytime soon.
 */
static int xenfb_fps = 20;
static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;

static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
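
/*
 * Out-event ring, as used by xenfb_do_update() and xenfb_queue_full()
 * below: out_prod and out_cons are free-running __u32 counters, and
 * XENFB_OUT_RING_REF() (from xen/interface/io/fbif.h) reduces an index
 * modulo the ring size.  The ring is empty when prod == cons and full
 * when prod - cons == XENFB_OUT_RING_LEN.  Unsigned arithmetic keeps
 * that test correct across wraparound: e.g. prod = 0x00000002 and
 * cons = 0xfffffffe give prod - cons == 4.
 */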
static void xenfb_do_update(struct xenfb_info *info,
			    int x, int y, int w, int h)
{
	union xenfb_out_event event;
	__u32 prod;

	event.type = XENFB_TYPE_UPDATE;
	event.update.x = x;
	event.update.y = y;
	event.update.width = w;
	event.update.height = h;

	prod = info->page->out_prod;
	/* caller ensures !xenfb_queue_full() */
	mb();			/* ensure ring space available */
	XENFB_OUT_RING_REF(info->page, prod) = event;
	wmb();			/* ensure ring contents visible */
	info->page->out_prod = prod + 1;

	notify_remote_via_irq(info->irq);
}
static int xenfb_queue_full(struct xenfb_info *info)
{
	__u32 cons, prod;

	prod = info->page->out_prod;
	cons = info->page->out_cons;
	return prod - cons == XENFB_OUT_RING_LEN;
}
static void xenfb_update_screen(struct xenfb_info *info)
{
	unsigned long flags;
	int y1, y2, x1, x2;
	struct xenfb_mapping *map;

	if (!info->update_wanted)
		return;
	if (xenfb_queue_full(info))
		return;

	mutex_lock(&info->mm_lock);

	spin_lock_irqsave(&info->dirty_lock, flags);
	y1 = info->y1;
	y2 = info->y2;
	x1 = info->x1;
	x2 = info->x2;
	info->x1 = info->y1 = INT_MAX;
	info->x2 = info->y2 = 0;
	spin_unlock_irqrestore(&info->dirty_lock, flags);

	list_for_each_entry(map, &info->mappings, link) {
		if (!map->faults)
			continue;
		zap_page_range(map->vma, map->vma->vm_start,
			       map->vma->vm_end - map->vma->vm_start, NULL);
		map->faults = 0;
	}

	mutex_unlock(&info->mm_lock);

	if (x2 < x1 || y2 < y1) {
		printk(KERN_WARNING "xenfb_update_screen bogus rect %d %d %d %d\n",
		       x1, x2, y1, y2);
		WARN_ON(1);
	}
	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
}
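
/*
 * Refresh pipeline: the fb_ops drawing hooks further down call
 * xenfb_refresh(), which extends the dirty rectangle, sets the dirty
 * flag and arms a timer limited to xenfb_fps firings per second.  The
 * timer wakes this thread, which clears the flag and pushes a single
 * XENFB_TYPE_UPDATE event to the backend via xenfb_update_screen().
 */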
static int xenfb_thread(void *data)
{
	struct xenfb_info *info = data;

	while (!kthread_should_stop()) {
		if (info->dirty) {
			info->dirty = 0;
			xenfb_update_screen(info);
		}
		wait_event_interruptible(info->wq,
			kthread_should_stop() || info->dirty);
		try_to_freeze();
	}
	return 0;
}
static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	u32 v;

	if (regno > info->cmap.len)
		return 1;

	red >>= (16 - info->var.red.length);
	green >>= (16 - info->var.green.length);
	blue >>= (16 - info->var.blue.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset);

	/* FIXME is this sane? check against xxxfb_setcolreg()! */
	switch (info->var.bits_per_pixel) {
	case 16:
	case 24:
	case 32:
		((u32 *)info->pseudo_palette)[regno] = v;
		break;
	}

	return 0;
}
static void xenfb_timer(unsigned long data)
{
	struct xenfb_info *info = (struct xenfb_info *)data;
	wake_up(&info->wq);
}

static void __xenfb_refresh(struct xenfb_info *info,
			    int x1, int y1, int w, int h)
{
	int y2, x2;

	y2 = y1 + h;
	x2 = x1 + w;

	if (info->y1 > y1)
		info->y1 = y1;
	if (info->y2 < y2)
		info->y2 = y2;
	if (info->x1 > x1)
		info->x1 = x1;
	if (info->x2 < x2)
		info->x2 = x2;
	info->dirty = 1;

	if (timer_pending(&info->refresh))
		return;

	mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
}
static void xenfb_refresh(struct xenfb_info *info,
			  int x1, int y1, int w, int h)
{
	unsigned long flags;

	spin_lock_irqsave(&info->dirty_lock, flags);
	__xenfb_refresh(info, x1, y1, w, h);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
}

static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
	struct xenfb_info *info = p->par;

	cfb_fillrect(p, rect);
	xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
}

static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
	struct xenfb_info *info = p->par;

	cfb_imageblit(p, image);
	xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
}

static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
	struct xenfb_info *info = p->par;

	cfb_copyarea(p, area);
	xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
}
static void xenfb_vm_open(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	atomic_inc(&map->map_refs);
}

static void xenfb_vm_close(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;

	mutex_lock(&info->mm_lock);
	if (atomic_dec_and_test(&map->map_refs)) {
		list_del(&map->link);
		kfree(map);
	}
	mutex_unlock(&info->mm_lock);
}

static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
				    unsigned long vaddr, int *type)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;
	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	if (pgnr >= info->nr_pages)
		return NOPAGE_SIGBUS;

	mutex_lock(&info->mm_lock);
	spin_lock_irqsave(&info->dirty_lock, flags);
	page = info->pages[pgnr];
	get_page(page);
	map->faults++;

	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
	if (y2 > info->fb_info->var.yres)
		y2 = info->fb_info->var.yres;
	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
	mutex_unlock(&info->mm_lock);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}
static struct vm_operations_struct xenfb_vm_ops = {
	.open	= xenfb_vm_open,
	.close	= xenfb_vm_close,
	.nopage	= xenfb_vm_nopage,
};
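
/*
 * mmap() deliberately maps no pages up front: the first store to each
 * page faults into xenfb_vm_nopage(), which counts the fault and
 * dirties the corresponding lines.  xenfb_update_screen() later zaps
 * faulted mappings again, so further writes keep faulting and the
 * dirty rectangle keeps covering them.
 */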
static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
{
	struct xenfb_info *info = fb_info->par;
	struct xenfb_mapping *map;
	int map_pages;

	if (!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
	if (map_pages > info->nr_pages)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->vma = vma;
	map->faults = 0;
	map->info = info;
	atomic_set(&map->map_refs, 1);

	mutex_lock(&info->mm_lock);
	list_add(&map->link, &info->mappings);
	mutex_unlock(&info->mm_lock);

	vma->vm_ops = &xenfb_vm_ops;
	vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
	vma->vm_private_data = map;

	return 0;
}
static struct fb_ops xenfb_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= xenfb_setcolreg,
	.fb_fillrect	= xenfb_fillrect,
	.fb_copyarea	= xenfb_copyarea,
	.fb_imageblit	= xenfb_imageblit,
	.fb_mmap	= xenfb_mmap,
};
static irqreturn_t xenfb_event_handler(int irq, void *dev_id,
				       struct pt_regs *regs)
{
	/*
	 * No in events recognized; simply ignore them all.
	 * If you need to recognize some, see xenkbd's input_handler()
	 * for how to do that.
	 */
	struct xenfb_info *info = dev_id;
	struct xenfb_page *page = info->page;

	if (page->in_cons != page->in_prod) {
		info->page->in_cons = info->page->in_prod;
		notify_remote_via_irq(info->irq);
	}
	return IRQ_HANDLED;
}
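
/* Translate a vmalloc()ed address to a machine frame number: the
 * backend needs machine frames, not pseudo-physical frame numbers. */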
static unsigned long vmalloc_to_mfn(void *address)
{
	return pfn_to_mfn(vmalloc_to_pfn(address));
}
static int __devinit xenfb_probe(struct xenbus_device *dev,
				 const struct xenbus_device_id *id)
{
	struct xenfb_info *info;
	struct fb_info *fb_info;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}
	dev->dev.driver_data = info;
	info->xbdev = dev;
	info->irq = -1;
	info->x1 = info->y1 = INT_MAX;
	spin_lock_init(&info->dirty_lock);
	mutex_init(&info->mm_lock);
	init_waitqueue_head(&info->wq);
	init_timer(&info->refresh);
	info->refresh.function = xenfb_timer;
	info->refresh.data = (unsigned long)info;
	INIT_LIST_HEAD(&info->mappings);

	info->fb = vmalloc(xenfb_mem_len);
	if (info->fb == NULL)
		goto error_nomem;
	memset(info->fb, 0, xenfb_mem_len);

	info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
			      GFP_KERNEL);
	if (info->pages == NULL)
		goto error_nomem;

	info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
	if (!info->mfns)
		goto error_nomem;

	/* set up shared page */
	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!info->page)
		goto error_nomem;

	xenfb_init_shared_page(info);

	fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
				/* see fishy hackery below */
	if (fb_info == NULL)
		goto error_nomem;

	/* FIXME fishy hackery */
	fb_info->pseudo_palette = fb_info->par;
	fb_info->par = info;
	/* /FIXME */
	fb_info->screen_base = info->fb;

	fb_info->fbops = &xenfb_fb_ops;
	fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
	fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
	fb_info->var.bits_per_pixel = info->page->depth;

	fb_info->var.red = (struct fb_bitfield){16, 8, 0};
	fb_info->var.green = (struct fb_bitfield){8, 8, 0};
	fb_info->var.blue = (struct fb_bitfield){0, 8, 0};

	fb_info->var.activate = FB_ACTIVATE_NOW;
	fb_info->var.height = -1;
	fb_info->var.width = -1;
	fb_info->var.vmode = FB_VMODE_NONINTERLACED;

	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
	fb_info->fix.line_length = info->page->line_length;
	fb_info->fix.smem_start = 0;
	fb_info->fix.smem_len = xenfb_mem_len;
	strcpy(fb_info->fix.id, "xen");
	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
	fb_info->fix.accel = FB_ACCEL_NONE;

	fb_info->flags = FBINFO_FLAG_DEFAULT;

	ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
	if (ret < 0) {
		framebuffer_release(fb_info);
		xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
		goto error;
	}

	ret = register_framebuffer(fb_info);
	if (ret) {
		/* use fb_info here: info->fb_info is not set yet */
		fb_dealloc_cmap(&fb_info->cmap);
		framebuffer_release(fb_info);
		xenbus_dev_fatal(dev, ret, "register_framebuffer");
		goto error;
	}
	info->fb_info = fb_info;

	/* FIXME should this be delayed until backend XenbusStateConnected? */
	info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
	if (IS_ERR(info->kthread)) {
		ret = PTR_ERR(info->kthread);
		info->kthread = NULL;
		xenbus_dev_fatal(dev, ret, "xenfb_thread");
		goto error;
	}

	ret = xenfb_connect_backend(dev, info);
	if (ret < 0)
		goto error;

	return 0;

 error_nomem:
	ret = -ENOMEM;
	xenbus_dev_fatal(dev, ret, "allocating device memory");
 error:
	xenfb_remove(dev);
	return ret;
}
static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info);
	return xenfb_connect_backend(dev, info);
}

static int xenfb_remove(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	del_timer(&info->refresh);
	if (info->kthread)
		kthread_stop(info->kthread);
	xenfb_disconnect_backend(info);
	if (info->fb_info) {
		unregister_framebuffer(info->fb_info);
		fb_dealloc_cmap(&info->fb_info->cmap);
		framebuffer_release(info->fb_info);
	}
	free_page((unsigned long)info->page);
	vfree(info->mfns);
	kfree(info->pages);
	vfree(info->fb);
	kfree(info);

	return 0;
}
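
/*
 * The shared page carries a small page directory: pd[0] holds the MFN
 * of the info->mfns array, which in turn lists the MFN of every
 * framebuffer page.  Assuming the 800x600, 32 bpp defaults from this
 * tree's xen/interface/io/fbif.h (~469 framebuffer pages), that list
 * fits in a single page, so pd[1] stays 0.
 */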
static void xenfb_init_shared_page(struct xenfb_info *info)
{
	int i;

	for (i = 0; i < info->nr_pages; i++)
		info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);

	for (i = 0; i < info->nr_pages; i++)
		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);

	info->page->pd[0] = vmalloc_to_mfn(info->mfns);
	info->page->pd[1] = 0;
	info->page->width = XENFB_WIDTH;
	info->page->height = XENFB_HEIGHT;
	info->page->depth = XENFB_DEPTH;
	info->page->line_length = (info->page->depth / 8) * info->page->width;
	info->page->mem_length = xenfb_mem_len;
	info->page->in_cons = info->page->in_prod = 0;
	info->page->out_cons = info->page->out_prod = 0;
}
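
/*
 * Xenbus handshake: advertise the shared page and event channel to the
 * backend inside a transaction, retrying on -EAGAIN as xenbus
 * requires, then switch to XenbusStateInitialised and wait for the
 * backend state callback below.
 */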
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret;
	struct xenbus_transaction xbt;

	ret = bind_listening_port_to_irqhandler(
		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret,
				 "bind_listening_port_to_irqhandler");
		return ret;
	}
	info->irq = ret;

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		return ret;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    irq_to_evtchn_port(info->irq));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		return ret;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
	return ret;
}
static void xenfb_disconnect_backend(struct xenfb_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}
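
/*
 * Backend state callback.  The Reconfiguring/Reconfigured cases do
 * nothing; they are listed so the switch covers every enumeration
 * value, which is the unhandled-value warning this changeset fixes.
 */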
static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	struct xenfb_info *info = dev->dev.driver_data;
	int val;

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	InitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			goto InitWait; /* no InitWait seen yet, fudge it */

		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				 "request-update", "%d", &val) < 0)
			val = 0;
		if (val)
			info->update_wanted = 1;
		break;

	case XenbusStateClosing:
		/* FIXME is this safe in any dev->state? */
		xenbus_frontend_closed(dev);
		break;
	}
}
static struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }
};
MODULE_ALIAS("xen:vfb");

static struct xenbus_driver xenfb = {
	.name = "vfb",
	.owner = THIS_MODULE,
	.ids = xenfb_ids,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
};
static int __init xenfb_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	/* Nothing to do if running in dom0. */
	if (is_initial_xendomain())
		return -ENODEV;

	return xenbus_register_frontend(&xenfb);
}

static void __exit xenfb_cleanup(void)
{
	xenbus_unregister_driver(&xenfb);
}

module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_LICENSE("GPL");
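
For reference, a minimal userspace sketch of the access pattern this driver is
built around, assuming the frontend came up as /dev/fb0 (the node name and the
geometry-based length are illustrative assumptions, not taken from this file):
it maps the framebuffer write-shared, exactly as xenfb_mmap() requires, so
every page it touches takes the xenfb_vm_nopage() path above.

/* Hypothetical userspace test: map /dev/fb0 and scribble on it. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);

	if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
		return 1;
	/* assumes line_length == xres * bytes per pixel, as xenfb sets it */
	size_t len = var.xres * var.yres * var.bits_per_pixel / 8;
	/* PROT_WRITE + MAP_SHARED: xenfb_mmap() rejects anything else */
	uint32_t *fb = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED)
		return 1;
	memset(fb, 0xff, len);	/* white screen; each page faults once */
	munmap(fb, len);
	close(fd);
	return 0;
}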