ia64/xen-unstable

view linux-2.6-xen-sparse/drivers/xen/fbfront/xenfb.c @ 13233:d275951acf10

[LINUX] Extend the event-channel interfaces to provide helper methods
for creating interdomain event channels bound to IRQ handlers.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@localhost.localdomain
date Sat Dec 30 18:23:27 2006 +0000 (2006-12-30)
parents 1b6354023e64
children b9ffa4b49d97
line source
1 /*
2 * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
3 *
4 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
5 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
6 *
7 * Based on linux/drivers/video/q40fb.c
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of this archive for
11 * more details.
12 */
14 /*
15 * TODO:
16 *
17 * Switch to grant tables when they become capable of dealing with the
18 * frame buffer.
19 */
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/fb.h>
24 #include <linux/module.h>
25 #include <linux/vmalloc.h>
26 #include <linux/mm.h>
27 #include <asm/hypervisor.h>
28 #include <xen/evtchn.h>
29 #include <xen/interface/io/fbif.h>
30 #include <xen/xenbus.h>
31 #include <linux/kthread.h>
/*
 * Tracks one mmap() of the frame buffer into a process address space.
 * Linked on xenfb_info.mappings; list and 'faults' are protected by
 * xenfb_info.mm_lock.
 */
struct xenfb_mapping
{
	struct list_head link;		/* entry in xenfb_info.mappings */
	struct vm_area_struct *vma;	/* the user VMA being tracked */
	atomic_t map_refs;		/* VMA open count; freed on last close */
	int faults;			/* pages faulted in since last zap */
	struct xenfb_info *info;	/* owning device instance */
};
/* Per-device state of one virtual frame buffer frontend. */
struct xenfb_info
{
	struct task_struct *kthread;	/* update thread, see xenfb_thread() */
	wait_queue_head_t wq;		/* kthread sleeps here until dirty */

	unsigned char *fb;		/* vmalloc()ed frame buffer memory */
	struct fb_info *fb_info;	/* registered fbdev device */
	struct timer_list refresh;	/* rate-limits updates to xenfb_fps */
	int dirty;			/* non-zero: kthread has work to do */
	int x1, y1, x2, y2;		/* dirty rectangle,
					   protected by dirty_lock */
	spinlock_t dirty_lock;
	struct mutex mm_lock;		/* see locking comment below */
	int nr_pages;			/* number of pages backing fb */
	struct page **pages;		/* the pages backing fb */
	struct list_head mappings;	/* protected by mm_lock */

	int irq;			/* event-channel IRQ, -1 if unbound */
	struct xenfb_page *page;	/* page shared with the backend */
	unsigned long *mfns;		/* MFNs of fb pages, referenced by page->pd */
	int update_wanted;		/* XENFB_TYPE_UPDATE wanted */

	struct xenbus_device *xbdev;	/* associated xenbus device */
};
67 /*
68 * How the locks work together
69 *
70 * There are two locks: spinlock dirty_lock protecting the dirty
71 * rectangle, and mutex mm_lock protecting mappings.
72 *
73 * The problem is that dirty rectangle and mappings aren't
74 * independent: the dirty rectangle must cover all faulted pages in
75 * mappings. We need to prove that our locking maintains this
76 * invariant.
77 *
78 * There are several kinds of critical regions:
79 *
80 * 1. Holding only dirty_lock: xenfb_refresh(). May run in
81 * interrupts. Extends the dirty rectangle. Trivially preserves
82 * invariant.
83 *
84 * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close(). Touch
85 * only mappings. The former creates unfaulted pages. Preserves
86 * invariant. The latter removes pages. Preserves invariant.
87 *
88 * 3. Holding both locks: xenfb_vm_nopage(). Extends the dirty
89 * rectangle and updates mappings consistently. Preserves
90 * invariant.
91 *
92 * 4. The ugliest one: xenfb_update_screen(). Clear the dirty
93 * rectangle and update mappings consistently.
94 *
95 * We can't simply hold both locks, because zap_page_range() cannot
96 * be called with a spinlock held.
97 *
98 * Therefore, we first clear the dirty rectangle with both locks
99 * held. Then we unlock dirty_lock and update the mappings.
100 * Critical regions that hold only dirty_lock may interfere with
101 * that. This can only be region 1: xenfb_refresh(). But that
102 * just extends the dirty rectangle, which can't harm the
103 * invariant.
104 *
105 * But FIXME: the invariant is too weak. It misses that the fault
106 * record in mappings must be consistent with the mapping of pages in
107 * the associated address space! do_no_page() updates the PTE after
108 * xenfb_vm_nopage() returns, i.e. outside the critical region. This
109 * allows the following race:
110 *
111 * X writes to some address in the Xen frame buffer
112 * Fault - call do_no_page()
113 * call xenfb_vm_nopage()
114 * grab mm_lock
115 * map->faults++;
116 * release mm_lock
117 * return back to do_no_page()
118 * (preempted, or SMP)
119 * Xen worker thread runs.
120 * grab mm_lock
121 * look at mappings
122 * find this mapping, zaps its pages (but page not in pte yet)
123 * clear map->faults
124 * releases mm_lock
125 * (back to X process)
126 * put page in X's pte
127 *
 * Oh well, we won't be updating the writes to this page anytime soon.
129 */
static int xenfb_fps = 20;	/* maximum screen update rate, frames/sec */
/* raw frame buffer size in bytes: pixels times bytes per pixel */
static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;

/* forward declarations */
static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
/*
 * Queue an XENFB_TYPE_UPDATE event describing rectangle (x, y, w, h)
 * on the shared out ring and notify the backend.  The caller must
 * already have verified there is ring space (!xenfb_queue_full()).
 */
static void xenfb_do_update(struct xenfb_info *info,
			    int x, int y, int w, int h)
{
	union xenfb_out_event event;
	__u32 prod;

	event.type = XENFB_TYPE_UPDATE;
	event.update.x = x;
	event.update.y = y;
	event.update.width = w;
	event.update.height = h;

	prod = info->page->out_prod;
	/* caller ensures !xenfb_queue_full() */
	mb();			/* ensure ring space available */
	XENFB_OUT_RING_REF(info->page, prod) = event;
	wmb();			/* ensure ring contents visible */
	info->page->out_prod = prod + 1;

	notify_remote_via_irq(info->irq);
}
161 static int xenfb_queue_full(struct xenfb_info *info)
162 {
163 __u32 cons, prod;
165 prod = info->page->out_prod;
166 cons = info->page->out_cons;
167 return prod - cons == XENFB_OUT_RING_LEN;
168 }
/*
 * Push the accumulated dirty rectangle to the backend, then zap all
 * faulted user mappings so further writes fault again and re-dirty it.
 * Runs in the xenfb kthread.  See the "How the locks work together"
 * comment above for why dirty_lock is dropped before zap_page_range().
 */
static void xenfb_update_screen(struct xenfb_info *info)
{
	unsigned long flags;
	int y1, y2, x1, x2;
	struct xenfb_mapping *map;

	/* backend did not ask for update events */
	if (!info->update_wanted)
		return;
	/* ring full: drop this update, a later one will cover it */
	if (xenfb_queue_full(info))
		return;

	mutex_lock(&info->mm_lock);

	/* atomically snapshot and reset the dirty rectangle */
	spin_lock_irqsave(&info->dirty_lock, flags);
	y1 = info->y1;
	y2 = info->y2;
	x1 = info->x1;
	x2 = info->x2;
	info->x1 = info->y1 = INT_MAX;
	info->x2 = info->y2 = 0;
	spin_unlock_irqrestore(&info->dirty_lock, flags);

	/* unmap faulted pages so future writes fault and re-dirty */
	list_for_each_entry(map, &info->mappings, link) {
		if (!map->faults)
			continue;
		zap_page_range(map->vma, map->vma->vm_start,
			       map->vma->vm_end - map->vma->vm_start, NULL);
		map->faults = 0;
	}

	mutex_unlock(&info->mm_lock);

	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
}
/*
 * Per-device worker thread: whenever the dirty flag is set, send the
 * dirty rectangle to the backend via xenfb_update_screen().
 */
static int xenfb_thread(void *data)
{
	struct xenfb_info *info = data;

	while (!kthread_should_stop()) {
		if (info->dirty) {
			info->dirty = 0;
			xenfb_update_screen(info);
		}
		/* dirty is part of the wait condition: no lost wakeups */
		wait_event_interruptible(info->wq,
			kthread_should_stop() || info->dirty);
		try_to_freeze();
	}
	return 0;
}
221 static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
222 unsigned blue, unsigned transp,
223 struct fb_info *info)
224 {
225 u32 v;
227 if (regno > info->cmap.len)
228 return 1;
230 red >>= (16 - info->var.red.length);
231 green >>= (16 - info->var.green.length);
232 blue >>= (16 - info->var.blue.length);
234 v = (red << info->var.red.offset) |
235 (green << info->var.green.offset) |
236 (blue << info->var.blue.offset);
238 /* FIXME is this sane? check against xxxfb_setcolreg()! */
239 switch (info->var.bits_per_pixel) {
240 case 16:
241 case 24:
242 case 32:
243 ((u32 *)info->pseudo_palette)[regno] = v;
244 break;
245 }
247 return 0;
248 }
250 static void xenfb_timer(unsigned long data)
251 {
252 struct xenfb_info *info = (struct xenfb_info *)data;
253 info->dirty = 1;
254 wake_up(&info->wq);
255 }
257 static void __xenfb_refresh(struct xenfb_info *info,
258 int x1, int y1, int w, int h)
259 {
260 int y2, x2;
262 y2 = y1 + h;
263 x2 = x1 + w;
265 if (info->y1 > y1)
266 info->y1 = y1;
267 if (info->y2 < y2)
268 info->y2 = y2;
269 if (info->x1 > x1)
270 info->x1 = x1;
271 if (info->x2 < x2)
272 info->x2 = x2;
274 if (timer_pending(&info->refresh))
275 return;
277 mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
278 }
280 static void xenfb_refresh(struct xenfb_info *info,
281 int x1, int y1, int w, int h)
282 {
283 unsigned long flags;
285 spin_lock_irqsave(&info->dirty_lock, flags);
286 __xenfb_refresh(info, x1, y1, w, h);
287 spin_unlock_irqrestore(&info->dirty_lock, flags);
288 }
290 static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
291 {
292 struct xenfb_info *info = p->par;
294 cfb_fillrect(p, rect);
295 xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
296 }
298 static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
299 {
300 struct xenfb_info *info = p->par;
302 cfb_imageblit(p, image);
303 xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
304 }
306 static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
307 {
308 struct xenfb_info *info = p->par;
310 cfb_copyarea(p, area);
311 xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
312 }
314 static void xenfb_vm_open(struct vm_area_struct *vma)
315 {
316 struct xenfb_mapping *map = vma->vm_private_data;
317 atomic_inc(&map->map_refs);
318 }
/*
 * VMA torn down: drop a reference; on the last one, unlink the mapping
 * from the device list and free it.  mm_lock keeps this consistent
 * with xenfb_update_screen()'s walk of the mappings list.
 */
static void xenfb_vm_close(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;

	mutex_lock(&info->mm_lock);
	if (atomic_dec_and_test(&map->map_refs)) {
		list_del(&map->link);
		kfree(map);
	}
	mutex_unlock(&info->mm_lock);
}
/*
 * Fault handler for user mappings of the frame buffer: hand out the
 * corresponding fb page, count the fault, and mark the rows covered by
 * that page dirty so the backend redraws them.  Holds both locks —
 * critical region 3 in the locking comment above.
 */
static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
				    unsigned long vaddr, int *type)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;
	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	if (pgnr >= info->nr_pages)
		return NOPAGE_SIGBUS;

	mutex_lock(&info->mm_lock);
	spin_lock_irqsave(&info->dirty_lock, flags);
	page = info->pages[pgnr];
	get_page(page);
	map->faults++;

	/* rows touched by this page, clamped to the visible resolution */
	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
	if (y2 > info->fb_info->var.yres)
		y2 = info->fb_info->var.yres;
	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
	mutex_unlock(&info->mm_lock);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}
/* VM callbacks for user mappings created by xenfb_mmap() */
static struct vm_operations_struct xenfb_vm_ops = {
	.open = xenfb_vm_open,
	.close = xenfb_vm_close,
	.nopage = xenfb_vm_nopage,
};
/*
 * fb_ops.fb_mmap: map the frame buffer into user space.  The mapping
 * starts out empty; pages are populated lazily by xenfb_vm_nopage().
 * Requires a shared, writable mapping of at most the whole buffer,
 * starting at offset 0.
 */
static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
{
	struct xenfb_info *info = fb_info->par;
	struct xenfb_mapping *map;
	int map_pages;

	if (!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
	if (map_pages > info->nr_pages)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->vma = vma;
	map->faults = 0;
	map->info = info;
	atomic_set(&map->map_refs, 1);

	mutex_lock(&info->mm_lock);
	list_add(&map->link, &info->mappings);
	mutex_unlock(&info->mm_lock);

	/* no pages are mapped yet; faults will go to xenfb_vm_nopage() */
	vma->vm_ops = &xenfb_vm_ops;
	vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
	vma->vm_private_data = map;

	return 0;
}
/* fbdev operations; drawing ops wrap cfb helpers to track dirtying */
static struct fb_ops xenfb_fb_ops = {
	.owner = THIS_MODULE,
	.fb_setcolreg = xenfb_setcolreg,
	.fb_fillrect = xenfb_fillrect,
	.fb_copyarea = xenfb_copyarea,
	.fb_imageblit = xenfb_imageblit,
	.fb_mmap = xenfb_mmap,
};
418 static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
419 struct pt_regs *regs)
420 {
421 /*
422 * No in events recognized, simply ignore them all.
423 * If you need to recognize some, see xenbkd's input_handler()
424 * for how to do that.
425 */
426 struct xenfb_info *info = dev_id;
427 struct xenfb_page *page = info->page;
429 if (page->in_cons != page->in_prod) {
430 info->page->in_cons = info->page->in_prod;
431 notify_remote_via_irq(info->irq);
432 }
433 return IRQ_HANDLED;
434 }
/* Translate a vmalloc()ed kernel address to its machine frame number. */
static unsigned long vmalloc_to_mfn(void *address)
{
	unsigned long pfn = vmalloc_to_pfn(address);

	return pfn_to_mfn(pfn);
}
441 static int __devinit xenfb_probe(struct xenbus_device *dev,
442 const struct xenbus_device_id *id)
443 {
444 struct xenfb_info *info;
445 struct fb_info *fb_info;
446 int ret;
448 info = kzalloc(sizeof(*info), GFP_KERNEL);
449 if (info == NULL) {
450 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
451 return -ENOMEM;
452 }
453 dev->dev.driver_data = info;
454 info->xbdev = dev;
455 info->irq = -1;
456 info->x1 = info->y1 = INT_MAX;
457 spin_lock_init(&info->dirty_lock);
458 mutex_init(&info->mm_lock);
459 init_waitqueue_head(&info->wq);
460 init_timer(&info->refresh);
461 info->refresh.function = xenfb_timer;
462 info->refresh.data = (unsigned long)info;
463 INIT_LIST_HEAD(&info->mappings);
465 info->fb = vmalloc(xenfb_mem_len);
466 if (info->fb == NULL)
467 goto error_nomem;
468 memset(info->fb, 0, xenfb_mem_len);
470 info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
472 info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
473 GFP_KERNEL);
474 if (info->pages == NULL)
475 goto error_nomem;
477 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
478 if (!info->mfns)
479 goto error_nomem;
481 /* set up shared page */
482 info->page = (void *)__get_free_page(GFP_KERNEL);
483 if (!info->page)
484 goto error_nomem;
486 xenfb_init_shared_page(info);
488 fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
489 /* see fishy hackery below */
490 if (fb_info == NULL)
491 goto error_nomem;
493 /* FIXME fishy hackery */
494 fb_info->pseudo_palette = fb_info->par;
495 fb_info->par = info;
496 /* /FIXME */
497 fb_info->screen_base = info->fb;
499 fb_info->fbops = &xenfb_fb_ops;
500 fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
501 fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
502 fb_info->var.bits_per_pixel = info->page->depth;
504 fb_info->var.red = (struct fb_bitfield){16, 8, 0};
505 fb_info->var.green = (struct fb_bitfield){8, 8, 0};
506 fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
508 fb_info->var.activate = FB_ACTIVATE_NOW;
509 fb_info->var.height = -1;
510 fb_info->var.width = -1;
511 fb_info->var.vmode = FB_VMODE_NONINTERLACED;
513 fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
514 fb_info->fix.line_length = info->page->line_length;
515 fb_info->fix.smem_start = 0;
516 fb_info->fix.smem_len = xenfb_mem_len;
517 strcpy(fb_info->fix.id, "xen");
518 fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
519 fb_info->fix.accel = FB_ACCEL_NONE;
521 fb_info->flags = FBINFO_FLAG_DEFAULT;
523 ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
524 if (ret < 0) {
525 framebuffer_release(fb_info);
526 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
527 goto error;
528 }
530 ret = register_framebuffer(fb_info);
531 if (ret) {
532 fb_dealloc_cmap(&info->fb_info->cmap);
533 framebuffer_release(fb_info);
534 xenbus_dev_fatal(dev, ret, "register_framebuffer");
535 goto error;
536 }
537 info->fb_info = fb_info;
539 /* FIXME should this be delayed until backend XenbusStateConnected? */
540 info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
541 if (IS_ERR(info->kthread)) {
542 ret = PTR_ERR(info->kthread);
543 info->kthread = NULL;
544 xenbus_dev_fatal(dev, ret, "register_framebuffer");
545 goto error;
546 }
548 ret = xenfb_connect_backend(dev, info);
549 if (ret < 0)
550 goto error;
552 return 0;
554 error_nomem:
555 ret = -ENOMEM;
556 xenbus_dev_fatal(dev, ret, "allocating device memory");
557 error:
558 xenfb_remove(dev);
559 return ret;
560 }
/*
 * Resume after save/restore or migration: the old event channel is
 * gone and machine frames may have changed, so reinitialise the shared
 * page and reconnect to the (new) backend.
 */
static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info);
	return xenfb_connect_backend(dev, info);
}
/*
 * Tear down the device.  Also used as the error path of xenfb_probe(),
 * so any field may still be unset: hence the NULL checks and reliance
 * on vfree/kfree/free_page accepting NULL/0.
 */
static int xenfb_remove(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	del_timer(&info->refresh);
	if (info->kthread)
		kthread_stop(info->kthread);
	xenfb_disconnect_backend(info);
	if (info->fb_info) {
		unregister_framebuffer(info->fb_info);
		fb_dealloc_cmap(&info->fb_info->cmap);
		framebuffer_release(info->fb_info);
	}
	free_page((unsigned long)info->page);
	vfree(info->mfns);
	kfree(info->pages);
	vfree(info->fb);
	kfree(info);

	return 0;
}
593 static void xenfb_init_shared_page(struct xenfb_info *info)
594 {
595 int i;
597 for (i = 0; i < info->nr_pages; i++)
598 info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
600 for (i = 0; i < info->nr_pages; i++)
601 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
603 info->page->pd[0] = vmalloc_to_mfn(info->mfns);
604 info->page->pd[1] = 0;
605 info->page->width = XENFB_WIDTH;
606 info->page->height = XENFB_HEIGHT;
607 info->page->depth = XENFB_DEPTH;
608 info->page->line_length = (info->page->depth / 8) * info->page->width;
609 info->page->mem_length = xenfb_mem_len;
610 info->page->in_cons = info->page->in_prod = 0;
611 info->page->out_cons = info->page->out_prod = 0;
612 }
/*
 * Bind an interdomain event channel to the backend and advertise it —
 * along with the shared page reference and update support — in
 * xenstore.  The transaction is restarted on -EAGAIN.  Switches the
 * device to Initialised on success.
 */
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret;
	struct xenbus_transaction xbt;

	ret = bind_listening_port_to_irqhandler(
		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret,
				 "bind_listening_port_to_irqhandler");
		return ret;
	}
	info->irq = ret;

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		return ret;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    irq_to_evtchn_port(info->irq));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		return ret;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
	return ret;
}
/* Tear down the event-channel binding, if any; idempotent. */
static void xenfb_disconnect_backend(struct xenfb_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}
/*
 * xenbus otherend_changed callback: track the backend's state machine
 * and learn whether it wants XENFB_TYPE_UPDATE events.
 */
static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	struct xenfb_info *info = dev->dev.driver_data;
	int val;

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	InitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			goto InitWait; /* no InitWait seen yet, fudge it */

		/* only send update events if the backend asked for them */
		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				 "request-update", "%d", &val) < 0)
			val = 0;
		if (val)
			info->update_wanted = 1;
		break;

	case XenbusStateClosing:
		// FIXME is this safe in any dev->state?
		xenbus_frontend_closed(dev);
		break;
	}
}
/* xenbus device types this driver handles; "" terminates the list */
static struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }
};
/* xenbus frontend driver glue */
static struct xenbus_driver xenfb = {
	.name = "vfb",
	.owner = THIS_MODULE,
	.ids = xenfb_ids,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
};
726 static int __init xenfb_init(void)
727 {
728 if (!is_running_on_xen())
729 return -ENODEV;
731 /* Nothing to do if running in dom0. */
732 if (is_initial_xendomain())
733 return -ENODEV;
735 return xenbus_register_frontend(&xenfb);
736 }
738 static void __exit xenfb_cleanup(void)
739 {
740 return xenbus_unregister_driver(&xenfb);
741 }
/* standard module entry/exit hookup and license declaration */
module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_LICENSE("GPL");