linux-2.6-xen-sparse/drivers/xen/fbfront/xenfb.c @ 13576:b9ffa4b49d97

bimodal: pvfb frontend

Create a new "protocol" node in xenstore and store there the name of
the protocol the frontend speaks.

Signed-off-by: Gerd Hoffmann <kraxel@suse.de>
author kfraser@localhost.localdomain
date Tue Jan 23 14:47:26 2007 +0000 (2007-01-23)
parents d275951acf10
children 4d29476fc460
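
The change itself is one extra write inside the existing xenstore
transaction in xenfb_connect_backend(), exactly as it appears in the
source below:

        ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
                            XEN_IO_PROTO_ABI_NATIVE);
        if (ret)
                goto error_xenbus;

This lets a bimodal backend discover which ABI the frontend speaks.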
/*
 * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
 *
 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
 *
 * Based on linux/drivers/video/q40fb.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 */

/*
 * TODO:
 *
 * Switch to grant tables when they become capable of dealing with the
 * frame buffer.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>
#include <linux/kthread.h>
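
/*
 * One of these exists per mmap() of the frame buffer: it tracks the
 * VMA and how many of its pages have faulted, so the update thread
 * knows which mappings to zap.
 */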
struct xenfb_mapping
{
        struct list_head        link;
        struct vm_area_struct   *vma;
        atomic_t                map_refs;
        int                     faults;
        struct xenfb_info       *info;
};

struct xenfb_info
{
        struct task_struct      *kthread;
        wait_queue_head_t       wq;

        unsigned char           *fb;
        struct fb_info          *fb_info;
        struct timer_list       refresh;
        int                     dirty;
        int                     x1, y1, x2, y2; /* dirty rectangle,
                                                   protected by dirty_lock */
        spinlock_t              dirty_lock;
        struct mutex            mm_lock;
        int                     nr_pages;
        struct page             **pages;
        struct list_head        mappings; /* protected by mm_lock */

        int                     irq;
        struct xenfb_page       *page;
        unsigned long           *mfns;
        int                     update_wanted; /* XENFB_TYPE_UPDATE wanted */

        struct xenbus_device    *xbdev;
};

/*
 * How the locks work together
 *
 * There are two locks: spinlock dirty_lock protecting the dirty
 * rectangle, and mutex mm_lock protecting mappings.
 *
 * The problem is that dirty rectangle and mappings aren't
 * independent: the dirty rectangle must cover all faulted pages in
 * mappings.  We need to prove that our locking maintains this
 * invariant.
 *
 * There are several kinds of critical regions:
 *
 * 1. Holding only dirty_lock: xenfb_refresh().  May run in
 *    interrupts.  Extends the dirty rectangle.  Trivially preserves
 *    invariant.
 *
 * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close().  Touch
 *    only mappings.  The former creates unfaulted pages.  Preserves
 *    invariant.  The latter removes pages.  Preserves invariant.
 *
 * 3. Holding both locks: xenfb_vm_nopage().  Extends the dirty
 *    rectangle and updates mappings consistently.  Preserves
 *    invariant.
 *
 * 4. The ugliest one: xenfb_update_screen().  Clear the dirty
 *    rectangle and update mappings consistently.
 *
 *    We can't simply hold both locks, because zap_page_range() cannot
 *    be called with a spinlock held.
 *
 *    Therefore, we first clear the dirty rectangle with both locks
 *    held.  Then we unlock dirty_lock and update the mappings.
 *    Critical regions that hold only dirty_lock may interfere with
 *    that.  This can only be region 1: xenfb_refresh().  But that
 *    just extends the dirty rectangle, which can't harm the
 *    invariant.
 *
 * But FIXME: the invariant is too weak.  It misses that the fault
 * record in mappings must be consistent with the mapping of pages in
 * the associated address space!  do_no_page() updates the PTE after
 * xenfb_vm_nopage() returns, i.e. outside the critical region.  This
 * allows the following race:
 *
 * X writes to some address in the Xen frame buffer
 * Fault - call do_no_page()
 *     call xenfb_vm_nopage()
 *         grab mm_lock
 *         map->faults++;
 *         release mm_lock
 *     return back to do_no_page()
 *     (preempted, or SMP)
 * Xen worker thread runs.
 *     grab mm_lock
 *     look at mappings
 *     find this mapping, zaps its pages (but page not in pte yet)
 *     clear map->faults
 *     releases mm_lock
 * (back to X process)
 *     put page in X's pte
 *
 * Oh well, we won't be updating the writes to this page anytime soon.
 */

static int xenfb_fps = 20;
static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;

static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
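
/*
 * Put an update event for the rectangle (x, y, w, h) on the shared
 * ring and kick the backend.  The caller must ensure the ring is not
 * full (see xenfb_queue_full()).
 */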
static void xenfb_do_update(struct xenfb_info *info,
                            int x, int y, int w, int h)
{
        union xenfb_out_event event;
        __u32 prod;

        event.type = XENFB_TYPE_UPDATE;
        event.update.x = x;
        event.update.y = y;
        event.update.width = w;
        event.update.height = h;

        prod = info->page->out_prod;
        /* caller ensures !xenfb_queue_full() */
        mb();                   /* ensure ring space available */
        XENFB_OUT_RING_REF(info->page, prod) = event;
        wmb();                  /* ensure ring contents visible */
        info->page->out_prod = prod + 1;

        notify_remote_via_irq(info->irq);
}
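
/* Nonzero if the shared out ring has no free slot */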
static int xenfb_queue_full(struct xenfb_info *info)
{
        __u32 cons, prod;

        prod = info->page->out_prod;
        cons = info->page->out_cons;
        return prod - cons == XENFB_OUT_RING_LEN;
}
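
/*
 * Ship the accumulated dirty rectangle to the backend, if it wants
 * updates, and zap all faulted mappings so that future writes fault
 * again and get tracked.
 */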
static void xenfb_update_screen(struct xenfb_info *info)
{
        unsigned long flags;
        int y1, y2, x1, x2;
        struct xenfb_mapping *map;

        if (!info->update_wanted)
                return;
        if (xenfb_queue_full(info))
                return;

        mutex_lock(&info->mm_lock);

        spin_lock_irqsave(&info->dirty_lock, flags);
        y1 = info->y1;
        y2 = info->y2;
        x1 = info->x1;
        x2 = info->x2;
        info->x1 = info->y1 = INT_MAX;
        info->x2 = info->y2 = 0;
        spin_unlock_irqrestore(&info->dirty_lock, flags);

        list_for_each_entry(map, &info->mappings, link) {
                if (!map->faults)
                        continue;
                zap_page_range(map->vma, map->vma->vm_start,
                               map->vma->vm_end - map->vma->vm_start, NULL);
                map->faults = 0;
        }

        mutex_unlock(&info->mm_lock);

        xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
}
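
/* Worker thread: flushes the dirty rectangle whenever marked dirty */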
static int xenfb_thread(void *data)
{
        struct xenfb_info *info = data;

        while (!kthread_should_stop()) {
                if (info->dirty) {
                        info->dirty = 0;
                        xenfb_update_screen(info);
                }
                wait_event_interruptible(info->wq,
                        kthread_should_stop() || info->dirty);
                try_to_freeze();
        }
        return 0;
}

static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
                           unsigned blue, unsigned transp,
                           struct fb_info *info)
{
        u32 v;

        if (regno > info->cmap.len)
                return 1;

        red >>= (16 - info->var.red.length);
        green >>= (16 - info->var.green.length);
        blue >>= (16 - info->var.blue.length);

        v = (red << info->var.red.offset) |
            (green << info->var.green.offset) |
            (blue << info->var.blue.offset);

        /* FIXME is this sane? check against xxxfb_setcolreg()! */
        switch (info->var.bits_per_pixel) {
        case 16:
        case 24:
        case 32:
                ((u32 *)info->pseudo_palette)[regno] = v;
                break;
        }

        return 0;
}
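
/* Refresh timer: mark the frame buffer dirty and wake the worker */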
static void xenfb_timer(unsigned long data)
{
        struct xenfb_info *info = (struct xenfb_info *)data;
        info->dirty = 1;
        wake_up(&info->wq);
}
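
/*
 * Extend the dirty rectangle and arm the refresh timer if it isn't
 * pending already.  Caller must hold dirty_lock.
 */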
static void __xenfb_refresh(struct xenfb_info *info,
                            int x1, int y1, int w, int h)
{
        int y2, x2;

        y2 = y1 + h;
        x2 = x1 + w;

        if (info->y1 > y1)
                info->y1 = y1;
        if (info->y2 < y2)
                info->y2 = y2;
        if (info->x1 > x1)
                info->x1 = x1;
        if (info->x2 < x2)
                info->x2 = x2;

        if (timer_pending(&info->refresh))
                return;

        mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
}

static void xenfb_refresh(struct xenfb_info *info,
                          int x1, int y1, int w, int h)
{
        unsigned long flags;

        spin_lock_irqsave(&info->dirty_lock, flags);
        __xenfb_refresh(info, x1, y1, w, h);
        spin_unlock_irqrestore(&info->dirty_lock, flags);
}

static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
        struct xenfb_info *info = p->par;

        cfb_fillrect(p, rect);
        xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
}

static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
        struct xenfb_info *info = p->par;

        cfb_imageblit(p, image);
        xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
}

static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
        struct xenfb_info *info = p->par;

        cfb_copyarea(p, area);
        xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
}
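
/*
 * VM operations: reference-count each mapping and count page faults,
 * so xenfb_update_screen() knows which mappings to zap.
 */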
static void xenfb_vm_open(struct vm_area_struct *vma)
{
        struct xenfb_mapping *map = vma->vm_private_data;
        atomic_inc(&map->map_refs);
}

static void xenfb_vm_close(struct vm_area_struct *vma)
{
        struct xenfb_mapping *map = vma->vm_private_data;
        struct xenfb_info *info = map->info;

        mutex_lock(&info->mm_lock);
        if (atomic_dec_and_test(&map->map_refs)) {
                list_del(&map->link);
                kfree(map);
        }
        mutex_unlock(&info->mm_lock);
}
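
/*
 * Fault handler: hand out the frame buffer page and dirty the lines
 * it covers, so the backend redraws whatever gets written there.
 */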
static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
                                    unsigned long vaddr, int *type)
{
        struct xenfb_mapping *map = vma->vm_private_data;
        struct xenfb_info *info = map->info;
        int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
        unsigned long flags;
        struct page *page;
        int y1, y2;

        if (pgnr >= info->nr_pages)
                return NOPAGE_SIGBUS;

        mutex_lock(&info->mm_lock);
        spin_lock_irqsave(&info->dirty_lock, flags);
        page = info->pages[pgnr];
        get_page(page);
        map->faults++;

        y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
        y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
        if (y2 > info->fb_info->var.yres)
                y2 = info->fb_info->var.yres;
        __xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
        spin_unlock_irqrestore(&info->dirty_lock, flags);
        mutex_unlock(&info->mm_lock);

        if (type)
                *type = VM_FAULT_MINOR;

        return page;
}

static struct vm_operations_struct xenfb_vm_ops = {
        .open   = xenfb_vm_open,
        .close  = xenfb_vm_close,
        .nopage = xenfb_vm_nopage,
};
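
/* Create a tracked, write-shared mapping of the frame buffer */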
static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
{
        struct xenfb_info *info = fb_info->par;
        struct xenfb_mapping *map;
        int map_pages;

        if (!(vma->vm_flags & VM_WRITE))
                return -EINVAL;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
        if (vma->vm_pgoff != 0)
                return -EINVAL;

        map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
        if (map_pages > info->nr_pages)
                return -EINVAL;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        map->vma = vma;
        map->faults = 0;
        map->info = info;
        atomic_set(&map->map_refs, 1);

        mutex_lock(&info->mm_lock);
        list_add(&map->link, &info->mappings);
        mutex_unlock(&info->mm_lock);

        vma->vm_ops = &xenfb_vm_ops;
        vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
        vma->vm_private_data = map;

        return 0;
}

static struct fb_ops xenfb_fb_ops = {
        .owner          = THIS_MODULE,
        .fb_setcolreg   = xenfb_setcolreg,
        .fb_fillrect    = xenfb_fillrect,
        .fb_copyarea    = xenfb_copyarea,
        .fb_imageblit   = xenfb_imageblit,
        .fb_mmap        = xenfb_mmap,
};

static irqreturn_t xenfb_event_handler(int irq, void *dev_id,
                                       struct pt_regs *regs)
{
        /*
         * No incoming events are recognized; simply ignore them all.
         * If you need to recognize some, see xenkbd's input_handler()
         * for how to do that.
         */
        struct xenfb_info *info = dev_id;
        struct xenfb_page *page = info->page;

        if (page->in_cons != page->in_prod) {
                info->page->in_cons = info->page->in_prod;
                notify_remote_via_irq(info->irq);
        }
        return IRQ_HANDLED;
}

static unsigned long vmalloc_to_mfn(void *address)
{
        return pfn_to_mfn(vmalloc_to_pfn(address));
}
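
/*
 * Allocate the frame buffer and its page/MFN tables, set up the
 * shared page, register the fbdev device, and connect to the backend.
 */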
static int __devinit xenfb_probe(struct xenbus_device *dev,
                                 const struct xenbus_device_id *id)
{
        struct xenfb_info *info;
        struct fb_info *fb_info;
        int ret;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
                return -ENOMEM;
        }
        dev->dev.driver_data = info;
        info->xbdev = dev;
        info->irq = -1;
        info->x1 = info->y1 = INT_MAX;
        spin_lock_init(&info->dirty_lock);
        mutex_init(&info->mm_lock);
        init_waitqueue_head(&info->wq);
        init_timer(&info->refresh);
        info->refresh.function = xenfb_timer;
        info->refresh.data = (unsigned long)info;
        INIT_LIST_HEAD(&info->mappings);

        info->fb = vmalloc(xenfb_mem_len);
        if (info->fb == NULL)
                goto error_nomem;
        memset(info->fb, 0, xenfb_mem_len);

        info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;

        info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
                              GFP_KERNEL);
        if (info->pages == NULL)
                goto error_nomem;

        info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
        if (!info->mfns)
                goto error_nomem;

        /* set up shared page */
        info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!info->page)
                goto error_nomem;

        xenfb_init_shared_page(info);

        fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
                                /* see fishy hackery below */
        if (fb_info == NULL)
                goto error_nomem;

        /* FIXME fishy hackery */
        fb_info->pseudo_palette = fb_info->par;
        fb_info->par = info;
        /* /FIXME */
        fb_info->screen_base = info->fb;

        fb_info->fbops = &xenfb_fb_ops;
        fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
        fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
        fb_info->var.bits_per_pixel = info->page->depth;
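
        /* 8 bits per channel: blue at offset 0, green at 8, red at 16 */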
        fb_info->var.red = (struct fb_bitfield){16, 8, 0};
        fb_info->var.green = (struct fb_bitfield){8, 8, 0};
        fb_info->var.blue = (struct fb_bitfield){0, 8, 0};

        fb_info->var.activate = FB_ACTIVATE_NOW;
        fb_info->var.height = -1;
        fb_info->var.width = -1;
        fb_info->var.vmode = FB_VMODE_NONINTERLACED;

        fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
        fb_info->fix.line_length = info->page->line_length;
        fb_info->fix.smem_start = 0;
        fb_info->fix.smem_len = xenfb_mem_len;
        strcpy(fb_info->fix.id, "xen");
        fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
        fb_info->fix.accel = FB_ACCEL_NONE;

        fb_info->flags = FBINFO_FLAG_DEFAULT;

        ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
        if (ret < 0) {
                framebuffer_release(fb_info);
                xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
                goto error;
        }

        ret = register_framebuffer(fb_info);
        if (ret) {
                fb_dealloc_cmap(&fb_info->cmap);
                framebuffer_release(fb_info);
                xenbus_dev_fatal(dev, ret, "register_framebuffer");
                goto error;
        }
        info->fb_info = fb_info;

        /* FIXME should this be delayed until backend XenbusStateConnected? */
        info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
        if (IS_ERR(info->kthread)) {
                ret = PTR_ERR(info->kthread);
                info->kthread = NULL;
                xenbus_dev_fatal(dev, ret, "kthread_run");
                goto error;
        }

        ret = xenfb_connect_backend(dev, info);
        if (ret < 0)
                goto error;

        return 0;

 error_nomem:
        ret = -ENOMEM;
        xenbus_dev_fatal(dev, ret, "allocating device memory");
 error:
        xenfb_remove(dev);
        return ret;
}

static int xenfb_resume(struct xenbus_device *dev)
{
        struct xenfb_info *info = dev->dev.driver_data;

        xenfb_disconnect_backend(info);
        xenfb_init_shared_page(info);
        return xenfb_connect_backend(dev, info);
}

static int xenfb_remove(struct xenbus_device *dev)
{
        struct xenfb_info *info = dev->dev.driver_data;

        del_timer(&info->refresh);
        if (info->kthread)
                kthread_stop(info->kthread);
        xenfb_disconnect_backend(info);
        if (info->fb_info) {
                unregister_framebuffer(info->fb_info);
                fb_dealloc_cmap(&info->fb_info->cmap);
                framebuffer_release(info->fb_info);
        }
        free_page((unsigned long)info->page);
        vfree(info->mfns);
        kfree(info->pages);
        vfree(info->fb);
        kfree(info);

        return 0;
}
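
/*
 * Fill in the shared page: the page directory (pd[0]) points to the
 * list of frame buffer MFNs, the mode geometry is set, and both
 * event rings are reset.
 */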
static void xenfb_init_shared_page(struct xenfb_info *info)
{
        int i;

        for (i = 0; i < info->nr_pages; i++)
                info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);

        for (i = 0; i < info->nr_pages; i++)
                info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);

        info->page->pd[0] = vmalloc_to_mfn(info->mfns);
        info->page->pd[1] = 0;
        info->page->width = XENFB_WIDTH;
        info->page->height = XENFB_HEIGHT;
        info->page->depth = XENFB_DEPTH;
        info->page->line_length = (info->page->depth / 8) * info->page->width;
        info->page->mem_length = xenfb_mem_len;
        info->page->in_cons = info->page->in_prod = 0;
        info->page->out_cons = info->page->out_prod = 0;
}
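
/*
 * Bind an event channel to the backend and publish page-ref,
 * event-channel, protocol and feature-update in xenstore, then
 * switch to Initialised.
 */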
static int xenfb_connect_backend(struct xenbus_device *dev,
                                 struct xenfb_info *info)
{
        int ret;
        struct xenbus_transaction xbt;

        ret = bind_listening_port_to_irqhandler(
                dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
        if (ret < 0) {
                xenbus_dev_fatal(dev, ret,
                                 "bind_listening_port_to_irqhandler");
                return ret;
        }
        info->irq = ret;

 again:
        ret = xenbus_transaction_start(&xbt);
        if (ret) {
                xenbus_dev_fatal(dev, ret, "starting transaction");
                return ret;
        }
        ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
                            virt_to_mfn(info->page));
        if (ret)
                goto error_xenbus;
        ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
                            irq_to_evtchn_port(info->irq));
        if (ret)
                goto error_xenbus;
        ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
                            XEN_IO_PROTO_ABI_NATIVE);
        if (ret)
                goto error_xenbus;
        ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
        if (ret)
                goto error_xenbus;
        ret = xenbus_transaction_end(xbt, 0);
        if (ret) {
                if (ret == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, ret, "completing transaction");
                return ret;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);
        return 0;

 error_xenbus:
        xenbus_transaction_end(xbt, 1);
        xenbus_dev_fatal(dev, ret, "writing xenstore");
        return ret;
}

static void xenfb_disconnect_backend(struct xenfb_info *info)
{
        if (info->irq >= 0)
                unbind_from_irqhandler(info->irq, info);
        info->irq = -1;
}
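
/*
 * Track the backend's state changes: complete the handshake when it
 * reaches InitWait or Connected, note whether it wants update events
 * (request-update), and acknowledge Closing.
 */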
static void xenfb_backend_changed(struct xenbus_device *dev,
                                  enum xenbus_state backend_state)
{
        struct xenfb_info *info = dev->dev.driver_data;
        int val;

        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;

        case XenbusStateInitWait:
        InitWait:
                xenbus_switch_state(dev, XenbusStateConnected);
                break;

        case XenbusStateConnected:
                /*
                 * Work around xenbus race condition: If backend goes
                 * through InitWait to Connected fast enough, we can
                 * get Connected twice here.
                 */
                if (dev->state != XenbusStateConnected)
                        goto InitWait; /* no InitWait seen yet, fudge it */

                if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
                                 "request-update", "%d", &val) < 0)
                        val = 0;
                if (val)
                        info->update_wanted = 1;
                break;

        case XenbusStateClosing:
                // FIXME is this safe in any dev->state?
                xenbus_frontend_closed(dev);
                break;
        }
}

static struct xenbus_device_id xenfb_ids[] = {
        { "vfb" },
        { "" }
};

static struct xenbus_driver xenfb = {
        .name = "vfb",
        .owner = THIS_MODULE,
        .ids = xenfb_ids,
        .probe = xenfb_probe,
        .remove = xenfb_remove,
        .resume = xenfb_resume,
        .otherend_changed = xenfb_backend_changed,
};

static int __init xenfb_init(void)
{
        if (!is_running_on_xen())
                return -ENODEV;

        /* Nothing to do if running in dom0. */
        if (is_initial_xendomain())
                return -ENODEV;

        return xenbus_register_frontend(&xenfb);
}

static void __exit xenfb_cleanup(void)
{
        return xenbus_unregister_driver(&xenfb);
}

module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_LICENSE("GPL");