ia64/linux-2.6.18-xen.hg

view drivers/xen/fbfront/xenfb.c @ 783:c9783c08495c

xenfb: fix xenfb_update_screen bogus rect
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 28 13:41:33 2009 +0000 (2009-01-28)
parents f29bf0bf3e97
children 8197c86e6729
line source
1 /*
2 * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
3 *
4 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
5 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
6 *
7 * Based on linux/drivers/video/q40fb.c
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of this archive for
11 * more details.
12 */
14 /*
15 * TODO:
16 *
17 * Switch to grant tables when they become capable of dealing with the
18 * frame buffer.
19 */
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/fb.h>
24 #include <linux/module.h>
25 #include <linux/vmalloc.h>
26 #include <linux/mm.h>
27 #include <linux/mutex.h>
28 #include <asm/hypervisor.h>
29 #include <xen/evtchn.h>
30 #include <xen/interface/io/fbif.h>
31 #include <xen/interface/io/protocols.h>
32 #include <xen/xenbus.h>
33 #include <linux/kthread.h>
/*
 * One userspace mmap() of the framebuffer.  One instance per VMA,
 * reference-counted because VMAs may be duplicated (e.g. fork).
 */
struct xenfb_mapping
{
	struct list_head link;		/* entry in xenfb_info.mappings */
	struct vm_area_struct *vma;	/* the mapped VMA */
	atomic_t map_refs;		/* number of VMAs sharing this mapping */
	int faults;			/* pages faulted in since last zap */
	struct xenfb_info *info;	/* owning device instance */
};
/* Per-device state of the virtual framebuffer frontend. */
struct xenfb_info
{
	struct task_struct *kthread;	/* worker sending updates/resizes */
	wait_queue_head_t wq;		/* wakes the worker thread */

	unsigned char *fb;		/* vmalloc'ed framebuffer memory */
	struct fb_info *fb_info;
	struct timer_list refresh;	/* rate-limits updates to xenfb_fps */
	int dirty;			/* non-zero if dirty rectangle valid */
	int x1, y1, x2, y2;		/* dirty rectangle,
					   protected by dirty_lock */
	spinlock_t dirty_lock;
	struct mutex mm_lock;
	int nr_pages;			/* number of pages in fb */
	struct page **pages;		/* page structs backing fb */
	struct list_head mappings;	/* protected by mm_lock */

	int irq;			/* event channel IRQ, -1 if unbound */
	struct xenfb_page *page;	/* shared descriptor page */
	unsigned long *mfns;		/* machine frames backing fb */
	int update_wanted;		/* XENFB_TYPE_UPDATE wanted */
	int feature_resize;		/* Backend has resize feature */
	struct xenfb_resize resize;	/* pending resize event */
	int resize_dpy;			/* non-zero if resize pending */
	spinlock_t resize_lock;		/* protects resize and resize_dpy */

	struct xenbus_device *xbdev;
};
73 /*
74 * There are three locks:
75 * spinlock resize_lock protecting resize_dpy and resize
76 * spinlock dirty_lock protecting the dirty rectangle
77 * mutex mm_lock protecting mappings.
78 *
79 * How the dirty and mapping locks work together
80 *
81 * The problem is that dirty rectangle and mappings aren't
82 * independent: the dirty rectangle must cover all faulted pages in
83 * mappings. We need to prove that our locking maintains this
84 * invariant.
85 *
86 * There are several kinds of critical regions:
87 *
88 * 1. Holding only dirty_lock: xenfb_refresh(). May run in
89 * interrupts. Extends the dirty rectangle. Trivially preserves
90 * invariant.
91 *
92 * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close(). Touch
93 * only mappings. The former creates unfaulted pages. Preserves
94 * invariant. The latter removes pages. Preserves invariant.
95 *
96 * 3. Holding both locks: xenfb_vm_nopage(). Extends the dirty
97 * rectangle and updates mappings consistently. Preserves
98 * invariant.
99 *
100 * 4. The ugliest one: xenfb_update_screen(). Clear the dirty
101 * rectangle and update mappings consistently.
102 *
103 * We can't simply hold both locks, because zap_page_range() cannot
104 * be called with a spinlock held.
105 *
106 * Therefore, we first clear the dirty rectangle with both locks
107 * held. Then we unlock dirty_lock and update the mappings.
108 * Critical regions that hold only dirty_lock may interfere with
109 * that. This can only be region 1: xenfb_refresh(). But that
110 * just extends the dirty rectangle, which can't harm the
111 * invariant.
112 *
113 * But FIXME: the invariant is too weak. It misses that the fault
114 * record in mappings must be consistent with the mapping of pages in
115 * the associated address space! do_no_page() updates the PTE after
116 * xenfb_vm_nopage() returns, i.e. outside the critical region. This
117 * allows the following race:
118 *
119 * X writes to some address in the Xen frame buffer
120 * Fault - call do_no_page()
121 * call xenfb_vm_nopage()
122 * grab mm_lock
123 * map->faults++;
124 * release mm_lock
125 * return back to do_no_page()
126 * (preempted, or SMP)
127 * Xen worker thread runs.
128 * grab mm_lock
129 * look at mappings
130 * find this mapping, zaps its pages (but page not in pte yet)
131 * clear map->faults
132 * releases mm_lock
133 * (back to X process)
134 * put page in X's pte
135 *
136 * Oh well, we won't be updating the writes to this page anytime soon.
137 */
#define MB_ (1024*1024)
#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)

/* Indices into the "video" module parameter array below. */
enum {KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT};
static int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
module_param_array(video, int, NULL, 0);
MODULE_PARM_DESC(video,
	"Size of video memory in MB and width,height in pixels, default = (2,800,600)");

/* Maximum number of screen updates sent to the backend per second. */
static int xenfb_fps = 20;

static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
154 static void xenfb_send_event(struct xenfb_info *info,
155 union xenfb_out_event *event)
156 {
157 __u32 prod;
159 prod = info->page->out_prod;
160 /* caller ensures !xenfb_queue_full() */
161 mb(); /* ensure ring space available */
162 XENFB_OUT_RING_REF(info->page, prod) = *event;
163 wmb(); /* ensure ring contents visible */
164 info->page->out_prod = prod + 1;
166 notify_remote_via_irq(info->irq);
167 }
/*
 * Send an XENFB_TYPE_UPDATE event telling the backend to redraw the
 * rectangle (x, y, w, h).  Caller ensures the ring is not full.
 */
static void xenfb_do_update(struct xenfb_info *info,
			    int x, int y, int w, int h)
{
	union xenfb_out_event event;

	memset(&event, 0, sizeof(event));
	event.type = XENFB_TYPE_UPDATE;
	event.update.x = x;
	event.update.y = y;
	event.update.width = w;
	event.update.height = h;

	/* caller ensures !xenfb_queue_full() */
	xenfb_send_event(info, &event);
}
/* Send the pending resize event (info->resize) to the backend. */
static void xenfb_do_resize(struct xenfb_info *info)
{
	union xenfb_out_event event;

	memset(&event, 0, sizeof(event));
	event.resize = info->resize;

	/* caller ensures !xenfb_queue_full() */
	xenfb_send_event(info, &event);
}
196 static int xenfb_queue_full(struct xenfb_info *info)
197 {
198 __u32 cons, prod;
200 prod = info->page->out_prod;
201 cons = info->page->out_cons;
202 return prod - cons == XENFB_OUT_RING_LEN;
203 }
205 static void xenfb_update_screen(struct xenfb_info *info)
206 {
207 unsigned long flags;
208 int y1, y2, x1, x2;
209 struct xenfb_mapping *map;
211 if (!info->update_wanted)
212 return;
213 if (xenfb_queue_full(info))
214 return;
216 spin_lock_irqsave(&info->dirty_lock, flags);
217 if (info->dirty){
218 info->dirty = 0;
219 y1 = info->y1;
220 y2 = info->y2;
221 x1 = info->x1;
222 x2 = info->x2;
223 info->x1 = info->y1 = INT_MAX;
224 info->x2 = info->y2 = 0;
225 } else {
226 spin_unlock_irqrestore(&info->dirty_lock, flags);
227 return;
228 }
229 spin_unlock_irqrestore(&info->dirty_lock, flags);
231 mutex_lock(&info->mm_lock);
233 list_for_each_entry(map, &info->mappings, link) {
234 if (!map->faults)
235 continue;
236 zap_page_range(map->vma, map->vma->vm_start,
237 map->vma->vm_end - map->vma->vm_start, NULL);
238 map->faults = 0;
239 }
241 mutex_unlock(&info->mm_lock);
243 if (x2 < x1 || y2 < y1) {
244 printk("xenfb_update_screen bogus rect %d %d %d %d\n",
245 x1, x2, y1, y2);
246 WARN_ON(1);
247 }
248 xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
249 }
251 static void xenfb_handle_resize_dpy(struct xenfb_info *info)
252 {
253 unsigned long flags;
255 spin_lock_irqsave(&info->resize_lock, flags);
256 if (info->resize_dpy) {
257 if (!xenfb_queue_full(info)) {
258 info->resize_dpy = 0;
259 xenfb_do_resize(info);
260 }
261 }
262 spin_unlock_irqrestore(&info->resize_lock, flags);
263 }
265 static int xenfb_thread(void *data)
266 {
267 struct xenfb_info *info = data;
269 while (!kthread_should_stop()) {
270 xenfb_handle_resize_dpy(info);
271 xenfb_update_screen(info);
272 wait_event_interruptible(info->wq,
273 kthread_should_stop() || info->dirty);
274 try_to_freeze();
275 }
276 return 0;
277 }
279 static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
280 unsigned blue, unsigned transp,
281 struct fb_info *info)
282 {
283 u32 v;
285 if (regno > info->cmap.len)
286 return 1;
288 red >>= (16 - info->var.red.length);
289 green >>= (16 - info->var.green.length);
290 blue >>= (16 - info->var.blue.length);
292 v = (red << info->var.red.offset) |
293 (green << info->var.green.offset) |
294 (blue << info->var.blue.offset);
296 /* FIXME is this sane? check against xxxfb_setcolreg()! */
297 switch (info->var.bits_per_pixel) {
298 case 16:
299 case 24:
300 case 32:
301 ((u32 *)info->pseudo_palette)[regno] = v;
302 break;
303 }
305 return 0;
306 }
/* Refresh-timer callback: wake the worker thread to flush updates. */
static void xenfb_timer(unsigned long data)
{
	struct xenfb_info *info = (struct xenfb_info *)data;
	wake_up(&info->wq);
}
314 static void __xenfb_refresh(struct xenfb_info *info,
315 int x1, int y1, int w, int h)
316 {
317 int y2, x2;
319 y2 = y1 + h;
320 x2 = x1 + w;
322 if (info->y1 > y1)
323 info->y1 = y1;
324 if (info->y2 < y2)
325 info->y2 = y2;
326 if (info->x1 > x1)
327 info->x1 = x1;
328 if (info->x2 < x2)
329 info->x2 = x2;
330 info->dirty = 1;
332 if (timer_pending(&info->refresh))
333 return;
335 mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
336 }
/* Locked wrapper around __xenfb_refresh(). */
static void xenfb_refresh(struct xenfb_info *info,
			  int x1, int y1, int w, int h)
{
	unsigned long flags;

	spin_lock_irqsave(&info->dirty_lock, flags);
	__xenfb_refresh(info, x1, y1, w, h);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
}
/* fb_fillrect handler: draw, then mark the rectangle dirty. */
static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
	struct xenfb_info *info = p->par;

	cfb_fillrect(p, rect);
	xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
}
/* fb_imageblit handler: draw, then mark the rectangle dirty. */
static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
	struct xenfb_info *info = p->par;

	cfb_imageblit(p, image);
	xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
}
/* fb_copyarea handler: copy, then mark the destination dirty. */
static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
	struct xenfb_info *info = p->par;

	cfb_copyarea(p, area);
	xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
}
/* VMA duplicated (e.g. fork): take another reference on the mapping. */
static void xenfb_vm_open(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	atomic_inc(&map->map_refs);
}
/*
 * VMA torn down: drop a reference; on the last one, unlink and free
 * the mapping.  mm_lock keeps the mappings list consistent with the
 * worker thread's traversal in xenfb_update_screen().
 */
static void xenfb_vm_close(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;

	mutex_lock(&info->mm_lock);
	if (atomic_dec_and_test(&map->map_refs)) {
		list_del(&map->link);
		kfree(map);
	}
	mutex_unlock(&info->mm_lock);
}
/*
 * Fault handler for userspace framebuffer mappings.  Hands out the
 * backing page, records the fault, and dirties the rows covered by
 * the page so the backend redraws whatever userspace writes.  Holds
 * both mm_lock and dirty_lock to keep the fault count consistent with
 * the dirty rectangle (see the locking essay above).
 */
static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
				    unsigned long vaddr, int *type)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;
	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	if (pgnr >= info->nr_pages)
		return NOPAGE_SIGBUS;

	mutex_lock(&info->mm_lock);
	spin_lock_irqsave(&info->dirty_lock, flags);
	page = info->pages[pgnr];
	get_page(page);
	map->faults++;

	/* Rows touched by this page; clamp to the visible resolution. */
	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
	if (y2 > info->fb_info->var.yres)
		y2 = info->fb_info->var.yres;
	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
	mutex_unlock(&info->mm_lock);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}
/* VM operations for userspace framebuffer mappings. */
static struct vm_operations_struct xenfb_vm_ops = {
	.open	= xenfb_vm_open,
	.close	= xenfb_vm_close,
	.nopage	= xenfb_vm_nopage,
};
430 static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
431 {
432 struct xenfb_info *info = fb_info->par;
433 struct xenfb_mapping *map;
434 int map_pages;
436 if (!(vma->vm_flags & VM_WRITE))
437 return -EINVAL;
438 if (!(vma->vm_flags & VM_SHARED))
439 return -EINVAL;
440 if (vma->vm_pgoff != 0)
441 return -EINVAL;
443 map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
444 if (map_pages > info->nr_pages)
445 return -EINVAL;
447 map = kzalloc(sizeof(*map), GFP_KERNEL);
448 if (map == NULL)
449 return -ENOMEM;
451 map->vma = vma;
452 map->faults = 0;
453 map->info = info;
454 atomic_set(&map->map_refs, 1);
456 mutex_lock(&info->mm_lock);
457 list_add(&map->link, &info->mappings);
458 mutex_unlock(&info->mm_lock);
460 vma->vm_ops = &xenfb_vm_ops;
461 vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
462 vma->vm_private_data = map;
464 return 0;
465 }
467 static int
468 xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
469 {
470 struct xenfb_info *xenfb_info;
471 int required_mem_len;
473 xenfb_info = info->par;
475 if (!xenfb_info->feature_resize) {
476 if (var->xres == video[KPARAM_WIDTH] &&
477 var->yres == video[KPARAM_HEIGHT] &&
478 var->bits_per_pixel == xenfb_info->page->depth) {
479 return 0;
480 }
481 return -EINVAL;
482 }
484 /* Can't resize past initial width and height */
485 if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
486 return -EINVAL;
488 required_mem_len = var->xres * var->yres * (xenfb_info->page->depth / 8);
489 if (var->bits_per_pixel == xenfb_info->page->depth &&
490 var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
491 required_mem_len <= info->fix.smem_len) {
492 var->xres_virtual = var->xres;
493 var->yres_virtual = var->yres;
494 return 0;
495 }
496 return -EINVAL;
497 }
499 static int xenfb_set_par(struct fb_info *info)
500 {
501 struct xenfb_info *xenfb_info;
502 unsigned long flags;
504 xenfb_info = info->par;
506 spin_lock_irqsave(&xenfb_info->resize_lock, flags);
507 xenfb_info->resize.type = XENFB_TYPE_RESIZE;
508 xenfb_info->resize.width = info->var.xres;
509 xenfb_info->resize.height = info->var.yres;
510 xenfb_info->resize.stride = info->fix.line_length;
511 xenfb_info->resize.depth = info->var.bits_per_pixel;
512 xenfb_info->resize.offset = 0;
513 xenfb_info->resize_dpy = 1;
514 spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
515 return 0;
516 }
/* fbdev operations implemented by this driver. */
static struct fb_ops xenfb_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= xenfb_setcolreg,
	.fb_fillrect	= xenfb_fillrect,
	.fb_copyarea	= xenfb_copyarea,
	.fb_imageblit	= xenfb_imageblit,
	.fb_mmap	= xenfb_mmap,
	.fb_check_var	= xenfb_check_var,
	.fb_set_par	= xenfb_set_par,
};
529 static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
530 struct pt_regs *regs)
531 {
532 /*
533 * No in events recognized, simply ignore them all.
534 * If you need to recognize some, see xenbkd's input_handler()
535 * for how to do that.
536 */
537 struct xenfb_info *info = dev_id;
538 struct xenfb_page *page = info->page;
540 if (page->in_cons != page->in_prod) {
541 info->page->in_cons = info->page->in_prod;
542 notify_remote_via_irq(info->irq);
543 }
544 return IRQ_HANDLED;
545 }
/* Machine frame number backing a vmalloc'ed address. */
static unsigned long vmalloc_to_mfn(void *address)
{
	return pfn_to_mfn(vmalloc_to_pfn(address));
}
552 static int __devinit xenfb_probe(struct xenbus_device *dev,
553 const struct xenbus_device_id *id)
554 {
555 struct xenfb_info *info;
556 struct fb_info *fb_info;
557 int fb_size;
558 int val;
559 int ret;
561 info = kzalloc(sizeof(*info), GFP_KERNEL);
562 if (info == NULL) {
563 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
564 return -ENOMEM;
565 }
567 /* Limit kernel param videoram amount to what is in xenstore */
568 if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
569 if (val < video[KPARAM_MEM])
570 video[KPARAM_MEM] = val;
571 }
573 /* If requested res does not fit in available memory, use default */
574 fb_size = video[KPARAM_MEM] * MB_;
575 if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH/8 > fb_size) {
576 video[KPARAM_WIDTH] = XENFB_WIDTH;
577 video[KPARAM_HEIGHT] = XENFB_HEIGHT;
578 fb_size = XENFB_DEFAULT_FB_LEN;
579 }
581 dev->dev.driver_data = info;
582 info->xbdev = dev;
583 info->irq = -1;
584 info->x1 = info->y1 = INT_MAX;
585 spin_lock_init(&info->dirty_lock);
586 spin_lock_init(&info->resize_lock);
587 mutex_init(&info->mm_lock);
588 init_waitqueue_head(&info->wq);
589 init_timer(&info->refresh);
590 info->refresh.function = xenfb_timer;
591 info->refresh.data = (unsigned long)info;
592 INIT_LIST_HEAD(&info->mappings);
594 info->fb = vmalloc(fb_size);
595 if (info->fb == NULL)
596 goto error_nomem;
597 memset(info->fb, 0, fb_size);
599 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
601 info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
602 GFP_KERNEL);
603 if (info->pages == NULL)
604 goto error_nomem;
606 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
607 if (!info->mfns)
608 goto error_nomem;
610 /* set up shared page */
611 info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
612 if (!info->page)
613 goto error_nomem;
615 fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
616 /* see fishy hackery below */
617 if (fb_info == NULL)
618 goto error_nomem;
620 /* FIXME fishy hackery */
621 fb_info->pseudo_palette = fb_info->par;
622 fb_info->par = info;
623 /* /FIXME */
624 fb_info->screen_base = info->fb;
626 fb_info->fbops = &xenfb_fb_ops;
627 fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
628 fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
629 fb_info->var.bits_per_pixel = XENFB_DEPTH;
631 fb_info->var.red = (struct fb_bitfield){16, 8, 0};
632 fb_info->var.green = (struct fb_bitfield){8, 8, 0};
633 fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
635 fb_info->var.activate = FB_ACTIVATE_NOW;
636 fb_info->var.height = -1;
637 fb_info->var.width = -1;
638 fb_info->var.vmode = FB_VMODE_NONINTERLACED;
640 fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
641 fb_info->fix.line_length = fb_info->var.xres * (XENFB_DEPTH / 8);
642 fb_info->fix.smem_start = 0;
643 fb_info->fix.smem_len = fb_size;
644 strcpy(fb_info->fix.id, "xen");
645 fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
646 fb_info->fix.accel = FB_ACCEL_NONE;
648 fb_info->flags = FBINFO_FLAG_DEFAULT;
650 ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
651 if (ret < 0) {
652 framebuffer_release(fb_info);
653 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
654 goto error;
655 }
657 xenfb_init_shared_page(info, fb_info);
659 ret = register_framebuffer(fb_info);
660 if (ret) {
661 fb_dealloc_cmap(&info->fb_info->cmap);
662 framebuffer_release(fb_info);
663 xenbus_dev_fatal(dev, ret, "register_framebuffer");
664 goto error;
665 }
666 info->fb_info = fb_info;
668 ret = xenfb_connect_backend(dev, info);
669 if (ret < 0)
670 goto error;
672 return 0;
674 error_nomem:
675 ret = -ENOMEM;
676 xenbus_dev_fatal(dev, ret, "allocating device memory");
677 error:
678 xenfb_remove(dev);
679 return ret;
680 }
/* Resume after migration/save-restore: rebuild shared state and reconnect. */
static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info, info->fb_info);
	return xenfb_connect_backend(dev, info);
}
/*
 * Tear down the device.  Also used by xenfb_probe() for error cleanup,
 * so every step must tolerate partially-initialized state (kzalloc'ed
 * info; vfree/kfree/free_page all accept NULL/0).
 */
static int xenfb_remove(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	del_timer(&info->refresh);
	if (info->kthread)
		kthread_stop(info->kthread);
	xenfb_disconnect_backend(info);
	if (info->fb_info) {
		unregister_framebuffer(info->fb_info);
		fb_dealloc_cmap(&info->fb_info->cmap);
		framebuffer_release(info->fb_info);
	}
	free_page((unsigned long)info->page);
	vfree(info->mfns);
	kfree(info->pages);
	vfree(info->fb);
	kfree(info);

	return 0;
}
713 static void xenfb_init_shared_page(struct xenfb_info *info,
714 struct fb_info * fb_info)
715 {
716 int i;
717 int epd = PAGE_SIZE / sizeof(info->mfns[0]);
719 for (i = 0; i < info->nr_pages; i++)
720 info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
722 for (i = 0; i < info->nr_pages; i++)
723 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
725 for (i = 0; i * epd < info->nr_pages; i++)
726 info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
728 info->page->width = fb_info->var.xres;
729 info->page->height = fb_info->var.yres;
730 info->page->depth = fb_info->var.bits_per_pixel;
731 info->page->line_length = fb_info->fix.line_length;
732 info->page->mem_length = fb_info->fix.smem_len;
733 info->page->in_cons = info->page->in_prod = 0;
734 info->page->out_cons = info->page->out_prod = 0;
735 }
/*
 * Bind an event channel to the backend and publish our shared page
 * reference, event channel, protocol and feature flags in xenstore
 * inside a transaction (retried on -EAGAIN).  On success, switch the
 * device to Initialised.  On failure, the caller is expected to clean
 * up via xenfb_remove()/xenfb_disconnect_backend().
 */
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret;
	struct xenbus_transaction xbt;

	ret = bind_listening_port_to_irqhandler(
		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret,
				 "bind_listening_port_to_irqhandler");
		return ret;
	}
	info->irq = ret;

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		return ret;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    irq_to_evtchn_port(info->irq));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		return ret;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
	return ret;
}
/* Unbind the event channel, if bound.  Safe to call repeatedly. */
static void xenfb_disconnect_backend(struct xenfb_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}
797 static void xenfb_backend_changed(struct xenbus_device *dev,
798 enum xenbus_state backend_state)
799 {
800 struct xenfb_info *info = dev->dev.driver_data;
801 int val;
803 switch (backend_state) {
804 case XenbusStateInitialising:
805 case XenbusStateInitialised:
806 case XenbusStateReconfiguring:
807 case XenbusStateReconfigured:
808 case XenbusStateUnknown:
809 case XenbusStateClosed:
810 break;
812 case XenbusStateInitWait:
813 InitWait:
814 xenbus_switch_state(dev, XenbusStateConnected);
815 break;
817 case XenbusStateConnected:
818 /*
819 * Work around xenbus race condition: If backend goes
820 * through InitWait to Connected fast enough, we can
821 * get Connected twice here.
822 */
823 if (dev->state != XenbusStateConnected)
824 goto InitWait; /* no InitWait seen yet, fudge it */
826 if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
827 "request-update", "%d", &val) < 0)
828 val = 0;
829 if (val)
830 info->update_wanted = 1;
832 if (xenbus_scanf(XBT_NIL, dev->otherend,
833 "feature-resize", "%d", &val) < 0)
834 val = 0;
835 info->feature_resize = val;
837 info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
838 if (IS_ERR(info->kthread)) {
839 info->kthread = NULL;
840 xenbus_dev_fatal(dev, PTR_ERR(info->kthread),
841 "register_framebuffer");
842 }
843 break;
845 case XenbusStateClosing:
846 // FIXME is this safe in any dev->state?
847 xenbus_frontend_closed(dev);
848 break;
849 }
850 }
/* Devices handled by this frontend: xenstore "vfb" nodes. */
static const struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }
};
MODULE_ALIAS("xen:vfb");

static struct xenbus_driver xenfb_driver = {
	.name = "vfb",
	.owner = THIS_MODULE,
	.ids = xenfb_ids,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
};
/* Module init: register the frontend on Xen domU only. */
static int __init xenfb_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	/* Nothing to do if running in dom0. */
	if (is_initial_xendomain())
		return -ENODEV;

	return xenbus_register_frontend(&xenfb_driver);
}
880 static void __exit xenfb_cleanup(void)
881 {
882 return xenbus_unregister_driver(&xenfb_driver);
883 }
/* Module entry/exit points and license. */
module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_LICENSE("GPL");