ia64/linux-2.6.18-xen.hg

view drivers/xen/fbfront/xenfb.c @ 729:f29bf0bf3e97

linux: re-order fbfront initialization

The helper thread shouldn't be started before xenfb_connect_backend(),
to avoid the thread using the potentially not yet initialized irq.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Nov 19 13:15:08 2008 +0000 (2008-11-19)
parents bfc040135633
children c9783c08495c
line source
1 /*
2 * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
3 *
4 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
5 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
6 *
7 * Based on linux/drivers/video/q40fb.c
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of this archive for
11 * more details.
12 */
14 /*
15 * TODO:
16 *
17 * Switch to grant tables when they become capable of dealing with the
18 * frame buffer.
19 */
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/fb.h>
24 #include <linux/module.h>
25 #include <linux/vmalloc.h>
26 #include <linux/mm.h>
27 #include <linux/mutex.h>
28 #include <asm/hypervisor.h>
29 #include <xen/evtchn.h>
30 #include <xen/interface/io/fbif.h>
31 #include <xen/interface/io/protocols.h>
32 #include <xen/xenbus.h>
33 #include <linux/kthread.h>
/* One userspace mmap() of the shadow framebuffer. */
struct xenfb_mapping
{
	struct list_head link;		/* entry in xenfb_info.mappings */
	struct vm_area_struct *vma;	/* the user VMA backing this mapping */
	atomic_t map_refs;		/* VMA open count; freed when it hits 0 */
	int faults;			/* pages faulted in since the last zap */
	struct xenfb_info *info;	/* owning device instance */
};
/* Per-device state; locking rules are described in the comment below. */
struct xenfb_info
{
	struct task_struct *kthread;	/* worker flushing dirty rects/resizes */
	wait_queue_head_t wq;		/* wakes kthread (timer, faults) */

	unsigned char *fb;		/* vmalloc'ed shadow framebuffer */
	struct fb_info *fb_info;
	struct timer_list refresh;	/* rate-limits updates to xenfb_fps */
	int dirty;			/* non-zero: dirty rectangle pending */
	int x1, y1, x2, y2;		/* dirty rectangle,
					   protected by dirty_lock */
	spinlock_t dirty_lock;
	struct mutex mm_lock;		/* protects mappings */
	int nr_pages;			/* pages backing fb */
	struct page **pages;		/* page structs for fb */
	struct list_head mappings;	/* protected by mm_lock */

	int irq;			/* event-channel IRQ, -1 when unbound */
	struct xenfb_page *page;	/* shared page: rings + geometry */
	unsigned long *mfns;		/* MFNs of the framebuffer pages */
	int update_wanted;		/* XENFB_TYPE_UPDATE wanted */
	int feature_resize;		/* Backend has resize feature */
	struct xenfb_resize resize;	/* pending resize request,
					   protected by resize_lock */
	int resize_dpy;			/* resize request pending flag */
	spinlock_t resize_lock;

	struct xenbus_device *xbdev;
};
73 /*
74 * There are three locks:
75 * spinlock resize_lock protecting resize_dpy and resize
76 * spinlock dirty_lock protecting the dirty rectangle
77 * mutex mm_lock protecting mappings.
78 *
79 * How the dirty and mapping locks work together
80 *
81 * The problem is that dirty rectangle and mappings aren't
82 * independent: the dirty rectangle must cover all faulted pages in
83 * mappings. We need to prove that our locking maintains this
84 * invariant.
85 *
86 * There are several kinds of critical regions:
87 *
88 * 1. Holding only dirty_lock: xenfb_refresh(). May run in
89 * interrupts. Extends the dirty rectangle. Trivially preserves
90 * invariant.
91 *
92 * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close(). Touch
93 * only mappings. The former creates unfaulted pages. Preserves
94 * invariant. The latter removes pages. Preserves invariant.
95 *
96 * 3. Holding both locks: xenfb_vm_nopage(). Extends the dirty
97 * rectangle and updates mappings consistently. Preserves
98 * invariant.
99 *
100 * 4. The ugliest one: xenfb_update_screen(). Clear the dirty
101 * rectangle and update mappings consistently.
102 *
103 * We can't simply hold both locks, because zap_page_range() cannot
104 * be called with a spinlock held.
105 *
106 * Therefore, we first clear the dirty rectangle with both locks
107 * held. Then we unlock dirty_lock and update the mappings.
108 * Critical regions that hold only dirty_lock may interfere with
109 * that. This can only be region 1: xenfb_refresh(). But that
110 * just extends the dirty rectangle, which can't harm the
111 * invariant.
112 *
113 * But FIXME: the invariant is too weak. It misses that the fault
114 * record in mappings must be consistent with the mapping of pages in
115 * the associated address space! do_no_page() updates the PTE after
116 * xenfb_vm_nopage() returns, i.e. outside the critical region. This
117 * allows the following race:
118 *
119 * X writes to some address in the Xen frame buffer
120 * Fault - call do_no_page()
121 * call xenfb_vm_nopage()
122 * grab mm_lock
123 * map->faults++;
124 * release mm_lock
125 * return back to do_no_page()
126 * (preempted, or SMP)
127 * Xen worker thread runs.
128 * grab mm_lock
129 * look at mappings
130 * find this mapping, zaps its pages (but page not in pte yet)
131 * clear map->faults
132 * releases mm_lock
133 * (back to X process)
134 * put page in X's pte
135 *
136 * Oh well, we wont be updating the writes to this page anytime soon.
137 */
#define MB_ (1024*1024)
#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)

/* Indices into the "video" module parameter array. */
enum {KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT};
static int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
module_param_array(video, int, NULL, 0);
MODULE_PARM_DESC(video,
	"Size of video memory in MB and width,height in pixels, default = (2,800,600)");

/* Maximum rate at which updates are pushed to the backend, frames/sec. */
static int xenfb_fps = 20;

/* Forward declarations. */
static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);
/*
 * Push one event onto the shared out ring and kick the backend.
 * Caller must have checked !xenfb_queue_full() first; the barriers
 * order the payload write against the producer-index update.
 */
static void xenfb_send_event(struct xenfb_info *info,
			     union xenfb_out_event *event)
{
	__u32 prod;

	prod = info->page->out_prod;
	/* caller ensures !xenfb_queue_full() */
	mb();			/* ensure ring space available */
	XENFB_OUT_RING_REF(info->page, prod) = *event;
	wmb();			/* ensure ring contents visible */
	info->page->out_prod = prod + 1;

	notify_remote_via_irq(info->irq);
}
169 static void xenfb_do_update(struct xenfb_info *info,
170 int x, int y, int w, int h)
171 {
172 union xenfb_out_event event;
174 memset(&event, 0, sizeof(event));
175 event.type = XENFB_TYPE_UPDATE;
176 event.update.x = x;
177 event.update.y = y;
178 event.update.width = w;
179 event.update.height = h;
181 /* caller ensures !xenfb_queue_full() */
182 xenfb_send_event(info, &event);
183 }
185 static void xenfb_do_resize(struct xenfb_info *info)
186 {
187 union xenfb_out_event event;
189 memset(&event, 0, sizeof(event));
190 event.resize = info->resize;
192 /* caller ensures !xenfb_queue_full() */
193 xenfb_send_event(info, &event);
194 }
/* True when the out ring has no free slot for another event. */
static int xenfb_queue_full(struct xenfb_info *info)
{
	__u32 cons, prod;

	prod = info->page->out_prod;
	cons = info->page->out_cons;
	return prod - cons == XENFB_OUT_RING_LEN;
}
205 static void xenfb_update_screen(struct xenfb_info *info)
206 {
207 unsigned long flags;
208 int y1, y2, x1, x2;
209 struct xenfb_mapping *map;
211 if (!info->update_wanted)
212 return;
213 if (xenfb_queue_full(info))
214 return;
216 mutex_lock(&info->mm_lock);
218 spin_lock_irqsave(&info->dirty_lock, flags);
219 y1 = info->y1;
220 y2 = info->y2;
221 x1 = info->x1;
222 x2 = info->x2;
223 info->x1 = info->y1 = INT_MAX;
224 info->x2 = info->y2 = 0;
225 spin_unlock_irqrestore(&info->dirty_lock, flags);
227 list_for_each_entry(map, &info->mappings, link) {
228 if (!map->faults)
229 continue;
230 zap_page_range(map->vma, map->vma->vm_start,
231 map->vma->vm_end - map->vma->vm_start, NULL);
232 map->faults = 0;
233 }
235 mutex_unlock(&info->mm_lock);
237 if (x2 < x1 || y2 < y1) {
238 printk("xenfb_update_screen bogus rect %d %d %d %d\n",
239 x1, x2, y1, y2);
240 WARN_ON(1);
241 }
242 xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
243 }
245 static void xenfb_handle_resize_dpy(struct xenfb_info *info)
246 {
247 unsigned long flags;
249 spin_lock_irqsave(&info->resize_lock, flags);
250 if (info->resize_dpy) {
251 if (!xenfb_queue_full(info)) {
252 info->resize_dpy = 0;
253 xenfb_do_resize(info);
254 }
255 }
256 spin_unlock_irqrestore(&info->resize_lock, flags);
257 }
259 static int xenfb_thread(void *data)
260 {
261 struct xenfb_info *info = data;
263 while (!kthread_should_stop()) {
264 xenfb_handle_resize_dpy(info);
265 if (info->dirty) {
266 info->dirty = 0;
267 xenfb_update_screen(info);
268 }
269 wait_event_interruptible(info->wq,
270 kthread_should_stop() || info->dirty);
271 try_to_freeze();
272 }
273 return 0;
274 }
276 static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
277 unsigned blue, unsigned transp,
278 struct fb_info *info)
279 {
280 u32 v;
282 if (regno > info->cmap.len)
283 return 1;
285 red >>= (16 - info->var.red.length);
286 green >>= (16 - info->var.green.length);
287 blue >>= (16 - info->var.blue.length);
289 v = (red << info->var.red.offset) |
290 (green << info->var.green.offset) |
291 (blue << info->var.blue.offset);
293 /* FIXME is this sane? check against xxxfb_setcolreg()! */
294 switch (info->var.bits_per_pixel) {
295 case 16:
296 case 24:
297 case 32:
298 ((u32 *)info->pseudo_palette)[regno] = v;
299 break;
300 }
302 return 0;
303 }
305 static void xenfb_timer(unsigned long data)
306 {
307 struct xenfb_info *info = (struct xenfb_info *)data;
308 wake_up(&info->wq);
309 }
311 static void __xenfb_refresh(struct xenfb_info *info,
312 int x1, int y1, int w, int h)
313 {
314 int y2, x2;
316 y2 = y1 + h;
317 x2 = x1 + w;
319 if (info->y1 > y1)
320 info->y1 = y1;
321 if (info->y2 < y2)
322 info->y2 = y2;
323 if (info->x1 > x1)
324 info->x1 = x1;
325 if (info->x2 < x2)
326 info->x2 = x2;
327 info->dirty = 1;
329 if (timer_pending(&info->refresh))
330 return;
332 mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
333 }
/* Locked wrapper around __xenfb_refresh(). */
static void xenfb_refresh(struct xenfb_info *info,
			  int x1, int y1, int w, int h)
{
	unsigned long flags;

	spin_lock_irqsave(&info->dirty_lock, flags);
	__xenfb_refresh(info, x1, y1, w, h);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
}
345 static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
346 {
347 struct xenfb_info *info = p->par;
349 cfb_fillrect(p, rect);
350 xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
351 }
353 static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
354 {
355 struct xenfb_info *info = p->par;
357 cfb_imageblit(p, image);
358 xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
359 }
361 static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
362 {
363 struct xenfb_info *info = p->par;
365 cfb_copyarea(p, area);
366 xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
367 }
369 static void xenfb_vm_open(struct vm_area_struct *vma)
370 {
371 struct xenfb_mapping *map = vma->vm_private_data;
372 atomic_inc(&map->map_refs);
373 }
375 static void xenfb_vm_close(struct vm_area_struct *vma)
376 {
377 struct xenfb_mapping *map = vma->vm_private_data;
378 struct xenfb_info *info = map->info;
380 mutex_lock(&info->mm_lock);
381 if (atomic_dec_and_test(&map->map_refs)) {
382 list_del(&map->link);
383 kfree(map);
384 }
385 mutex_unlock(&info->mm_lock);
386 }
/*
 * Page-fault handler: hand out the shadow framebuffer page and record
 * the fault so xenfb_update_screen() knows this mapping has live PTEs.
 * Takes mm_lock then dirty_lock so fault accounting and the dirty
 * rectangle stay consistent (see the locking discussion above).
 */
static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
				    unsigned long vaddr, int *type)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;
	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	if (pgnr >= info->nr_pages)
		return NOPAGE_SIGBUS;

	mutex_lock(&info->mm_lock);
	spin_lock_irqsave(&info->dirty_lock, flags);
	page = info->pages[pgnr];
	get_page(page);
	map->faults++;

	/* Mark the full scanline span covered by this page dirty. */
	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
	if (y2 > info->fb_info->var.yres)
		y2 = info->fb_info->var.yres;
	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
	mutex_unlock(&info->mm_lock);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}
/* VMA callbacks for user mappings of the shadow framebuffer. */
static struct vm_operations_struct xenfb_vm_ops = {
	.open	= xenfb_vm_open,
	.close	= xenfb_vm_close,
	.nopage	= xenfb_vm_nopage,
};
427 static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
428 {
429 struct xenfb_info *info = fb_info->par;
430 struct xenfb_mapping *map;
431 int map_pages;
433 if (!(vma->vm_flags & VM_WRITE))
434 return -EINVAL;
435 if (!(vma->vm_flags & VM_SHARED))
436 return -EINVAL;
437 if (vma->vm_pgoff != 0)
438 return -EINVAL;
440 map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
441 if (map_pages > info->nr_pages)
442 return -EINVAL;
444 map = kzalloc(sizeof(*map), GFP_KERNEL);
445 if (map == NULL)
446 return -ENOMEM;
448 map->vma = vma;
449 map->faults = 0;
450 map->info = info;
451 atomic_set(&map->map_refs, 1);
453 mutex_lock(&info->mm_lock);
454 list_add(&map->link, &info->mappings);
455 mutex_unlock(&info->mm_lock);
457 vma->vm_ops = &xenfb_vm_ops;
458 vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
459 vma->vm_private_data = map;
461 return 0;
462 }
464 static int
465 xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
466 {
467 struct xenfb_info *xenfb_info;
468 int required_mem_len;
470 xenfb_info = info->par;
472 if (!xenfb_info->feature_resize) {
473 if (var->xres == video[KPARAM_WIDTH] &&
474 var->yres == video[KPARAM_HEIGHT] &&
475 var->bits_per_pixel == xenfb_info->page->depth) {
476 return 0;
477 }
478 return -EINVAL;
479 }
481 /* Can't resize past initial width and height */
482 if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
483 return -EINVAL;
485 required_mem_len = var->xres * var->yres * (xenfb_info->page->depth / 8);
486 if (var->bits_per_pixel == xenfb_info->page->depth &&
487 var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
488 required_mem_len <= info->fix.smem_len) {
489 var->xres_virtual = var->xres;
490 var->yres_virtual = var->yres;
491 return 0;
492 }
493 return -EINVAL;
494 }
496 static int xenfb_set_par(struct fb_info *info)
497 {
498 struct xenfb_info *xenfb_info;
499 unsigned long flags;
501 xenfb_info = info->par;
503 spin_lock_irqsave(&xenfb_info->resize_lock, flags);
504 xenfb_info->resize.type = XENFB_TYPE_RESIZE;
505 xenfb_info->resize.width = info->var.xres;
506 xenfb_info->resize.height = info->var.yres;
507 xenfb_info->resize.stride = info->fix.line_length;
508 xenfb_info->resize.depth = info->var.bits_per_pixel;
509 xenfb_info->resize.offset = 0;
510 xenfb_info->resize_dpy = 1;
511 spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
512 return 0;
513 }
/* fbdev operations: software drawing via cfb_* plus dirty tracking. */
static struct fb_ops xenfb_fb_ops = {
	.owner = THIS_MODULE,
	.fb_setcolreg = xenfb_setcolreg,
	.fb_fillrect = xenfb_fillrect,
	.fb_copyarea = xenfb_copyarea,
	.fb_imageblit = xenfb_imageblit,
	.fb_mmap = xenfb_mmap,
	.fb_check_var = xenfb_check_var,
	.fb_set_par = xenfb_set_par,
};
526 static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
527 struct pt_regs *regs)
528 {
529 /*
530 * No in events recognized, simply ignore them all.
531 * If you need to recognize some, see xenbkd's input_handler()
532 * for how to do that.
533 */
534 struct xenfb_info *info = dev_id;
535 struct xenfb_page *page = info->page;
537 if (page->in_cons != page->in_prod) {
538 info->page->in_cons = info->page->in_prod;
539 notify_remote_via_irq(info->irq);
540 }
541 return IRQ_HANDLED;
542 }
/* Machine frame number backing a vmalloc'ed virtual address. */
static unsigned long vmalloc_to_mfn(void *address)
{
	return pfn_to_mfn(vmalloc_to_pfn(address));
}
549 static int __devinit xenfb_probe(struct xenbus_device *dev,
550 const struct xenbus_device_id *id)
551 {
552 struct xenfb_info *info;
553 struct fb_info *fb_info;
554 int fb_size;
555 int val;
556 int ret;
558 info = kzalloc(sizeof(*info), GFP_KERNEL);
559 if (info == NULL) {
560 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
561 return -ENOMEM;
562 }
564 /* Limit kernel param videoram amount to what is in xenstore */
565 if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
566 if (val < video[KPARAM_MEM])
567 video[KPARAM_MEM] = val;
568 }
570 /* If requested res does not fit in available memory, use default */
571 fb_size = video[KPARAM_MEM] * MB_;
572 if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH/8 > fb_size) {
573 video[KPARAM_WIDTH] = XENFB_WIDTH;
574 video[KPARAM_HEIGHT] = XENFB_HEIGHT;
575 fb_size = XENFB_DEFAULT_FB_LEN;
576 }
578 dev->dev.driver_data = info;
579 info->xbdev = dev;
580 info->irq = -1;
581 info->x1 = info->y1 = INT_MAX;
582 spin_lock_init(&info->dirty_lock);
583 spin_lock_init(&info->resize_lock);
584 mutex_init(&info->mm_lock);
585 init_waitqueue_head(&info->wq);
586 init_timer(&info->refresh);
587 info->refresh.function = xenfb_timer;
588 info->refresh.data = (unsigned long)info;
589 INIT_LIST_HEAD(&info->mappings);
591 info->fb = vmalloc(fb_size);
592 if (info->fb == NULL)
593 goto error_nomem;
594 memset(info->fb, 0, fb_size);
596 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
598 info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
599 GFP_KERNEL);
600 if (info->pages == NULL)
601 goto error_nomem;
603 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
604 if (!info->mfns)
605 goto error_nomem;
607 /* set up shared page */
608 info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
609 if (!info->page)
610 goto error_nomem;
612 fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
613 /* see fishy hackery below */
614 if (fb_info == NULL)
615 goto error_nomem;
617 /* FIXME fishy hackery */
618 fb_info->pseudo_palette = fb_info->par;
619 fb_info->par = info;
620 /* /FIXME */
621 fb_info->screen_base = info->fb;
623 fb_info->fbops = &xenfb_fb_ops;
624 fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
625 fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
626 fb_info->var.bits_per_pixel = XENFB_DEPTH;
628 fb_info->var.red = (struct fb_bitfield){16, 8, 0};
629 fb_info->var.green = (struct fb_bitfield){8, 8, 0};
630 fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
632 fb_info->var.activate = FB_ACTIVATE_NOW;
633 fb_info->var.height = -1;
634 fb_info->var.width = -1;
635 fb_info->var.vmode = FB_VMODE_NONINTERLACED;
637 fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
638 fb_info->fix.line_length = fb_info->var.xres * (XENFB_DEPTH / 8);
639 fb_info->fix.smem_start = 0;
640 fb_info->fix.smem_len = fb_size;
641 strcpy(fb_info->fix.id, "xen");
642 fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
643 fb_info->fix.accel = FB_ACCEL_NONE;
645 fb_info->flags = FBINFO_FLAG_DEFAULT;
647 ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
648 if (ret < 0) {
649 framebuffer_release(fb_info);
650 xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
651 goto error;
652 }
654 xenfb_init_shared_page(info, fb_info);
656 ret = register_framebuffer(fb_info);
657 if (ret) {
658 fb_dealloc_cmap(&info->fb_info->cmap);
659 framebuffer_release(fb_info);
660 xenbus_dev_fatal(dev, ret, "register_framebuffer");
661 goto error;
662 }
663 info->fb_info = fb_info;
665 ret = xenfb_connect_backend(dev, info);
666 if (ret < 0)
667 goto error;
669 /* FIXME should this be delayed until backend XenbusStateConnected? */
670 info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
671 if (IS_ERR(info->kthread)) {
672 ret = PTR_ERR(info->kthread);
673 info->kthread = NULL;
674 xenbus_dev_fatal(dev, ret, "register_framebuffer");
675 goto error;
676 }
678 return 0;
680 error_nomem:
681 ret = -ENOMEM;
682 xenbus_dev_fatal(dev, ret, "allocating device memory");
683 error:
684 xenfb_remove(dev);
685 return ret;
686 }
/*
 * Resume after save/restore: the old event channel is gone and the
 * framebuffer MFNs may have changed, so rebuild the shared page and
 * reconnect to the (new) backend.
 */
static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info, info->fb_info);
	return xenfb_connect_backend(dev, info);
}
697 static int xenfb_remove(struct xenbus_device *dev)
698 {
699 struct xenfb_info *info = dev->dev.driver_data;
701 del_timer(&info->refresh);
702 if (info->kthread)
703 kthread_stop(info->kthread);
704 xenfb_disconnect_backend(info);
705 if (info->fb_info) {
706 unregister_framebuffer(info->fb_info);
707 fb_dealloc_cmap(&info->fb_info->cmap);
708 framebuffer_release(info->fb_info);
709 }
710 free_page((unsigned long)info->page);
711 vfree(info->mfns);
712 kfree(info->pages);
713 vfree(info->fb);
714 kfree(info);
716 return 0;
717 }
/*
 * (Re)initialize the shared page: build the two-level page directory
 * (pd[] holds the MFNs of the pages containing the mfns[] array),
 * publish the framebuffer geometry, and reset both event rings.
 */
static void xenfb_init_shared_page(struct xenfb_info *info,
				   struct fb_info * fb_info)
{
	int i;
	int epd = PAGE_SIZE / sizeof(info->mfns[0]);	/* entries per pd page */

	for (i = 0; i < info->nr_pages; i++)
		info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);

	for (i = 0; i < info->nr_pages; i++)
		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);

	/* pd[i] = MFN of the i-th page of the mfns[] array */
	for (i = 0; i * epd < info->nr_pages; i++)
		info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);

	info->page->width = fb_info->var.xres;
	info->page->height = fb_info->var.yres;
	info->page->depth = fb_info->var.bits_per_pixel;
	info->page->line_length = fb_info->fix.line_length;
	info->page->mem_length = fb_info->fix.smem_len;
	info->page->in_cons = info->page->in_prod = 0;
	info->page->out_cons = info->page->out_prod = 0;
}
/*
 * Bind an event channel to the backend domain, publish the shared page
 * reference, event channel, protocol and feature-update flag in
 * xenstore (one transaction, retried on -EAGAIN), then switch the
 * device to Initialised.  On error the bound irq is left in info->irq;
 * callers clean it up via xenfb_disconnect_backend().
 */
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret;
	struct xenbus_transaction xbt;

	ret = bind_listening_port_to_irqhandler(
		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret,
				 "bind_listening_port_to_irqhandler");
		return ret;
	}
	info->irq = ret;

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		return ret;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    irq_to_evtchn_port(info->irq));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;	/* transaction raced; retry */
		xenbus_dev_fatal(dev, ret, "completing transaction");
		return ret;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);	/* abort */
	xenbus_dev_fatal(dev, ret, "writing xenstore");
	return ret;
}
/* Tear down the event-channel binding, if one was established. */
static void xenfb_disconnect_backend(struct xenfb_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}
/*
 * React to backend xenbus state changes.  Note the goto from the
 * Connected case into the InitWait case label below.
 */
static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	struct xenfb_info *info = dev->dev.driver_data;
	int val;

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	InitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			goto InitWait; /* no InitWait seen yet, fudge it */

		/* Only send UPDATE events if the backend asked for them. */
		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				 "request-update", "%d", &val) < 0)
			val = 0;
		if (val)
			info->update_wanted = 1;

		if (xenbus_scanf(XBT_NIL, dev->otherend,
				 "feature-resize", "%d", &val) < 0)
			val = 0;
		info->feature_resize = val;
		break;

	case XenbusStateClosing:
		// FIXME is this safe in any dev->state?
		xenbus_frontend_closed(dev);
		break;
	}
}
/* Frontend device type ("vfb") this driver binds to. */
static const struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }
};
MODULE_ALIAS("xen:vfb");
/* xenbus driver glue. */
static struct xenbus_driver xenfb_driver = {
	.name = "vfb",
	.owner = THIS_MODULE,
	.ids = xenfb_ids,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
};
867 static int __init xenfb_init(void)
868 {
869 if (!is_running_on_xen())
870 return -ENODEV;
872 /* Nothing to do if running in dom0. */
873 if (is_initial_xendomain())
874 return -ENODEV;
876 return xenbus_register_frontend(&xenfb_driver);
877 }
879 static void __exit xenfb_cleanup(void)
880 {
881 return xenbus_unregister_driver(&xenfb_driver);
882 }
/* Module entry/exit points and license. */
module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_LICENSE("GPL");