ia64/xen-unstable

view xen/arch/x86/hvm/stdvga.c @ 16381:d1ac500f77c1

x86, hvm: Allow stdvga acceleration to work with 32-bit x86.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Nov 16 14:40:22 2007 +0000 (2007-11-16)
parents c0bdfda5183d
children 01c9b2b3118a
/*
 * Copyright (c) 2003-2007, Virtual Iron Software, Inc.
 *
 * Portions have been modified by Virtual Iron Software, Inc.
 * (c) 2007. This file and the modifications can be redistributed and/or
 * modified under the terms and conditions of the GNU General Public
 * License, version 2.1 and not any later version of the GPL, as published
 * by the Free Software Foundation.
 *
 * This improves the performance of Standard VGA,
 * the mode used during Windows boot and by the Linux
 * splash screen.
 *
 * It does so by buffering all the stdvga programmed output ops
 * and memory mapped ops (both reads and writes) that are sent to QEMU.
 *
 * We maintain locally essential VGA state so we can respond
 * immediately to input and read ops without waiting for
 * QEMU. We snoop output and write ops to keep our state
 * up-to-date.
 *
 * PIO input ops are satisfied from cached state without
 * bothering QEMU.
 *
 * PIO output and mmio ops are passed through to QEMU, including
 * mmio read ops. This is necessary because mmio reads
 * can have side effects.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/hvm/support.h>
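
/*
 * mask16[] expands a 4-bit plane-select value into a 32-bit mask with one
 * byte per plane: bit n of the index selects 0xff in byte n of the result.
 * This lets plane enables and set/reset values be applied to all four
 * planes of a latched 32-bit VRAM word with a single AND/OR.
 */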
#define PAT(x) (x)
static const uint32_t mask16[16] = {
    PAT(0x00000000),
    PAT(0x000000ff),
    PAT(0x0000ff00),
    PAT(0x0000ffff),
    PAT(0x00ff0000),
    PAT(0x00ff00ff),
    PAT(0x00ffff00),
    PAT(0x00ffffff),
    PAT(0xff000000),
    PAT(0xff0000ff),
    PAT(0xff00ff00),
    PAT(0xff00ffff),
    PAT(0xffff0000),
    PAT(0xffff00ff),
    PAT(0xffffff00),
    PAT(0xffffffff),
};

/* force some bits to zero */
const uint8_t sr_mask[8] = {
    (uint8_t)~0xfc,
    (uint8_t)~0xc2,
    (uint8_t)~0xf0,
    (uint8_t)~0xc0,
    (uint8_t)~0xf1,
    (uint8_t)~0xff,
    (uint8_t)~0xff,
    (uint8_t)~0x00,
};

const uint8_t gr_mask[16] = {
    (uint8_t)~0xf0, /* 0x00 */
    (uint8_t)~0xf0, /* 0x01 */
    (uint8_t)~0xf0, /* 0x02 */
    (uint8_t)~0xe0, /* 0x03 */
    (uint8_t)~0xfc, /* 0x04 */
    (uint8_t)~0x84, /* 0x05 */
    (uint8_t)~0xf0, /* 0x06 */
    (uint8_t)~0xf0, /* 0x07 */
    (uint8_t)~0x00, /* 0x08 */
};
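
/*
 * The locally cached VRAM lives in the 64 domheap pages allocated by
 * stdvga_init() (256kB in total).  vram_getb() maps the page containing
 * byte 'a' of that cache, and vram_getl() maps the page containing 32-bit
 * word 'a' (one byte per plane); both return a pointer that must be
 * released with vram_put() once the access is complete.
 */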
static uint8_t *vram_getb(struct hvm_hw_stdvga *s, unsigned int a)
{
    struct page_info *pg = s->vram_page[(a >> 12) & 0x3f];
    uint8_t *p = map_domain_page(page_to_mfn(pg));
    return &p[a & 0xfff];
}

static uint32_t *vram_getl(struct hvm_hw_stdvga *s, unsigned int a)
{
    struct page_info *pg = s->vram_page[(a >> 10) & 0x3f];
    uint32_t *p = map_domain_page(page_to_mfn(pg));
    return &p[a & 0x3ff];
}

static void vram_put(struct hvm_hw_stdvga *s, void *p)
{
    unmap_domain_page(p);
}

static uint64_t stdvga_inb(uint64_t addr)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    uint8_t val = 0;

    switch ( addr )
    {
    case 0x3c4:                 /* sequencer address register */
        val = s->sr_index;
        break;

    case 0x3c5:                 /* sequencer data register */
        if ( s->sr_index < sizeof(s->sr) )
            val = s->sr[s->sr_index];
        break;

    case 0x3ce:                 /* graphics address register */
        val = s->gr_index;
        break;

    case 0x3cf:                 /* graphics data register */
        val = s->gr[s->gr_index];
        break;

    default:
        gdprintk(XENLOG_WARNING, "unexpected io addr 0x%04x\n", (int)addr);
    }

    return val;
}

static uint64_t stdvga_in(ioreq_t *p)
{
    /* Satisfy reads from sequencer and graphics registers using local values */
    uint64_t data = 0;

    switch ( p->size )
    {
    case 1:
        data = stdvga_inb(p->addr);
        break;

    case 2:
        data = stdvga_inb(p->addr);
        data |= stdvga_inb(p->addr + 1) << 8;
        break;

    case 4:
        data = stdvga_inb(p->addr);
        data |= stdvga_inb(p->addr + 1) << 8;
        data |= stdvga_inb(p->addr + 2) << 16;
        data |= stdvga_inb(p->addr + 3) << 24;
        break;

    case 8:
        data = stdvga_inb(p->addr);
        data |= stdvga_inb(p->addr + 1) << 8;
        data |= stdvga_inb(p->addr + 2) << 16;
        data |= stdvga_inb(p->addr + 3) << 24;
        data |= stdvga_inb(p->addr + 4) << 32;
        data |= stdvga_inb(p->addr + 5) << 40;
        data |= stdvga_inb(p->addr + 6) << 48;
        data |= stdvga_inb(p->addr + 7) << 56;
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", (int)p->size);
    }

    return data;
}

static void stdvga_outb(uint64_t addr, uint8_t val)
{
    /* Bookkeep (via snooping) the sequencer and graphics registers */

    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int prev_stdvga = s->stdvga;

    switch ( addr )
    {
    case 0x3c4:                 /* sequencer address register */
        s->sr_index = val;
        break;

    case 0x3c5:                 /* sequencer data register */
        switch ( s->sr_index )
        {
        case 0x00 ... 0x05:
        case 0x07:
            s->sr[s->sr_index] = val & sr_mask[s->sr_index];
            break;
        case 0x06:
            s->sr[s->sr_index] = ((val & 0x17) == 0x12) ? 0x12 : 0x0f;
            break;
        default:
            if ( s->sr_index < sizeof(s->sr) )
                s->sr[s->sr_index] = val;
            break;
        }
        break;

    case 0x3ce:                 /* graphics address register */
        s->gr_index = val;
        break;

    case 0x3cf:                 /* graphics data register */
        s->gr[s->gr_index] = val;
        if ( s->gr_index < sizeof(gr_mask) )
            s->gr[s->gr_index] &= gr_mask[s->gr_index];
        break;
    }

    /* When in standard vga mode, emulate here all writes to the vram buffer
     * so we can immediately satisfy reads without waiting for qemu. */
    s->stdvga =
        (s->sr[0x07] == 0) &&   /* standard vga mode */
        (s->gr[6] == 0x05);     /* misc graphics register w/ MemoryMapSelect=1
                                 * 0xa0000-0xaffff (64k region), AlphaDis=1 */

    if ( !prev_stdvga && s->stdvga )
    {
        s->cache = 1;           /* (re)start caching video buffer */
        gdprintk(XENLOG_INFO, "entering stdvga and caching modes\n");
    }
    else if ( prev_stdvga && !s->stdvga )
    {
        gdprintk(XENLOG_INFO, "leaving stdvga\n");
    }
}
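
/*
 * Split a port write of up to 8 bytes into individual byte writes so that
 * each sequencer/graphics register update is snooped by stdvga_outb().
 */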
static void stdvga_outv(uint64_t addr, uint64_t data, uint32_t size)
{
    switch ( size )
    {
    case 1:
        stdvga_outb(addr, data);
        break;

    case 2:
        stdvga_outb(addr+0, data >> 0);
        stdvga_outb(addr+1, data >> 8);
        break;

    case 4:
        stdvga_outb(addr+0, data >> 0);
        stdvga_outb(addr+1, data >> 8);
        stdvga_outb(addr+2, data >> 16);
        stdvga_outb(addr+3, data >> 24);
        break;

    case 8:
        stdvga_outb(addr+0, data >> 0);
        stdvga_outb(addr+1, data >> 8);
        stdvga_outb(addr+2, data >> 16);
        stdvga_outb(addr+3, data >> 24);
        stdvga_outb(addr+4, data >> 32);
        stdvga_outb(addr+5, data >> 40);
        stdvga_outb(addr+6, data >> 48);
        stdvga_outb(addr+7, data >> 56);
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
    }
}

static void stdvga_out(ioreq_t *p)
{
    if ( p->data_is_ptr )
    {
        int i, sign = p->df ? -1 : 1;
        uint64_t addr = p->addr, data = p->data, tmp;
        for ( i = 0; i < p->count; i++ )
        {
            hvm_copy_from_guest_phys(&tmp, data, p->size);
            stdvga_outv(addr, tmp, p->size);
            data += sign * p->size;
            addr += sign * p->size;
        }
    }
    else
    {
        stdvga_outv(p->addr, p->data, p->size);
    }
}
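
/*
 * Port I/O intercept.  Output ops are snooped into the local register state
 * and forwarded to qemu on the buffered ioreq ring; input ops on the
 * sequencer/graphics data registers are answered from the local state.  A
 * nonzero return means the access was fully handled here, so no synchronous
 * ioreq to qemu is needed.
 */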
int stdvga_intercept_pio(ioreq_t *p)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int buf = 0, rc;

    if ( p->size > 8 )
    {
        gdprintk(XENLOG_WARNING, "stdvga bad access size %d\n", (int)p->size);
        return 0;
    }

    spin_lock(&s->lock);

    if ( p->dir == IOREQ_READ )
    {
        if ( p->size != 1 )
            gdprintk(XENLOG_WARNING, "unexpected io size:%d\n", (int)p->size);
        if ( p->data_is_ptr )
            gdprintk(XENLOG_WARNING, "unexpected data_is_ptr\n");
        if ( !((p->addr == 0x3c5) && (s->sr_index >= sizeof(sr_mask))) &&
             !((p->addr == 0x3cf) && (s->gr_index >= sizeof(gr_mask))) )
        {
            p->data = stdvga_in(p);
            buf = 1;
        }
    }
    else
    {
        stdvga_out(p);
        buf = 1;
    }

    rc = (buf && hvm_buffered_io_send(p));

    spin_unlock(&s->lock);

    return rc;
}
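
/*
 * MMIO (0xa0000-0xaffff) emulation against the cached VRAM.  GET_PLANE()
 * extracts the byte belonging to plane 'p' from a latched 32-bit word.
 */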
#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)

static uint8_t stdvga_mem_readb(uint64_t addr)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int plane;
    uint32_t ret, *vram_l;
    uint8_t *vram_b;

    addr &= 0x1ffff;
    if ( addr >= 0x10000 )
        return 0xff;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        vram_b = vram_getb(s, addr);
        ret = *vram_b;
        vram_put(s, vram_b);
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        vram_b = vram_getb(s, ((addr & ~1) << 1) | plane);
        ret = *vram_b;
        vram_put(s, vram_b);
    }
    else
    {
        /* standard VGA latched access */
        vram_l = vram_getl(s, addr);
        s->latch = *vram_l;
        vram_put(s, vram_l);

        if ( !(s->gr[5] & 0x08) )
        {
            /* read mode 0 */
            plane = s->gr[4];
            ret = GET_PLANE(s->latch, plane);
        }
        else
        {
            /* read mode 1 */
            ret = (s->latch ^ mask16[s->gr[2]]) & mask16[s->gr[7]];
            ret |= ret >> 16;
            ret |= ret >> 8;
            ret = (~ret) & 0xff;
        }
    }

    return ret;
}

static uint32_t stdvga_mem_read(uint32_t addr, uint32_t size)
{
    uint32_t data = 0;

    switch ( size )
    {
    case 1:
        data = stdvga_mem_readb(addr);
        break;

    case 2:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        break;

    case 4:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        data |= stdvga_mem_readb(addr + 2) << 16;
        data |= stdvga_mem_readb(addr + 3) << 24;
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
        break;
    }

    return data;
}
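
/*
 * Emulate a byte write to the cached VRAM, honouring chain-4 and odd/even
 * addressing as well as the four planar write modes (rotate/set-reset,
 * latch copy, colour expand, masked set-reset), mirroring what real VGA
 * hardware would do with the same register settings.
 */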
static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int plane, write_mode, b, func_select, mask;
    uint32_t write_mask, bit_mask, set_mask, *vram_l;
    uint8_t *vram_b;

    addr &= 0x1ffff;
    if ( addr >= 0x10000 )
        return;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        plane = addr & 3;
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            vram_b = vram_getb(s, addr);
            *vram_b = val;
            vram_put(s, vram_b);
        }
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            addr = ((addr & ~1) << 1) | plane;
            vram_b = vram_getb(s, addr);
            *vram_b = val;
            vram_put(s, vram_b);
        }
    }
    else
    {
        write_mode = s->gr[5] & 3;
        switch ( write_mode )
        {
        default:
        case 0:
            /* rotate */
            b = s->gr[3] & 7;
            val = ((val >> b) | (val << (8 - b))) & 0xff;
            val |= val << 8;
            val |= val << 16;

            /* apply set/reset mask */
            set_mask = mask16[s->gr[1]];
            val = (val & ~set_mask) | (mask16[s->gr[0]] & set_mask);
            bit_mask = s->gr[8];
            break;
        case 1:
            val = s->latch;
            goto do_write;
        case 2:
            val = mask16[val & 0x0f];
            bit_mask = s->gr[8];
            break;
        case 3:
            /* rotate */
            b = s->gr[3] & 7;
            val = (val >> b) | (val << (8 - b));

            bit_mask = s->gr[8] & val;
            val = mask16[s->gr[0]];
            break;
        }

        /* apply logical operation */
        func_select = s->gr[3] >> 3;
        switch ( func_select )
        {
        case 0:
        default:
            /* nothing to do */
            break;
        case 1:
            /* and */
            val &= s->latch;
            break;
        case 2:
            /* or */
            val |= s->latch;
            break;
        case 3:
            /* xor */
            val ^= s->latch;
            break;
        }

        /* apply bit mask */
        bit_mask |= bit_mask << 8;
        bit_mask |= bit_mask << 16;
        val = (val & bit_mask) | (s->latch & ~bit_mask);

    do_write:
        /* mask data according to sr[2] */
        mask = s->sr[2];
        write_mask = mask16[mask];
        vram_l = vram_getl(s, addr);
        *vram_l = (*vram_l & ~write_mask) | (val & write_mask);
        vram_put(s, vram_l);
    }
}

static void stdvga_mem_write(uint32_t addr, uint32_t data, uint32_t size)
{
    /* Intercept mmio write */
    switch ( size )
    {
    case 1:
        stdvga_mem_writeb(addr, (data >> 0) & 0xff);
        break;

    case 2:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        break;

    case 4:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
        break;
    }
}
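
/*
 * mmio_move() below handles IOREQ_TYPE_COPY (plain move) requests against
 * the cached VRAM, in either direction and for both immediate-data and
 * data_is_ptr (rep move) forms.  read_data records the value returned by
 * the most recent read; it is not otherwise consumed in this file.
 */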
static uint32_t read_data;

static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
{
    int i;
    int sign = p->df ? -1 : 1;

    if ( p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            uint32_t addr = p->addr, data = p->data, tmp;
            for ( i = 0; i < p->count; i++ )
            {
                tmp = stdvga_mem_read(addr, p->size);
                hvm_copy_to_guest_phys(data, &tmp, p->size);
                data += sign * p->size;
                addr += sign * p->size;
            }
        }
        else
        {
            uint32_t addr = p->addr, data = p->data, tmp;
            for ( i = 0; i < p->count; i++ )
            {
                hvm_copy_from_guest_phys(&tmp, data, p->size);
                stdvga_mem_write(addr, tmp, p->size);
                data += sign * p->size;
                addr += sign * p->size;
            }
        }
    }
    else
    {
        if ( p->dir == IOREQ_READ )
        {
            uint32_t addr = p->addr;
            for ( i = 0; i < p->count; i++ )
            {
                p->data = stdvga_mem_read(addr, p->size);
                addr += sign * p->size;
            }
        }
        else
        {
            uint32_t addr = p->addr;
            for ( i = 0; i < p->count; i++ )
            {
                stdvga_mem_write(addr, p->data, p->size);
                addr += sign * p->size;
            }
        }
    }

    read_data = p->data;
    return 1;
}
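
/*
 * Read-modify-write ioreq types (and/or/xor/add/sub) are emulated by
 * reading the cached VRAM, applying the matching helper from op_array[],
 * and writing the result back.
 */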
static uint32_t op_and(uint32_t a, uint32_t b) { return a & b; }
static uint32_t op_or (uint32_t a, uint32_t b) { return a | b; }
static uint32_t op_xor(uint32_t a, uint32_t b) { return a ^ b; }
static uint32_t op_add(uint32_t a, uint32_t b) { return a + b; }
static uint32_t op_sub(uint32_t a, uint32_t b) { return a - b; }
static uint32_t (*op_array[])(uint32_t, uint32_t) = {
    [IOREQ_TYPE_AND] = op_and,
    [IOREQ_TYPE_OR ] = op_or,
    [IOREQ_TYPE_XOR] = op_xor,
    [IOREQ_TYPE_ADD] = op_add,
    [IOREQ_TYPE_SUB] = op_sub
};

static int mmio_op(struct hvm_hw_stdvga *s, ioreq_t *p)
{
    uint32_t orig, mod = 0;
    orig = stdvga_mem_read(p->addr, p->size);

    if ( p->dir == IOREQ_WRITE )
    {
        mod = (op_array[p->type])(orig, p->data);
        stdvga_mem_write(p->addr, mod, p->size);
    }

    return 0; /* Don't try to buffer these operations */
}
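
/*
 * Main MMIO intercept.  While the device is in stdvga mode and caching is
 * enabled, accesses are emulated against the local VRAM cache; any request
 * type we cannot handle turns caching off.  Outside that mode only plain
 * writes are buffered.  As with the PIO intercept, a nonzero return means
 * no synchronous ioreq to qemu is needed.
 */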
int stdvga_intercept_mmio(ioreq_t *p)
{
    struct domain *d = current->domain;
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    int buf = 0, rc;

    if ( p->size > 8 )
    {
        gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
        return 0;
    }

    spin_lock(&s->lock);

    if ( s->stdvga && s->cache )
    {
        switch ( p->type )
        {
        case IOREQ_TYPE_COPY:
            buf = mmio_move(s, p);
            break;
        case IOREQ_TYPE_AND:
        case IOREQ_TYPE_OR:
        case IOREQ_TYPE_XOR:
        case IOREQ_TYPE_ADD:
        case IOREQ_TYPE_SUB:
            buf = mmio_op(s, p);
            break;
        default:
            gdprintk(XENLOG_WARNING, "unsupported mmio request type:%d "
                     "addr:0x%04x data:0x%04x size:%d count:%d state:%d "
                     "isptr:%d dir:%d df:%d\n",
                     p->type, (int)p->addr, (int)p->data, (int)p->size,
                     (int)p->count, p->state,
                     p->data_is_ptr, p->dir, p->df);
            s->cache = 0;
        }
    }
    else
    {
        buf = (p->dir == IOREQ_WRITE);
    }

    rc = (buf && hvm_buffered_io_send(p));

    spin_unlock(&s->lock);

    return rc;
}
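
/*
 * Allocate the 64 pages (256kB) backing the local VRAM cache and register
 * the port I/O and buffered MMIO handlers.  If any page allocation fails,
 * no handlers are registered and the accelerator stays disabled; pages
 * already allocated are released in stdvga_deinit().
 */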
void stdvga_init(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    struct page_info *pg;
    void *p;
    int i;

    memset(s, 0, sizeof(*s));
    spin_lock_init(&s->lock);

    for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    {
        if ( (pg = alloc_domheap_page(NULL)) == NULL )
            break;
        s->vram_page[i] = pg;
        p = map_domain_page(page_to_mfn(pg));
        clear_page(p);
        unmap_domain_page(p);
    }

    if ( i == ARRAY_SIZE(s->vram_page) )
    {
        /* Sequencer registers. */
        register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
        /* Graphics registers. */
        register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);
        /* MMIO. */
        register_buffered_io_handler(
            d, 0xa0000, 0x10000, stdvga_intercept_mmio);
    }
}

void stdvga_deinit(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    int i;

    for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    {
        if ( s->vram_page[i] == NULL )
            continue;
        free_domheap_page(s->vram_page[i]);
        s->vram_page[i] = NULL;
    }
}