
xen/arch/x86/hvm/stdvga.c @ 17672:3a5750f4a738

stdvga: handle 64bit io operations

Handle 64-bit operations in stdvga instead of throwing away the
upper 32 bits.

This fixes some noisy xen messages like "invalid io size: 8".

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon May 19 09:40:53 2008 +0100

/*
 * Copyright (c) 2003-2007, Virtual Iron Software, Inc.
 *
 * Portions have been modified by Virtual Iron Software, Inc.
 * (c) 2007. This file and the modifications can be redistributed and/or
 * modified under the terms and conditions of the GNU General Public
 * License, version 2.1 and not any later version of the GPL, as published
 * by the Free Software Foundation.
 *
 * This improves the performance of Standard VGA,
 * the mode used during Windows boot and by the Linux
 * splash screen.
 *
 * It does so by buffering all the stdvga programmed output ops
 * and memory mapped ops (both reads and writes) that are sent to QEMU.
 *
 * We maintain locally essential VGA state so we can respond
 * immediately to input and read ops without waiting for
 * QEMU. We snoop output and write ops to keep our state
 * up-to-date.
 *
 * PIO input ops are satisfied from cached state without
 * bothering QEMU.
 *
 * PIO output and mmio ops are passed through to QEMU, including
 * mmio read ops. This is necessary because mmio reads
 * can have side effects.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/hvm/support.h>
#include <xen/numa.h>
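
/*
 * Expand a 4-bit plane-select value into a 32-bit mask with one byte per
 * plane: e.g. mask16[0x5] == 0x00ff00ff selects planes 0 and 2.  This
 * matches the byte-per-plane layout used by the latched accesses below.
 */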
#define PAT(x) (x)
static const uint32_t mask16[16] = {
    PAT(0x00000000),
    PAT(0x000000ff),
    PAT(0x0000ff00),
    PAT(0x0000ffff),
    PAT(0x00ff0000),
    PAT(0x00ff00ff),
    PAT(0x00ffff00),
    PAT(0x00ffffff),
    PAT(0xff000000),
    PAT(0xff0000ff),
    PAT(0xff00ff00),
    PAT(0xff00ffff),
    PAT(0xffff0000),
    PAT(0xffff00ff),
    PAT(0xffffff00),
    PAT(0xffffffff),
};

/* force some bits to zero */
const uint8_t sr_mask[8] = {
    (uint8_t)~0xfc,
    (uint8_t)~0xc2,
    (uint8_t)~0xf0,
    (uint8_t)~0xc0,
    (uint8_t)~0xf1,
    (uint8_t)~0xff,
    (uint8_t)~0xff,
    (uint8_t)~0x00,
};
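
/* Writable bits in each graphics controller register. */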
const uint8_t gr_mask[9] = {
    (uint8_t)~0xf0, /* 0x00 */
    (uint8_t)~0xf0, /* 0x01 */
    (uint8_t)~0xf0, /* 0x02 */
    (uint8_t)~0xe0, /* 0x03 */
    (uint8_t)~0xfc, /* 0x04 */
    (uint8_t)~0x84, /* 0x05 */
    (uint8_t)~0xf0, /* 0x06 */
    (uint8_t)~0xf0, /* 0x07 */
    (uint8_t)~0x00, /* 0x08 */
};
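
/*
 * The VRAM shadow (four 64K planes, 256KiB in all, judging by the 0x3f
 * page masks) is built from individually allocated domheap pages.
 * vram_getb() maps the page holding byte offset @a; vram_getl() maps the
 * page holding 32-bit-word offset @a (one dword carries one byte of each
 * plane).  Both mappings must be released with vram_put().
 */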
static uint8_t *vram_getb(struct hvm_hw_stdvga *s, unsigned int a)
{
    struct page_info *pg = s->vram_page[(a >> 12) & 0x3f];
    uint8_t *p = map_domain_page(page_to_mfn(pg));
    return &p[a & 0xfff];
}

static uint32_t *vram_getl(struct hvm_hw_stdvga *s, unsigned int a)
{
    struct page_info *pg = s->vram_page[(a >> 10) & 0x3f];
    uint32_t *p = map_domain_page(page_to_mfn(pg));
    return &p[a & 0x3ff];
}

static void vram_put(struct hvm_hw_stdvga *s, void *p)
{
    unmap_domain_page(p);
}
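
/*
 * Snoop a byte write to one of the intercepted VGA ports (0x3c4/0x3c5
 * sequencer index/data, 0x3ce/0x3cf graphics index/data).  Returns 1 if
 * the write was tracked in the cached register state.
 */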
static int stdvga_outb(uint64_t addr, uint8_t val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int rc = 1, prev_stdvga = s->stdvga;

    switch ( addr )
    {
    case 0x3c4:                 /* sequencer address register */
        s->sr_index = val;
        break;

    case 0x3c5:                 /* sequencer data register */
        rc = (s->sr_index < sizeof(s->sr));
        if ( rc )
            s->sr[s->sr_index] = val & sr_mask[s->sr_index];
        break;

    case 0x3ce:                 /* graphics address register */
        s->gr_index = val;
        break;

    case 0x3cf:                 /* graphics data register */
        rc = (s->gr_index < sizeof(s->gr));
        if ( rc )
            s->gr[s->gr_index] = val & gr_mask[s->gr_index];
        break;

    default:
        rc = 0;
        break;
    }

    /* When in standard vga mode, emulate here all writes to the vram buffer
     * so we can immediately satisfy reads without waiting for qemu. */
    s->stdvga = (s->sr[7] == 0x00);

    if ( !prev_stdvga && s->stdvga )
    {
        /*
         * (Re)start caching of video buffer.
         * XXX TODO: In case of a restart the cache could be unsynced.
         */
        s->cache = 1;
        gdprintk(XENLOG_INFO, "entering stdvga and caching modes\n");
    }
    else if ( prev_stdvga && !s->stdvga )
    {
        gdprintk(XENLOG_INFO, "leaving stdvga\n");
    }

    return rc;
}

static void stdvga_out(uint32_t port, uint32_t bytes, uint32_t val)
{
    switch ( bytes )
    {
    case 1:
        stdvga_outb(port, val);
        break;

    case 2:
        stdvga_outb(port + 0, val >> 0);
        stdvga_outb(port + 1, val >> 8);
        break;

    default:
        break;
    }
}
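
/*
 * Port I/O intercept: snoop writes into the cached state, then report the
 * access as unhandled so it still reaches the external ioemu.
 */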
static int stdvga_intercept_pio(
    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;

    if ( dir == IOREQ_WRITE )
    {
        spin_lock(&s->lock);
        stdvga_out(port, bytes, *val);
        spin_unlock(&s->lock);
    }

    return X86EMUL_UNHANDLEABLE; /* propagate to external ioemu */
}
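
/*
 * Translate an address in the 0xa0000-0xbffff window into an offset into
 * video memory, honouring the memory map select in GR6 bits 3:2:
 *   0: 0xa0000-0xbffff (128K)    1: 0xa0000-0xaffff (64K)
 *   2: 0xb0000-0xb7fff (32K)     3: 0xb8000-0xbffff (32K)
 * Returns ~0u for addresses outside the selected window.
 */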
static unsigned int stdvga_mem_offset(
    struct hvm_hw_stdvga *s, unsigned int mmio_addr)
{
    unsigned int memory_map_mode = (s->gr[6] >> 2) & 3;
    unsigned int offset = mmio_addr & 0x1ffff;

    switch ( memory_map_mode )
    {
    case 0:
        break;
    case 1:
        if ( offset >= 0x10000 )
            goto fail;
        offset += 0; /* assume bank_offset == 0; */
        break;
    case 2:
        offset -= 0x10000;
        if ( offset >= 0x8000 )
            goto fail;
        break;
    default:
    case 3:
        offset -= 0x18000;
        if ( offset >= 0x8000 )
            goto fail;
        break;
    }

    return offset;

 fail:
    return ~0u;
}

#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)
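
/*
 * Emulate a one-byte read from video memory: chain-4 and odd/even
 * addressing read straight from the shadow; planar accesses refill the
 * latches and apply read mode 0 (plane select) or mode 1 (colour compare).
 */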
static uint8_t stdvga_mem_readb(uint64_t addr)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int plane;
    uint32_t ret, *vram_l;
    uint8_t *vram_b;

    addr = stdvga_mem_offset(s, addr);
    if ( addr == ~0u )
        return 0xff;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        vram_b = vram_getb(s, addr);
        ret = *vram_b;
        vram_put(s, vram_b);
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        vram_b = vram_getb(s, ((addr & ~1) << 1) | plane);
        ret = *vram_b;
        vram_put(s, vram_b);
    }
    else
    {
        /* standard VGA latched access */
        vram_l = vram_getl(s, addr);
        s->latch = *vram_l;
        vram_put(s, vram_l);

        if ( !(s->gr[5] & 0x08) )
        {
            /* read mode 0 */
            plane = s->gr[4];
            ret = GET_PLANE(s->latch, plane);
        }
        else
        {
            /* read mode 1 */
            ret = (s->latch ^ mask16[s->gr[2]]) & mask16[s->gr[7]];
            ret |= ret >> 16;
            ret |= ret >> 8;
            ret = (~ret) & 0xff;
        }
    }

    return ret;
}
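
/*
 * Assemble a 16/32/64-bit read from byte reads; the 8-byte case is the
 * 64-bit handling this changeset adds.
 */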
static uint64_t stdvga_mem_read(uint64_t addr, uint64_t size)
{
    uint64_t data = 0;

    switch ( size )
    {
    case 1:
        data = stdvga_mem_readb(addr);
        break;

    case 2:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        break;

    case 4:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        data |= stdvga_mem_readb(addr + 2) << 16;
        /* Cast before shifting: a plain int shifted by 24 can sign-extend
         * into the upper half of data. */
        data |= (uint64_t)stdvga_mem_readb(addr + 3) << 24;
        break;

    case 8:
        data =  (uint64_t)stdvga_mem_readb(addr);
        data |= (uint64_t)stdvga_mem_readb(addr + 1) << 8;
        data |= (uint64_t)stdvga_mem_readb(addr + 2) << 16;
        data |= (uint64_t)stdvga_mem_readb(addr + 3) << 24;
        data |= (uint64_t)stdvga_mem_readb(addr + 4) << 32;
        data |= (uint64_t)stdvga_mem_readb(addr + 5) << 40;
        data |= (uint64_t)stdvga_mem_readb(addr + 6) << 48;
        data |= (uint64_t)stdvga_mem_readb(addr + 7) << 56;
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
        break;
    }

    return data;
}
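
/*
 * Emulate a one-byte write to video memory.  Chain-4 and odd/even
 * addressing store directly (subject to the SR2 plane-write mask);
 * planar accesses go through write modes 0-3 with set/reset, rotate,
 * the ALU function in GR3 and the bit mask in GR8.
 */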
static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int plane, write_mode, b, func_select, mask;
    uint32_t write_mask, bit_mask, set_mask, *vram_l;
    uint8_t *vram_b;

    addr = stdvga_mem_offset(s, addr);
    if ( addr == ~0u )
        return;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        plane = addr & 3;
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            vram_b = vram_getb(s, addr);
            *vram_b = val;
            vram_put(s, vram_b);
        }
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            addr = ((addr & ~1) << 1) | plane;
            vram_b = vram_getb(s, addr);
            *vram_b = val;
            vram_put(s, vram_b);
        }
    }
    else
    {
        write_mode = s->gr[5] & 3;
        switch ( write_mode )
        {
        default:
        case 0:
            /* rotate */
            b = s->gr[3] & 7;
            val = ((val >> b) | (val << (8 - b))) & 0xff;
            val |= val << 8;
            val |= val << 16;

            /* apply set/reset mask */
            set_mask = mask16[s->gr[1]];
            val = (val & ~set_mask) | (mask16[s->gr[0]] & set_mask);
            bit_mask = s->gr[8];
            break;
        case 1:
            val = s->latch;
            goto do_write;
        case 2:
            val = mask16[val & 0x0f];
            bit_mask = s->gr[8];
            break;
        case 3:
            /* rotate */
            b = s->gr[3] & 7;
            val = (val >> b) | (val << (8 - b));

            bit_mask = s->gr[8] & val;
            val = mask16[s->gr[0]];
            break;
        }

        /* apply logical operation */
        func_select = s->gr[3] >> 3;
        switch ( func_select )
        {
        case 0:
        default:
            /* nothing to do */
            break;
        case 1:
            /* and */
            val &= s->latch;
            break;
        case 2:
            /* or */
            val |= s->latch;
            break;
        case 3:
            /* xor */
            val ^= s->latch;
            break;
        }

        /* apply bit mask */
        bit_mask |= bit_mask << 8;
        bit_mask |= bit_mask << 16;
        val = (val & bit_mask) | (s->latch & ~bit_mask);

    do_write:
        /* mask data according to sr[2] */
        mask = s->sr[2];
        write_mask = mask16[mask];
        vram_l = vram_getl(s, addr);
        *vram_l = (*vram_l & ~write_mask) | (val & write_mask);
        vram_put(s, vram_l);
    }
}

static void stdvga_mem_write(uint64_t addr, uint64_t data, uint64_t size)
{
    /* Intercept mmio write */
    switch ( size )
    {
    case 1:
        stdvga_mem_writeb(addr, (data >> 0) & 0xff);
        break;

    case 2:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        break;

    case 4:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
        break;

    case 8:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
        stdvga_mem_writeb(addr+4, (data >> 32) & 0xff);
        stdvga_mem_writeb(addr+5, (data >> 40) & 0xff);
        stdvga_mem_writeb(addr+6, (data >> 48) & 0xff);
        stdvga_mem_writeb(addr+7, (data >> 56) & 0xff);
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size: %"PRId64"\n", size);
        break;
    }
}

static uint32_t read_data;
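
/*
 * Handle an IOREQ_TYPE_COPY (rep-mov style) request against the cached
 * VRAM.  p->df is the direction flag; when p->data_is_ptr is set,
 * p->data is a guest physical pointer that the data is copied to/from.
 */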
static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
{
    int i;
    int sign = p->df ? -1 : 1;

    if ( p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            uint64_t addr = p->addr, data = p->data, tmp;
            for ( i = 0; i < p->count; i++ )
            {
                tmp = stdvga_mem_read(addr, p->size);
                hvm_copy_to_guest_phys(data, &tmp, p->size);
                data += sign * p->size;
                addr += sign * p->size;
            }
        }
        else
        {
            /* uint64_t, not uint32_t: an 8-byte op would overflow tmp. */
            uint64_t addr = p->addr, data = p->data, tmp;
            for ( i = 0; i < p->count; i++ )
            {
                hvm_copy_from_guest_phys(&tmp, data, p->size);
                stdvga_mem_write(addr, tmp, p->size);
                data += sign * p->size;
                addr += sign * p->size;
            }
        }
    }
    else
    {
        if ( p->dir == IOREQ_READ )
        {
            uint64_t addr = p->addr;
            for ( i = 0; i < p->count; i++ )
            {
                p->data = stdvga_mem_read(addr, p->size);
                addr += sign * p->size;
            }
        }
        else
        {
            uint64_t addr = p->addr;
            for ( i = 0; i < p->count; i++ )
            {
                stdvga_mem_write(addr, p->data, p->size);
                addr += sign * p->size;
            }
        }
    }

    read_data = p->data;
    return 1;
}
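
/*
 * MMIO intercept for the 0xa0000-0xbffff window.  While caching is
 * active, accesses are satisfied from (or applied to) the local VRAM
 * copy and then also forwarded through the buffered ioreq ring so that
 * qemu's VGA state stays in sync (mmio reads can have side effects).
 */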
static int stdvga_intercept_mmio(ioreq_t *p)
{
    struct domain *d = current->domain;
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    int buf = 0, rc;

    if ( p->size > 8 )
    {
        gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
        return X86EMUL_UNHANDLEABLE;
    }

    spin_lock(&s->lock);

    if ( s->stdvga && s->cache )
    {
        switch ( p->type )
        {
        case IOREQ_TYPE_COPY:
            buf = mmio_move(s, p);
            break;
        default:
            gdprintk(XENLOG_WARNING, "unsupported mmio request type:%d "
                     "addr:0x%04x data:0x%04x size:%d count:%d state:%d "
                     "isptr:%d dir:%d df:%d\n",
                     p->type, (int)p->addr, (int)p->data, (int)p->size,
                     (int)p->count, p->state,
                     p->data_is_ptr, p->dir, p->df);
            s->cache = 0;
        }
    }
    else
    {
        buf = (p->dir == IOREQ_WRITE);
    }

    rc = (buf && hvm_buffered_io_send(p));

    spin_unlock(&s->lock);

    return rc ? X86EMUL_OKAY : X86EMUL_UNHANDLEABLE;
}
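
/*
 * Allocate the VRAM shadow pages and register the PIO/MMIO intercepts.
 * If any page allocation fails, no handlers are registered and stdvga
 * emulation stays disabled; pages already allocated are released later
 * by stdvga_deinit().
 */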
void stdvga_init(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    struct page_info *pg;
    void *p;
    int i;

    memset(s, 0, sizeof(*s));
    spin_lock_init(&s->lock);

    for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    {
        pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
        if ( pg == NULL )
            break;
        s->vram_page[i] = pg;
        p = map_domain_page(page_to_mfn(pg));
        clear_page(p);
        unmap_domain_page(p);
    }

    if ( i == ARRAY_SIZE(s->vram_page) )
    {
        /* Sequencer registers. */
        register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
        /* Graphics registers. */
        register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);
        /* MMIO. */
        register_buffered_io_handler(
            d, 0xa0000, 0x20000, stdvga_intercept_mmio);
    }
}

void stdvga_deinit(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    int i;

    for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    {
        if ( s->vram_page[i] == NULL )
            continue;
        free_domheap_page(s->vram_page[i]);
        s->vram_page[i] = NULL;
    }
}