ia64/xen-unstable

view xen/arch/x86/vmx_io.c @ 5672:ffb406dfb170

Oh gcc4... how you complain so. This patch fixes compilation with gcc4
where an uninitialized variable is used in a function.

Signed-off-by: Jerone Young <jerone@gmail.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Jul 06 08:46:38 2005 +0000 (2005-07-06)
parents 82390e707bb9
children 6e11af443eb1 b53a65034532
line source
1 /*
2 * vmx_io.c: handling I/O, interrupts related VMX entry/exit
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
19 #include <xen/config.h>
20 #include <xen/init.h>
21 #include <xen/mm.h>
22 #include <xen/lib.h>
23 #include <xen/errno.h>
24 #include <xen/trace.h>
25 #include <xen/event.h>
27 #include <asm/current.h>
28 #include <asm/cpufeature.h>
29 #include <asm/processor.h>
30 #include <asm/msr.h>
31 #include <asm/vmx.h>
32 #include <asm/vmx_vmcs.h>
33 #include <asm/vmx_platform.h>
34 #include <asm/vmx_virpit.h>
35 #include <asm/apic.h>
37 #include <public/io/ioreq.h>
38 #include <public/io/vmx_vlapic.h>
40 #ifdef CONFIG_VMX
41 #if defined (__i386__)
/*
 * Copy the guest's stack pointer, flags, instruction pointer and the
 * cs/ss selectors from the saved register frame back into the VMCS,
 * so they take effect on the next VM entry.
 */
static void load_cpu_user_regs(struct cpu_user_regs *regs)
{
    /*
     * Write the guest register value into VMCS
     */
    __vmwrite(GUEST_SS_SELECTOR, regs->ss);
    __vmwrite(GUEST_RSP, regs->esp);
    __vmwrite(GUEST_RFLAGS, regs->eflags);
    __vmwrite(GUEST_CS_SELECTOR, regs->cs);
    __vmwrite(GUEST_RIP, regs->eip);
}
54 static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
55 {
56 switch (size) {
57 case BYTE:
58 switch (index) {
59 case 0:
60 regs->eax &= 0xFFFFFF00;
61 regs->eax |= (value & 0xFF);
62 break;
63 case 1:
64 regs->ecx &= 0xFFFFFF00;
65 regs->ecx |= (value & 0xFF);
66 break;
67 case 2:
68 regs->edx &= 0xFFFFFF00;
69 regs->edx |= (value & 0xFF);
70 break;
71 case 3:
72 regs->ebx &= 0xFFFFFF00;
73 regs->ebx |= (value & 0xFF);
74 break;
75 case 4:
76 regs->eax &= 0xFFFF00FF;
77 regs->eax |= ((value & 0xFF) << 8);
78 break;
79 case 5:
80 regs->ecx &= 0xFFFF00FF;
81 regs->ecx |= ((value & 0xFF) << 8);
82 break;
83 case 6:
84 regs->edx &= 0xFFFF00FF;
85 regs->edx |= ((value & 0xFF) << 8);
86 break;
87 case 7:
88 regs->ebx &= 0xFFFF00FF;
89 regs->ebx |= ((value & 0xFF) << 8);
90 break;
91 default:
92 printk("Error: size:%x, index:%x are invalid!\n", size, index);
93 domain_crash_synchronous();
94 break;
96 }
97 break;
98 case WORD:
99 switch (index) {
100 case 0:
101 regs->eax &= 0xFFFF0000;
102 regs->eax |= (value & 0xFFFF);
103 break;
104 case 1:
105 regs->ecx &= 0xFFFF0000;
106 regs->ecx |= (value & 0xFFFF);
107 break;
108 case 2:
109 regs->edx &= 0xFFFF0000;
110 regs->edx |= (value & 0xFFFF);
111 break;
112 case 3:
113 regs->ebx &= 0xFFFF0000;
114 regs->ebx |= (value & 0xFFFF);
115 break;
116 case 4:
117 regs->esp &= 0xFFFF0000;
118 regs->esp |= (value & 0xFFFF);
119 break;
121 case 5:
122 regs->ebp &= 0xFFFF0000;
123 regs->ebp |= (value & 0xFFFF);
124 break;
125 case 6:
126 regs->esi &= 0xFFFF0000;
127 regs->esi |= (value & 0xFFFF);
128 break;
129 case 7:
130 regs->edi &= 0xFFFF0000;
131 regs->edi |= (value & 0xFFFF);
132 break;
133 default:
134 printk("Error: size:%x, index:%x are invalid!\n", size, index);
135 domain_crash_synchronous();
136 break;
137 }
138 break;
139 case LONG:
140 switch (index) {
141 case 0:
142 regs->eax = value;
143 break;
144 case 1:
145 regs->ecx = value;
146 break;
147 case 2:
148 regs->edx = value;
149 break;
150 case 3:
151 regs->ebx = value;
152 break;
153 case 4:
154 regs->esp = value;
155 break;
156 case 5:
157 regs->ebp = value;
158 break;
159 case 6:
160 regs->esi = value;
161 break;
162 case 7:
163 regs->edi = value;
164 break;
165 default:
166 printk("Error: size:%x, index:%x are invalid!\n", size, index);
167 domain_crash_synchronous();
168 break;
169 }
170 break;
171 default:
172 printk("Error: size:%x, index:%x are invalid!\n", size, index);
173 domain_crash_synchronous();
174 break;
175 }
176 }
177 #else
/*
 * x86_64 variant: write the guest's stack pointer, flags, instruction
 * pointer and cs/ss selectors from the saved frame into the VMCS.
 */
static void load_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmwrite(GUEST_SS_SELECTOR, regs->ss);
    __vmwrite(GUEST_RSP, regs->rsp);
    __vmwrite(GUEST_RFLAGS, regs->rflags);
    __vmwrite(GUEST_CS_SELECTOR, regs->cs);
    __vmwrite(GUEST_RIP, regs->rip);
}
187 static inline void __set_reg_value(unsigned long *reg, int size, long value)
188 {
189 switch (size) {
190 case BYTE_64:
191 *reg &= ~0xFF;
192 *reg |= (value & 0xFF);
193 break;
194 case WORD:
195 *reg &= ~0xFFFF;
196 *reg |= (value & 0xFFFF);
197 break;
199 case LONG:
200 *reg &= ~0xFFFFFFFF;
201 *reg |= (value & 0xFFFFFFFF);
202 break;
203 case QUAD:
204 *reg = value;
205 break;
206 default:
207 printk("Error: <__set_reg_value> : Unknown size for register\n");
208 domain_crash_synchronous();
209 }
210 }
212 static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
213 {
214 if (size == BYTE) {
215 switch (index) {
216 case 0:
217 regs->rax &= ~0xFF;
218 regs->rax |= (value & 0xFF);
219 break;
220 case 1:
221 regs->rcx &= ~0xFF;
222 regs->rcx |= (value & 0xFF);
223 break;
224 case 2:
225 regs->rdx &= ~0xFF;
226 regs->rdx |= (value & 0xFF);
227 break;
228 case 3:
229 regs->rbx &= ~0xFF;
230 regs->rbx |= (value & 0xFF);
231 break;
232 case 4:
233 regs->rax &= 0xFFFFFFFFFFFF00FF;
234 regs->rax |= ((value & 0xFF) << 8);
235 break;
236 case 5:
237 regs->rcx &= 0xFFFFFFFFFFFF00FF;
238 regs->rcx |= ((value & 0xFF) << 8);
239 break;
240 case 6:
241 regs->rdx &= 0xFFFFFFFFFFFF00FF;
242 regs->rdx |= ((value & 0xFF) << 8);
243 break;
244 case 7:
245 regs->rbx &= 0xFFFFFFFFFFFF00FF;
246 regs->rbx |= ((value & 0xFF) << 8);
247 break;
248 default:
249 printk("Error: size:%x, index:%x are invalid!\n", size, index);
250 domain_crash_synchronous();
251 break;
252 }
254 }
256 switch (index) {
257 case 0:
258 __set_reg_value(&regs->rax, size, value);
259 break;
260 case 1:
261 __set_reg_value(&regs->rcx, size, value);
262 break;
263 case 2:
264 __set_reg_value(&regs->rdx, size, value);
265 break;
266 case 3:
267 __set_reg_value(&regs->rbx, size, value);
268 break;
269 case 4:
270 __set_reg_value(&regs->rsp, size, value);
271 break;
272 case 5:
273 __set_reg_value(&regs->rbp, size, value);
274 break;
275 case 6:
276 __set_reg_value(&regs->rsi, size, value);
277 break;
278 case 7:
279 __set_reg_value(&regs->rdi, size, value);
280 break;
281 case 8:
282 __set_reg_value(&regs->r8, size, value);
283 break;
284 case 9:
285 __set_reg_value(&regs->r9, size, value);
286 break;
287 case 10:
288 __set_reg_value(&regs->r10, size, value);
289 break;
290 case 11:
291 __set_reg_value(&regs->r11, size, value);
292 break;
293 case 12:
294 __set_reg_value(&regs->r12, size, value);
295 break;
296 case 13:
297 __set_reg_value(&regs->r13, size, value);
298 break;
299 case 14:
300 __set_reg_value(&regs->r14, size, value);
301 break;
302 case 15:
303 __set_reg_value(&regs->r15, size, value);
304 break;
305 default:
306 printk("Error: <set_reg_value> Invalid index\n");
307 domain_crash_synchronous();
308 }
309 return;
310 }
311 #endif
/*
 * Complete an outstanding I/O request once the device model has
 * produced a response: copy the result into the guest's register
 * state and clear the vcpu's I/O-wait flag.  Returns silently if no
 * response is ready yet.
 */
void vmx_io_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    unsigned long old_eax;
    int sign;
    struct mi_per_cpu_info *mpci_p;
    struct cpu_user_regs *inst_decoder_regs;

    /* NOTE(review): inst_decoder_regs is computed but never used below. */
    mpci_p = &v->domain->arch.vmx_platform.mpci;
    inst_decoder_regs = mpci_p->inst_decoder_regs;

    /* Shared page carrying the ioreq exchanged with the device model. */
    vio = get_vio(v->domain, v->vcpu_id);

    if (vio == 0) {
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "bad shared page: %lx", (unsigned long) vio);
        domain_crash_synchronous();
    }
    p = &vio->vp_ioreq;

    if (p->state == STATE_IORESP_HOOK){
        vmx_hooks_assist(v);
    }

    /* clear IO wait VMX flag */
    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
        if (p->state != STATE_IORESP_READY) {
            /* An interrupt send event raced us */
            return;
        } else {
            p->state = STATE_INVALID;
        }
        clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    } else {
        /* This vcpu was not waiting for I/O; nothing to complete. */
        return;
    }

    /* String ops move pointers down when the direction flag is set. */
    sign = (p->df) ? -1 : 1;
    if (p->port_mm) {
        /* Memory-mapped I/O. */
        if (p->pdata_valid) {
            /* String MMIO: advance both pointer registers past the data. */
            regs->esi += sign * p->count * p->size;
            regs->edi += sign * p->count * p->size;
        } else {
            if (p->dir == IOREQ_WRITE) {
                return;
            }
            int size = -1, index = -1;

            /* Destination register was decoded from the faulting insn. */
            size = operand_size(v->domain->arch.vmx_platform.mpci.mmio_target);
            index = operand_index(v->domain->arch.vmx_platform.mpci.mmio_target);

            if (v->domain->arch.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
                /* Zero-extend a 16-bit result before storing it. */
                p->u.data = p->u.data & 0xffff;
            }
            set_reg_value(size, index, 0, regs, p->u.data);

        }
        /* Push the updated register state back into the VMCS. */
        load_cpu_user_regs(regs);
        return;
    }

    /* Port I/O.  pdata_valid presumably marks a string (rep) op — the
       count register is consumed accordingly.  TODO confirm. */
    if (p->dir == IOREQ_WRITE) {
        if (p->pdata_valid) {
            regs->esi += sign * p->count * p->size;
            regs->ecx -= p->count;
        }
        return;
    } else {
        if (p->pdata_valid) {
            regs->edi += sign * p->count * p->size;
            regs->ecx -= p->count;
            return;
        }
    }

    /* Plain port read: merge the result into %eax at the I/O width. */
    old_eax = regs->eax;

    switch(p->size) {
    case 1:
        regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
        break;
    case 2:
        regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
        break;
    case 4:
        regs->eax = (p->u.data & 0xffffffff);
        break;
    default:
        printk("Error: %s unknwon port size\n", __FUNCTION__);
        domain_crash_synchronous();
    }
}
408 int vmx_clear_pending_io_event(struct vcpu *v)
409 {
410 struct domain *d = v->domain;
411 int port = iopacket_port(d);
413 /* evtchn_pending is shared by other event channels in 0-31 range */
414 if (!d->shared_info->evtchn_pending[port>>5])
415 clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
417 /* Note: VMX domains may need upcalls as well */
418 if (!v->vcpu_info->evtchn_pending_sel)
419 clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
421 /* clear the pending bit for port */
422 return test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]);
423 }
/* Because we've cleared the pending events first, we need to guarantee that
 * all events to be handled by xen for VMX domains are taken care of here.
 *
 * interrupts are guaranteed to be checked before resuming guest.
 * VMX upcalls have been already arranged for if necessary.
 */
void vmx_check_events(struct vcpu *d)
{
    /* clear the event *before* checking for work. This should avoid
       the set-and-check races */
    /* NOTE(review): the pending event is cleared on 'current' while the
       I/O is completed for 'd' — callers appear to pass the current
       vcpu; confirm if they ever diverge. */
    if (vmx_clear_pending_io_event(current))
        vmx_io_assist(d);
}
/* On exit from vmx_wait_io, we're guaranteed to have a I/O response from
   the device model */
void vmx_wait_io()
{
    extern void do_block();
    int port = iopacket_port(current->domain);

    do {
        /* Sleep until the device model signals our I/O event channel. */
        if(!test_bit(port, &current->domain->shared_info->evtchn_pending[0]))
            do_block();

        /* Consume the response if one is ready; a successful completion
           clears ARCH_VMX_IO_WAIT and lets us exit the loop. */
        vmx_check_events(current);
        if (!test_bit(ARCH_VMX_IO_WAIT, &current->arch.arch_vmx.flags))
            break;
        /* Events other than IOPACKET_PORT might have woken us up. In that
           case, safely go back to sleep. */
        clear_bit(port>>5, &current->vcpu_info->evtchn_pending_sel);
        clear_bit(0, &current->vcpu_info->evtchn_upcall_pending);
    } while(1);
}
459 #if defined(__i386__) || defined(__x86_64__)
/*
 * Find-last-set: return the index (0-31) of the most significant set
 * bit in 'word', or -1 if no bit is set.
 */
static inline int __fls(u32 word)
{
    int bit;

    /* BSR yields the highest set bit's index; its output is undefined
       for a zero input, hence the explicit zero check on return. */
    __asm__("bsrl %1,%0"
            :"=r" (bit)
            :"rm" (word));
    return word ? bit : -1;
}
469 #else
470 #define __fls(x) generic_fls(x)
471 static __inline__ int generic_fls(u32 x)
472 {
473 int r = 31;
475 if (!x)
476 return -1;
477 if (!(x & 0xffff0000u)) {
478 x <<= 16;
479 r -= 16;
480 }
481 if (!(x & 0xff000000u)) {
482 x <<= 8;
483 r -= 8;
484 }
485 if (!(x & 0xf0000000u)) {
486 x <<= 4;
487 r -= 4;
488 }
489 if (!(x & 0xc0000000u)) {
490 x <<= 2;
491 r -= 2;
492 }
493 if (!(x & 0x80000000u)) {
494 x <<= 1;
495 r -= 1;
496 }
497 return r;
498 }
499 #endif
501 /* Simple minded Local APIC priority implementation. Fix later */
502 static __inline__ int find_highest_irq(u32 *pintr)
503 {
504 if (pintr[7])
505 return __fls(pintr[7]) + (256-32*1);
506 if (pintr[6])
507 return __fls(pintr[6]) + (256-32*2);
508 if (pintr[5])
509 return __fls(pintr[5]) + (256-32*3);
510 if (pintr[4])
511 return __fls(pintr[4]) + (256-32*4);
512 if (pintr[3])
513 return __fls(pintr[3]) + (256-32*5);
514 if (pintr[2])
515 return __fls(pintr[2]) + (256-32*6);
516 if (pintr[1])
517 return __fls(pintr[1]) + (256-32*7);
518 return __fls(pintr[0]);
519 }
521 #define BSP_CPU(d) (!(d->vcpu_id))
522 static inline void clear_extint(struct vcpu *v)
523 {
524 global_iodata_t *spg;
525 int i;
526 spg = &get_sp(v->domain)->sp_global;
528 for(i = 0; i < INTR_LEN; i++)
529 spg->pic_intr[i] = 0;
530 }
532 static inline void clear_highest_bit(struct vcpu *v, int vector)
533 {
534 global_iodata_t *spg;
536 spg = &get_sp(v->domain)->sp_global;
538 clear_bit(vector, &spg->pic_intr[0]);
539 }
541 static inline int find_highest_pic_irq(struct vcpu *v)
542 {
543 u64 intr[INTR_LEN];
544 global_iodata_t *spg;
545 int i;
547 if(!BSP_CPU(v))
548 return -1;
550 spg = &get_sp(v->domain)->sp_global;
552 for(i = 0; i < INTR_LEN; i++){
553 intr[i] = spg->pic_intr[i] & ~spg->pic_mask[i];
554 }
556 return find_highest_irq((u32 *)&intr[0]);
557 }
559 /*
560 * Return 0-255 for pending irq.
561 * -1 when no pending.
562 */
563 static inline int find_highest_pending_irq(struct vcpu *v, int *type)
564 {
565 int result = -1;
566 if ((result = find_highest_pic_irq(v)) != -1){
567 *type = VLAPIC_DELIV_MODE_EXT;
568 return result;
569 }
570 return result;
571 }
/*
 * Book-keeping after 'vector' has been injected into the guest with
 * delivery mode 'type'.  For external (virtual PIC) interrupts this
 * reconciles the virtual PIT's pending-tick accounting.
 */
static inline void
interrupt_post_injection(struct vcpu * v, int vector, int type)
{
    struct vmx_virpit_t *vpit = &(v->domain->arch.vmx_platform.vmx_pit);
    switch(type)
    {
    case VLAPIC_DELIV_MODE_EXT:
        /* A pending PIT tick was consumed by this injection; any other
           vector is acknowledged in the shared PIC pending bitmap. */
        if (vpit->pending_intr_nr && vector == vpit->vector)
            vpit->pending_intr_nr--;
        else
            clear_highest_bit(v, vector);

        /* First ever injection of the timer vector: reset the pending
           count, dropping ticks accumulated before this point. */
        if (vector == vpit->vector && !vpit->first_injected){
            vpit->first_injected = 1;
            vpit->pending_intr_nr = 0;
        }
        /* Record when the timer interrupt was handed to the guest. */
        if (vector == vpit->vector)
            vpit->inject_point = NOW();
        break;

    default:
        printk("Not support interrupt type\n");
        break;
    }
}
599 static inline void
600 enable_irq_window(unsigned long cpu_exec_control)
601 {
602 if (!(cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING)) {
603 cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
604 __vmwrite(CPU_BASED_VM_EXEC_CONTROL, cpu_exec_control);
605 }
606 }
608 static inline void
609 disable_irq_window(unsigned long cpu_exec_control)
610 {
611 if ( cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING ) {
612 cpu_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
613 __vmwrite(CPU_BASED_VM_EXEC_CONTROL, cpu_exec_control);
614 }
615 }
617 static inline int irq_masked(unsigned long eflags)
618 {
619 return ((eflags & X86_EFLAGS_IF) == 0);
620 }
/*
 * Inject the highest-priority pending interrupt into the guest, or —
 * when injection is not currently possible — arrange a VM exit via
 * interrupt-window exiting so we can retry as soon as it is.
 */
void vmx_intr_assist(struct vcpu *v)
{
    int intr_type = 0;
    int highest_vector = find_highest_pending_irq(v, &intr_type);
    unsigned long intr_fields, eflags, interruptibility, cpu_exec_control;

    __vmread(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);

    /* Nothing pending: no need to keep the interrupt window open. */
    if (highest_vector == -1) {
        disable_irq_window(cpu_exec_control);
        return;
    }

    /* An event is already queued for injection on this entry; do not
       overwrite it. */
    __vmread(VM_ENTRY_INTR_INFO_FIELD, &intr_fields);

    if (intr_fields & INTR_INFO_VALID_MASK) {
        VMX_DBG_LOG(DBG_LEVEL_1, "vmx_intr_assist: intr_fields: %lx",
                    intr_fields);
        return;
    }

    /* Guest is in an interrupt-shadow (e.g. after sti/mov ss): defer
       injection until the window opens. */
    __vmread(GUEST_INTERRUPTIBILITY_INFO, &interruptibility);

    if (interruptibility) {
        enable_irq_window(cpu_exec_control);
        VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, interruptibility: %lx",
                    highest_vector, interruptibility);
        return;
    }

    __vmread(GUEST_RFLAGS, &eflags);

    switch (intr_type) {
    case VLAPIC_DELIV_MODE_EXT:
        /* Guest has interrupts disabled: wait for the window instead. */
        if (irq_masked(eflags)) {
            enable_irq_window(cpu_exec_control);
            VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, eflags: %lx",
                        highest_vector, eflags);
            return;
        }

        vmx_inject_extint(v, highest_vector, VMX_INVALID_ERROR_CODE);
        TRACE_3D(TRC_VMX_INT, v->domain->domain_id, highest_vector, 0);
        break;
    case VLAPIC_DELIV_MODE_FIXED:
    case VLAPIC_DELIV_MODE_LPRI:
    case VLAPIC_DELIV_MODE_SMI:
    case VLAPIC_DELIV_MODE_NMI:
    case VLAPIC_DELIV_MODE_INIT:
    case VLAPIC_DELIV_MODE_STARTUP:
    default:
        /* Only external-interrupt delivery is implemented so far. */
        printk("Unsupported interrupt type\n");
        BUG();
        break;
    }

    /* Reconcile virtual PIC/PIT state with the injection just made. */
    interrupt_post_injection(v, highest_vector, intr_type);
    return;
}
/*
 * Per-resume fix-ups before re-entering the guest: refresh guest/host
 * CR3 and the host stack in the VMCS, drain any pending I/O events,
 * and inject pending interrupts.
 */
void vmx_do_resume(struct vcpu *d)
{
    vmx_stts();

    /* Guest CR3: the shadow table once the guest enables paging,
       otherwise the domain's physical-map table. */
    if ( vmx_paging_enabled(d) )
        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
    else
        // paging is not enabled in the guest
        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));

    __vmwrite(HOST_CR3, pagetable_get_paddr(d->arch.monitor_table));
    __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());

    if (event_pending(d)) {
        vmx_check_events(d);

        /* Still waiting on the device model: block until it responds. */
        if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags))
            vmx_wait_io();
    }

    /* We can't resume the guest if we're waiting on I/O */
    ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags));

    /* We always check for interrupts before resuming guest */
    vmx_intr_assist(d);
}
708 #endif /* CONFIG_VMX */
710 /*
711 * Local variables:
712 * mode: C
713 * c-set-style: "BSD"
714 * c-basic-offset: 4
715 * tab-width: 4
716 * indent-tabs-mode: nil
717 * End:
718 */