ia64/xen-unstable
changeset 7269:1b4ad6eb6968
Move mmio operation structure from domain to vcpu.
Also do some cleanup.
Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Nakajima Jun <nakajima.jun@intel.com>
author:   kaf24@firebug.cl.cam.ac.uk
date:     Fri Oct 07 23:17:24 2005 +0100 (2005-10-07)
parents:  62d815160f01
children: 18f765da2725
files:    xen/arch/x86/vmx.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/include/asm-x86/vmx_platform.h xen/include/asm-x86/vmx_vmcs.h
line diff
1.1 --- a/xen/arch/x86/vmx.c Fri Oct 07 16:49:29 2005 +0100 1.2 +++ b/xen/arch/x86/vmx.c Fri Oct 07 23:17:24 2005 +0100 1.3 @@ -659,14 +659,14 @@ void send_pio_req(struct cpu_user_regs * 1.4 static void vmx_io_instruction(struct cpu_user_regs *regs, 1.5 unsigned long exit_qualification, unsigned long inst_len) 1.6 { 1.7 - struct mi_per_cpu_info *mpcip; 1.8 + struct mmio_op *mmio_opp; 1.9 unsigned long eip, cs, eflags; 1.10 unsigned long port, size, dir; 1.11 int vm86; 1.12 1.13 - mpcip = &current->domain->arch.vmx_platform.mpci; 1.14 - mpcip->instr = INSTR_PIO; 1.15 - mpcip->flags = 0; 1.16 + mmio_opp = &current->arch.arch_vmx.mmio_op; 1.17 + mmio_opp->instr = INSTR_PIO; 1.18 + mmio_opp->flags = 0; 1.19 1.20 __vmread(GUEST_RIP, &eip); 1.21 __vmread(GUEST_CS_SELECTOR, &cs); 1.22 @@ -700,7 +700,7 @@ static void vmx_io_instruction(struct cp 1.23 addr = dir == IOREQ_WRITE ? regs->esi : regs->edi; 1.24 1.25 if (test_bit(5, &exit_qualification)) { /* "rep" prefix */ 1.26 - mpcip->flags |= REPZ; 1.27 + mmio_opp->flags |= REPZ; 1.28 count = vm86 ? regs->ecx & 0xFFFF : regs->ecx; 1.29 } 1.30 1.31 @@ -711,7 +711,7 @@ static void vmx_io_instruction(struct cp 1.32 if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) { 1.33 unsigned long value = 0; 1.34 1.35 - mpcip->flags |= OVERLAP; 1.36 + mmio_opp->flags |= OVERLAP; 1.37 if (dir == IOREQ_WRITE) 1.38 vmx_copy(&value, addr, size, VMX_COPY_IN); 1.39 send_pio_req(regs, port, 1, size, value, dir, 0); 1.40 @@ -1695,7 +1695,7 @@ asmlinkage void vmx_vmexit_handler(struc 1.41 (unsigned long)regs.eax, (unsigned long)regs.ebx, 1.42 (unsigned long)regs.ecx, (unsigned long)regs.edx, 1.43 (unsigned long)regs.esi, (unsigned long)regs.edi); 1.44 - v->domain->arch.vmx_platform.mpci.inst_decoder_regs = &regs; 1.45 + v->arch.arch_vmx.mmio_op.inst_decoder_regs = &regs; 1.46 1.47 if (!(error = vmx_do_page_fault(va, &regs))) { 1.48 /*
2.1 --- a/xen/arch/x86/vmx_io.c Fri Oct 07 16:49:29 2005 +0100 2.2 +++ b/xen/arch/x86/vmx_io.c Fri Oct 07 23:17:24 2005 +0100 2.3 @@ -1,5 +1,5 @@ 2.4 /* 2.5 - * vmx_io.c: handling I/O, interrupts related VMX entry/exit 2.6 + * vmx_io.c: handling I/O, interrupts related VMX entry/exit 2.7 * Copyright (c) 2004, Intel Corporation. 2.8 * 2.9 * This program is free software; you can redistribute it and/or modify it 2.10 @@ -42,7 +42,7 @@ 2.11 #ifdef CONFIG_VMX 2.12 #if defined (__i386__) 2.13 void load_cpu_user_regs(struct cpu_user_regs *regs) 2.14 -{ 2.15 +{ 2.16 /* 2.17 * Write the guest register value into VMCS 2.18 */ 2.19 @@ -52,7 +52,7 @@ void load_cpu_user_regs(struct cpu_user_ 2.20 __vmwrite(GUEST_RFLAGS, regs->eflags); 2.21 if (regs->eflags & EF_TF) 2.22 __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 2.23 - else 2.24 + else 2.25 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 2.26 2.27 __vmwrite(GUEST_CS_SELECTOR, regs->cs); 2.28 @@ -189,7 +189,7 @@ void load_cpu_user_regs(struct cpu_user_ 2.29 __vmwrite(GUEST_RFLAGS, regs->rflags); 2.30 if (regs->rflags & EF_TF) 2.31 __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 2.32 - else 2.33 + else 2.34 __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB); 2.35 2.36 __vmwrite(GUEST_CS_SELECTOR, regs->cs); 2.37 @@ -265,52 +265,52 @@ static void set_reg_value (int size, int 2.38 } 2.39 2.40 switch (index) { 2.41 - case 0: 2.42 + case 0: 2.43 __set_reg_value(®s->rax, size, value); 2.44 break; 2.45 - case 1: 2.46 + case 1: 2.47 __set_reg_value(®s->rcx, size, value); 2.48 break; 2.49 - case 2: 2.50 + case 2: 2.51 __set_reg_value(®s->rdx, size, value); 2.52 break; 2.53 - case 3: 2.54 + case 3: 2.55 __set_reg_value(®s->rbx, size, value); 2.56 break; 2.57 - case 4: 2.58 + case 4: 2.59 __set_reg_value(®s->rsp, size, value); 2.60 break; 2.61 - case 5: 2.62 + case 5: 2.63 __set_reg_value(®s->rbp, size, value); 2.64 break; 2.65 - case 6: 2.66 + case 6: 2.67 __set_reg_value(®s->rsi, size, value); 2.68 break; 2.69 - 
case 7: 2.70 + case 7: 2.71 __set_reg_value(®s->rdi, size, value); 2.72 break; 2.73 - case 8: 2.74 + case 8: 2.75 __set_reg_value(®s->r8, size, value); 2.76 break; 2.77 - case 9: 2.78 + case 9: 2.79 __set_reg_value(®s->r9, size, value); 2.80 break; 2.81 - case 10: 2.82 + case 10: 2.83 __set_reg_value(®s->r10, size, value); 2.84 break; 2.85 - case 11: 2.86 + case 11: 2.87 __set_reg_value(®s->r11, size, value); 2.88 break; 2.89 - case 12: 2.90 + case 12: 2.91 __set_reg_value(®s->r12, size, value); 2.92 break; 2.93 - case 13: 2.94 + case 13: 2.95 __set_reg_value(®s->r13, size, value); 2.96 break; 2.97 - case 14: 2.98 + case 14: 2.99 __set_reg_value(®s->r14, size, value); 2.100 break; 2.101 - case 15: 2.102 + case 15: 2.103 __set_reg_value(®s->r15, size, value); 2.104 break; 2.105 default: 2.106 @@ -391,7 +391,7 @@ static inline void set_eflags_PF(int siz 2.107 } 2.108 2.109 static void vmx_pio_assist(struct cpu_user_regs *regs, ioreq_t *p, 2.110 - struct mi_per_cpu_info *mpcip) 2.111 + struct mmio_op *mmio_opp) 2.112 { 2.113 unsigned long old_eax; 2.114 int sign = p->df ? 
-1 : 1; 2.115 @@ -399,15 +399,15 @@ static void vmx_pio_assist(struct cpu_us 2.116 if (p->dir == IOREQ_WRITE) { 2.117 if (p->pdata_valid) { 2.118 regs->esi += sign * p->count * p->size; 2.119 - if (mpcip->flags & REPZ) 2.120 + if (mmio_opp->flags & REPZ) 2.121 regs->ecx -= p->count; 2.122 } 2.123 } else { 2.124 - if (mpcip->flags & OVERLAP) { 2.125 + if (mmio_opp->flags & OVERLAP) { 2.126 unsigned long addr; 2.127 2.128 regs->edi += sign * p->count * p->size; 2.129 - if (mpcip->flags & REPZ) 2.130 + if (mmio_opp->flags & REPZ) 2.131 regs->ecx -= p->count; 2.132 2.133 addr = regs->edi; 2.134 @@ -416,7 +416,7 @@ static void vmx_pio_assist(struct cpu_us 2.135 vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT); 2.136 } else if (p->pdata_valid) { 2.137 regs->edi += sign * p->count * p->size; 2.138 - if (mpcip->flags & REPZ) 2.139 + if (mmio_opp->flags & REPZ) 2.140 regs->ecx -= p->count; 2.141 } else { 2.142 old_eax = regs->eax; 2.143 @@ -439,18 +439,18 @@ static void vmx_pio_assist(struct cpu_us 2.144 } 2.145 2.146 static void vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p, 2.147 - struct mi_per_cpu_info *mpcip) 2.148 + struct mmio_op *mmio_opp) 2.149 { 2.150 int sign = p->df ? 
-1 : 1; 2.151 int size = -1, index = -1; 2.152 unsigned long value = 0, diff = 0; 2.153 unsigned long src, dst; 2.154 2.155 - src = mpcip->operand[0]; 2.156 - dst = mpcip->operand[1]; 2.157 + src = mmio_opp->operand[0]; 2.158 + dst = mmio_opp->operand[1]; 2.159 size = operand_size(src); 2.160 2.161 - switch (mpcip->instr) { 2.162 + switch (mmio_opp->instr) { 2.163 case INSTR_MOV: 2.164 if (dst & REGISTER) { 2.165 index = operand_index(dst); 2.166 @@ -475,7 +475,7 @@ static void vmx_mmio_assist(struct cpu_u 2.167 regs->esi += sign * p->count * p->size; 2.168 regs->edi += sign * p->count * p->size; 2.169 2.170 - if ((mpcip->flags & OVERLAP) && p->dir == IOREQ_READ) { 2.171 + if ((mmio_opp->flags & OVERLAP) && p->dir == IOREQ_READ) { 2.172 unsigned long addr = regs->edi; 2.173 2.174 if (sign > 0) 2.175 @@ -483,14 +483,14 @@ static void vmx_mmio_assist(struct cpu_u 2.176 vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT); 2.177 } 2.178 2.179 - if (mpcip->flags & REPZ) 2.180 + if (mmio_opp->flags & REPZ) 2.181 regs->ecx -= p->count; 2.182 break; 2.183 2.184 case INSTR_STOS: 2.185 sign = p->df ? 
-1 : 1; 2.186 regs->edi += sign * p->count * p->size; 2.187 - if (mpcip->flags & REPZ) 2.188 + if (mmio_opp->flags & REPZ) 2.189 regs->ecx -= p->count; 2.190 break; 2.191 2.192 @@ -500,7 +500,7 @@ static void vmx_mmio_assist(struct cpu_u 2.193 value = get_reg_value(size, index, 0, regs); 2.194 diff = (unsigned long) p->u.data & value; 2.195 } else if (src & IMMEDIATE) { 2.196 - value = mpcip->immediate; 2.197 + value = mmio_opp->immediate; 2.198 diff = (unsigned long) p->u.data & value; 2.199 } else if (src & MEMORY) { 2.200 index = operand_index(dst); 2.201 @@ -527,7 +527,7 @@ static void vmx_mmio_assist(struct cpu_u 2.202 value = get_reg_value(size, index, 0, regs); 2.203 diff = (unsigned long) p->u.data | value; 2.204 } else if (src & IMMEDIATE) { 2.205 - value = mpcip->immediate; 2.206 + value = mmio_opp->immediate; 2.207 diff = (unsigned long) p->u.data | value; 2.208 } else if (src & MEMORY) { 2.209 index = operand_index(dst); 2.210 @@ -554,7 +554,7 @@ static void vmx_mmio_assist(struct cpu_u 2.211 value = get_reg_value(size, index, 0, regs); 2.212 diff = (unsigned long) p->u.data ^ value; 2.213 } else if (src & IMMEDIATE) { 2.214 - value = mpcip->immediate; 2.215 + value = mmio_opp->immediate; 2.216 diff = (unsigned long) p->u.data ^ value; 2.217 } else if (src & MEMORY) { 2.218 index = operand_index(dst); 2.219 @@ -581,7 +581,7 @@ static void vmx_mmio_assist(struct cpu_u 2.220 value = get_reg_value(size, index, 0, regs); 2.221 diff = (unsigned long) p->u.data - value; 2.222 } else if (src & IMMEDIATE) { 2.223 - value = mpcip->immediate; 2.224 + value = mmio_opp->immediate; 2.225 diff = (unsigned long) p->u.data - value; 2.226 } else if (src & MEMORY) { 2.227 index = operand_index(dst); 2.228 @@ -608,7 +608,7 @@ static void vmx_mmio_assist(struct cpu_u 2.229 index = operand_index(src); 2.230 value = get_reg_value(size, index, 0, regs); 2.231 } else if (src & IMMEDIATE) { 2.232 - value = mpcip->immediate; 2.233 + value = mmio_opp->immediate; 2.234 } else if 
(src & MEMORY) { 2.235 index = operand_index(dst); 2.236 value = get_reg_value(size, index, 0, regs); 2.237 @@ -629,21 +629,21 @@ static void vmx_mmio_assist(struct cpu_u 2.238 load_cpu_user_regs(regs); 2.239 } 2.240 2.241 -void vmx_io_assist(struct vcpu *v) 2.242 +void vmx_io_assist(struct vcpu *v) 2.243 { 2.244 vcpu_iodata_t *vio; 2.245 ioreq_t *p; 2.246 struct cpu_user_regs *regs = guest_cpu_user_regs(); 2.247 - struct mi_per_cpu_info *mpci_p; 2.248 + struct mmio_op *mmio_opp; 2.249 struct cpu_user_regs *inst_decoder_regs; 2.250 2.251 - mpci_p = &v->domain->arch.vmx_platform.mpci; 2.252 - inst_decoder_regs = mpci_p->inst_decoder_regs; 2.253 + mmio_opp = &v->arch.arch_vmx.mmio_op; 2.254 + inst_decoder_regs = mmio_opp->inst_decoder_regs; 2.255 2.256 vio = get_vio(v->domain, v->vcpu_id); 2.257 2.258 if (vio == 0) { 2.259 - VMX_DBG_LOG(DBG_LEVEL_1, 2.260 + VMX_DBG_LOG(DBG_LEVEL_1, 2.261 "bad shared page: %lx", (unsigned long) vio); 2.262 printf("bad shared page: %lx\n", (unsigned long) vio); 2.263 domain_crash_synchronous(); 2.264 @@ -660,15 +660,15 @@ void vmx_io_assist(struct vcpu *v) 2.265 clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags); 2.266 2.267 if (p->type == IOREQ_TYPE_PIO) 2.268 - vmx_pio_assist(regs, p, mpci_p); 2.269 + vmx_pio_assist(regs, p, mmio_opp); 2.270 else 2.271 - vmx_mmio_assist(regs, p, mpci_p); 2.272 + vmx_mmio_assist(regs, p, mmio_opp); 2.273 } 2.274 /* else an interrupt send event raced us */ 2.275 } 2.276 } 2.277 2.278 -int vmx_clear_pending_io_event(struct vcpu *v) 2.279 +int vmx_clear_pending_io_event(struct vcpu *v) 2.280 { 2.281 struct domain *d = v->domain; 2.282 int port = iopacket_port(d); 2.283 @@ -678,7 +678,7 @@ int vmx_clear_pending_io_event(struct vc 2.284 clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel); 2.285 2.286 /* Note: VMX domains may need upcalls as well */ 2.287 - if (!v->vcpu_info->evtchn_pending_sel) 2.288 + if (!v->vcpu_info->evtchn_pending_sel) 2.289 clear_bit(0, &v->vcpu_info->evtchn_upcall_pending); 
2.290 2.291 /* clear the pending bit for port */ 2.292 @@ -688,18 +688,18 @@ int vmx_clear_pending_io_event(struct vc 2.293 /* Because we've cleared the pending events first, we need to guarantee that 2.294 * all events to be handled by xen for VMX domains are taken care of here. 2.295 * 2.296 - * interrupts are guaranteed to be checked before resuming guest. 2.297 - * VMX upcalls have been already arranged for if necessary. 2.298 + * interrupts are guaranteed to be checked before resuming guest. 2.299 + * VMX upcalls have been already arranged for if necessary. 2.300 */ 2.301 -void vmx_check_events(struct vcpu *d) 2.302 +void vmx_check_events(struct vcpu *v) 2.303 { 2.304 - /* clear the event *before* checking for work. This should avoid 2.305 + /* clear the event *before* checking for work. This should avoid 2.306 the set-and-check races */ 2.307 if (vmx_clear_pending_io_event(current)) 2.308 - vmx_io_assist(d); 2.309 + vmx_io_assist(v); 2.310 } 2.311 2.312 -/* On exit from vmx_wait_io, we're guaranteed to have a I/O response from 2.313 +/* On exit from vmx_wait_io, we're guaranteed to have a I/O response from 2.314 the device model */ 2.315 void vmx_wait_io() 2.316 { 2.317 @@ -782,7 +782,7 @@ static __inline__ int find_highest_irq(u 2.318 return __fls(pintr[0]); 2.319 } 2.320 2.321 -#define BSP_CPU(d) (!(d->vcpu_id)) 2.322 +#define BSP_CPU(v) (!(v->vcpu_id)) 2.323 static inline void clear_extint(struct vcpu *v) 2.324 { 2.325 global_iodata_t *spg; 2.326 @@ -883,7 +883,7 @@ static inline int irq_masked(unsigned lo 2.327 return ((eflags & X86_EFLAGS_IF) == 0); 2.328 } 2.329 2.330 -asmlinkage void vmx_intr_assist(void) 2.331 +asmlinkage void vmx_intr_assist(void) 2.332 { 2.333 int intr_type = 0; 2.334 int highest_vector; 2.335 @@ -945,19 +945,19 @@ asmlinkage void vmx_intr_assist(void) 2.336 return; 2.337 } 2.338 2.339 -void vmx_do_resume(struct vcpu *d) 2.340 +void vmx_do_resume(struct vcpu *v) 2.341 { 2.342 vmx_stts(); 2.343 2.344 - if (event_pending(d)) { 2.345 - 
vmx_check_events(d); 2.346 + if (event_pending(v)) { 2.347 + vmx_check_events(v); 2.348 2.349 - if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) 2.350 + if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) 2.351 vmx_wait_io(); 2.352 } 2.353 2.354 /* We can't resume the guest if we're waiting on I/O */ 2.355 - ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)); 2.356 + ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)); 2.357 } 2.358 2.359 #endif /* CONFIG_VMX */
3.1 --- a/xen/arch/x86/vmx_platform.c Fri Oct 07 16:49:29 2005 +0100 3.2 +++ b/xen/arch/x86/vmx_platform.c Fri Oct 07 23:17:24 2005 +0100 3.3 @@ -22,8 +22,8 @@ 3.4 #include <xen/mm.h> 3.5 #include <asm/shadow.h> 3.6 #include <xen/domain_page.h> 3.7 -#include <asm/page.h> 3.8 -#include <xen/event.h> 3.9 +#include <asm/page.h> 3.10 +#include <xen/event.h> 3.11 #include <xen/trace.h> 3.12 #include <asm/vmx.h> 3.13 #include <asm/vmx_platform.h> 3.14 @@ -69,16 +69,16 @@ static inline long __get_reg_value(unsig 3.15 } 3.16 } 3.17 3.18 -long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) 3.19 +long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) 3.20 { 3.21 if (size == BYTE) { 3.22 - switch (index) { 3.23 + switch (index) { 3.24 case 0: /* %al */ 3.25 return (char)(regs->rax & 0xFF); 3.26 case 1: /* %cl */ 3.27 return (char)(regs->rcx & 0xFF); 3.28 case 2: /* %dl */ 3.29 - return (char)(regs->rdx & 0xFF); 3.30 + return (char)(regs->rdx & 0xFF); 3.31 case 3: /* %bl */ 3.32 return (char)(regs->rbx & 0xFF); 3.33 case 4: /* %ah */ 3.34 @@ -90,7 +90,7 @@ long get_reg_value(int size, int index, 3.35 case 7: /* %bh */ 3.36 return (char)((regs->rbx & 0xFF00) >> 8); 3.37 default: 3.38 - printf("Error: (get_reg_value) Invalid index value\n"); 3.39 + printf("Error: (get_reg_value) Invalid index value\n"); 3.40 domain_crash_synchronous(); 3.41 } 3.42 /* NOTREACHED */ 3.43 @@ -114,7 +114,7 @@ long get_reg_value(int size, int index, 3.44 case 14: return __get_reg_value(regs->r14, size); 3.45 case 15: return __get_reg_value(regs->r15, size); 3.46 default: 3.47 - printf("Error: (get_reg_value) Invalid index value\n"); 3.48 + printf("Error: (get_reg_value) Invalid index value\n"); 3.49 domain_crash_synchronous(); 3.50 } 3.51 } 3.52 @@ -131,7 +131,7 @@ void store_cpu_user_regs(struct cpu_user 3.53 } 3.54 3.55 static inline long __get_reg_value(unsigned long reg, int size) 3.56 -{ 3.57 +{ 3.58 switch(size) { 3.59 case WORD: 3.60 return 
(short)(reg & 0xFFFF); 3.61 @@ -144,15 +144,15 @@ static inline long __get_reg_value(unsig 3.62 } 3.63 3.64 long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) 3.65 -{ 3.66 +{ 3.67 if (size == BYTE) { 3.68 - switch (index) { 3.69 + switch (index) { 3.70 case 0: /* %al */ 3.71 return (char)(regs->eax & 0xFF); 3.72 case 1: /* %cl */ 3.73 return (char)(regs->ecx & 0xFF); 3.74 case 2: /* %dl */ 3.75 - return (char)(regs->edx & 0xFF); 3.76 + return (char)(regs->edx & 0xFF); 3.77 case 3: /* %bl */ 3.78 return (char)(regs->ebx & 0xFF); 3.79 case 4: /* %ah */ 3.80 @@ -164,7 +164,7 @@ long get_reg_value(int size, int index, 3.81 case 7: /* %bh */ 3.82 return (char)((regs->ebx & 0xFF00) >> 8); 3.83 default: 3.84 - printf("Error: (get_reg_value) Invalid index value\n"); 3.85 + printf("Error: (get_reg_value) Invalid index value\n"); 3.86 domain_crash_synchronous(); 3.87 } 3.88 } 3.89 @@ -179,7 +179,7 @@ long get_reg_value(int size, int index, 3.90 case 6: return __get_reg_value(regs->esi, size); 3.91 case 7: return __get_reg_value(regs->edi, size); 3.92 default: 3.93 - printf("Error: (get_reg_value) Invalid index value\n"); 3.94 + printf("Error: (get_reg_value) Invalid index value\n"); 3.95 domain_crash_synchronous(); 3.96 } 3.97 } 3.98 @@ -283,9 +283,9 @@ static inline int get_index(const unsign 3.99 3.100 //Only one operand in the instruction is register 3.101 if (mod == 3) { 3.102 - return (rm + (rex_b << 3)); 3.103 + return (rm + (rex_b << 3)); 3.104 } else { 3.105 - return (reg + (rex_r << 3)); 3.106 + return (reg + (rex_r << 3)); 3.107 } 3.108 return 0; 3.109 } 3.110 @@ -299,7 +299,7 @@ static void init_instruction(struct inst 3.111 3.112 mmio_inst->operand[0] = 0; 3.113 mmio_inst->operand[1] = 0; 3.114 - 3.115 + 3.116 mmio_inst->flags = 0; 3.117 } 3.118 3.119 @@ -498,12 +498,12 @@ static int vmx_decode(unsigned char *opc 3.120 instr->instr = INSTR_MOVS; 3.121 instr->op_size = BYTE; 3.122 return DECODE_success; 3.123 - 3.124 + 3.125 case 0xA5: /* 
movsw/movsl */ 3.126 instr->instr = INSTR_MOVS; 3.127 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 3.128 return DECODE_success; 3.129 - 3.130 + 3.131 case 0xAA: /* stosb */ 3.132 instr->instr = INSTR_STOS; 3.133 instr->op_size = BYTE; 3.134 @@ -513,7 +513,7 @@ static int vmx_decode(unsigned char *opc 3.135 instr->instr = INSTR_STOS; 3.136 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 3.137 return DECODE_success; 3.138 - 3.139 + 3.140 case 0xC6: 3.141 if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */ 3.142 instr->instr = INSTR_MOV; 3.143 @@ -522,11 +522,11 @@ static int vmx_decode(unsigned char *opc 3.144 instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 3.145 instr->immediate = get_immediate(vm86, opcode+1, instr->op_size); 3.146 instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 3.147 - 3.148 + 3.149 return DECODE_success; 3.150 } else 3.151 return DECODE_failure; 3.152 - 3.153 + 3.154 case 0xC7: 3.155 if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */ 3.156 instr->instr = INSTR_MOV; 3.157 @@ -535,7 +535,7 @@ static int vmx_decode(unsigned char *opc 3.158 instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 3.159 instr->immediate = get_immediate(vm86, opcode+1, instr->op_size); 3.160 instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 3.161 - 3.162 + 3.163 return DECODE_success; 3.164 } else 3.165 return DECODE_failure; 3.166 @@ -598,34 +598,34 @@ int inst_copy_from_guest(unsigned char * 3.167 return inst_len; 3.168 } 3.169 3.170 -void send_mmio_req(unsigned char type, unsigned long gpa, 3.171 +void send_mmio_req(unsigned char type, unsigned long gpa, 3.172 unsigned long count, int size, long value, int dir, int pvalid) 3.173 { 3.174 - struct vcpu *d = current; 3.175 + struct vcpu *v = current; 3.176 vcpu_iodata_t *vio; 3.177 ioreq_t *p; 3.178 int vm86; 3.179 struct cpu_user_regs *regs; 3.180 extern long evtchn_send(int lport); 3.181 3.182 - regs = current->domain->arch.vmx_platform.mpci.inst_decoder_regs; 
3.183 + regs = current->arch.arch_vmx.mmio_op.inst_decoder_regs; 3.184 3.185 - vio = get_vio(d->domain, d->vcpu_id); 3.186 + vio = get_vio(v->domain, v->vcpu_id); 3.187 if (vio == NULL) { 3.188 printf("bad shared page\n"); 3.189 - domain_crash_synchronous(); 3.190 + domain_crash_synchronous(); 3.191 } 3.192 3.193 p = &vio->vp_ioreq; 3.194 3.195 vm86 = regs->eflags & X86_EFLAGS_VM; 3.196 3.197 - if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) { 3.198 + if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) { 3.199 printf("VMX I/O has not yet completed\n"); 3.200 domain_crash_synchronous(); 3.201 } 3.202 3.203 - set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags); 3.204 + set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags); 3.205 p->dir = dir; 3.206 p->pdata_valid = pvalid; 3.207 3.208 @@ -647,27 +647,27 @@ void send_mmio_req(unsigned char type, u 3.209 3.210 if (vmx_mmio_intercept(p)){ 3.211 p->state = STATE_IORESP_READY; 3.212 - vmx_io_assist(d); 3.213 + vmx_io_assist(v); 3.214 return; 3.215 } 3.216 3.217 - evtchn_send(iopacket_port(d->domain)); 3.218 + evtchn_send(iopacket_port(v->domain)); 3.219 vmx_wait_io(); 3.220 } 3.221 3.222 static void mmio_operands(int type, unsigned long gpa, struct instruction *inst, 3.223 - struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs) 3.224 + struct mmio_op *mmio_opp, struct cpu_user_regs *regs) 3.225 { 3.226 unsigned long value = 0; 3.227 int index, size; 3.228 - 3.229 + 3.230 size = operand_size(inst->operand[0]); 3.231 3.232 - mpcip->flags = inst->flags; 3.233 - mpcip->instr = inst->instr; 3.234 - mpcip->operand[0] = inst->operand[0]; /* source */ 3.235 - mpcip->operand[1] = inst->operand[1]; /* destination */ 3.236 - mpcip->immediate = inst->immediate; 3.237 + mmio_opp->flags = inst->flags; 3.238 + mmio_opp->instr = inst->instr; 3.239 + mmio_opp->operand[0] = inst->operand[0]; /* source */ 3.240 + mmio_opp->operand[1] = inst->operand[1]; /* destination */ 3.241 + mmio_opp->immediate = inst->immediate; 3.242 
3.243 if (inst->operand[0] & REGISTER) { /* dest is memory */ 3.244 index = operand_index(inst->operand[0]); 3.245 @@ -687,19 +687,19 @@ static void mmio_operands(int type, unsi 3.246 3.247 #define GET_REPEAT_COUNT() \ 3.248 (mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1) 3.249 - 3.250 + 3.251 void handle_mmio(unsigned long va, unsigned long gpa) 3.252 { 3.253 unsigned long eip, eflags, cs; 3.254 unsigned long inst_len, inst_addr; 3.255 - struct mi_per_cpu_info *mpcip; 3.256 + struct mmio_op *mmio_opp; 3.257 struct cpu_user_regs *regs; 3.258 struct instruction mmio_inst; 3.259 unsigned char inst[MAX_INST_LEN]; 3.260 int i, vm86, ret; 3.261 - 3.262 - mpcip = ¤t->domain->arch.vmx_platform.mpci; 3.263 - regs = mpcip->inst_decoder_regs; 3.264 + 3.265 + mmio_opp = ¤t->arch.arch_vmx.mmio_op; 3.266 + regs = mmio_opp->inst_decoder_regs; 3.267 3.268 __vmread(GUEST_RIP, &eip); 3.269 __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len); 3.270 @@ -720,7 +720,7 @@ void handle_mmio(unsigned long va, unsig 3.271 } 3.272 3.273 init_instruction(&mmio_inst); 3.274 - 3.275 + 3.276 if (vmx_decode(inst, &mmio_inst) == DECODE_failure) { 3.277 printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:", 3.278 va, gpa, inst_len); 3.279 @@ -735,7 +735,7 @@ void handle_mmio(unsigned long va, unsig 3.280 3.281 switch (mmio_inst.instr) { 3.282 case INSTR_MOV: 3.283 - mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 3.284 + mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs); 3.285 break; 3.286 3.287 case INSTR_MOVS: 3.288 @@ -769,8 +769,8 @@ void handle_mmio(unsigned long va, unsig 3.289 } 3.290 } 3.291 3.292 - mpcip->flags = mmio_inst.flags; 3.293 - mpcip->instr = mmio_inst.instr; 3.294 + mmio_opp->flags = mmio_inst.flags; 3.295 + mmio_opp->instr = mmio_inst.instr; 3.296 3.297 /* 3.298 * In case of a movs spanning multiple pages, we break the accesses 3.299 @@ -785,7 +785,7 @@ void handle_mmio(unsigned long va, unsig 3.300 if ((addr & PAGE_MASK) != ((addr + 
size - 1) & PAGE_MASK)) { 3.301 unsigned long value = 0; 3.302 3.303 - mpcip->flags |= OVERLAP; 3.304 + mmio_opp->flags |= OVERLAP; 3.305 3.306 regs->eip -= inst_len; /* do not advance %eip */ 3.307 3.308 @@ -808,7 +808,7 @@ void handle_mmio(unsigned long va, unsig 3.309 } 3.310 3.311 case INSTR_MOVZ: 3.312 - mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 3.313 + mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mmio_opp, regs); 3.314 break; 3.315 3.316 case INSTR_STOS: 3.317 @@ -816,31 +816,31 @@ void handle_mmio(unsigned long va, unsig 3.318 * Since the destination is always in (contiguous) mmio space we don't 3.319 * need to break it up into pages. 3.320 */ 3.321 - mpcip->flags = mmio_inst.flags; 3.322 - mpcip->instr = mmio_inst.instr; 3.323 + mmio_opp->flags = mmio_inst.flags; 3.324 + mmio_opp->instr = mmio_inst.instr; 3.325 send_mmio_req(IOREQ_TYPE_COPY, gpa, 3.326 GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0); 3.327 break; 3.328 3.329 case INSTR_OR: 3.330 - mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs); 3.331 + mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mmio_opp, regs); 3.332 break; 3.333 3.334 case INSTR_AND: 3.335 - mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs); 3.336 + mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mmio_opp, regs); 3.337 break; 3.338 3.339 case INSTR_XOR: 3.340 - mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs); 3.341 + mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mmio_opp, regs); 3.342 break; 3.343 3.344 case INSTR_CMP: /* Pass through */ 3.345 case INSTR_TEST: 3.346 - mpcip->flags = mmio_inst.flags; 3.347 - mpcip->instr = mmio_inst.instr; 3.348 - mpcip->operand[0] = mmio_inst.operand[0]; /* source */ 3.349 - mpcip->operand[1] = mmio_inst.operand[1]; /* destination */ 3.350 - mpcip->immediate = mmio_inst.immediate; 3.351 + mmio_opp->flags = mmio_inst.flags; 3.352 + mmio_opp->instr = mmio_inst.instr; 3.353 + mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */ 
3.354 + mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */ 3.355 + mmio_opp->immediate = mmio_inst.immediate; 3.356 3.357 /* send the request and wait for the value */ 3.358 send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, mmio_inst.op_size, 0, IOREQ_READ, 0);
4.1 --- a/xen/include/asm-x86/vmx_platform.h Fri Oct 07 16:49:29 2005 +0100 4.2 +++ b/xen/include/asm-x86/vmx_platform.h Fri Oct 07 23:17:24 2005 +0100 4.3 @@ -75,20 +75,11 @@ struct instruction { 4.4 4.5 #define MAX_INST_LEN 32 4.6 4.7 -struct mi_per_cpu_info { 4.8 - int flags; 4.9 - int instr; /* instruction */ 4.10 - unsigned long operand[2]; /* operands */ 4.11 - unsigned long immediate; /* immediate portion */ 4.12 - struct cpu_user_regs *inst_decoder_regs; /* current context */ 4.13 -}; 4.14 - 4.15 struct virtual_platform_def { 4.16 unsigned long *real_mode_data; /* E820, etc. */ 4.17 unsigned long shared_page_va; 4.18 struct vmx_virpit_t vmx_pit; 4.19 struct vmx_handler_t vmx_handler; 4.20 - struct mi_per_cpu_info mpci; /* MMIO */ 4.21 }; 4.22 4.23 extern void handle_mmio(unsigned long, unsigned long);
5.1 --- a/xen/include/asm-x86/vmx_vmcs.h Fri Oct 07 16:49:29 2005 +0100 5.2 +++ b/xen/include/asm-x86/vmx_vmcs.h Fri Oct 07 23:17:24 2005 +0100 5.3 @@ -71,6 +71,14 @@ struct msr_state{ 5.4 unsigned long shadow_gs; 5.5 }; 5.6 5.7 +struct mmio_op { 5.8 + int flags; 5.9 + int instr; /* instruction */ 5.10 + unsigned long operand[2]; /* operands */ 5.11 + unsigned long immediate; /* immediate portion */ 5.12 + struct cpu_user_regs *inst_decoder_regs; /* current context */ 5.13 +}; 5.14 + 5.15 #define PC_DEBUG_PORT 0x80 5.16 5.17 struct arch_vmx_struct { 5.18 @@ -83,7 +91,8 @@ struct arch_vmx_struct { 5.19 unsigned long cpu_state; 5.20 unsigned long cpu_based_exec_control; 5.21 struct msr_state msr_content; 5.22 - void *io_bitmap_a, *io_bitmap_b; 5.23 + struct mmio_op mmio_op; /* MMIO */ 5.24 + void *io_bitmap_a, *io_bitmap_b; 5.25 }; 5.26 5.27 #define vmx_schedule_tail(next) \