ia64/xen-unstable
changeset 3574:d9cdcc864e90
bitkeeper revision 1.1159.212.48 (41fa4077FWhTpWbD65XTzGwOfmTJOQ)
Merge ssh://srg//auto/groups/xeno/BK/xeno.bk
into equilibrium.research:/home/irchomes/mwilli2/src/xen-3.0-devel.bk
author    mwilli2@equilibrium.research
date      Fri Jan 28 13:39:03 2005 +0000 (2005-01-28)
parents   9bef38a51377 20c37da2d56b
children  eef1949801b8
files     .rootkeys tools/python/xen/xend/XendDomainInfo.py xen/arch/x86/domain.c xen/arch/x86/vmx.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/include/asm-x86/shadow.h xen/include/asm-x86/vmx_platform.h xen/include/asm-x86/vmx_vmcs.h xen/include/public/io/ioreq.h
line diff
--- a/.rootkeys	Thu Jan 27 23:55:18 2005 +0000
+++ b/.rootkeys	Fri Jan 28 13:39:03 2005 +0000
@@ -891,6 +891,7 @@ 3ddb79bccYVzXZJyVaxuv5T42Z1Fsw xen/arch/
 3ddb79bcOftONV9h4QCxXOfiT0h91w xen/arch/x86/traps.c
 41c0c411tD3C7TpfDMiFTf7BaNd_Dg xen/arch/x86/vmx.c
 41c0c411ODt8uEmV-yUxpQLpqimE5Q xen/arch/x86/vmx_io.c
+41f97ef5139vN42cOYHfX_Ac8WOOjA xen/arch/x86/vmx_platform.c
 41c0c4128URE0dxcO15JME_MuKBPfg xen/arch/x86/vmx_vmcs.c
 419cbedeQDg8IrO3izo3o5rQNlo0kQ xen/arch/x86/x86_32/asm-offsets.c
 3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/domain.c	Thu Jan 27 23:55:18 2005 +0000
+++ b/xen/arch/x86/domain.c	Fri Jan 28 13:39:03 2005 +0000
@@ -308,15 +308,21 @@ void arch_do_createdomain(struct exec_do
 }
 
 #ifdef CONFIG_VMX
-void arch_vmx_do_resume(struct exec_domain *d)
+void arch_vmx_do_resume(struct exec_domain *ed)
 {
-    vmx_do_resume(d);
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->thread.arch_vmx.vmcs);
+
+    load_vmcs(&ed->thread.arch_vmx, vmcs_phys_ptr);
+    vmx_do_resume(ed);
     reset_stack_and_jump(vmx_asm_do_resume);
 }
 
-void arch_vmx_do_launch(struct exec_domain *d)
+void arch_vmx_do_launch(struct exec_domain *ed)
 {
-    vmx_do_launch(d);
+    u64 vmcs_phys_ptr = (u64) virt_to_phys(ed->thread.arch_vmx.vmcs);
+
+    load_vmcs(&ed->thread.arch_vmx, vmcs_phys_ptr);
+    vmx_do_launch(ed);
     reset_stack_and_jump(vmx_asm_do_launch);
 }
 
@@ -332,14 +338,14 @@ static void monitor_mk_pagetable(struct
     ASSERT( mpfn_info );
 
     mpfn = (unsigned long) (mpfn_info - frame_table);
-    mpl2e = (l2_pgentry_t *) map_domain_mem(mpfn << PAGE_SHIFT);
+    mpl2e = (l2_pgentry_t *) map_domain_mem(mpfn << L1_PAGETABLE_SHIFT);
     memset(mpl2e, 0, PAGE_SIZE);
 
     memcpy(&mpl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
            &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
            HYPERVISOR_ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));
 
-    m->monitor_table = mk_pagetable(mpfn << PAGE_SHIFT);
+    m->monitor_table = mk_pagetable(mpfn << L1_PAGETABLE_SHIFT);
     m->shadow_mode = SHM_full_32;
 
     mpl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
@@ -349,13 +355,42 @@ static void monitor_mk_pagetable(struct
     unmap_domain_mem(mpl2e);
 }
 
-static int vmx_final_setup_guestos(struct exec_domain *d,
+/*
+ * Free the pages for monitor_table and guest_pl2e_cache
+ */
+static void monitor_rm_pagetable(struct exec_domain *ed)
+{
+    struct mm_struct *m = &ed->mm;
+    l2_pgentry_t *mpl2e;
+    unsigned long mpfn;
+
+    ASSERT( m->monitor_table );
+
+    mpl2e = (l2_pgentry_t *) map_domain_mem(pagetable_val(m->monitor_table));
+    /*
+     * First get the pfn for guest_pl2e_cache by looking at monitor_table
+     */
+    mpfn = l2_pgentry_val(mpl2e[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT])
+        >> PAGE_SHIFT;
+
+    free_domheap_page(&frame_table[mpfn]);
+    unmap_domain_mem(mpl2e);
+
+    /*
+     * Then free monitor_table.
+     */
+    mpfn = (pagetable_val(m->monitor_table)) >> PAGE_SHIFT;
+    free_domheap_page(&frame_table[mpfn]);
+
+    m->monitor_table = mk_pagetable(0);
+}
+
+static int vmx_final_setup_guestos(struct exec_domain *ed,
                                    full_execution_context_t *full_context)
 {
     int error;
     execution_context_t *context;
     struct vmcs_struct *vmcs;
-    unsigned long guest_pa;
 
     context = &full_context->cpu_ctxt;
 
@@ -367,33 +402,38 @@ static int vmx_final_setup_guestos(struc
         return -ENOMEM;
     }
 
-    memset(&d->thread.arch_vmx, 0, sizeof (struct arch_vmx_struct));
+    memset(&ed->thread.arch_vmx, 0, sizeof (struct arch_vmx_struct));
 
-    d->thread.arch_vmx.vmcs = vmcs;
-    error = construct_vmcs(&d->thread.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
+    ed->thread.arch_vmx.vmcs = vmcs;
+    error = construct_vmcs(&ed->thread.arch_vmx, context, full_context, VMCS_USE_HOST_ENV);
     if (error < 0) {
         printk("Failed to construct a new VMCS\n");
         goto out;
     }
 
-    monitor_mk_pagetable(d);
-    guest_pa = pagetable_val(d->mm.pagetable);
-    clear_bit(VMX_CPU_STATE_PG_ENABLED, &d->thread.arch_vmx.cpu_state);
+    monitor_mk_pagetable(ed);
+    ed->thread.schedule_tail = arch_vmx_do_launch;
+    clear_bit(VMX_CPU_STATE_PG_ENABLED, &ed->thread.arch_vmx.cpu_state);
 
-    d->thread.arch_vmx.vmx_platform.real_mode_data =
+#if defined (__i386)
+    ed->thread.arch_vmx.vmx_platform.real_mode_data =
         (unsigned long *) context->esi;
+#endif
 
-    memset(&d->domain->shared_info->evtchn_mask[0], 0xff,
-           sizeof(d->domain->shared_info->evtchn_mask));
-    clear_bit(IOPACKET_PORT, &d->domain->shared_info->evtchn_mask[0]);
-
-    d->thread.schedule_tail = arch_vmx_do_launch;
+    if (ed == ed->domain->exec_domain[0]) {
+        /*
+         * Required to do this once per domain
+         */
+        memset(&ed->domain->shared_info->evtchn_mask[0], 0xff,
+               sizeof(ed->domain->shared_info->evtchn_mask));
+        clear_bit(IOPACKET_PORT, &ed->domain->shared_info->evtchn_mask[0]);
+    }
 
     return 0;
 
 out:
     free_vmcs(vmcs);
-    d->thread.arch_vmx.vmcs = 0;
+    ed->thread.arch_vmx.vmcs = 0;
     return error;
 }
 #endif
@@ -707,6 +747,35 @@ static void relinquish_list(struct domai
     spin_unlock_recursive(&d->page_alloc_lock);
 }
 
+static void vmx_domain_relinquish_memory(struct exec_domain *ed)
+{
+    struct domain *d = ed->domain;
+
+    /*
+     * Free VMCS
+     */
+    ASSERT(ed->thread.arch_vmx.vmcs);
+    free_vmcs(ed->thread.arch_vmx.vmcs);
+    ed->thread.arch_vmx.vmcs = 0;
+
+    monitor_rm_pagetable(ed);
+
+    if (ed == d->exec_domain[0]) {
+        int i;
+        unsigned long pfn;
+
+        for (i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++) {
+            unsigned long l1e;
+
+            l1e = l1_pgentry_val(d->mm_perdomain_pt[i]);
+            if (l1e & _PAGE_PRESENT) {
+                pfn = l1e >> PAGE_SHIFT;
+                free_domheap_page(&frame_table[pfn]);
+            }
+        }
+    }
+
+}
 
 void domain_relinquish_memory(struct domain *d)
 {
@@ -725,6 +794,10 @@ void domain_relinquish_memory(struct dom
                        PAGE_SHIFT]);
     }
 
+    if (VMX_DOMAIN(d->exec_domain[0]))
+        for_each_exec_domain(d, ed)
+            vmx_domain_relinquish_memory(ed);
+
     /*
      * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
      * it automatically gets squashed when the guest's mappings go away.
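
Note on the load_vmcs() calls added above: VMREAD/VMWRITE operate on the processor's current VMCS, which is per-CPU state selected by VMPTRLD, so after a context switch each VCPU must re-activate its own VMCS in its schedule tail before anything touches VMCS fields. A minimal sketch of the pattern (illustrative only, not the changeset's code; the field names follow the Xen sources above):

    static void example_vmx_schedule_tail(struct exec_domain *ed)
    {
        /* Physical address of this VCPU's VMCS, as VMPTRLD expects. */
        u64 vmcs_phys = (u64) virt_to_phys(ed->thread.arch_vmx.vmcs);

        /* Make this VCPU's VMCS the processor's current VMCS... */
        load_vmcs(&ed->thread.arch_vmx, vmcs_phys);
        /* ...only then are __vmread()/__vmwrite() guaranteed to hit it. */
    }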
--- a/xen/arch/x86/vmx.c	Thu Jan 27 23:55:18 2005 +0000
+++ b/xen/arch/x86/vmx.c	Fri Jan 28 13:39:03 2005 +0000
@@ -41,6 +41,11 @@ unsigned int opt_vmx_debug_level;
 extern long evtchn_send(int lport);
 extern long do_block(void);
 
+#define VECTOR_DB   1
+#define VECTOR_BP   3
+#define VECTOR_GP   13
+#define VECTOR_PG   14
+
 int start_vmx()
 {
     struct vmcs_struct *vmcs;
@@ -102,7 +107,7 @@ static int vmx_do_page_fault(unsigned lo
 {
     unsigned long eip, pfn;
     unsigned int index;
-    unsigned long gpde = 0;
+    unsigned long gpde = 0, gpte, gpa;
     int result;
     struct exec_domain *ed = current;
     struct mm_struct *m = &ed->mm;
@@ -132,6 +137,15 @@ static int vmx_do_page_fault(unsigned lo
         m->guest_pl2e_cache[index] =
             mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
     }
+
+    if (unlikely(__get_user(gpte, (unsigned long *)
+                            &linear_pg_table[va >> PAGE_SHIFT])))
+        return 0;
+
+    gpa = (gpte & PAGE_MASK) | (va & (PAGE_SIZE - 1));
+
+    if (mmio_space(gpa))
+        handle_mmio(va, gpte, gpa);
 
     if ((result = shadow_fault(va, error_code)))
         return result;
@@ -142,19 +156,26 @@ static int vmx_do_page_fault(unsigned lo
 static void vmx_do_general_protection_fault(struct xen_regs *regs)
 {
     unsigned long eip, error_code;
+    unsigned long intr_fields;
 
     __vmread(GUEST_EIP, &eip);
     __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
 
-    VMX_DBG_LOG(DBG_LEVEL_1, 
+    VMX_DBG_LOG(DBG_LEVEL_1,
                 "vmx_general_protection_fault: eip = %lx, erro_code = %lx\n",
                 eip, error_code);
 
-    VMX_DBG_LOG(DBG_LEVEL_1, 
+    VMX_DBG_LOG(DBG_LEVEL_1,
                 "eax=%x, ebx=%x, ecx=%x, edx=%x, esi=%x, edi=%x\n",
                 regs->eax, regs->ebx, regs->ecx, regs->edx, regs->esi, regs->edi);
 
-    __vmx_bug(regs);
+    /* Reflect it back into the guest */
+    intr_fields = (INTR_INFO_VALID_MASK |
+                   INTR_TYPE_EXCEPTION |
+                   INTR_INFO_DELIEVER_CODE_MASK |
+                   VECTOR_GP);
+    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
+    __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
 }
 
 static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs)
@@ -271,7 +292,7 @@ static inline void guest_pl2e_cache_inva
     memset(m->guest_pl2e_cache, 0, PAGE_SIZE);
 }
 
-static inline unsigned long gva_to_gpa(unsigned long gva)
+inline unsigned long gva_to_gpa(unsigned long gva)
 {
     unsigned long gpde, gpte, pfn, index;
     struct exec_domain *d = current;
@@ -340,6 +361,10 @@ static void vmx_io_instruction(struct xe
     p->size = (exit_qualification & 7) + 1;
 
     if (test_bit(4, &exit_qualification)) {
+        unsigned long eflags;
+
+        __vmread(GUEST_EFLAGS, &eflags);
+        p->df = (eflags & X86_EFLAGS_DF) ? 1 : 0;
         p->pdata_valid = 1;
         p->u.pdata = (void *) ((p->dir == IOREQ_WRITE) ?
                                regs->esi
@@ -725,11 +750,6 @@ asmlinkage void vmx_vmexit_handler(struc
     switch (exit_reason) {
     case EXIT_REASON_EXCEPTION_NMI:
     {
-#define VECTOR_DB   1
-#define VECTOR_BP   3
-#define VECTOR_GP   13
-#define VECTOR_PG   14
-
         /*
          * We don't set the software-interrupt exiting (INT n).
          * (1) We can get an exception (e.g. #PG) in the guest, or
@@ -773,6 +793,7 @@ asmlinkage void vmx_vmexit_handler(struc
             __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
             VMX_DBG_LOG(DBG_LEVEL_VMMU,
                         "eax=%x, ebx=%x, ecx=%x, edx=%x, esi=%x, edi=%x\n", regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi, regs.edi);
+            d->thread.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
 
             if (!(error = vmx_do_page_fault(va, error_code))) {
                 /*
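
For reference, the intr_fields value assembled in vmx_do_general_protection_fault() follows the VM-entry interruption-information layout from the Intel manual: vector in bits 7:0, event type in bits 10:8 (3 = hardware exception), "deliver error code" in bit 11, and "valid" in bit 31. A hedged sketch of the same composition with literal values (the INTR_* masks used above are assumed to match these positions):

    /* Sketch: build an interruption-information word that injects a
     * hardware exception with an error code into the guest on VM entry. */
    static unsigned int intr_info_for(unsigned int vector)
    {
        return (1u << 31)          /* valid */
             | (1u << 11)          /* deliver error code */
             | (3u << 8)           /* type: hardware exception */
             | (vector & 0xff);    /* e.g. 13 for #GP */
    }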
--- a/xen/arch/x86/vmx_io.c	Thu Jan 27 23:55:18 2005 +0000
+++ b/xen/arch/x86/vmx_io.c	Fri Jan 28 13:39:03 2005 +0000
@@ -29,8 +29,143 @@
 #include <asm/vmx_vmcs.h>
 #include <xen/event.h>
 #include <public/io/ioreq.h>
+#include <asm/vmx_platform.h>
 
 extern long do_block();
+
+#if defined (__i386__)
+static void load_xen_regs(struct xen_regs *regs)
+{
+    /*
+     * Write the guest register value into VMCS
+     */
+    __vmwrite(GUEST_SS_SELECTOR, regs->ss);
+    __vmwrite(GUEST_ESP, regs->esp);
+    __vmwrite(GUEST_EFLAGS, regs->eflags);
+    __vmwrite(GUEST_CS_SELECTOR, regs->cs);
+    __vmwrite(GUEST_EIP, regs->eip);
+}
+
+static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
+{
+    switch (size) {
+    case BYTE:
+        switch (index) {
+        case 0:
+            regs->eax &= 0xFFFFFF00;
+            regs->eax |= (value & 0xFF);
+            break;
+        case 1:
+            regs->ecx &= 0xFFFFFF00;
+            regs->ecx |= (value & 0xFF);
+            break;
+        case 2:
+            regs->edx &= 0xFFFFFF00;
+            regs->edx |= (value & 0xFF);
+            break;
+        case 3:
+            regs->ebx &= 0xFFFFFF00;
+            regs->ebx |= (value & 0xFF);
+            break;
+        case 4:
+            regs->eax &= 0xFFFF00FF;
+            regs->eax |= ((value & 0xFF) << 8);
+            break;
+        case 5:
+            regs->ecx &= 0xFFFF00FF;
+            regs->ecx |= ((value & 0xFF) << 8);
+            break;
+        case 6:
+            regs->edx &= 0xFFFF00FF;
+            regs->edx |= ((value & 0xFF) << 8);
+            break;
+        case 7:
+            regs->ebx &= 0xFFFF00FF;
+            regs->ebx |= ((value & 0xFF) << 8);
+            break;
+        default:
+            printk("size:%x, index:%x are invalid!\n", size, index);
+            break;
+
+        }
+        break;
+    case WORD:
+        switch (index) {
+        case 0:
+            regs->eax &= 0xFFFF0000;
+            regs->eax |= (value & 0xFFFF);
+            break;
+        case 1:
+            regs->ecx &= 0xFFFF0000;
+            regs->ecx |= (value & 0xFFFF);
+            break;
+        case 2:
+            regs->edx &= 0xFFFF0000;
+            regs->edx |= (value & 0xFFFF);
+            break;
+        case 3:
+            regs->ebx &= 0xFFFF0000;
+            regs->ebx |= (value & 0xFFFF);
+            break;
+        case 4:
+            regs->esp &= 0xFFFF0000;
+            regs->esp |= (value & 0xFFFF);
+            break;
+
+        case 5:
+            regs->ebp &= 0xFFFF0000;
+            regs->ebp |= (value & 0xFFFF);
+            break;
+        case 6:
+            regs->esi &= 0xFFFF0000;
+            regs->esi |= (value & 0xFFFF);
+            break;
+        case 7:
+            regs->edi &= 0xFFFF0000;
+            regs->edi |= (value & 0xFFFF);
+            break;
+        default:
+            printk("size:%x, index:%x are invalid!\n", size, index);
+            break;
+        }
+        break;
+    case LONG:
+        switch (index) {
+        case 0:
+            regs->eax = value;
+            break;
+        case 1:
+            regs->ecx = value;
+            break;
+        case 2:
+            regs->edx = value;
+            break;
+        case 3:
+            regs->ebx = value;
+            break;
+        case 4:
+            regs->esp = value;
+            break;
+        case 5:
+            regs->ebp = value;
+            break;
+        case 6:
+            regs->esi = value;
+            break;
+        case 7:
+            regs->edi = value;
+            break;
+        default:
+            printk("size:%x, index:%x are invalid!\n", size, index);
+            break;
+        }
+        break;
+    default:
+        printk("size:%x, index:%x are invalid!\n", size, index);
+        break;
+    }
+}
+#endif
 
 void vmx_io_assist(struct exec_domain *ed)
 {
@@ -39,8 +174,12 @@ void vmx_io_assist(struct exec_domain *e
     struct domain *d = ed->domain;
     execution_context_t *ec = get_execution_context();
     unsigned long old_eax;
-    unsigned long eflags;
-    int dir;
+    int sign;
+    struct mi_per_cpu_info *mpci_p;
+    struct xen_regs *inst_decoder_regs;
+
+    mpci_p = &ed->thread.arch_vmx.vmx_platform.mpci;
+    inst_decoder_regs = mpci_p->inst_decoder_regs;
 
     /* clear the pending event */
     ed->vcpu_info->evtchn_upcall_pending = 0;
@@ -68,24 +207,39 @@ void vmx_io_assist(struct exec_domain *e
         return;
     }
 
-    __vmread(GUEST_EFLAGS, &eflags);
-    dir = (eflags & X86_EFLAGS_DF);
+    sign = (p->df) ? -1 : 1;
+    if (p->port_mm) {
+        if (p->pdata_valid) {
+            ec->esi += sign * p->count * p->size;
+            ec->edi += sign * p->count * p->size;
+        } else {
+            if (p->dir == IOREQ_WRITE) {
+                return;
+            }
+            int size = -1, index = -1;
+
+            size = operand_size(ed->thread.arch_vmx.vmx_platform.mpci.mmio_target);
+            index = operand_index(ed->thread.arch_vmx.vmx_platform.mpci.mmio_target);
+
+            if (ed->thread.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
+                p->u.data = p->u.data & 0xffff;
+            }
+            set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data);
+
+        }
+        load_xen_regs((struct xen_regs *)ec);
+        return;
+    }
 
     if (p->dir == IOREQ_WRITE) {
         if (p->pdata_valid) {
-            if (!dir)
-                ec->esi += p->count * p->size;
-            else
-                ec->esi -= p->count * p->size;
+            ec->esi += sign * p->count * p->size;
             ec->ecx -= p->count;
         }
         return;
     } else {
         if (p->pdata_valid) {
-            if (!dir)
-                ec->edi += p->count * p->size;
-            else
-                ec->edi -= p->count * p->size;
+            ec->edi += sign * p->count * p->size;
             ec->ecx -= p->count;
             return;
         }
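
The sign variable above replaces the open-coded DF tests: after the device model completes a string operation, ESI/EDI must move by count * size bytes, forward when EFLAGS.DF is clear and backward when it is set. A standalone illustration of that arithmetic (a hypothetical helper, not part of the patch):

    #include <stdint.h>

    /* Advance REP MOVS-style pointers the way the hardware would have:
     * DF=0 increments, DF=1 decrements, by count elements of size bytes. */
    static void advance_string_ptrs(uint32_t *esi, uint32_t *edi,
                                    uint32_t count, uint32_t size, int df)
    {
        int32_t delta = (df ? -1 : 1) * (int32_t)(count * size);
        *esi += (uint32_t)delta;
        *edi += (uint32_t)delta;
    }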
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/vmx_platform.c	Fri Jan 28 13:39:03 2005 +0000
@@ -0,0 +1,554 @@
+/*
+ * vmx_platform.c: handling x86 platform related MMIO instructions
+ * Copyright (c) 2004, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/mm.h>
+#include <asm/shadow.h>
+#include <asm/domain_page.h>
+#include <asm/page.h>
+#include <xen/event.h>
+#include <xen/trace.h>
+#include <asm/vmx.h>
+#include <asm/vmx_platform.h>
+#include <public/io/ioreq.h>
+
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <asm/current.h>
+
+#define DECODE_success 1
+#define DECODE_failure 0
+
+#if defined (__x86_64__)
+static void store_xen_regs(struct xen_regs *regs)
+{
+
+}
+
+static long get_reg_value(int size, int index, int seg, struct xen_regs *regs)
+{
+    return 0;
+}
+#elif defined (__i386__)
+static void store_xen_regs(struct xen_regs *regs)
+{
+    __vmread(GUEST_SS_SELECTOR, &regs->ss);
+    __vmread(GUEST_ESP, &regs->esp);
+    __vmread(GUEST_EFLAGS, &regs->eflags);
+    __vmread(GUEST_CS_SELECTOR, &regs->cs);
+    __vmread(GUEST_EIP, &regs->eip);
+}
+
+static long get_reg_value(int size, int index, int seg, struct xen_regs *regs)
+{
+    /*
+     * Reference the db_reg[] table
+     */
+    switch (size) {
+    case BYTE:
+        switch (index) {
+        case 0: //%al
+            return (char)(regs->eax & 0xFF);
+        case 1: //%cl
+            return (char)(regs->ecx & 0xFF);
+        case 2: //%dl
+            return (char)(regs->edx & 0xFF);
+        case 3: //%bl
+            return (char)(regs->ebx & 0xFF);
+        case 4: //%ah
+            return (char)((regs->eax & 0xFF00) >> 8);
+        case 5: //%ch
+            return (char)((regs->ecx & 0xFF00) >> 8);
+        case 6: //%dh
+            return (char)((regs->edx & 0xFF00) >> 8);
+        case 7: //%bh
+            return (char)((regs->ebx & 0xFF00) >> 8);
+        default:
+            printk("(get_reg_value)size case 0 error\n");
+            return -1;
+        }
+    case WORD:
+        switch (index) {
+        case 0: //%ax
+            return (short)(regs->eax & 0xFFFF);
+        case 1: //%cx
+            return (short)(regs->ecx & 0xFFFF);
+        case 2: //%dx
+            return (short)(regs->edx & 0xFFFF);
+        case 3: //%bx
+            return (short)(regs->ebx & 0xFFFF);
+        case 4: //%sp
+            return (short)(regs->esp & 0xFFFF);
+            break;
+        case 5: //%bp
+            return (short)(regs->ebp & 0xFFFF);
+        case 6: //%si
+            return (short)(regs->esi & 0xFFFF);
+        case 7: //%di
+            return (short)(regs->edi & 0xFFFF);
+        default:
+            printk("(get_reg_value)size case 1 error\n");
+            return -1;
+        }
+    case LONG:
+        switch (index) {
+        case 0: //%eax
+            return regs->eax;
+        case 1: //%ecx
+            return regs->ecx;
+        case 2: //%edx
+            return regs->edx;
+
+        case 3: //%ebx
+            return regs->ebx;
+        case 4: //%esp
+            return regs->esp;
+        case 5: //%ebp
+            return regs->ebp;
+        case 6: //%esi
+            return regs->esi;
+        case 7: //%edi
+            return regs->edi;
+        default:
+            printk("(get_reg_value)size case 2 error\n");
+            return -1;
+        }
+    default:
+        printk("(get_reg_value)size case error\n");
+        return -1;
+    }
+}
+#endif
+
+static inline unsigned char *check_prefix(unsigned char *inst, struct instruction *thread_inst)
+{
+    while (1) {
+        switch (*inst) {
+        case 0xf3: //REPZ
+        case 0xf2: //REPNZ
+        case 0xf0: //LOCK
+        case 0x2e: //CS
+        case 0x36: //SS
+        case 0x3e: //DS
+        case 0x26: //ES
+        case 0x64: //FS
+        case 0x65: //GS
+            break;
+        case 0x66: //32bit->16bit
+            thread_inst->op_size = WORD;
+            break;
+        case 0x67:
+            break;
+        default:
+            return inst;
+        }
+        inst++;
+    }
+}
+
+static inline unsigned long get_immediate(const unsigned char *inst, int op_size)
+{
+    int mod, reg, rm;
+    unsigned long val = 0;
+    int i;
+
+    mod = (*inst >> 6) & 3;
+    reg = (*inst >> 3) & 7;
+    rm = *inst & 7;
+
+    inst++; //skip ModR/M byte
+    if (mod != 3 && rm == 4) {
+        inst++; //skip SIB byte
+    }
+
+    switch(mod) {
+    case 0:
+        if (rm == 5) {
+            inst = inst + 4; //disp32, skip 4 bytes
+        }
+        break;
+    case 1:
+        inst++; //disp8, skip 1 byte
+        break;
+    case 2:
+        inst = inst + 4; //disp32, skip 4 bytes
+    }
+    for (i = 0; i < op_size; i++) {
+        val |= (*inst++ & 0xff) << (8 * i);
+    }
+
+    return val;
+}
+
+static inline int get_index(const unsigned char *inst)
+{
+    int mod, reg, rm;
+
+    mod = (*inst >> 6) & 3;
+    reg = (*inst >> 3) & 7;
+    rm = *inst & 7;
+
+    //Only one operand in the instruction is register
+    if (mod == 3) {
+        return rm;
+    } else {
+        return reg;
+    }
+    return 0;
+}
+
+static int vmx_decode(const unsigned char *inst, struct instruction *thread_inst)
+{
+    int index;
+
+    switch(*inst) {
+    case 0x88:
+        /* mov r8 to m8 */
+        thread_inst->op_size = BYTE;
+        index = get_index((inst + 1));
+        thread_inst->operand[0] = mk_operand(BYTE, index, 0, REGISTER);
+        break;
+    case 0x89:
+        /* mov r32/16 to m32/16 */
+        index = get_index((inst + 1));
+        if (thread_inst->op_size == WORD) {
+            thread_inst->operand[0] = mk_operand(WORD, index, 0, REGISTER);
+        } else {
+            thread_inst->op_size = LONG;
+            thread_inst->operand[0] = mk_operand(LONG, index, 0, REGISTER);
+        }
+        break;
+    case 0x8a:
+        /* mov m8 to r8 */
+        thread_inst->op_size = BYTE;
+        index = get_index((inst + 1));
+        thread_inst->operand[1] = mk_operand(BYTE, index, 0, REGISTER);
+        break;
+    case 0x8b:
+        /* mov r32/16 to m32/16 */
+        index = get_index((inst + 1));
+        if (thread_inst->op_size == WORD) {
+            thread_inst->operand[1] = mk_operand(WORD, index, 0, REGISTER);
+        } else {
+            thread_inst->op_size = LONG;
+            thread_inst->operand[1] = mk_operand(LONG, index, 0, REGISTER);
+        }
+        break;
+    case 0x8c:
+    case 0x8e:
+        printk("%x, This opcode hasn't been handled yet!", *inst);
+        return DECODE_failure;
+        /* Not handle it yet. */
+
+    case 0xa0:
+        /* mov byte to al */
+        thread_inst->op_size = BYTE;
+        thread_inst->operand[1] = mk_operand(BYTE, 0, 0, REGISTER);
+        break;
+    case 0xa1:
+        /* mov word/doubleword to ax/eax */
+        if (thread_inst->op_size == WORD) {
+            thread_inst->operand[1] = mk_operand(WORD, 0, 0, REGISTER);
+        } else {
+            thread_inst->op_size = LONG;
+            thread_inst->operand[1] = mk_operand(LONG, 0, 0, REGISTER);
+        }
+        break;
+    case 0xa2:
+        /* mov al to (seg:offset) */
+        thread_inst->op_size = BYTE;
+        thread_inst->operand[0] = mk_operand(BYTE, 0, 0, REGISTER);
+        break;
+    case 0xa3:
+        /* mov ax/eax to (seg:offset) */
+        if (thread_inst->op_size == WORD) {
+            thread_inst->operand[0] = mk_operand(WORD, 0, 0, REGISTER);
+        } else {
+            thread_inst->op_size = LONG;
+            thread_inst->operand[0] = mk_operand(LONG, 0, 0, REGISTER);
+        }
+        break;
+    case 0xa4:
+        /* movsb */
+        thread_inst->op_size = BYTE;
+        strcpy(thread_inst->i_name, "movs");
+
+        return DECODE_success;
+    case 0xa5:
+        /* movsw/movsl */
+        if (thread_inst->op_size == WORD) {
+        } else {
+            thread_inst->op_size = LONG;
+        }
+
+        strcpy(thread_inst->i_name, "movs");
+
+        return DECODE_success;
+
+    case 0xc6:
+        /* mov imm8 to m8 */
+        thread_inst->op_size = BYTE;
+        thread_inst->operand[0] = mk_operand(BYTE, 0, 0, IMMEDIATE);
+        thread_inst->immediate = get_immediate((inst+1), thread_inst->op_size);
+        break;
+    case 0xc7:
+        /* mov imm16/32 to m16/32 */
+        if (thread_inst->op_size == WORD) {
+            thread_inst->operand[0] = mk_operand(WORD, 0, 0, IMMEDIATE);
+        } else {
+            thread_inst->op_size = LONG;
+            thread_inst->operand[0] = mk_operand(LONG, 0, 0, IMMEDIATE);
+        }
+        thread_inst->immediate = get_immediate((inst+1), thread_inst->op_size);
+        break;
+
+    case 0x0f:
+        break;
+    default:
+        printk("%x, This opcode hasn't been handled yet!", *inst);
+        return DECODE_failure;
+    }
+
+    strcpy(thread_inst->i_name, "mov");
+    if (*inst != 0x0f) {
+        return DECODE_success;
+    }
+
+    inst++;
+    switch (*inst) {
+
+    /* movz */
+    case 0xb7:
+        index = get_index((inst + 1));
+        thread_inst->operand[1] = mk_operand(LONG, index, 0, REGISTER);
+        strcpy(thread_inst->i_name, "movzw");
+
+        return DECODE_success;
+    default:
+        printk("0f %x, This opcode hasn't been handled yet!", *inst);
+        return DECODE_failure;
+    }
+
+    /* will never reach here */
+    return DECODE_failure;
+}
+
+static int inst_copy_from_guest(char *buf, unsigned long guest_eip, int inst_len)
+{
+    unsigned long gpte;
+    unsigned long mfn;
+    unsigned long ma;
+    unsigned char * inst_start;
+
+    if (inst_len > MAX_INST_LEN || inst_len <= 0) {
+        return 0;
+    }
+
+    if ((guest_eip & PAGE_MASK) == ((guest_eip + inst_len) & PAGE_MASK)) {
+        if ( unlikely(__get_user(gpte, (unsigned long *)
+                                 &linear_pg_table[guest_eip >> PAGE_SHIFT])) )
+        {
+            printk("inst_copy_from_guest- EXIT: read gpte faulted" );
+            return 0;
+        }
+        mfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
+        ma = (mfn << PAGE_SHIFT) | (guest_eip & (PAGE_SIZE - 1));
+        inst_start = (unsigned char *)map_domain_mem(ma);
+
+        strncpy(buf, inst_start, inst_len);
+        unmap_domain_mem(inst_start);
+    } else {
+        // Todo: In two page frames
+    }
+
+    return inst_len;
+}
+
+static void init_instruction(struct instruction *mmio_inst)
+{
+    memset(mmio_inst->i_name, '0', I_NAME_LEN);
+    mmio_inst->op_size = 0;
+    mmio_inst->offset = 0;
+    mmio_inst->immediate = 0;
+    mmio_inst->seg_sel = 0;
+    mmio_inst->op_num = 0;
+
+    mmio_inst->operand[0] = 0;
+    mmio_inst->operand[1] = 0;
+    mmio_inst->operand[2] = 0;
+
+    mmio_inst->flags = 0;
+}
+
+static int read_from_mmio(struct instruction *inst_p)
+{
+    // Only for mov instruction now!!!
+    if (inst_p->operand[1] & REGISTER)
+        return 1;
+
+    return 0;
+}
+
+// dir:  1 read from mmio
+//       0 write to mmio
+static void send_mmio_req(unsigned long gpa,
+                          struct instruction *inst_p, long value, int dir, int pvalid)
+{
+    struct exec_domain *d = current;
+    vcpu_iodata_t *vio;
+    ioreq_t *p;
+    struct mi_per_cpu_info *mpci_p;
+    struct xen_regs *inst_decoder_regs;
+    extern inline unsigned long gva_to_gpa(unsigned long gva);
+    extern long evtchn_send(int lport);
+    extern long do_block(void);
+
+    mpci_p = &current->thread.arch_vmx.vmx_platform.mpci;
+    inst_decoder_regs = mpci_p->inst_decoder_regs;
+    vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va;
+
+    if (vio == NULL) {
+        printk("bad shared page\n");
+        domain_crash();
+    }
+    p = &vio->vp_ioreq;
+
+    set_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags);
+    p->dir = dir;
+    p->pdata_valid = pvalid;
+    p->count = 1;
+
+    p->port_mm = 1;
+    p->size = inst_p->op_size;
+    p->addr = gpa;
+    p->u.data = value;
+
+    // p->state = STATE_UPSTREAM_SENDING;
+    p->state = STATE_IOREQ_READY;
+
+    // Try to use ins/outs' framework
+    if (pvalid) {
+        // Handle "movs"
+        p->u.pdata = (void *) ((p->dir == IOREQ_WRITE) ?
+                               inst_decoder_regs->esi
+                               : inst_decoder_regs->edi);
+        p->u.pdata = (void *) gva_to_gpa(p->u.data);
+        p->count = inst_decoder_regs->ecx;
+        inst_decoder_regs->ecx = 0;
+        p->df = (inst_decoder_regs->eflags & EF_DF) ? 1 : 0;
+    }
+
+    evtchn_send(IOPACKET_PORT);
+    do_block();
+
+}
+
+void handle_mmio(unsigned long va, unsigned long gpte, unsigned long gpa)
+{
+    unsigned long eip;
+    unsigned long inst_len;
+    struct mi_per_cpu_info *mpci_p;
+    struct xen_regs *inst_decoder_regs;
+    struct instruction mmio_inst;
+    unsigned char inst[MAX_INST_LEN];
+    int ret;
+
+    mpci_p = &current->thread.arch_vmx.vmx_platform.mpci;
+    inst_decoder_regs = mpci_p->inst_decoder_regs;
+
+    __vmread(GUEST_EIP, &eip);
+    __vmread(INSTRUCTION_LEN, &inst_len);
+
+    memset(inst, '0', MAX_INST_LEN);
+    ret = inst_copy_from_guest(inst, eip, inst_len);
+    if (ret != inst_len) {
+        printk("handle_mmio - EXIT: get guest instruction fault\n");
+        domain_crash();
+    }
+
+    init_instruction(&mmio_inst);
+
+    if (vmx_decode(check_prefix(inst, &mmio_inst), &mmio_inst) == DECODE_failure)
+        domain_crash();
+
+    __vmwrite(GUEST_EIP, eip + inst_len);
+    store_xen_regs(inst_decoder_regs);
+
+    // Only handle "mov" and "movs" instructions!
+    if (!strncmp(mmio_inst.i_name, "movzw", 5)) {
+        long value = 0;
+        int index;
+
+        if (read_from_mmio(&mmio_inst)) {
+            // Send the request and waiting for return value.
+            mpci_p->mmio_target = mmio_inst.operand[1] | WZEROEXTEND;
+            mmio_inst.op_size = WORD;
+            send_mmio_req(gpa, &mmio_inst, value, 1, 0);
+        } else {
+            // Write to MMIO
+            if (mmio_inst.operand[0] & IMMEDIATE) {
+                value = mmio_inst.immediate;
+            } else if (mmio_inst.operand[0] & REGISTER) {
+                index = operand_index(mmio_inst.operand[0]);
+                value = get_reg_value(WORD, index, 0, inst_decoder_regs);
+            } else {
+                domain_crash();
+            }
+            mmio_inst.op_size = WORD;
+            send_mmio_req(gpa, &mmio_inst, value, 0, 0);
+            return;
+        }
+    }
+
+    if (!strncmp(mmio_inst.i_name, "movs", 4)) {
+        int tmp_dir;
+
+        tmp_dir = ((va == inst_decoder_regs->edi) ? IOREQ_WRITE : IOREQ_READ);
+        send_mmio_req(gpa, &mmio_inst, 0, tmp_dir, 1);
+        return;
+    }
+
+    if (!strncmp(mmio_inst.i_name, "mov", 3)) {
+        long value = 0;
+        int size, index;
+
+        if (read_from_mmio(&mmio_inst)) {
+            // Send the request and waiting for return value.
+            mpci_p->mmio_target = mmio_inst.operand[1];
+            send_mmio_req(gpa, &mmio_inst, value, 1, 0);
+        } else {
+            // Write to MMIO
+            if (mmio_inst.operand[0] & IMMEDIATE) {
+                value = mmio_inst.immediate;
+            } else if (mmio_inst.operand[0] & REGISTER) {
+                size = operand_size(mmio_inst.operand[0]);
+                index = operand_index(mmio_inst.operand[0]);
+                value = get_reg_value(size, index, 0, inst_decoder_regs);
+            } else {
+                domain_crash();
+            }
+            send_mmio_req(gpa, &mmio_inst, value, 0, 0);
+            return;
+        }
+        domain_crash();
+    }
+    domain_crash();
+}
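
get_immediate() and get_index() above both start from the ModR/M byte: mod (bits 7:6) picks the addressing form, reg (bits 5:3) and rm (bits 2:0) name registers, and any SIB byte and displacement sit between ModR/M and the immediate. A self-contained sketch of the same walk, assuming the one-byte-opcode forms the decoder handles (no 16-bit address size, no 0x0f escapes):

    /* Locate and read a little-endian immediate of op_size bytes that
     * follows ModR/M [+ SIB] [+ disp8/disp32], mirroring get_immediate(). */
    static unsigned long immediate_after_modrm(const unsigned char *p, int op_size)
    {
        int mod = (p[0] >> 6) & 3;
        int rm  = p[0] & 7;
        unsigned long val = 0;
        int i;

        p++;                              /* past ModR/M */
        if (mod != 3 && rm == 4)
            p++;                          /* past SIB */
        if (mod == 1)
            p += 1;                       /* disp8 */
        else if (mod == 2 || (mod == 0 && rm == 5))
            p += 4;                       /* disp32 */
        for (i = 0; i < op_size; i++)     /* little-endian immediate */
            val |= (unsigned long)p[i] << (8 * i);
        return val;
    }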
--- a/xen/include/asm-x86/shadow.h	Thu Jan 27 23:55:18 2005 +0000
+++ b/xen/include/asm-x86/shadow.h	Fri Jan 28 13:39:03 2005 +0000
@@ -305,10 +305,15 @@ static inline void l1pte_propagate_from_
     case SHM_full_32:
     {
         unsigned long host_pfn, host_gpte;
+        spte = 0;
+
+        if (mmio_space(gpte & 0xFFFFF000)) {
+            *spte_p = spte;
+            return;
+        }
 
         host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
         host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
-        spte = 0;
 
         if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
              (_PAGE_PRESENT|_PAGE_ACCESSED) )
@@ -697,7 +702,7 @@ static inline void __shadow_mk_pagetable
     SH_VVLOG("__shadow_mk_pagetable(guest_gpfn=%08lx, gpfn=%08lx\n",
              guest_gpfn, gpfn);
 
-    spfn = __shadow_status(mm, gpfn) & PSH_pfn_mask;
+    spfn = __shadow_status(mm, guest_gpfn) & PSH_pfn_mask;
     if ( unlikely(spfn == 0) ) {
         spfn = shadow_l2_table(mm, gpfn);
         mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
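
The first hunk above leaves MMIO guest frames deliberately unmapped in the shadow page tables: a zero shadow PTE means every guest access to such an address keeps faulting into vmx_do_page_fault(), which recognizes the address via mmio_space() and forwards it to handle_mmio(). A condensed sketch of that decision (simplified; the real propagation also merges access and dirty bits):

    /* Decide what shadow PTE a guest PTE should get: MMIO frames stay
     * not-present so accesses trap; RAM frames are propagated. */
    static unsigned long shadow_pte_for(unsigned long gpte)
    {
        if (mmio_space(gpte & 0xFFFFF000))
            return 0;      /* not present: fault into handle_mmio() */
        return gpte;       /* RAM: propagate (details elided) */
    }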
--- a/xen/include/asm-x86/vmx_platform.h	Thu Jan 27 23:55:18 2005 +0000
+++ b/xen/include/asm-x86/vmx_platform.h	Fri Jan 28 13:39:03 2005 +0000
@@ -19,6 +19,78 @@
 #ifndef __ASM_X86_VMX_PLATFORM_H__
 #define __ASM_X86_VMX_PLATFORM_H__
 
-#include <asm/e820.h>   /* from Linux */
+#include <asm/e820.h>   /* from Linux */
+
+#define MAX_OPERAND_NUM 3
+#define I_NAME_LEN  16
+
+#define mk_operand(size, index, seg, flag) \
+    (((size) << 24) | ((index) << 16) | ((seg) << 8) | (flag))
+
+#define operand_size(operand)   \
+    ((operand >> 24) & 0xFF)
+
+#define operand_index(operand)  \
+    ((operand >> 16) & 0xFF)
+
+//For instruction.operand[].size
+#define BYTE    1
+#define WORD    2
+#define LONG    4
+#define QUAD    8
+
+//For instruction.operand[].flag
+#define REGISTER    0x1
+#define MEMORY      0x2
+#define IMMEDIATE   0x4
+#define WZEROEXTEND 0x8
+
+//For instruction.flags
+#define REPZ    0x1
+#define REPNZ   0x2
+
+struct instruction {
+    __s8 i_name[I_NAME_LEN];        //Instruction's name
+    __s16 op_size;                  //The operand's bit size, e.g. 16-bit or 32-bit.
+
+    __u64 offset;                   //The effective address
+                                    //offset = Base + (Index * Scale) + Displacement
+
+    __u64 immediate;
+
+    __u16 seg_sel;                  //Segmentation selector
+
+    __u32 operand[MAX_OPERAND_NUM]; //The order of operand is from AT&T Assembly
+    __s16 op_num;                   //The operand numbers
+
+    __u32 flags;                    //
+};
+
+#define VGA_SPACE_START 0xA0000
+#define VGA_SPACE_END   0xC0000
+#define MAX_INST_LEN    32
+
+struct mi_per_cpu_info
+{
+    unsigned long mmio_target;
+    struct xen_regs *inst_decoder_regs;
+};
+
+struct virutal_platform_def {
+    unsigned long *real_mode_data;  /* E820, etc. */
+    unsigned long shared_page_va;
+    struct mi_per_cpu_info mpci;    /* MMIO */
+};
+
+extern int mmio_space(unsigned long);
+extern void handle_mmio(unsigned long, unsigned long, unsigned long);
+extern int vmx_setup_platform(struct exec_domain *, execution_context_t *);
+
+extern inline int mmio_space(unsigned long gpa)
+{
+    if (gpa >= VGA_SPACE_START && gpa < VGA_SPACE_END) {
+        return 1;
+    }
+    return 0;
+}
 
 #endif
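
The operand descriptors used throughout vmx_platform.c are plain 32-bit words packed by mk_operand(): size in bits 31:24, register index in bits 23:16, segment in bits 15:8, and flag bits (REGISTER/MEMORY/IMMEDIATE/WZEROEXTEND) in bits 7:0. A usage example, with values following directly from the definitions above:

    /* Describe a LONG-sized register operand for %edx (index 2). */
    unsigned int op = mk_operand(LONG, 2, 0, REGISTER);  /* == 0x04020001 */

    /* The accessors recover the packed fields:
     *   operand_size(op)  == 4   (LONG)
     *   operand_index(op) == 2   (%edx)
     *   (op & REGISTER)   != 0
     */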
--- a/xen/include/asm-x86/vmx_vmcs.h	Thu Jan 27 23:55:18 2005 +0000
+++ b/xen/include/asm-x86/vmx_vmcs.h	Fri Jan 28 13:39:03 2005 +0000
@@ -39,15 +39,7 @@ union vmcs_arbytes {
     unsigned int bytes;
 };
 
-struct virutal_platform_def {
-    unsigned long *real_mode_data;  /* E820, etc. */
-    unsigned long shared_page_va;
-};
-
-int vmx_setup_platform(struct exec_domain *, execution_context_t *);
-
 #define VMX_CPU_STATE_PG_ENABLED 0
-
 #define VMCS_SIZE 0x1000
 
 struct vmcs_struct {
@@ -62,10 +54,6 @@ struct arch_vmx_struct {
     unsigned long cpu_cr3;
     unsigned long cpu_state;
     struct virutal_platform_def vmx_platform;
-#if 0
-    /* open */
-    unsigned long *page_list;  /* page list for MMIO */
-#endif
 };
 
 #define vmx_schedule_tail(next) \
--- a/xen/include/public/io/ioreq.h	Thu Jan 27 23:55:18 2005 +0000
+++ b/xen/include/public/io/ioreq.h	Fri Jan 28 13:39:03 2005 +0000
@@ -41,10 +41,11 @@ typedef struct {
         u64 data;               /* data */
         void *pdata;            /* pointer to data */
     } u;
-    u8 state:5;
+    u8 state:4;
     u8 pdata_valid:1;           /* if 1, use pdata above */
     u8 dir:1;                   /* 1=read, 0=write */
     u8 port_mm:1;               /* 0=portio, 1=mmio */
+    u8 df:1;
 } ioreq_t;
 
 #define MAX_VECTOR 256
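
Shrinking state from 5 bits to 4 is what lets the new df bit in without growing the structure: the five bitfields still total 4+1+1+1+1 = 8 bits, one byte. An illustrative compile-time check of that invariant (not part of the public header):

    #include <stdint.h>

    /* Mirror of the repacked flag fields from ioreq_t. */
    struct ioreq_flags_sketch {
        uint8_t state:4;
        uint8_t pdata_valid:1;
        uint8_t dir:1;
        uint8_t port_mm:1;
        uint8_t df:1;
    };

    /* Fails to compile if the five fields ever spill past one byte. */
    typedef char assert_flags_fit[(sizeof(struct ioreq_flags_sketch) == 1) ? 1 : -1];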