ia64/xen-unstable
changeset 17343:e7abfeee2808
x86_emulate: Check I/O port accesses.
Implements both CPL/IOPL and TSS-bitmap checks.
Requires changes to read/write callback hooks to disable user-access
checks when walking pagetables on behalf of GDT/LDT/TSS accesses.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
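
The two checks named above can be summarised as follows. This is a condensed, self-contained sketch of the logic that the new ioport_access_check() in xen/arch/x86/x86_emulate.c (see the diff below) implements; the function name, the flattened parameters and the tss[] accessor here are illustrative only and are not part of the patch.

    /*
     * Condensed illustration of the I/O permission check added by this
     * patch (see ioport_access_check() below for the real code).
     * Returns 1 if the access to ports [first_port, first_port+bytes)
     * is allowed, 0 if #GP(0) should be raised.
     */
    static int io_access_permitted(
        unsigned int first_port, unsigned int bytes,
        unsigned int cpl, unsigned int iopl, int vm86_mode,
        unsigned int tr_present, unsigned int tr_type, unsigned int tr_limit,
        const unsigned char *tss /* flat copy of the TSS contents */)
    {
        unsigned int iobmp_off, bitmap;

        /* CPL/IOPL check: outside VM86 mode, CPL <= IOPL grants access
         * without consulting the I/O permission bitmap. */
        if ( !vm86_mode && (cpl <= iopl) )
            return 1;

        /* The TSS must be present, be a 32-bit TSS, and be large enough
         * to contain the I/O-bitmap-offset word at offset 0x66. */
        if ( !tr_present || ((tr_type & 0xd) != 0x9) || (tr_limit < 0x67) )
            return 0;

        /* The bitmap byte covering the first port must lie within the TSS
         * limit, and every bit for the accessed ports must be clear. */
        iobmp_off = (tss[0x66] | (tss[0x67] << 8)) + first_port / 8;
        if ( tr_limit <= iobmp_off )
            return 0;
        bitmap = tss[iobmp_off] | (tss[iobmp_off + 1] << 8);
        return !(bitmap & (((1u << bytes) - 1) << (first_port & 7)));
    }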
author      Keir Fraser <keir.fraser@citrix.com>
date        Thu Mar 27 17:25:22 2008 +0000 (2008-03-27)
parents     892a20f824a7
children    7a3702ff0e8c
files       xen/arch/x86/hvm/emulate.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/emulate.c xen/arch/x86/mm/shadow/common.c xen/arch/x86/x86_emulate.c xen/include/asm-x86/hvm/support.h
line diff
--- a/xen/arch/x86/hvm/emulate.c    Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/hvm/emulate.c    Thu Mar 27 17:25:22 2008 +0000
@@ -94,19 +94,18 @@ static int hvmemul_do_mmio(
  * Convert addr from linear to physical form, valid over the range
  * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
  * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
+ * @pfec indicates the access checks to be performed during page-table walks.
  */
 static int hvmemul_linear_to_phys(
     unsigned long addr,
     paddr_t *paddr,
     unsigned int bytes_per_rep,
     unsigned long *reps,
-    enum hvm_access_type access_type,
+    uint32_t pfec,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     struct vcpu *curr = current;
     unsigned long pfn, npfn, done, todo, i;
-    struct segment_register *sreg;
-    uint32_t pfec;

     /* Clip repetitions to a sensible maximum. */
     *reps = min_t(unsigned long, *reps, 4096);
@@ -120,14 +119,6 @@ static int hvmemul_linear_to_phys(

     *paddr = addr & ~PAGE_MASK;

-    /* Gather access-type information for the page walks. */
-    sreg = hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
-    pfec = PFEC_page_present;
-    if ( sreg->attr.fields.dpl == 3 )
-        pfec |= PFEC_user_mode;
-    if ( access_type == hvm_access_write )
-        pfec |= PFEC_write_access;
-
     /* Get the first PFN in the range. */
     if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN )
     {
@@ -216,6 +207,7 @@ static int __hvmemul_read(
 {
     struct vcpu *curr = current;
     unsigned long addr;
+    uint32_t pfec = PFEC_page_present;
     paddr_t gpa;
     int rc;

@@ -237,9 +229,13 @@ static int __hvmemul_read(
         return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
     }

+    if ( (seg != x86_seg_none) &&
+         (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
+        pfec |= PFEC_user_mode;
+
     rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_virt(val, addr, bytes) :
-          hvm_copy_from_guest_virt(val, addr, bytes));
+          hvm_fetch_from_guest_virt(val, addr, bytes, pfec) :
+          hvm_copy_from_guest_virt(val, addr, bytes, pfec));
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;

@@ -251,7 +247,7 @@ static int __hvmemul_read(
         return X86EMUL_UNHANDLEABLE;

     rc = hvmemul_linear_to_phys(
-        addr, &gpa, bytes, &reps, access_type, hvmemul_ctxt);
+        addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;

@@ -307,6 +303,7 @@ static int hvmemul_write(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
     unsigned long addr;
+    uint32_t pfec = PFEC_page_present | PFEC_write_access;
     paddr_t gpa;
     int rc;

@@ -325,7 +322,11 @@ static int hvmemul_write(
                                0, 0, NULL);
     }

-    rc = hvm_copy_to_guest_virt(addr, &val, bytes);
+    if ( (seg != x86_seg_none) &&
+         (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
+        pfec |= PFEC_user_mode;
+
+    rc = hvm_copy_to_guest_virt(addr, &val, bytes, pfec);
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;

@@ -334,7 +335,7 @@ static int hvmemul_write(
         unsigned long reps = 1;

         rc = hvmemul_linear_to_phys(
-            addr, &gpa, bytes, &reps, hvm_access_write, hvmemul_ctxt);
+            addr, &gpa, bytes, &reps, pfec, hvmemul_ctxt);
         if ( rc != X86EMUL_OKAY )
             return rc;

@@ -367,6 +368,7 @@ static int hvmemul_rep_ins(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long addr;
+    uint32_t pfec = PFEC_page_present | PFEC_write_access;
     paddr_t gpa;
     int rc;

@@ -376,8 +378,11 @@ static int hvmemul_rep_ins(
     if ( rc != X86EMUL_OKAY )
         return rc;

+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
     rc = hvmemul_linear_to_phys(
-        addr, &gpa, bytes_per_rep, reps, hvm_access_write, hvmemul_ctxt);
+        addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;

@@ -396,6 +401,7 @@ static int hvmemul_rep_outs(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long addr;
+    uint32_t pfec = PFEC_page_present;
     paddr_t gpa;
     int rc;

@@ -405,8 +411,11 @@ static int hvmemul_rep_outs(
     if ( rc != X86EMUL_OKAY )
         return rc;

+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
     rc = hvmemul_linear_to_phys(
-        addr, &gpa, bytes_per_rep, reps, hvm_access_read, hvmemul_ctxt);
+        addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;

@@ -427,6 +436,7 @@ static int hvmemul_rep_movs(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long saddr, daddr;
     paddr_t sgpa, dgpa;
+    uint32_t pfec = PFEC_page_present;
     p2m_type_t p2mt;
     int rc;

@@ -442,13 +452,17 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;

+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
     rc = hvmemul_linear_to_phys(
-        saddr, &sgpa, bytes_per_rep, reps, hvm_access_read, hvmemul_ctxt);
+        saddr, &sgpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;

     rc = hvmemul_linear_to_phys(
-        daddr, &dgpa, bytes_per_rep, reps, hvm_access_write, hvmemul_ctxt);
+        daddr, &dgpa, bytes_per_rep, reps,
+        pfec | PFEC_write_access, hvmemul_ctxt);
     if ( rc != X86EMUL_OKAY )
         return rc;

@@ -696,7 +710,7 @@ int hvm_emulate_one(
 {
     struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
     struct vcpu *curr = current;
-    uint32_t new_intr_shadow;
+    uint32_t new_intr_shadow, pfec = PFEC_page_present;
     unsigned long addr;
     int rc;

@@ -713,6 +727,9 @@ int hvm_emulate_one(
             hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
     }

+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        pfec |= PFEC_user_mode;
+
     hvmemul_ctxt->insn_buf_eip = regs->eip;
     hvmemul_ctxt->insn_buf_bytes =
         (hvm_virtual_to_linear_addr(
@@ -720,7 +737,8 @@ int hvm_emulate_one(
             regs->eip, sizeof(hvmemul_ctxt->insn_buf),
             hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
         !hvm_fetch_from_guest_virt_nofault(
-            hvmemul_ctxt->insn_buf, addr, sizeof(hvmemul_ctxt->insn_buf)))
+            hvmemul_ctxt->insn_buf, addr,
+            sizeof(hvmemul_ctxt->insn_buf), pfec))
         ? sizeof(hvmemul_ctxt->insn_buf) : 0;

     hvmemul_ctxt->exn_pending = 0;
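
The pattern repeated in the hunks above amounts to a single rule: derive PFEC_user_mode from SS.DPL, but only for ordinary segment-relative accesses. A hypothetical helper capturing that rule (the patch open-codes it at each call site; the helper name is illustrative and assumes the Xen emulator types used above):

    static uint32_t emul_pfec(
        const struct hvm_emulate_ctxt *hvmemul_ctxt,
        enum x86_segment seg, int is_write)
    {
        uint32_t pfec = PFEC_page_present;

        if ( is_write )
            pfec |= PFEC_write_access;

        /*
         * seg == x86_seg_none marks accesses the emulator performs on
         * behalf of GDT/LDT/TSS handling; the pagetable walks for those
         * must not carry the user-mode flag, even at guest CPL 3.
         */
        if ( (seg != x86_seg_none) &&
             (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
            pfec |= PFEC_user_mode;

        return pfec;
    }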
--- a/xen/arch/x86/hvm/hvm.c    Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Mar 27 17:25:22 2008 +0000
@@ -1302,7 +1302,7 @@ void hvm_task_switch(
         goto out;
     }

-    if ( !tr.attr.fields.g && (tr.limit < (sizeof(tss)-1)) )
+    if ( tr.limit < (sizeof(tss)-1) )
     {
         hvm_inject_exception(TRAP_invalid_tss, tss_sel & 0xfff8, 0);
         goto out;
@@ -1410,7 +1410,7 @@ void hvm_task_switch(
         if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
                                         4, hvm_access_write, 32,
                                         &linear_addr) )
-            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4);
+            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4, 0);
     }

  out:
@@ -1418,60 +1418,31 @@ void hvm_task_switch(
     hvm_unmap(nptss_desc);
 }

-/*
- * __hvm_copy():
- *  @buf = hypervisor buffer
- *  @addr = guest address to copy to/from
- *  @size = number of bytes to copy
- *  @dir = copy *to* guest (TRUE) or *from* guest (FALSE)?
- *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
- *  @fetch = copy is an instruction fetch?
- * Returns number of bytes failed to copy (0 == complete success).
- */
+#define HVMCOPY_from_guest (0u<<0)
+#define HVMCOPY_to_guest   (1u<<0)
+#define HVMCOPY_no_fault   (0u<<1)
+#define HVMCOPY_fault      (1u<<1)
+#define HVMCOPY_phys       (0u<<2)
+#define HVMCOPY_virt       (1u<<2)
 static enum hvm_copy_result __hvm_copy(
-    void *buf, paddr_t addr, int size, int dir, int virt, int fetch)
+    void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
 {
     struct vcpu *curr = current;
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
-    int count, todo;
-    uint32_t pfec = PFEC_page_present;
-
-    /*
-     * We cannot use hvm_get_segment_register() while executing in
-     * vmx_realmode() as segment register state is cached. Furthermore,
-     * VMREADs on every data access hurts emulation performance.
-     * Hence we do not gather extra PFEC flags if CR0.PG == 0.
-     */
-    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG) )
-        virt = 0;
+    int count, todo = size;

-    if ( virt )
-    {
-        struct segment_register sreg;
-        hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-        if ( sreg.attr.fields.dpl == 3 )
-            pfec |= PFEC_user_mode;
-
-        if ( dir )
-            pfec |= PFEC_write_access;
-
-        if ( fetch )
-            pfec |= PFEC_insn_fetch;
-    }
-
-    todo = size;
     while ( todo > 0 )
     {
         count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

-        if ( virt )
+        if ( flags & HVMCOPY_virt )
         {
             gfn = paging_gva_to_gfn(curr, addr, &pfec);
             if ( gfn == INVALID_GFN )
             {
-                if ( virt == 2 ) /* 2 means generate a fault */
+                if ( flags & HVMCOPY_fault )
                     hvm_inject_exception(TRAP_page_fault, pfec, addr);
                 return HVMCOPY_bad_gva_to_gfn;
             }
@@ -1489,16 +1460,18 @@ static enum hvm_copy_result __hvm_copy(

         p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

-        if ( dir )
+        if ( flags & HVMCOPY_to_guest )
         {
-            memcpy(p, buf, count); /* dir == TRUE: *to* guest */
+            memcpy(p, buf, count);
             paging_mark_dirty(curr->domain, mfn);
         }
         else
-            memcpy(buf, p, count); /* dir == FALSE: *from guest */
+        {
+            memcpy(buf, p, count);
+        }

         unmap_domain_page(p);
-
+
         addr += count;
         buf += count;
         todo -= count;
@@ -1510,56 +1483,73 @@ static enum hvm_copy_result __hvm_copy(
 enum hvm_copy_result hvm_copy_to_guest_phys(
     paddr_t paddr, void *buf, int size)
 {
-    return __hvm_copy(buf, paddr, size, 1, 0, 0);
+    return __hvm_copy(buf, paddr, size,
+                      HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
+                      0);
 }

 enum hvm_copy_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size)
 {
-    return __hvm_copy(buf, paddr, size, 0, 0, 0);
+    return __hvm_copy(buf, paddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
+                      0);
 }

 enum hvm_copy_result hvm_copy_to_guest_virt(
-    unsigned long vaddr, void *buf, int size)
+    unsigned long vaddr, void *buf, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size, 1, 2, 0);
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
+                      PFEC_page_present | PFEC_write_access | pfec);
 }

 enum hvm_copy_result hvm_copy_from_guest_virt(
-    void *buf, unsigned long vaddr, int size)
+    void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size, 0, 2, 0);
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
+                      PFEC_page_present | pfec);
 }

 enum hvm_copy_result hvm_fetch_from_guest_virt(
-    void *buf, unsigned long vaddr, int size)
+    void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size, 0, 2, hvm_nx_enabled(current));
+    if ( hvm_nx_enabled(current) )
+        pfec |= PFEC_insn_fetch;
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
+                      PFEC_page_present | pfec);
 }

 enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
-    unsigned long vaddr, void *buf, int size)
+    unsigned long vaddr, void *buf, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size, 1, 1, 0);
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
+                      PFEC_page_present | PFEC_write_access | pfec);
 }

 enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
-    void *buf, unsigned long vaddr, int size)
+    void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size, 0, 1, 0);
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
+                      PFEC_page_present | pfec);
 }

 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
-    void *buf, unsigned long vaddr, int size)
+    void *buf, unsigned long vaddr, int size, uint32_t pfec)
 {
-    return __hvm_copy(buf, vaddr, size, 0, 1, hvm_nx_enabled(current));
+    if ( hvm_nx_enabled(current) )
+        pfec |= PFEC_insn_fetch;
+    return __hvm_copy(buf, vaddr, size,
+                      HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
+                      PFEC_page_present | pfec);
 }

 DEFINE_PER_CPU(int, guest_handles_in_xen_space);

-/* Note that copy_{to,from}_user_hvm require the PTE to be writable even
-   when they're only trying to read from it. The guest is expected to
-   deal with this. */
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
 {
     int rc;
@@ -1570,7 +1560,8 @@ unsigned long copy_to_user_hvm(void *to,
         return 0;
     }

-    rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from, len);
+    rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
+                                        len, 0);
     return rc ? len : 0; /* fake a copy_to_user() return code */
 }

@@ -1584,7 +1575,7 @@ unsigned long copy_from_user_hvm(void *t
         return 0;
     }

-    rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len);
+    rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
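
With the boolean dir/virt/fetch parameters folded into a flags word, each wrapper now states explicitly which __hvm_copy() variant it is. As a restatement of one of the wrappers above, with the flag values written out numerically (the function name here is hypothetical; the body mirrors the diff):

    /*
     * Equivalent restatement of hvm_fetch_from_guest_virt() as added above.
     */
    enum hvm_copy_result fetch_from_guest_virt_restated(
        void *buf, unsigned long vaddr, int size, uint32_t pfec)
    {
        unsigned int flags = HVMCOPY_from_guest   /* 0u<<0 = 0          */
                           | HVMCOPY_fault        /* 1u<<1 = 2          */
                           | HVMCOPY_virt;        /* 1u<<2 = 4 => 0x6   */

        if ( hvm_nx_enabled(current) )            /* fetches honour NX  */
            pfec |= PFEC_insn_fetch;

        return __hvm_copy(buf, vaddr, size, flags, PFEC_page_present | pfec);
    }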
--- a/xen/arch/x86/hvm/svm/emulate.c    Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/hvm/svm/emulate.c    Thu Mar 27 17:25:22 2008 +0000
@@ -32,9 +32,11 @@
 static int inst_copy_from_guest(
     unsigned char *buf, unsigned long guest_eip, int inst_len)
 {
+    struct vmcb_struct *vmcb = current->arch.hvm_svm.vmcb;
+    uint32_t pfec = (vmcb->cpl == 3) ? PFEC_user_mode : 0;
     if ( (inst_len > MAX_INST_LEN) || (inst_len <= 0) )
         return 0;
-    if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len) )
+    if ( hvm_fetch_from_guest_virt_nofault(buf, guest_eip, inst_len, pfec) )
         return 0;
     return inst_len;
 }
--- a/xen/arch/x86/mm/shadow/common.c    Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/mm/shadow/common.c    Thu Mar 27 17:25:22 2008 +0000
@@ -152,9 +152,9 @@ hvm_read(enum x86_segment seg,
     *val = 0;

     if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_virt(val, addr, bytes);
+        rc = hvm_fetch_from_guest_virt(val, addr, bytes, 0);
     else
-        rc = hvm_copy_from_guest_virt(val, addr, bytes);
+        rc = hvm_copy_from_guest_virt(val, addr, bytes, 0);

     switch ( rc )
     {
@@ -416,7 +416,7 @@ struct x86_emulate_ops *shadow_init_emul
             x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
         !hvm_fetch_from_guest_virt_nofault(
-            sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+            sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0))
         ? sizeof(sh_ctxt->insn_buf) : 0;

     return &hvm_shadow_emulator_ops;
@@ -444,7 +444,7 @@ void shadow_continue_emulation(struct sh
             x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
         !hvm_fetch_from_guest_virt_nofault(
-            sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf)))
+            sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0))
         ? sizeof(sh_ctxt->insn_buf) : 0;
     sh_ctxt->insn_buf_eip = regs->eip;
 }
--- a/xen/arch/x86/x86_emulate.c    Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/arch/x86/x86_emulate.c    Thu Mar 27 17:25:22 2008 +0000
@@ -787,7 +787,7 @@ static int
     int cpl = get_cpl(ctxt, ops);
     if ( cpl == -1 )
         return -1;
-    return ((cpl >= 0) && (cpl <= ((ctxt->regs->eflags >> 12) & 3)));
+    return (cpl <= ((ctxt->regs->eflags >> 12) & 3));
 }

 #define mode_ring0() ({ \
@@ -801,6 +801,50 @@ static int
     _iopl; \
 })

+static int ioport_access_check(
+    unsigned int first_port,
+    unsigned int bytes,
+    struct x86_emulate_ctxt *ctxt,
+    struct x86_emulate_ops *ops)
+{
+    unsigned long iobmp;
+    struct segment_register tr;
+    int rc = X86EMUL_OKAY;
+
+    if ( !(ctxt->regs->eflags & EFLG_VM) && mode_iopl() )
+        return X86EMUL_OKAY;
+
+    fail_if(ops->read_segment == NULL);
+    if ( (rc = ops->read_segment(x86_seg_tr, &tr, ctxt)) != 0 )
+        return rc;
+
+    /* Ensure that the TSS is valid and has an io-bitmap-offset field. */
+    if ( !tr.attr.fields.p ||
+         ((tr.attr.fields.type & 0xd) != 0x9) ||
+         (tr.limit < 0x67) )
+        goto raise_exception;
+
+    if ( (rc = ops->read(x86_seg_none, tr.base + 0x66, &iobmp, 2, ctxt)) )
+        return rc;
+
+    /* Ensure TSS includes two bytes including byte containing first port. */
+    iobmp += first_port / 8;
+    if ( tr.limit <= iobmp )
+        goto raise_exception;
+
+    if ( (rc = ops->read(x86_seg_none, tr.base + iobmp, &iobmp, 2, ctxt)) )
+        return rc;
+    if ( (iobmp & (((1<<bytes)-1) << (first_port&7))) != 0 )
+        goto raise_exception;
+
+ done:
+    return rc;
+
+ raise_exception:
+    fail_if(ops->inject_hw_exception == NULL);
+    return ops->inject_hw_exception(EXC_GP, 0, ctxt) ? : X86EMUL_EXCEPTION;
+}
+
 static int
 in_realmode(
     struct x86_emulate_ctxt *ctxt,
@@ -2265,12 +2309,14 @@ x86_emulate(

     case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ {
         unsigned long nr_reps = get_rep_prefix();
+        unsigned int port = (uint16_t)_regs.edx;
         dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
         dst.mem.seg = x86_seg_es;
         dst.mem.off = truncate_ea(_regs.edi);
+        if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
+            goto done;
         if ( (nr_reps > 1) && (ops->rep_ins != NULL) &&
-             ((rc = ops->rep_ins((uint16_t)_regs.edx, dst.mem.seg,
-                                 dst.mem.off, dst.bytes,
+             ((rc = ops->rep_ins(port, dst.mem.seg, dst.mem.off, dst.bytes,
                                  &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
         {
             if ( rc != 0 )
@@ -2279,8 +2325,7 @@ x86_emulate(
         else
         {
             fail_if(ops->read_io == NULL);
-            if ( (rc = ops->read_io((uint16_t)_regs.edx, dst.bytes,
-                                    &dst.val, ctxt)) != 0 )
+            if ( (rc = ops->read_io(port, dst.bytes, &dst.val, ctxt)) != 0 )
                 goto done;
             dst.type = OP_MEM;
             nr_reps = 1;
@@ -2294,10 +2339,13 @@ x86_emulate(

     case 0x6e ... 0x6f: /* outs %esi,%dx */ {
         unsigned long nr_reps = get_rep_prefix();
+        unsigned int port = (uint16_t)_regs.edx;
         dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
+        if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
+            goto done;
         if ( (nr_reps > 1) && (ops->rep_outs != NULL) &&
             ((rc = ops->rep_outs(ea.mem.seg, truncate_ea(_regs.esi),
-                                  (uint16_t)_regs.edx, dst.bytes,
+                                  port, dst.bytes,
                                  &nr_reps, ctxt)) != X86EMUL_UNHANDLEABLE) )
         {
             if ( rc != 0 )
@@ -2309,8 +2357,7 @@ x86_emulate(
                                   &dst.val, dst.bytes, ctxt)) != 0 )
                 goto done;
             fail_if(ops->write_io == NULL);
-            if ( (rc = ops->write_io((uint16_t)_regs.edx, dst.bytes,
-                                     dst.val, ctxt)) != 0 )
+            if ( (rc = ops->write_io(port, dst.bytes, dst.val, ctxt)) != 0 )
                 goto done;
             nr_reps = 1;
         }
@@ -2831,6 +2878,8 @@ x86_emulate(
                ? insn_fetch_type(uint8_t)
                : (uint16_t)_regs.edx);
         op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
+        if ( (rc = ioport_access_check(port, op_bytes, ctxt, ops)) != 0 )
+            goto done;
         if ( b & 2 )
         {
             /* out */
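
A concrete walk through the bitmap arithmetic in ioport_access_check(), for a hypothetical 2-byte OUT to port 0x1F1 with the guest at CPL 3 and IOPL 0 (read16() stands in for the ops->read() callback used in the real code):

    /* first_port = 0x1F1 (497), bytes = 2, CPL 3 > IOPL 0 => consult bitmap */
    iobmp  = read16(tr.base + 0x66);      /* I/O-bitmap offset field of the TSS  */
    iobmp += 497 / 8;                     /* += 62: byte that covers port 0x1F1  */
    /* #GP(0) unless tr.limit > iobmp, i.e. both bitmap bytes lie in the TSS     */
    mask   = ((1 << 2) - 1) << (497 & 7); /* = 0x3 << 1 = 0x6: bits for 0x1F1-2  */
    /* access allowed only if (read16(tr.base + iobmp) & mask) == 0              */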
--- a/xen/include/asm-x86/hvm/support.h    Thu Mar 27 17:14:41 2008 +0000
+++ b/xen/include/asm-x86/hvm/support.h    Thu Mar 27 17:25:22 2008 +0000
@@ -99,7 +99,11 @@ enum hvm_copy_result hvm_copy_from_guest
     void *buf, paddr_t paddr, int size);

 /*
- * Copy to/from a guest virtual address.
+ * Copy to/from a guest virtual address. @pfec should include PFEC_user_mode
+ * if emulating a user-mode access (CPL=3). All other flags in @pfec are
+ * managed by the called function: it is therefore optional for the caller
+ * to set them.
+ *
  * Returns:
  *  HVMCOPY_okay: Copy was entirely successful.
  *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
@@ -110,22 +114,22 @@ enum hvm_copy_result hvm_copy_from_guest
  *                          for injection into the current HVM VCPU.
  */
 enum hvm_copy_result hvm_copy_to_guest_virt(
-    unsigned long vaddr, void *buf, int size);
+    unsigned long vaddr, void *buf, int size, uint32_t pfec);
 enum hvm_copy_result hvm_copy_from_guest_virt(
-    void *buf, unsigned long vaddr, int size);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec);
 enum hvm_copy_result hvm_fetch_from_guest_virt(
-    void *buf, unsigned long vaddr, int size);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec);

 /*
  * As above (copy to/from a guest virtual address), but no fault is generated
  * when HVMCOPY_bad_gva_to_gfn is returned.
  */
 enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
-    unsigned long vaddr, void *buf, int size);
+    unsigned long vaddr, void *buf, int size, uint32_t pfec);
 enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
-    void *buf, unsigned long vaddr, int size);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec);
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
-    void *buf, unsigned long vaddr, int size);
+    void *buf, unsigned long vaddr, int size, uint32_t pfec);

 void hvm_print_line(struct vcpu *v, const char c);
 void hlt_timer_fn(void *data);
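
Under the contract documented in the header comment above, a caller only ever contributes PFEC_user_mode; the wrappers add the present/write/fetch bits themselves. A hypothetical call site (the cpl and vaddr variables and the surrounding error handling are illustrative, not part of the patch):

    uint32_t pfec = (cpl == 3) ? PFEC_user_mode : 0;
    uint64_t data;

    switch ( hvm_copy_from_guest_virt(&data, vaddr, sizeof(data), pfec) )
    {
    case HVMCOPY_okay:
        break;                      /* data is valid */
    case HVMCOPY_bad_gva_to_gfn:
        return X86EMUL_EXCEPTION;   /* #PF already queued for the guest */
    default:
        return X86EMUL_UNHANDLEABLE;
    }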