ia64/xen-unstable
changeset 12611:b75574cb80a3
[HVM] Add/fix access rights and limit checks to INS/OUTS emulation
Since these instructions are documented to take their intercepts
before these checks are being done in hardware, they must be carried
out in software.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author | kfraser@localhost.localdomain |
---|---|
date | Tue Nov 28 11:46:39 2006 +0000 (2006-11-28) |
parents | 519a74928bd4 |
children | 0af1ba62a14b |
files | xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c |
line diff
1.1 --- a/xen/arch/x86/hvm/svm/svm.c Tue Nov 28 11:45:54 2006 +0000 1.2 +++ b/xen/arch/x86/hvm/svm/svm.c Tue Nov 28 11:46:39 2006 +0000 1.3 @@ -1233,8 +1233,7 @@ static inline int svm_get_io_address( 1.4 unsigned long *count, unsigned long *addr) 1.5 { 1.6 unsigned long reg; 1.7 - unsigned int asize = 0; 1.8 - unsigned int isize; 1.9 + unsigned int asize, isize; 1.10 int long_mode = 0; 1.11 segment_selector_t *seg = NULL; 1.12 struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb; 1.13 @@ -1267,17 +1266,25 @@ static inline int svm_get_io_address( 1.14 reg = regs->esi; 1.15 if (!seg) /* If no prefix, used DS. */ 1.16 seg = &vmcb->ds; 1.17 + if (!long_mode && (seg->attributes.fields.type & 0xa) == 0x8) { 1.18 + svm_inject_exception(v, TRAP_gp_fault, 1, 0); 1.19 + return 0; 1.20 + } 1.21 } 1.22 else 1.23 { 1.24 reg = regs->edi; 1.25 seg = &vmcb->es; /* Note: This is ALWAYS ES. */ 1.26 + if (!long_mode && (seg->attributes.fields.type & 0xa) != 0x2) { 1.27 + svm_inject_exception(v, TRAP_gp_fault, 1, 0); 1.28 + return 0; 1.29 + } 1.30 } 1.31 1.32 /* If the segment isn't present, give GP fault! */ 1.33 if (!long_mode && !seg->attributes.fields.p) 1.34 { 1.35 - svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel); 1.36 + svm_inject_exception(v, TRAP_gp_fault, 1, 0); 1.37 return 0; 1.38 } 1.39 1.40 @@ -1294,16 +1301,59 @@ static inline int svm_get_io_address( 1.41 if (!info.fields.rep) 1.42 *count = 1; 1.43 1.44 - if (!long_mode) { 1.45 - if (*addr > seg->limit) 1.46 + if (!long_mode) 1.47 + { 1.48 + ASSERT(*addr == (u32)*addr); 1.49 + if ((u32)(*addr + size - 1) < (u32)*addr || 1.50 + (seg->attributes.fields.type & 0xc) != 0x4 ? 1.51 + *addr + size - 1 > seg->limit : 1.52 + *addr <= seg->limit) 1.53 { 1.54 - svm_inject_exception(v, TRAP_gp_fault, 1, seg->sel); 1.55 + svm_inject_exception(v, TRAP_gp_fault, 1, 0); 1.56 return 0; 1.57 - } 1.58 - else 1.59 + } 1.60 + 1.61 + /* Check the limit for repeated instructions, as above we checked only 1.62 + the first instance. 
Truncate the count if a limit violation would 1.63 + occur. Note that the checking is not necessary for page granular 1.64 + segments as transfers crossing page boundaries will be broken up 1.65 + anyway. */ 1.66 + if (!seg->attributes.fields.g && *count > 1) 1.67 { 1.68 - *addr += seg->base; 1.69 + if ((seg->attributes.fields.type & 0xc) != 0x4) 1.70 + { 1.71 + /* expand-up */ 1.72 + if (!(regs->eflags & EF_DF)) 1.73 + { 1.74 + if (*addr + *count * size - 1 < *addr || 1.75 + *addr + *count * size - 1 > seg->limit) 1.76 + *count = (seg->limit + 1UL - *addr) / size; 1.77 + } 1.78 + else 1.79 + { 1.80 + if (*count - 1 > *addr / size) 1.81 + *count = *addr / size + 1; 1.82 + } 1.83 + } 1.84 + else 1.85 + { 1.86 + /* expand-down */ 1.87 + if (!(regs->eflags & EF_DF)) 1.88 + { 1.89 + if (*count - 1 > -(s32)*addr / size) 1.90 + *count = -(s32)*addr / size + 1UL; 1.91 + } 1.92 + else 1.93 + { 1.94 + if (*addr < (*count - 1) * size || 1.95 + *addr - (*count - 1) * size <= seg->limit) 1.96 + *count = (*addr - seg->limit - 1) / size + 1; 1.97 + } 1.98 + } 1.99 + ASSERT(*count); 1.100 } 1.101 + 1.102 + *addr += seg->base; 1.103 } 1.104 else if (seg == &vmcb->fs || seg == &vmcb->gs) 1.105 *addr += seg->base;
2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c Tue Nov 28 11:45:54 2006 +0000 2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c Tue Nov 28 11:46:39 2006 +0000 2.3 @@ -958,12 +958,13 @@ static void vmx_do_invlpg(unsigned long 2.4 2.5 2.6 static int vmx_check_descriptor(int long_mode, unsigned long eip, int inst_len, 2.7 - enum segment seg, unsigned long *base) 2.8 + enum segment seg, unsigned long *base, 2.9 + u32 *limit, u32 *ar_bytes) 2.10 { 2.11 - enum vmcs_field ar_field, base_field; 2.12 - u32 ar_bytes; 2.13 + enum vmcs_field ar_field, base_field, limit_field; 2.14 2.15 *base = 0; 2.16 + *limit = 0; 2.17 if ( seg != seg_es ) 2.18 { 2.19 unsigned char inst[MAX_INST_LEN]; 2.20 @@ -1020,26 +1021,32 @@ static int vmx_check_descriptor(int long 2.21 case seg_cs: 2.22 ar_field = GUEST_CS_AR_BYTES; 2.23 base_field = GUEST_CS_BASE; 2.24 + limit_field = GUEST_CS_LIMIT; 2.25 break; 2.26 case seg_ds: 2.27 ar_field = GUEST_DS_AR_BYTES; 2.28 base_field = GUEST_DS_BASE; 2.29 + limit_field = GUEST_DS_LIMIT; 2.30 break; 2.31 case seg_es: 2.32 ar_field = GUEST_ES_AR_BYTES; 2.33 base_field = GUEST_ES_BASE; 2.34 + limit_field = GUEST_ES_LIMIT; 2.35 break; 2.36 case seg_fs: 2.37 ar_field = GUEST_FS_AR_BYTES; 2.38 base_field = GUEST_FS_BASE; 2.39 + limit_field = GUEST_FS_LIMIT; 2.40 break; 2.41 case seg_gs: 2.42 ar_field = GUEST_FS_AR_BYTES; 2.43 base_field = GUEST_FS_BASE; 2.44 + limit_field = GUEST_FS_LIMIT; 2.45 break; 2.46 case seg_ss: 2.47 ar_field = GUEST_GS_AR_BYTES; 2.48 base_field = GUEST_GS_BASE; 2.49 + limit_field = GUEST_GS_LIMIT; 2.50 break; 2.51 default: 2.52 BUG(); 2.53 @@ -1047,10 +1054,13 @@ static int vmx_check_descriptor(int long 2.54 } 2.55 2.56 if ( !long_mode || seg == seg_fs || seg == seg_gs ) 2.57 + { 2.58 *base = __vmread(base_field); 2.59 - ar_bytes = __vmread(ar_field); 2.60 + *limit = __vmread(limit_field); 2.61 + } 2.62 + *ar_bytes = __vmread(ar_field); 2.63 2.64 - return !(ar_bytes & 0x10000); 2.65 + return !(*ar_bytes & 0x10000); 2.66 } 2.67 2.68 static void 
vmx_io_instruction(unsigned long exit_qualification, 2.69 @@ -1090,7 +1100,7 @@ static void vmx_io_instruction(unsigned 2.70 2.71 if ( test_bit(4, &exit_qualification) ) { /* string instruction */ 2.72 unsigned long addr, count = 1, base; 2.73 - u32 ar_bytes; 2.74 + u32 ar_bytes, limit; 2.75 int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1; 2.76 int long_mode = 0; 2.77 2.78 @@ -1101,20 +1111,86 @@ static void vmx_io_instruction(unsigned 2.79 #endif 2.80 addr = __vmread(GUEST_LINEAR_ADDRESS); 2.81 2.82 + if ( test_bit(5, &exit_qualification) ) { /* "rep" prefix */ 2.83 + pio_opp->flags |= REPZ; 2.84 + count = regs->ecx; 2.85 + if ( !long_mode && (vm86 || !(ar_bytes & (1u<<14))) ) 2.86 + count &= 0xFFFF; 2.87 + } 2.88 + 2.89 /* 2.90 * In protected mode, guest linear address is invalid if the 2.91 * selector is null. 2.92 */ 2.93 if ( !vmx_check_descriptor(long_mode, regs->eip, inst_len, 2.94 dir == IOREQ_WRITE ? seg_ds : seg_es, 2.95 - &base) ) 2.96 + &base, &limit, &ar_bytes) ) { 2.97 + if ( !long_mode ) { 2.98 + vmx_inject_hw_exception(current, TRAP_gp_fault, 0); 2.99 + return; 2.100 + } 2.101 addr = dir == IOREQ_WRITE ? base + regs->esi : regs->edi; 2.102 + } 2.103 + 2.104 + if ( !long_mode ) { 2.105 + unsigned long ea = addr - base; 2.106 + 2.107 + /* Segment must be readable for outs and writeable for ins. */ 2.108 + if ( dir == IOREQ_WRITE ? (ar_bytes & 0xa) == 0x8 2.109 + : (ar_bytes & 0xa) != 0x2 ) { 2.110 + vmx_inject_hw_exception(current, TRAP_gp_fault, 0); 2.111 + return; 2.112 + } 2.113 + 2.114 + /* Offset must be within limits. */ 2.115 + ASSERT(ea == (u32)ea); 2.116 + if ( (u32)(ea + size - 1) < (u32)ea || 2.117 + (ar_bytes & 0xc) != 0x4 ? 
ea + size - 1 > limit 2.118 + : ea <= limit ) 2.119 + { 2.120 + vmx_inject_hw_exception(current, TRAP_gp_fault, 0); 2.121 + return; 2.122 + } 2.123 2.124 - if ( test_bit(5, &exit_qualification) ) { /* "rep" prefix */ 2.125 - pio_opp->flags |= REPZ; 2.126 - count = regs->ecx; 2.127 - if ( !long_mode && (vm86 || !(ar_bytes & (1u<<14))) ) 2.128 - count &= 0xFFFF; 2.129 + /* Check the limit for repeated instructions, as above we checked 2.130 + only the first instance. Truncate the count if a limit violation 2.131 + would occur. Note that the checking is not necessary for page 2.132 + granular segments as transfers crossing page boundaries will be 2.133 + broken up anyway. */ 2.134 + if ( !(ar_bytes & (1u<<15)) && count > 1 ) 2.135 + { 2.136 + if ( (ar_bytes & 0xc) != 0x4 ) 2.137 + { 2.138 + /* expand-up */ 2.139 + if ( !df ) 2.140 + { 2.141 + if ( ea + count * size - 1 < ea || 2.142 + ea + count * size - 1 > limit ) 2.143 + count = (limit + 1UL - ea) / size; 2.144 + } 2.145 + else 2.146 + { 2.147 + if ( count - 1 > ea / size ) 2.148 + count = ea / size + 1; 2.149 + } 2.150 + } 2.151 + else 2.152 + { 2.153 + /* expand-down */ 2.154 + if ( !df ) 2.155 + { 2.156 + if ( count - 1 > -(s32)ea / size ) 2.157 + count = -(s32)ea / size + 1UL; 2.158 + } 2.159 + else 2.160 + { 2.161 + if ( ea < (count - 1) * size || 2.162 + ea - (count - 1) * size <= limit ) 2.163 + count = (ea - limit - 1) / size + 1; 2.164 + } 2.165 + } 2.166 + ASSERT(count); 2.167 + } 2.168 } 2.169 2.170 /*