#define ID_MASK 0x00200000
/* hidden flags - used internally by qemu to represent additional cpu
- states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
- redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK
- bit positions to ease oring with eflags. */
+ states. Only the INHIBIT_IRQ, SMM and SVMI flags are not redundant
+ (the CPL is now updated automatically whenever CS is loaded). We
+ avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
+ positions to ease ORing with eflags. */
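/* Aside, not part of the patch: because hflags stays clear of those eflags
   bit positions, the two words can be combined with a plain OR when the
   translation-block flags are computed; a minimal sketch, modeled on
   cpu_get_tb_cpu_state():

       flags = env->hflags |
           (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
*/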
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
/* update the hidden flags */
{
if (seg_reg == R_CS) {
+ int cpl = selector & 3;
#ifdef TARGET_X86_64
if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
/* long mode */
env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
env->hflags &= ~(HF_ADDSEG_MASK);
} else
#endif
{
/* legacy / compatibility case */
+ /* in real mode CPL is always 0; in vm86 mode it is always 3 */
+ if (!(env->cr[0] & CR0_PE_MASK)) {
+ cpl = 0;
+ } else if (env->eflags & VM_MASK) {
+ cpl = 3;
+ }
new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_CS32_SHIFT);
env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
new_hflags;
}
+#if HF_CPL_MASK != 3
+#error HF_CPL_MASK is hardcoded
+#endif
+ env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
}
new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
>> (DESC_B_SHIFT - HF_SS32_SHIFT);
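/* Aside, not part of the patch: with HF_CPL_SHIFT == 0 and HF_CPL_MASK == 3,
   the RPL bits of the CS selector drop into hflags without shifting, and the
   current CPL can be read back with a plain mask; a sketch:

       static inline int cpu_cpl_sketch(const CPUX86State *env)
       {
           return env->hflags & HF_CPL_MASK;   /* CPL lives in bits 1:0 */
       }
*/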
target_ulong *base, unsigned int *limit,
unsigned int *flags);
-/* wrapper, just in case memory mappings must be changed */
-static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
-{
-#if HF_CPL_MASK == 3
- s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
-#else
-#error HF_CPL_MASK is hardcoded
-#endif
-}
-
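/* Aside, not part of the patch: with the wrapper gone, callers no longer set
   the CPL by hand. A hypothetical caller that used to write

       cpu_x86_set_cpl(env, selector & 3);
       cpu_x86_load_seg_cache(env, R_CS, selector, base, limit, flags);

   now keeps only the cpu_x86_load_seg_cache() call, which derives the CPL
   from the selector's RPL (or forces it in real/vm86 mode, see above). */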
/* op_helper.c */
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
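/* Aside, not part of the patch: a hypothetical save-path use of the helper,
   splitting the 80-bit ST0 register into mantissa and exponent:

       uint64_t mant;
       uint16_t exp;
       cpu_get_fp80(&mant, &exp, env->fpregs[0].d);
*/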
for (i = 0; i < 6; i++) {
load_seg_vm(env, i, new_segs[i]);
}
- /* in vm86, CPL is always 3 */
- cpu_x86_set_cpl(env, 3);
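/* Aside, not part of the patch: the explicit cpu_x86_set_cpl(env, 3) is no
   longer needed here because loading R_CS while VM_MASK is set in
   env->eflags now forces CPL to 3 inside cpu_x86_load_seg_cache(). */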
} else {
- /* CPL is set the RPL of CS */
- cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
/* first just selectors as the rest may trigger exceptions */
for (i = 0; i < 6; i++) {
cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- cpu_x86_set_cpl(env, dpl);
env->eip = offset;
}
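/* Aside, not part of the patch: in this and the following protected-mode
   paths the CS selector passed to cpu_x86_load_seg_cache() just above is
   expected to carry the new privilege level in its RPL bits, so hflags
   already ends up with CPL == dpl and the explicit call was redundant. */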
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- cpu_x86_set_cpl(env, dpl);
env->eip = offset;
}
#endif
env->eflags &= ~env->fmask;
cpu_load_eflags(env, env->eflags, 0);
- cpu_x86_set_cpl(env, 0);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_P_MASK |
env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
- cpu_x86_set_cpl(env, 0);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
- cpu_x86_set_cpl(env, 3);
} else {
env->eflags |= IF_MASK;
cpu_x86_load_seg_cache(env, R_CS, selector | 3,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
- cpu_x86_set_cpl(env, 3);
}
}
#endif
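/* Aside, not part of the patch: the removed calls are subsumed by the
   selector arithmetic above: "selector & 0xfffc" clears the RPL bits so
   loading CS yields CPL 0, while "selector | 3" sets them so loading CS
   yields CPL 3. */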
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- cpu_x86_set_cpl(env, dpl);
SET_ESP(sp, sp_mask);
env->eip = offset;
}
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
e2);
- cpu_x86_set_cpl(env, rpl);
sp = new_esp;
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK) {
IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
VIP_MASK);
load_seg_vm(env, R_CS, new_cs & 0xffff);
- cpu_x86_set_cpl(env, 3);
load_seg_vm(env, R_SS, new_ss & 0xffff);
load_seg_vm(env, R_ES, new_es & 0xffff);
load_seg_vm(env, R_DS, new_ds & 0xffff);
raise_exception_err(env, EXCP0D_GPF, 0);
}
env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
- cpu_x86_set_cpl(env, 0);
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
if (env->sysenter_cs == 0 || cpl != 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
- cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
if (dflag == 2) {
cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
env->vm_vmcb + offsetof(struct vmcb, save.dr7));
env->dr[6] = ldq_phys(cs->as,
env->vm_vmcb + offsetof(struct vmcb, save.dr6));
- cpu_x86_set_cpl(env, ldub_phys(cs->as,
- env->vm_vmcb + offsetof(struct vmcb,
- save.cpl)));
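/* Aside, not part of the patch: the saved CPL byte is no longer loaded
   explicitly; the guest's CPL now comes from the RPL of the CS selector
   loaded from the VMCB, which is assumed here to match save.cpl. */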
/* FIXME: guest state consistency checks */
env->vm_hsave + offsetof(struct vmcb, save.dr7));
/* other setups */
- cpu_x86_set_cpl(env, 0);
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
exit_code);
stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),