init_int80_direct_trap(v);
/* IOPL privileges are virtualised. */
- v->arch.pv_vcpu.iopl = v->arch.user_regs._eflags & X86_EFLAGS_IOPL;
- v->arch.user_regs._eflags &= ~X86_EFLAGS_IOPL;
+ v->arch.pv_vcpu.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
+ v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
/* Ensure real hardware interrupts are enabled. */
- v->arch.user_regs._eflags |= X86_EFLAGS_IF;
+ v->arch.user_regs.eflags |= X86_EFLAGS_IF;
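/*
 * Illustrative aside (not part of the patch): a minimal, self-contained
 * sketch of the IOPL virtualisation done above and merged back further
 * down ("merge back into returned eflags").  It assumes only the standard
 * flag layout (IOPL = bits 12-13, IF = bit 9); struct toy_vcpu and the
 * helper names are hypothetical stand-ins for the real per-vCPU state.
 */
#include <assert.h>
#include <stdint.h>

#define X86_EFLAGS_IF   0x00000200u   /* bit 9 */
#define X86_EFLAGS_IOPL 0x00003000u   /* bits 12-13 */

struct toy_vcpu { uint32_t iopl; };   /* stand-in for arch.pv_vcpu.iopl */

/* On context load: stash the requested IOPL, clear it from the live frame,
 * and force IF on so real hardware interrupts stay enabled. */
static uint32_t toy_virtualise_iopl(struct toy_vcpu *v, uint32_t eflags)
{
    v->iopl = eflags & X86_EFLAGS_IOPL;
    eflags &= ~X86_EFLAGS_IOPL;
    return eflags | X86_EFLAGS_IF;
}

/* On context read-out: the live frame must carry no IOPL bits; merge the
 * stashed value back so the guest/toolstack sees a consistent view. */
static uint32_t toy_merge_iopl(const struct toy_vcpu *v, uint32_t eflags)
{
    assert(!(eflags & X86_EFLAGS_IOPL));
    return eflags | v->iopl;
}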
if ( !v->is_initialised )
{
if ( !ring_1(regs) )
{
ret = put_user(regs->ss, esp-1);
- ret |= put_user(regs->_esp, esp-2);
+ ret |= put_user(regs->esp, esp-2);
esp -= 2;
}
if ( ret |
put_user(rflags, esp-1) |
put_user(cs_and_mask, esp-2) |
- put_user(regs->_eip, esp-3) |
+ put_user(regs->eip, esp-3) |
put_user(uregs->gs, esp-4) |
put_user(uregs->fs, esp-5) |
put_user(uregs->es, esp-6) |
vcpu_info(n, evtchn_upcall_mask) = 1;
regs->entry_vector |= TRAP_syscall;
- regs->_eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT|
+ regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT|
X86_EFLAGS_IOPL|X86_EFLAGS_TF);
regs->ss = FLAT_COMPAT_KERNEL_SS;
- regs->_esp = (unsigned long)(esp-7);
+ regs->esp = (unsigned long)(esp-7);
regs->cs = FLAT_COMPAT_KERNEL_CS;
- regs->_eip = pv->failsafe_callback_eip;
+ regs->eip = pv->failsafe_callback_eip;
return;
}
regs->rip = parms.virt_entry;
regs->rsp = vstack_end;
regs->rsi = vstartinfo_start;
- regs->_eflags = X86_EFLAGS_IF;
+ regs->eflags = X86_EFLAGS_IF;
#ifdef CONFIG_SHADOW_PAGING
if ( opt_dom0_shadow )
}
/* IOPL privileges are virtualised: merge back into returned eflags. */
- BUG_ON((c(user_regs._eflags) & X86_EFLAGS_IOPL) != 0);
- c(user_regs._eflags |= v->arch.pv_vcpu.iopl);
+ BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
+ c(user_regs.eflags |= v->arch.pv_vcpu.iopl);
if ( !compat )
{
if ( addr != -1UL )
regs->rip = addr;
- regs->_eflags &= ~X86_EFLAGS_TF;
+ regs->eflags &= ~X86_EFLAGS_TF;
/* Set eflags.RF to ensure we do not re-enter. */
- regs->_eflags |= X86_EFLAGS_RF;
+ regs->eflags |= X86_EFLAGS_RF;
/* Set the trap flag if we are single stepping. */
if ( type == GDB_STEP )
- regs->_eflags |= X86_EFLAGS_TF;
+ regs->eflags |= X86_EFLAGS_TF;
}
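/*
 * Illustrative aside (not part of the patch): how a debug stub typically
 * juggles TF and RF when resuming a guest, mirroring the hunk above.  Flag
 * values are the architectural ones (TF = bit 8, RF = bit 16); the enum and
 * helper name are hypothetical.
 */
#include <stdint.h>

#define X86_EFLAGS_TF 0x00000100u   /* trap flag: fault again after one insn */
#define X86_EFLAGS_RF 0x00010000u   /* resume flag: suppress an instruction breakpoint once */

enum toy_resume { TOY_CONTINUE, TOY_STEP };   /* stand-in for GDB_CONTINUE/GDB_STEP */

static uint32_t toy_resume_eflags(uint32_t eflags, enum toy_resume how)
{
    eflags &= ~X86_EFLAGS_TF;      /* stop single-stepping by default */
    eflags |= X86_EFLAGS_RF;       /* do not immediately re-enter the debug trap */
    if ( how == TOY_STEP )
        eflags |= X86_EFLAGS_TF;   /* trap once more after the next instruction */
    return eflags;
}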
/*
panic("FATAL TRAP: vector = %d (%s)\n"
"[error_code=%04x] %s",
trapnr, trapstr(trapnr), regs->error_code,
- (regs->_eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
+ (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
}
void pv_inject_event(const struct x86_event *event)
static void instruction_done(struct cpu_user_regs *regs, unsigned long rip)
{
regs->rip = rip;
- regs->_eflags &= ~X86_EFLAGS_RF;
- if ( regs->_eflags & X86_EFLAGS_TF )
+ regs->eflags &= ~X86_EFLAGS_RF;
+ if ( regs->eflags & X86_EFLAGS_TF )
{
current->arch.debugreg[6] |= DR_STEP | DR_STATUS_RESERVED_ONE;
do_guest_trap(TRAP_debug, regs);
eip += sizeof(instr);
- guest_cpuid(current, regs->_eax, regs->_ecx, &res);
+ guest_cpuid(current, regs->eax, regs->ecx, &res);
regs->rax = res.a;
regs->rbx = res.b;
* - Page fault in kernel mode
*/
if ( (cr4 & X86_CR4_SMAP) && !(error_code & PFEC_user_mode) &&
- (((regs->cs & 3) == 3) || !(regs->_eflags & X86_EFLAGS_AC)) )
+ (((regs->cs & 3) == 3) || !(regs->eflags & X86_EFLAGS_AC)) )
return smap_fault;
}
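/*
 * Illustrative aside (not part of the patch): the SMAP test above as a
 * standalone predicate, using the architectural constants (CR4.SMAP = bit
 * 21, EFLAGS.AC = bit 18, PFEC U/S = bit 2).  A supervisor access to a user
 * page is blocked unless AC is set, and an access taken while CS.RPL == 3
 * counts as an implicit supervisor access that AC cannot override.
 */
#include <stdbool.h>
#include <stdint.h>

#define X86_CR4_SMAP   0x00200000u
#define X86_EFLAGS_AC  0x00040000u
#define PFEC_user_mode 0x00000004u

static bool toy_is_smap_fault(unsigned long cr4, uint32_t error_code,
                              uint16_t cs, uint32_t eflags)
{
    return (cr4 & X86_CR4_SMAP) &&
           !(error_code & PFEC_user_mode) &&
           (((cs & 3) == 3) || !(eflags & X86_EFLAGS_AC));
}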
struct domain *d = v->domain;
/* No fixups in interrupt context or when interrupts are disabled. */
- if ( in_irq() || !(regs->_eflags & X86_EFLAGS_IF) )
+ if ( in_irq() || !(regs->eflags & X86_EFLAGS_IF) )
return 0;
if ( !(regs->error_code & PFEC_page_present) &&
break;
/* x86_emulate() clips the repetition count to ensure we don't wrap. */
- if ( unlikely(ctxt->regs->_eflags & X86_EFLAGS_DF) )
+ if ( unlikely(ctxt->regs->eflags & X86_EFLAGS_DF) )
offset -= bytes_per_rep;
else
offset += bytes_per_rep;
break;
/* x86_emulate() clips the repetition count to ensure we don't wrap. */
- if ( unlikely(ctxt->regs->_eflags & X86_EFLAGS_DF) )
+ if ( unlikely(ctxt->regs->eflags & X86_EFLAGS_DF) )
offset -= bytes_per_rep;
else
offset += bytes_per_rep;
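/*
 * Illustrative aside (not part of the patch): the direction handling used
 * in both rep hunks above, as a standalone helper.  DF is the architectural
 * direction flag (bit 10); the helper name is hypothetical.  The caller is
 * assumed to have clipped the repetition count so the offset cannot wrap.
 */
#include <stdint.h>

#define X86_EFLAGS_DF 0x00000400u

static unsigned long toy_next_rep_offset(unsigned long offset,
                                         unsigned int bytes_per_rep,
                                         uint32_t eflags)
{
    return (eflags & X86_EFLAGS_DF) ? offset - bytes_per_rep
                                    : offset + bytes_per_rep;
}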
return 0;
/* Mirror virtualized state into EFLAGS. */
- ASSERT(regs->_eflags & X86_EFLAGS_IF);
+ ASSERT(regs->eflags & X86_EFLAGS_IF);
if ( vcpu_info(curr, evtchn_upcall_mask) )
- regs->_eflags &= ~X86_EFLAGS_IF;
+ regs->eflags &= ~X86_EFLAGS_IF;
else
- regs->_eflags |= X86_EFLAGS_IF;
- ASSERT(!(regs->_eflags & X86_EFLAGS_IOPL));
- regs->_eflags |= curr->arch.pv_vcpu.iopl;
- eflags = regs->_eflags;
+ regs->eflags |= X86_EFLAGS_IF;
+ ASSERT(!(regs->eflags & X86_EFLAGS_IOPL));
+ regs->eflags |= curr->arch.pv_vcpu.iopl;
+ eflags = regs->eflags;
ctxt.ctxt.addr_size = ar & _SEGMENT_L ? 64 : ar & _SEGMENT_DB ? 32 : 16;
/* Leave zero in ctxt.ctxt.sp_size, as it's not needed. */
* Nothing we allow to be emulated can change anything other than the
* arithmetic bits, and the resume flag.
*/
- ASSERT(!((regs->_eflags ^ eflags) &
+ ASSERT(!((regs->eflags ^ eflags) &
~(X86_EFLAGS_RF | X86_EFLAGS_ARITH_MASK)));
- regs->_eflags |= X86_EFLAGS_IF;
- regs->_eflags &= ~X86_EFLAGS_IOPL;
+ regs->eflags |= X86_EFLAGS_IF;
+ regs->eflags &= ~X86_EFLAGS_IOPL;
/* More strict than x86_emulate_wrapper(). */
ASSERT(ctxt.ctxt.event_pending == (rc == X86EMUL_EXCEPTION));
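/*
 * Illustrative aside (not part of the patch): the post-emulation invariant
 * asserted above, written out as a standalone check.  TOY_ARITH_MASK is an
 * assumed expansion of X86_EFLAGS_ARITH_MASK into the six status flags; IF
 * is forced on and IOPL cleared afterwards because both are virtualised for
 * PV guests (see the IOPL hunks earlier in this patch).
 */
#include <assert.h>
#include <stdint.h>

#define X86_EFLAGS_CF   0x00000001u
#define X86_EFLAGS_PF   0x00000004u
#define X86_EFLAGS_AF   0x00000010u
#define X86_EFLAGS_ZF   0x00000040u
#define X86_EFLAGS_SF   0x00000080u
#define X86_EFLAGS_OF   0x00000800u
#define X86_EFLAGS_IF   0x00000200u
#define X86_EFLAGS_RF   0x00010000u
#define X86_EFLAGS_IOPL 0x00003000u

#define TOY_ARITH_MASK (X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | \
                        X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF)

static uint32_t toy_sanitise_post_emul_eflags(uint32_t new_fl, uint32_t old_fl)
{
    /* Only the arithmetic flags and RF may have changed during emulation. */
    assert(!((new_fl ^ old_fl) & ~(X86_EFLAGS_RF | TOY_ARITH_MASK)));
    new_fl |= X86_EFLAGS_IF;        /* IF is always on in the live frame */
    new_fl &= ~X86_EFLAGS_IOPL;     /* IOPL lives in arch.pv_vcpu.iopl instead */
    return new_fl;
}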
!(ar & _SEGMENT_WR) ||
!check_stack_limit(ar, limit, esp + nparm * 4, nparm * 4) )
return do_guest_trap(TRAP_gp_fault, regs);
- ustkp = (unsigned int *)(unsigned long)((unsigned int)base + regs->_esp + nparm * 4);
+ ustkp = (unsigned int *)(unsigned long)
+ ((unsigned int)base + regs->esp + nparm * 4);
if ( !compat_access_ok(ustkp - nparm, nparm * 4) )
{
do_guest_trap(TRAP_gp_fault, regs);
if ( !guest_mode(regs) )
{
- if ( regs->_eflags & X86_EFLAGS_TF )
+ if ( regs->eflags & X86_EFLAGS_TF )
{
/* In SYSENTER entry path we can't zap TF until EFLAGS is saved. */
if ( (regs->rip >= (unsigned long)sysenter_entry) &&
(regs->rip <= (unsigned long)sysenter_eflags_saved) )
{
if ( regs->rip == (unsigned long)sysenter_eflags_saved )
- regs->_eflags &= ~X86_EFLAGS_TF;
+ regs->eflags &= ~X86_EFLAGS_TF;
goto out;
}
if ( !debugger_trap_fatal(TRAP_debug, regs) )
{
WARN();
- regs->_eflags &= ~X86_EFLAGS_TF;
+ regs->eflags &= ~X86_EFLAGS_TF;
}
}
else
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct mc_state *mcs = &current->mc_state;
unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
- ? regs->_ecx
+ ? regs->ecx
: mcs->call.args[1];
unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;
BUG_ON(!hypercall_xlat_continuation(&left, 4, 0x01, nat_ops,
cmp_uops));
if ( !(mcs->flags & MCSF_in_multicall) )
- regs->_ecx += count - i;
+ regs->ecx += count - i;
else
mcs->compat_call.args[1] += count - i;
}
{
unsigned int i, *stack, addr, mask = STACK_SIZE;
- stack = (unsigned int *)(unsigned long)regs->_esp;
+ stack = (unsigned int *)(unsigned long)regs->esp;
printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);
if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
regs->rsp = (u32)regs->rsp;
/* Restore EAX (clobbered by hypercall). */
- if ( unlikely(__get_user(regs->_eax, (u32 *)regs->rsp)) )
+ if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
{
domain_crash(v->domain);
return 0;
}
/* Restore CS and EIP. */
- if ( unlikely(__get_user(regs->_eip, (u32 *)regs->rsp + 1)) ||
+ if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
{
domain_crash(v->domain);
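/*
 * Illustrative aside (not part of the patch): a hypothetical mirror of the
 * 32-bit frame this function reads back from the guest stack, inferred from
 * the word offsets used here and further down (eax at +0, eip at +1, cs at
 * +2, esp at +4, ss at +5).  The +3 slot is assumed to hold eflags; that
 * read is not shown in this excerpt.
 */
#include <stdint.h>

struct toy_compat_iret_frame {
    uint32_t eax;      /* clobbered by the hypercall itself, restored first */
    uint32_t eip;
    uint32_t cs;
    uint32_t eflags;   /* assumption: fixed up (IOPL/IF) before being applied */
    uint32_t esp;      /* only consumed when returning to ring 2/3 */
    uint32_t ss;       /* likewise */
};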
if ( VM_ASSIST(v->domain, architectural_iopl) )
v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;
- regs->_eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
+ regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
if ( unlikely(eflags & X86_EFLAGS_VM) )
{
int rc = 0;
gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
- regs->_esp, ksp);
- if ( ksp < regs->_esp )
+ regs->esp, ksp);
+ if ( ksp < regs->esp )
{
for (i = 1; i < 10; ++i)
{
rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
}
}
- else if ( ksp > regs->_esp )
+ else if ( ksp > regs->esp )
{
for ( i = 9; i > 0; --i )
{
domain_crash(v->domain);
return 0;
}
- regs->_esp = ksp;
+ regs->esp = ksp;
regs->ss = v->arch.pv_vcpu.kernel_ss;
ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
if ( TI_GET_IF(ti) )
eflags &= ~X86_EFLAGS_IF;
- regs->_eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
- X86_EFLAGS_NT|X86_EFLAGS_TF);
+ regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
+ X86_EFLAGS_NT|X86_EFLAGS_TF);
if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
{
domain_crash(v->domain);
return 0;
}
- regs->_eip = ti->address;
+ regs->eip = ti->address;
regs->cs = ti->cs;
}
else if ( unlikely(ring_0(regs)) )
return 0;
}
else if ( ring_1(regs) )
- regs->_esp += 16;
+ regs->esp += 16;
/* Return to ring 2/3: restore ESP and SS. */
else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
- __get_user(regs->_esp, (u32 *)regs->rsp + 4) )
+ __get_user(regs->esp, (u32 *)regs->rsp + 4) )
{
domain_crash(v->domain);
return 0;
* The hypercall exit path will overwrite EAX with this return
* value.
*/
- return regs->_eax;
+ return regs->eax;
}
static long compat_register_guest_callback(
GDB_REG64(regs->r15);
GDB_REG64(regs->rip);
- GDB_REG32(regs->_eflags);
+ GDB_REG32(regs->eflags);
GDB_REG32(regs->cs);
GDB_REG32(regs->ss);
static inline uint64_t msr_fold(const struct cpu_user_regs *regs)
{
- return (regs->rdx << 32) | regs->_eax;
+ return (regs->rdx << 32) | regs->eax;
}
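/*
 * Illustrative aside (not part of the patch): msr_fold() combines the
 * guest's edx:eax pair into the 64-bit value a WRMSR emulation consumes.
 * Below is a toy round-trip with a hypothetical inverse split (high half to
 * rdx, low half to rax, as RDMSR returns them); struct toy_regs stands in
 * for cpu_user_regs and the actual msr_split() body is not shown here.
 */
#include <assert.h>
#include <stdint.h>

struct toy_regs { uint64_t rax, rdx; };

static uint64_t toy_msr_fold(const struct toy_regs *r)
{
    return (r->rdx << 32) | (uint32_t)r->rax;   /* low 32 bits of rax == eax */
}

static void toy_msr_split(struct toy_regs *r, uint64_t val)
{
    r->rdx = val >> 32;
    r->rax = (uint32_t)val;
}

int main(void)
{
    struct toy_regs r = { .rax = 0x11223344u, .rdx = 0x55667788u };
    uint64_t v = toy_msr_fold(&r);
    assert(v == 0x5566778811223344ull);
    toy_msr_split(&r, v);
    assert(r.rax == 0x11223344u && r.rdx == 0x55667788u);
    return 0;
}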
static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)