cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
cmp->ip = cur_regs->rip;
cmp->sp = cur_regs->rsp;
- cmp->flags = cur_regs->eflags;
+ cmp->flags = cur_regs->rflags;
cmp->ss = cur_regs->ss;
cmp->cs = cur_regs->cs;
if ( (cmp->cs & 3) > 1 )
r->ip = cur_regs->rip;
r->sp = cur_regs->rsp;
- r->flags = cur_regs->eflags;
+ r->flags = cur_regs->rflags;
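/*
 * Illustrative note, not part of the patch: struct xen_pmu_regs (the
 * 'r.regs' target above) carries 64-bit ip/sp/flags fields, so the
 * sample must be taken from the full rip/rsp/rflags values rather
 * than from their 32-bit aliases.
 */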
if ( !has_hvm_container_vcpu(sampled) )
{
init_int80_direct_trap(v);
/* IOPL privileges are virtualised. */
- v->arch.pv_vcpu.iopl = v->arch.user_regs.eflags & X86_EFLAGS_IOPL;
- v->arch.user_regs.eflags &= ~X86_EFLAGS_IOPL;
+ v->arch.pv_vcpu.iopl = v->arch.user_regs._eflags & X86_EFLAGS_IOPL;
+ v->arch.user_regs._eflags &= ~X86_EFLAGS_IOPL;
/* Ensure real hardware interrupts are enabled. */
- v->arch.user_regs.eflags |= X86_EFLAGS_IF;
+ v->arch.user_regs._eflags |= X86_EFLAGS_IF;
if ( !v->is_initialised )
{
else
{
if ( is_pv_vcpu(current) )
- regs->eip += 2; /* skip re-execute 'syscall' / 'int $xx' */
+ regs->rip += 2; /* skip re-execute 'syscall' / 'int $xx' */
else
current->arch.hvm_vcpu.hcall_preempted = 0;
}
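/*
 * Aside, not part of the patch: the +/- 2 adjustments around the
 * hypercall work because both PV trap instructions are exactly two
 * bytes long -- 'syscall' (0x0f 0x05) for 64-bit guests and
 * 'int $0x82' (0xcd 0x82) for 32-bit guests -- so rewinding %rip by 2
 * re-executes the hypercall and advancing it by 2 skips that.
 */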
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct vcpu *curr = current;
- regs->eax = op;
+ regs->rax = op;
/* Ensure the hypercall trap instruction is re-executed. */
if ( is_pv_vcpu(curr) )
- regs->eip -= 2; /* re-execute 'syscall' / 'int $xx' */
+ regs->rip -= 2; /* re-execute 'syscall' / 'int $xx' */
else
curr->arch.hvm_vcpu.hcall_preempted = 1;
arg = next_arg(p, args);
switch ( i )
{
- case 0: regs->ebx = arg; break;
- case 1: regs->ecx = arg; break;
- case 2: regs->edx = arg; break;
- case 3: regs->esi = arg; break;
- case 4: regs->edi = arg; break;
- case 5: regs->ebp = arg; break;
+ case 0: regs->rbx = arg; break;
+ case 1: regs->rcx = arg; break;
+ case 2: regs->rdx = arg; break;
+ case 3: regs->rsi = arg; break;
+ case 4: regs->rdi = arg; break;
+ case 5: regs->rbp = arg; break;
}
}
}
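/*
 * For context (an inference from the PV ABI, not from this hunk): the
 * loop above is the 32-bit guest path, whose hypercall arguments live
 * in ebx, ecx, edx, esi, edi and ebp; writing the full 64-bit
 * registers is harmless since such guests only see the low halves.
 * 64-bit guests pass arguments in rdi, rsi, rdx, r10, r8 and r9
 * instead (cf. the .native() invocation further down).
 */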
switch ( i )
{
- case 0: reg = &regs->ebx; break;
- case 1: reg = &regs->ecx; break;
- case 2: reg = &regs->edx; break;
- case 3: reg = &regs->esi; break;
- case 4: reg = &regs->edi; break;
- case 5: reg = &regs->ebp; break;
+ case 0: reg = &regs->rbx; break;
+ case 1: reg = &regs->rcx; break;
+ case 2: reg = &regs->rdx; break;
+ case 3: reg = &regs->rsi; break;
+ case 4: reg = &regs->rdi; break;
+ case 5: reg = &regs->rbp; break;
default: BUG(); reg = NULL; break;
}
if ( (mask & 1) )
/*
* Initial register values:
* DS,ES,FS,GS = FLAT_KERNEL_DS
- * CS:EIP = FLAT_KERNEL_CS:start_pc
- * SS:ESP = FLAT_KERNEL_SS:start_stack
- * ESI = start_info
- * [EAX,EBX,ECX,EDX,EDI,EBP are zero]
+ * CS:rIP = FLAT_KERNEL_CS:start_pc
+ * SS:rSP = FLAT_KERNEL_SS:start_stack
+ * rSI = start_info
+ * [rAX,rBX,rCX,rDX,rDI,rBP,R8-R15 are zero]
*/
regs = &v->arch.user_regs;
regs->ds = regs->es = regs->fs = regs->gs =
!is_pv_32bit_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
regs->ss = (!is_pv_32bit_domain(d) ?
FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
regs->cs = (!is_pv_32bit_domain(d) ?
FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
- regs->eip = parms.virt_entry;
- regs->esp = vstack_end;
- regs->esi = vstartinfo_start;
- regs->eflags = X86_EFLAGS_IF;
+ regs->rip = parms.virt_entry;
+ regs->rsp = vstack_end;
+ regs->rsi = vstartinfo_start;
+ regs->_eflags = X86_EFLAGS_IF;
#ifdef CONFIG_SHADOW_PAGING
if ( opt_dom0_shadow )
unsigned long
search_pre_exception_table(struct cpu_user_regs *regs)
{
- unsigned long addr = (unsigned long)regs->eip;
+ unsigned long addr = regs->rip;
unsigned long fixup = search_one_extable(
__start___pre_ex_table, __stop___pre_ex_table-1, addr);
if ( fixup )
ASSERT(guest_kernel_mode(curr, regs));
- eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->eax;
+ eax = is_pv_32bit_vcpu(curr) ? regs->_eax : regs->rax;
BUILD_BUG_ON(ARRAY_SIZE(pv_hypercall_table) >
ARRAY_SIZE(hypercall_args_table));
if ( (eax >= ARRAY_SIZE(pv_hypercall_table)) ||
!pv_hypercall_table[eax].native )
{
- regs->eax = -ENOSYS;
+ regs->rax = -ENOSYS;
return;
}
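/*
 * Reference note (inferred from the surrounding code): a PV guest
 * passes the hypercall number in rax and receives the result there
 * too, which is why both this -ENOSYS path and the .native() call
 * below store through regs->rax.
 */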
__trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args);
}
- regs->eax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
+ regs->rax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);
#ifndef NDEBUG
if ( regs->rip == old_rip )
void __trace_pv_page_fault(unsigned long addr, unsigned error_code)
{
- unsigned long eip = guest_cpu_user_regs()->eip;
+ unsigned long eip = guest_cpu_user_regs()->rip;
if ( is_pv_32bit_vcpu(current) )
{
void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
{
- unsigned long eip = guest_cpu_user_regs()->eip;
+ unsigned long eip = guest_cpu_user_regs()->rip;
/* We have a couple of different modes to worry about:
* - 32-on-32: 32-bit pte, 32-bit virtual addresses
OFFSET(UREGS_saved_upcall_mask, struct cpu_user_regs, saved_upcall_mask);
OFFSET(UREGS_rip, struct cpu_user_regs, rip);
OFFSET(UREGS_cs, struct cpu_user_regs, cs);
- OFFSET(UREGS_eflags, struct cpu_user_regs, eflags);
+ OFFSET(UREGS_eflags, struct cpu_user_regs, rflags);
OFFSET(UREGS_rsp, struct cpu_user_regs, rsp);
OFFSET(UREGS_ss, struct cpu_user_regs, ss);
OFFSET(UREGS_ds, struct cpu_user_regs, ds);
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct mc_state *mcs = &current->mc_state;
unsigned int arg1 = !(mcs->flags & MCSF_in_multicall)
- ? regs->ecx
+ ? regs->_ecx
: mcs->call.args[1];
unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;
(diff == 0); \
})
-#define return_reg(v) ((v)->arch.user_regs.eax)
+#define return_reg(v) ((v)->arch.user_regs.rax)
#endif /* __X86_REGS_H__ */
* (may be omitted)
* c. list of allocated page frames [mfn_list, nr_pages]
* (unless relocated due to XEN_ELFNOTE_INIT_P2M)
- * d. start_info_t structure [register ESI (x86)]
+ * d. start_info_t structure [register rSI (x86)]
* in case of dom0 this page contains the console info, too
* e. unless dom0: xenstore ring page
* f. unless dom0: console ring page