if ( is_hvm_vcpu(v) )
{
if ( !is_pv_32on64_domain(v->domain) )
- hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
+ {
+ hvm_store_cpu_guest_regs(v, &c.nat->user_regs);
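+ /* CR values are taken from the guest CR state Xen caches per vcpu. */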
+ memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
+ c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
+ c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
+ c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
+ c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
+ }
#ifdef CONFIG_COMPAT
else
{
struct cpu_user_regs user_regs;
- typeof(c.nat->ctrlreg) ctrlreg;
- unsigned i;
- hvm_store_cpu_guest_regs(v, &user_regs, ctrlreg);
+ hvm_store_cpu_guest_regs(v, &user_regs);
XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
- for ( i = 0; i < ARRAY_SIZE(c.cmp->ctrlreg); ++i )
- c.cmp->ctrlreg[i] = ctrlreg[i];
+ memset(c.cmp->ctrlreg, 0, sizeof(c.cmp->ctrlreg));
+ c.cmp->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
+ c.cmp->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
+ c.cmp->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
+ c.cmp->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
}
#endif
}
goto out;
}
- hvm_store_cpu_guest_regs(v, regs, NULL);
+ hvm_store_cpu_guest_regs(v, regs);
ptss = hvm_map(prev_tr.base, sizeof(tss));
if ( ptss == NULL )
#endif
case 4:
case 2:
- hvm_store_cpu_guest_regs(current, regs, NULL);
+ hvm_store_cpu_guest_regs(current, regs);
if ( unlikely(ring_3(regs)) )
{
default:
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- hvm_store_cpu_guest_regs(v, regs, NULL);
+ hvm_store_cpu_guest_regs(v, regs);
df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
}
static void svm_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
+ struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- if ( regs != NULL )
- {
- regs->ss = vmcb->ss.sel;
- regs->esp = vmcb->rsp;
- regs->eflags = vmcb->rflags;
- regs->cs = vmcb->cs.sel;
- regs->eip = vmcb->rip;
- }
-
- if ( crs != NULL )
- {
- /* Returning the guest's regs */
- crs[0] = v->arch.hvm_vcpu.guest_cr[0];
- crs[2] = v->arch.hvm_vcpu.guest_cr[2];
- crs[3] = v->arch.hvm_vcpu.guest_cr[3];
- crs[4] = v->arch.hvm_vcpu.guest_cr[4];
- }
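+ /* Only RSP/RFLAGS/RIP are copied out; segment selectors are no longer mirrored here (callers use hvm_get_segment_register()). */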
+ regs->esp = vmcb->rsp;
+ regs->eflags = vmcb->rflags;
+ regs->eip = vmcb->rip;
}
static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
{
case x86_seg_cs:
memcpy(&vmcb->cs, reg, sizeof(*reg));
- guest_cpu_user_regs()->cs = reg->sel;
break;
case x86_seg_ds:
memcpy(&vmcb->ds, reg, sizeof(*reg));
break;
case x86_seg_ss:
memcpy(&vmcb->ss, reg, sizeof(*reg));
- guest_cpu_user_regs()->ss = reg->sel;
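+ /* Keep the saved CPL in sync with the DPL of the newly loaded stack segment. */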
+ vmcb->cpl = vmcb->ss.attr.fields.dpl;
break;
case x86_seg_tr:
svm_sync_vmcb(v);
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- vmcb->ss.sel = regs->ss;
vmcb->rsp = regs->esp;
vmcb->rflags = regs->eflags | 2UL;
- vmcb->cs.sel = regs->cs;
vmcb->rip = regs->eip;
}
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- svm_store_cpu_guest_regs(v, regs, NULL);
+ svm_store_cpu_guest_regs(v, regs);
info.bytes = vmcb->exitinfo1;
case VMEXIT_EXCEPTION_MC:
HVMTRACE_0D(MCE, v);
- svm_store_cpu_guest_regs(v, regs, NULL);
+ svm_store_cpu_guest_regs(v, regs);
do_machine_check(regs);
break;
}
static void vmx_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
+ struct vcpu *v, struct cpu_user_regs *regs)
{
vmx_vmcs_enter(v);
- if ( regs != NULL )
- {
- regs->eflags = __vmread(GUEST_RFLAGS);
- regs->ss = __vmread(GUEST_SS_SELECTOR);
- regs->cs = __vmread(GUEST_CS_SELECTOR);
- regs->eip = __vmread(GUEST_RIP);
- regs->esp = __vmread(GUEST_RSP);
- }
-
- if ( crs != NULL )
- {
- crs[0] = v->arch.hvm_vcpu.guest_cr[0];
- crs[2] = v->arch.hvm_vcpu.guest_cr[2];
- crs[3] = v->arch.hvm_vcpu.guest_cr[3];
- crs[4] = v->arch.hvm_vcpu.guest_cr[4];
- }
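+ /* Selector state is not mirrored here any more; callers needing segments use hvm_get_segment_register(). */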
+ regs->eflags = __vmread(GUEST_RFLAGS);
+ regs->eip = __vmread(GUEST_RIP);
+ regs->esp = __vmread(GUEST_RSP);
vmx_vmcs_exit(v);
}
static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
{
- unsigned long base;
-
vmx_vmcs_enter(v);
- __vmwrite(GUEST_SS_SELECTOR, regs->ss);
- __vmwrite(GUEST_RSP, regs->esp);
-
/* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */
__vmwrite(GUEST_RFLAGS, regs->eflags | 2UL);
-
- if ( regs->eflags & EF_VM )
- {
- /*
- * The VMX spec (section 4.3.1.2, Checks on Guest Segment
- * Registers) says that virtual-8086 mode guests' segment
- * base-address fields in the VMCS must be equal to their
- * corresponding segment selector field shifted right by
- * four bits upon vmentry.
- */
- base = __vmread(GUEST_CS_BASE);
- if ( (regs->cs << 4) != base )
- __vmwrite(GUEST_CS_BASE, regs->cs << 4);
- base = __vmread(GUEST_SS_BASE);
- if ( (regs->ss << 4) != base )
- __vmwrite(GUEST_SS_BASE, regs->ss << 4);
- }
-
- __vmwrite(GUEST_CS_SELECTOR, regs->cs);
__vmwrite(GUEST_RIP, regs->eip);
+ __vmwrite(GUEST_RSP, regs->esp);
vmx_vmcs_exit(v);
}
__vmwrite(GUEST_CS_LIMIT, reg->limit);
__vmwrite(GUEST_CS_BASE, reg->base);
__vmwrite(GUEST_CS_AR_BYTES, attr);
- guest_cpu_user_regs()->cs = reg->sel;
break;
case x86_seg_ds:
__vmwrite(GUEST_DS_SELECTOR, reg->sel);
__vmwrite(GUEST_SS_LIMIT, reg->limit);
__vmwrite(GUEST_SS_BASE, reg->base);
__vmwrite(GUEST_SS_AR_BYTES, attr);
- guest_cpu_user_regs()->ss = reg->sel;
break;
case x86_seg_tr:
__vmwrite(GUEST_TR_SELECTOR, reg->sel);
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- vmx_store_cpu_guest_regs(current, regs, NULL);
+ vmx_store_cpu_guest_regs(current, regs);
HVM_DBG_LOG(DBG_LEVEL_IO, "vm86 %d, eip=%x:%lx, "
"exit_qualification = %lx",
case EXIT_REASON_MACHINE_CHECK:
printk("caused by machine check.\n");
HVMTRACE_0D(MCE, current);
- vmx_store_cpu_guest_regs(current, regs, NULL);
+ vmx_store_cpu_guest_regs(current, regs);
do_machine_check(regs);
break;
default:
(X86_EVENTTYPE_NMI << 8) )
goto exit_and_crash;
HVMTRACE_0D(NMI, v);
- vmx_store_cpu_guest_regs(v, regs, NULL);
+ vmx_store_cpu_guest_regs(v, regs);
do_nmi(regs); /* Real NMI, vector 2: normal processing. */
break;
case TRAP_machine_check:
HVMTRACE_0D(MCE, v);
- vmx_store_cpu_guest_regs(v, regs, NULL);
+ vmx_store_cpu_guest_regs(v, regs);
do_machine_check(regs);
break;
default:
goto done;
}
- hvm_store_cpu_guest_regs(v, regs, NULL);
+ hvm_store_cpu_guest_regs(v, regs);
}
SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
(regs->eip == (unsigned long)svm_stgi_label)) {
/* SVM guest was running when NMI occurred */
ASSERT(is_hvm_vcpu(v));
- hvm_store_cpu_guest_regs(v, guest_regs, NULL);
+ hvm_store_cpu_guest_regs(v, guest_regs);
eip = guest_regs->eip;
mode = xenoprofile_get_mode(v, guest_regs);
} else {
struct cpu_user_regs fault_regs = *regs;
unsigned long fault_crs[8];
const char *context;
+ struct vcpu *v = current;
- if ( is_hvm_vcpu(current) && guest_mode(regs) )
+ if ( is_hvm_vcpu(v) && guest_mode(regs) )
{
+ struct segment_register sreg;
context = "hvm";
- hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
+ hvm_store_cpu_guest_regs(v, &fault_regs);
+ fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+ fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+ fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+ fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4];
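+ /* Segment selectors are no longer returned in cpu_user_regs; fetch them explicitly. */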
+ hvm_get_segment_register(v, x86_seg_cs, &sreg);
+ fault_regs.cs = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_ds, &sreg);
+ fault_regs.ds = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_es, &sreg);
+ fault_regs.es = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_fs, &sreg);
+ fault_regs.fs = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_gs, &sreg);
+ fault_regs.gs = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_ss, &sreg);
+ fault_regs.ss = sreg.sel;
}
else
{
else
{
context = "guest";
- fault_crs[2] = current->vcpu_info->arch.cr2;
+ fault_crs[2] = v->vcpu_info->arch.cr2;
}
fault_crs[0] = read_cr0();
struct cpu_user_regs fault_regs = *regs;
unsigned long fault_crs[8];
const char *context;
+ struct vcpu *v = current;
- if ( is_hvm_vcpu(current) && guest_mode(regs) )
+ if ( is_hvm_vcpu(v) && guest_mode(regs) )
{
+ struct segment_register sreg;
context = "hvm";
- hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
+ hvm_store_cpu_guest_regs(v, &fault_regs);
+ fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
+ fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
+ fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
+ fault_crs[4] = v->arch.hvm_vcpu.guest_cr[4];
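+ /* Segment selectors are no longer returned in cpu_user_regs; fetch them explicitly. */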
+ hvm_get_segment_register(v, x86_seg_cs, &sreg);
+ fault_regs.cs = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_ds, &sreg);
+ fault_regs.ds = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_es, &sreg);
+ fault_regs.es = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_fs, &sreg);
+ fault_regs.fs = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_gs, &sreg);
+ fault_regs.gs = sreg.sel;
+ hvm_get_segment_register(v, x86_seg_ss, &sreg);
+ fault_regs.ss = sreg.sel;
}
else
{
if ( guest_mode(regs) )
{
context = "guest";
- fault_crs[2] = arch_get_cr2(current);
+ fault_crs[2] = arch_get_cr2(v);
}
else
{
* 2) modify guest state (e.g., set debug flags).
*/
void (*store_cpu_guest_regs)(
- struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
+ struct vcpu *v, struct cpu_user_regs *r);
void (*load_cpu_guest_regs)(
struct vcpu *v, struct cpu_user_regs *r);
static inline void
hvm_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs)
+ struct vcpu *v, struct cpu_user_regs *r)
{
- hvm_funcs.store_cpu_guest_regs(v, r, crs);
+ hvm_funcs.store_cpu_guest_regs(v, r);
}
static inline void