void pmu_apic_interrupt(struct cpu_user_regs *regs)
{
ack_APIC_irq();
- hvm_do_pmu_interrupt(regs);
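+ /* Call the vPMU handler directly; the per-vendor .do_pmu_interrupt hook is removed. */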
+ vpmu_do_interrupt(regs);
}
#define set_segment_register(name, value) \
asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
+static void svm_update_guest_efer(struct vcpu *);
+
static struct hvm_function_table svm_function_table;
/* va of hardware host save area */
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
- hvm_update_guest_cr(v, 0);
- hvm_update_guest_cr(v, 2);
- hvm_update_guest_cr(v, 4);
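+ /* Within SVM code, call the SVM implementation directly rather than through hvm_funcs. */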
+ svm_update_guest_cr(v, 0);
+ svm_update_guest_cr(v, 2);
+ svm_update_guest_cr(v, 4);
/* Load sysenter MSRs into both VMCB save area and VCPU fields. */
vmcb->sysenter_cs = v->arch.hvm_svm.guest_sysenter_cs = c->sysenter_cs;
vmcb->cstar = data->msr_cstar;
vmcb->sfmask = data->msr_syscall_mask;
v->arch.hvm_vcpu.guest_efer = data->msr_efer;
- hvm_update_guest_efer(v);
+ svm_update_guest_efer(v);
hvm_set_guest_tsc(v, data->tsc);
}
return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
}
-static void svm_update_host_cr3(struct vcpu *v)
-{
- /* SVM doesn't have a HOST_CR3 equivalent to update. */
-}
-
-static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
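+/* No longer static; a prototype is added to the SVM header below. */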
+void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
uint64_t value;
return vmcb->eventinj.fields.v;
}
-static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return vpmu_do_interrupt(regs);
-}
-
static void svm_cpu_dead(unsigned int cpu)
{
free_xenheap_page(per_cpu(hsa, cpu));
.get_segment_register = svm_get_segment_register,
.set_segment_register = svm_set_segment_register,
.get_shadow_gs_base = svm_get_shadow_gs_base,
- .update_host_cr3 = svm_update_host_cr3,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
.set_guest_pat = svm_set_guest_pat,
.inject_trap = svm_inject_trap,
.init_hypercall_page = svm_init_hypercall_page,
.event_pending = svm_event_pending,
- .do_pmu_interrupt = svm_do_pmu_interrupt,
.cpuid_intercept = svm_cpuid_intercept,
.wbinvd_intercept = svm_wbinvd_intercept,
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
.fields = { .type = 0xb, .s = 0, .dpl = 0, .p = 1, .avl = 0, \
.l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
- struct segment_register *reg)
+void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
+ struct segment_register *reg)
{
uint32_t attr = 0;
return (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK);
}
-static int vmx_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return vpmu_do_interrupt(regs);
-}
-
static void vmx_set_uc_mode(struct vcpu *v)
{
if ( paging_mode_hap(v->domain) )
.inject_trap = vmx_inject_trap,
.init_hypercall_page = vmx_init_hypercall_page,
.event_pending = vmx_event_pending,
- .do_pmu_interrupt = vmx_do_pmu_interrupt,
.cpu_up = vmx_cpu_up,
.cpu_down = vmx_cpu_down,
.cpuid_intercept = vmx_cpuid_intercept,
{
case 0x80000001:
/* SYSCALL is visible iff running in long mode. */
- hvm_get_segment_register(v, x86_seg_cs, &cs);
+ vmx_get_segment_register(v, x86_seg_cs, &cs);
if ( cs.attr.fields.l )
*edx |= cpufeat_mask(X86_FEATURE_SYSCALL);
else
struct vcpu *v = current;
struct segment_register cs;
- hvm_get_segment_register(v, x86_seg_cs, &cs);
-
if ( vmxop_check )
{
if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
else if ( !vcpu_2_nvmx(v).vmxon_region_pa )
goto invalid_op;
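+ /* Read %cs only here, immediately before the mode checks that use it. */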
+ vmx_get_segment_register(v, x86_seg_cs, &cs);
+
if ( (regs->eflags & X86_EFLAGS_VM) ||
(hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
goto invalid_op;
if ( hvm_long_mode_enabled(v) )
{
- hvm_get_segment_register(v, x86_seg_cs, &seg);
+ vmx_get_segment_register(v, x86_seg_cs, &seg);
mode_64bit = seg.attr.fields.l;
}
if ( info.fields.segment > VMX_SREG_GS )
goto gp_fault;
- hvm_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
+ vmx_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
seg_base = seg.base;
base = info.fields.base_reg_invalid ? 0 :
void (*init_hypercall_page)(struct domain *d, void *hypercall_page);
int (*event_pending)(struct vcpu *v);
- int (*do_pmu_interrupt)(struct cpu_user_regs *regs);
int (*cpu_up_prepare)(unsigned int cpu);
void (*cpu_dead)(unsigned int cpu);
static inline void
hvm_update_host_cr3(struct vcpu *v)
{
- hvm_funcs.update_host_cr3(v);
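+ /* The hook is now optional: SVM has no host CR3 equivalent and no longer registers it. */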
+ if ( hvm_funcs.update_host_cr3 )
+ hvm_funcs.update_host_cr3(v);
}
static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
return hvm_funcs.event_pending(v);
}
-static inline int hvm_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
- return hvm_funcs.do_pmu_interrupt(regs);
-}
-
/* These reserved bits in lower 32 remain 0 after any load of CR0 */
#define HVM_CR0_GUEST_RESERVED_BITS \
(~((unsigned long) \
unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
+void svm_update_guest_cr(struct vcpu *, unsigned int cr);
extern u32 svm_feature_flags;
return rc;
}
+void vmx_get_segment_register(struct vcpu *, enum x86_segment,
+ struct segment_register *);
void vmx_inject_extint(int trap);
void vmx_inject_nmi(void);