xstate_sizes[X86_XCR0_PKRU_POS]);
}
- if ( p->extd.lwp )
- {
- xstates |= X86_XCR0_LWP;
- xstate_size = max(xstate_size,
- xstate_offsets[X86_XCR0_LWP_POS] +
- xstate_sizes[X86_XCR0_LWP_POS]);
- }
-
p->xstate.max_size = xstate_size;
p->xstate.xcr0_low = xstates & ~XSTATE_XSAVES_ONLY;
p->xstate.xcr0_high = (xstates & ~XSTATE_XSAVES_ONLY) >> 32;
zero_leaves(p->extd.raw, 0xb, 0x18);
p->extd.raw[0x1b] = EMPTY_LEAF; /* IBS - not supported. */
-
- p->extd.raw[0x1c].a = 0; /* LWP.a entirely dynamic. */
+ p->extd.raw[0x1c] = EMPTY_LEAF; /* LWP - not supported. */
break;
}
}
if ( !p->extd.page1gb )
p->extd.raw[0x19] = EMPTY_LEAF;
-
- if ( p->extd.lwp )
- p->extd.raw[0x1c].d &= max->extd.raw[0x1c].d;
- else
- p->extd.raw[0x1c] = EMPTY_LEAF;
}
int init_domain_cpuid_policy(struct domain *d)
res->d |= cpufeat_mask(X86_FEATURE_MTRR);
}
break;
-
- case 0x8000001c:
- if ( (v->arch.xcr0 & X86_XCR0_LWP) && cpu_has_svm )
- /* Turn on available bit and other features specified in lwp_cfg. */
- res->a = (res->d & v->arch.hvm.svm.guest_lwp_cfg) | 1;
- break;
}
}
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
-static void svm_lwp_interrupt(struct cpu_user_regs *regs)
-{
- struct vcpu *curr = current;
-
- ack_APIC_irq();
- vlapic_set_irq(
- vcpu_vlapic(curr),
- (curr->arch.hvm.svm.guest_lwp_cfg >> 40) & 0xff,
- 0);
-}
-
-static inline void svm_lwp_save(struct vcpu *v)
-{
- /* Don't mess up with other guests. Disable LWP for next VCPU. */
- if ( v->arch.hvm.svm.guest_lwp_cfg )
- {
- wrmsrl(MSR_AMD64_LWP_CFG, 0x0);
- wrmsrl(MSR_AMD64_LWP_CBADDR, 0x0);
- }
-}
-
-static inline void svm_lwp_load(struct vcpu *v)
-{
- /* Only LWP_CFG is reloaded. LWP_CBADDR will be reloaded via xrstor. */
- if ( v->arch.hvm.svm.guest_lwp_cfg )
- wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg);
-}
-
-/* Update LWP_CFG MSR (0xc0000105). Return -1 if error; otherwise returns 0. */
-static int svm_update_lwp_cfg(struct vcpu *v, uint64_t msr_content)
-{
- uint32_t msr_low;
- static uint8_t lwp_intr_vector;
-
- if ( xsave_enabled(v) && cpu_has_lwp )
- {
- msr_low = (uint32_t)msr_content;
-
- /* generate #GP if guest tries to turn on unsupported features. */
- if ( msr_low & ~v->domain->arch.cpuid->extd.raw[0x1c].d )
- return -1;
-
- v->arch.hvm.svm.guest_lwp_cfg = msr_content;
-
- /* setup interrupt handler if needed */
- if ( (msr_content & 0x80000000) && ((msr_content >> 40) & 0xff) )
- {
- alloc_direct_apic_vector(&lwp_intr_vector, svm_lwp_interrupt);
- v->arch.hvm.svm.cpu_lwp_cfg = (msr_content & 0xffff00ffffffffffULL)
- | ((uint64_t)lwp_intr_vector << 40);
- }
- else
- {
- /* otherwise disable it */
- v->arch.hvm.svm.cpu_lwp_cfg = msr_content & 0xffff00ff7fffffffULL;
- }
-
- wrmsrl(MSR_AMD64_LWP_CFG, v->arch.hvm.svm.cpu_lwp_cfg);
-
- /* track nonlazy state if LWP_CFG is non-zero. */
- v->arch.nonlazy_xstate_used = !!(msr_content);
- }
-
- return 0;
-}
-
static inline void svm_tsc_ratio_save(struct vcpu *v)
{
/* Other vcpus might not have vtsc enabled. So disable TSC_RATIO here. */
svm_fpu_leave(v);
svm_save_dr(v);
- svm_lwp_save(v);
svm_tsc_ratio_save(v);
svm_sync_vmcb(v, vmcb_needs_vmload);
svm_vmsave_pa(per_cpu(host_vmcb, cpu));
vmcb->cleanbits.bytes = 0;
- svm_lwp_load(v);
svm_tsc_ratio_load(v);
if ( cpu_has_msr_tsc_aux )
*msr_content = vmcb_get_lastinttoip(vmcb);
break;
- case MSR_AMD64_LWP_CFG:
- *msr_content = v->arch.hvm.svm.guest_lwp_cfg;
- break;
-
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
vmcb_set_lastinttoip(vmcb, msr_content);
break;
- case MSR_AMD64_LWP_CFG:
- if ( svm_update_lwp_cfg(v, msr_content) < 0 )
- goto gpf;
- break;
-
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
svm_disable_intercept_for_msr(v, MSR_STAR);
svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
- /* LWP_CBADDR MSR is saved and restored by FPU code. So SVM doesn't need to
- * intercept it. */
- if ( cpu_has_lwp )
- svm_disable_intercept_for_msr(v, MSR_AMD64_LWP_CBADDR);
-
vmcb->_msrpm_base_pa = virt_to_maddr(svm->msrpm);
vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm.io_bitmap);
case MSR_FLUSH_CMD:
/* Write-only */
case MSR_TSX_FORCE_ABORT:
+ case MSR_AMD64_LWP_CFG:
+ case MSR_AMD64_LWP_CBADDR:
/* Not offered to guests. */
goto gp_fault;
case MSR_ARCH_CAPABILITIES:
/* Read-only */
case MSR_TSX_FORCE_ABORT:
+ case MSR_AMD64_LWP_CFG:
+ case MSR_AMD64_LWP_CBADDR:
/* Not offered to guests. */
goto gp_fault;
curr->arch.xcr0 = new_bv;
curr->arch.xcr0_accum |= new_bv;
- /* LWP sets nonlazy_xstate_used independently. */
- if ( new_bv & (XSTATE_NONLAZY & ~X86_XCR0_LWP) )
+ if ( new_bv & XSTATE_NONLAZY )
curr->arch.nonlazy_xstate_used = 1;
mask &= curr->fpu_dirtied ? ~XSTATE_FP_SSE : XSTATE_NONLAZY;
#define cpu_has_svm boot_cpu_has(X86_FEATURE_SVM)
#define cpu_has_sse4a boot_cpu_has(X86_FEATURE_SSE4A)
#define cpu_has_xop boot_cpu_has(X86_FEATURE_XOP)
-#define cpu_has_lwp boot_cpu_has(X86_FEATURE_LWP)
#define cpu_has_fma4 boot_cpu_has(X86_FEATURE_FMA4)
#define cpu_has_tbm boot_cpu_has(X86_FEATURE_TBM)
uint64_t guest_sysenter_cs;
uint64_t guest_sysenter_esp;
uint64_t guest_sysenter_eip;
-
- /* AMD lightweight profiling MSR */
- uint64_t guest_lwp_cfg; /* guest version */
- uint64_t cpu_lwp_cfg; /* CPU version */
};
struct vmcb_struct *alloc_vmcb(void);
XSTATE_NONLAZY)
#define XSTATE_ALL (~(1ULL << 63))
-#define XSTATE_NONLAZY (X86_XCR0_LWP | X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | \
- X86_XCR0_PKRU)
+#define XSTATE_NONLAZY (X86_XCR0_BNDREGS | X86_XCR0_BNDCSR | X86_XCR0_PKRU)
#define XSTATE_LAZY (XSTATE_ALL & ~XSTATE_NONLAZY)
#define XSTATE_XSAVES_ONLY 0
#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
XEN_CPUFEATURE(XOP, 3*32+11) /*A extended AVX instructions */
XEN_CPUFEATURE(SKINIT, 3*32+12) /* SKINIT/STGI instructions */
XEN_CPUFEATURE(WDT, 3*32+13) /* Watchdog timer */
-XEN_CPUFEATURE(LWP, 3*32+15) /*S Light Weight Profiling */
+XEN_CPUFEATURE(LWP, 3*32+15) /* Light Weight Profiling */
XEN_CPUFEATURE(FMA4, 3*32+16) /*A 4 operands MAC instructions */
XEN_CPUFEATURE(NODEID_MSR, 3*32+19) /* NodeId MSR */
XEN_CPUFEATURE(TBM, 3*32+21) /*A trailing bit manipulations */