#include <xen/smp.h>
#include <asm/processor.h>
#include <asm/msr.h>
+#include <asm/debugreg.h>
#include <asm/flushtlb.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
write_cr4(cr4);
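+ /*
+ * Reload the incoming vCPU's full debug state (DR0-3, DR6, DR7 and, on
+ * DBEXT-capable hardware, the address mask MSRs) if it has breakpoints
+ * armed.
+ */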
if ( unlikely(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
- {
- write_debugreg(0, v->arch.debugreg[0]);
- write_debugreg(1, v->arch.debugreg[1]);
- write_debugreg(2, v->arch.debugreg[2]);
- write_debugreg(3, v->arch.debugreg[3]);
- write_debugreg(6, v->arch.debugreg[6]);
- write_debugreg(7, v->arch.debugreg[7]);
- }
+ activate_debugregs(v);
if ( (v->domain->arch.tsc_mode == TSC_MODE_PVRDTSCP) &&
boot_cpu_has(X86_FEATURE_RDTSCP) )
{
long ret = 0;
bool_t copyback = 0;
+ unsigned long i;
switch ( domctl->cmd )
{
case XEN_DOMCTL_getmemlist:
{
- int i;
unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
uint64_t mfn;
struct page_info *page;
unsigned long mfn = domctl->u.memory_mapping.first_mfn;
unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
int add = domctl->u.memory_mapping.add_mapping;
- unsigned long i;
ret = -EINVAL;
if ( (mfn + nr_mfns - 1) < mfn || /* wrap? */
{
struct xen_domctl_ext_vcpucontext *evc;
struct vcpu *v;
+ struct xen_domctl_ext_vcpu_msr msr;
evc = &domctl->u.ext_vcpucontext;
evc->vmce.mci_ctl2_bank0 = v->arch.vmce.bank[0].mci_ctl2;
evc->vmce.mci_ctl2_bank1 = v->arch.vmce.bank[1].mci_ctl2;
- ret = 0;
+ i = ret = 0;
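+
+ /*
+ * Export only the non-zero address mask MSRs. 'i' counts every MSR
+ * that needs saving; entries beyond the caller's msr_count are not
+ * copied, and -ENOBUFS is returned below in that case.
+ */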
+ if ( boot_cpu_has(X86_FEATURE_DBEXT) )
+ {
+ unsigned int j;
+
+ if ( v->arch.pv_vcpu.dr_mask[0] )
+ {
+ if ( i < evc->msr_count && !ret )
+ {
+ msr.index = MSR_AMD64_DR0_ADDRESS_MASK;
+ msr.reserved = 0;
+ msr.value = v->arch.pv_vcpu.dr_mask[0];
+ if ( copy_to_guest_offset(evc->msrs, i, &msr, 1) )
+ ret = -EFAULT;
+ }
+ ++i;
+ }
+ for ( j = 0; j < 3; ++j )
+ {
+ if ( !v->arch.pv_vcpu.dr_mask[1 + j] )
+ continue;
+ if ( i < evc->msr_count && !ret )
+ {
+ msr.index = MSR_AMD64_DR1_ADDRESS_MASK + j;
+ msr.reserved = 0;
+ msr.value = v->arch.pv_vcpu.dr_mask[1 + j];
+ if ( copy_to_guest_offset(evc->msrs, i, &msr, 1) )
+ ret = -EFAULT;
+ }
+ ++i;
+ }
+ }
+ if ( i > evc->msr_count && !ret )
+ ret = -ENOBUFS;
+ evc->msr_count = i;
+
vcpu_unpause(v);
copyback = 1;
}
ret = vmce_restore_vcpu(v, &vmce);
}
+ else if ( evc->size > offsetof(typeof(*evc), vmce) )
+ ret = -EINVAL;
else
ret = 0;
+ if ( ret || evc->size <= offsetof(typeof(*evc), msrs) )
+ /* nothing */;
+ else if ( evc->size < offsetof(typeof(*evc), msrs) +
+ sizeof(evc->msrs) )
+ ret = -EINVAL;
+ else
+ {
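+ /*
+ * Consume the caller-supplied MSR list. Only the debug address mask
+ * MSRs are accepted, and only with 32-bit values and a zero reserved
+ * field; anything else terminates the loop with -EINVAL (-EFAULT for
+ * a failed copy).
+ */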
+ for ( i = 0; i < evc->msr_count; ++i )
+ {
+ ret = -EFAULT;
+ if ( copy_from_guest_offset(&msr, evc->msrs, i, 1) )
+ break;
+ ret = -EINVAL;
+ if ( msr.reserved )
+ break;
+ switch ( msr.index )
+ {
+ case MSR_AMD64_DR0_ADDRESS_MASK:
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
+ (msr.value >> 32) )
+ break;
+ v->arch.pv_vcpu.dr_mask[0] = msr.value;
+ continue;
+ case MSR_AMD64_DR1_ADDRESS_MASK ...
+ MSR_AMD64_DR3_ADDRESS_MASK:
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) ||
+ (msr.value >> 32) )
+ break;
+ msr.index -= MSR_AMD64_DR1_ADDRESS_MASK - 1;
+ v->arch.pv_vcpu.dr_mask[msr.index] = msr.value;
+ continue;
+ }
+ break;
+ }
+ if ( i == evc->msr_count )
+ ret = 0;
+ }
+
domain_unpause(d);
}
}
{
xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
cpuid_input_t *cpuid = NULL;
- int i;
for ( i = 0; i < MAX_CPUID_INPUT; i++ )
{
if ( wrmsr_safe(regs->ecx, msr_content) != 0 )
goto fail;
break;
+
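+ /*
+ * Writes to the AMD debug address mask MSRs: only the low 32 bits are
+ * valid, and hardware is updated immediately only while breakpoints are
+ * active (otherwise activate_debugregs() loads the masks later).
+ */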
+ case MSR_AMD64_DR0_ADDRESS_MASK:
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (msr_content >> 32) )
+ goto fail;
+ v->arch.pv_vcpu.dr_mask[0] = msr_content;
+ if ( v->arch.debugreg[7] & DR7_ACTIVE_MASK )
+ wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, msr_content);
+ break;
+ case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) || (msr_content >> 32) )
+ goto fail;
+ v->arch.pv_vcpu.dr_mask
+ [regs->_ecx - MSR_AMD64_DR1_ADDRESS_MASK + 1] = msr_content;
+ if ( v->arch.debugreg[7] & DR7_ACTIVE_MASK )
+ wrmsrl(regs->_ecx, msr_content);
+ break;
+
default:
if ( wrmsr_hypervisor_regs(regs->ecx, msr_content) == 1 )
break;
regs->eax = (uint32_t)msr_content;
regs->edx = (uint32_t)(msr_content >> 32);
break;
+
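+ /*
+ * Reads of the debug address mask MSRs are always served from the
+ * vCPU's saved dr_mask[] values.
+ */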
+ case MSR_AMD64_DR0_ADDRESS_MASK:
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+ goto fail;
+ regs->eax = v->arch.pv_vcpu.dr_mask[0];
+ regs->edx = 0;
+ break;
+ case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
+ if ( !boot_cpu_has(X86_FEATURE_DBEXT) )
+ goto fail;
+ regs->eax = v->arch.pv_vcpu.dr_mask
+ [regs->_ecx - MSR_AMD64_DR1_ADDRESS_MASK + 1];
+ regs->edx = 0;
+ break;
+
default:
if ( rdmsr_hypervisor_regs(regs->ecx, &val) )
{
return rc;
}
-long set_debugreg(struct vcpu *v, int reg, unsigned long value)
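+/*
+ * Load the vCPU's saved debug register state into the CPU, including the
+ * DBEXT address mask MSRs where supported. Must only be called for the
+ * currently running vCPU.
+ */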
+void activate_debugregs(const struct vcpu *curr)
+{
+ ASSERT(curr == current);
+
+ write_debugreg(0, curr->arch.debugreg[0]);
+ write_debugreg(1, curr->arch.debugreg[1]);
+ write_debugreg(2, curr->arch.debugreg[2]);
+ write_debugreg(3, curr->arch.debugreg[3]);
+ write_debugreg(6, curr->arch.debugreg[6]);
+
+ /*
+ * Avoid writing DR7 when its saved value is about to be replaced, as is
+ * the case when we are called from set_debugreg() below (which writes
+ * the new value itself). Eventual future callers will need to take this
+ * into account.
+ */
+ if ( curr->arch.debugreg[7] & DR7_ACTIVE_MASK )
+ write_debugreg(7, curr->arch.debugreg[7]);
+
+ if ( boot_cpu_has(X86_FEATURE_DBEXT) )
+ {
+ wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[0]);
+ wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[1]);
+ wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[2]);
+ wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, curr->arch.pv_vcpu.dr_mask[3]);
+ }
+}
+
+long set_debugreg(struct vcpu *v, unsigned int reg, unsigned long value)
{
int i;
struct vcpu *curr = current;
if ( (v == curr) &&
!(v->arch.debugreg[7] & DR7_ACTIVE_MASK) )
{
- write_debugreg(0, v->arch.debugreg[0]);
- write_debugreg(1, v->arch.debugreg[1]);
- write_debugreg(2, v->arch.debugreg[2]);
- write_debugreg(3, v->arch.debugreg[3]);
- write_debugreg(6, v->arch.debugreg[6]);
+ activate_debugregs(v);
}
}
if ( v == curr )
#define DR_GLOBAL_EXACT_ENABLE (0x00000200ul) /* Global exact enable */
#define DR_GENERAL_DETECT (0x00002000ul) /* General detect enable */
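+
+/*
+ * Hardware debug register accessors. 'reg' must be a literal register
+ * number (0-3, 6 or 7), as it is token-pasted into the %db<reg> operand.
+ */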
+#define write_debugreg(reg, val) do { \
+ unsigned long __val = val; \
+ asm volatile ( "mov %0,%%db" #reg : : "r" (__val) ); \
+} while (0)
+#define read_debugreg(reg) ({ \
+ unsigned long __val; \
+ asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) ); \
+ __val; \
+})
+long set_debugreg(struct vcpu *, unsigned int reg, unsigned long value);
+void activate_debugregs(const struct vcpu *);
+
#endif /* _X86_DEBUGREG_H */
unsigned long shadow_ldt_mapcnt;
spinlock_t shadow_ldt_lock;
+ /* data breakpoint extension MSRs */
+ uint32_t dr_mask[4];
+
/* Deferred VA-based update state. */
bool_t need_update_runstate_area;
struct vcpu_time_info pending_system_time;
unsigned long *frames,
unsigned int entries);
-#define write_debugreg(reg, val) do { \
- unsigned long __val = val; \
- asm volatile ( "mov %0,%%db" #reg : : "r" (__val) ); \
-} while (0)
-#define read_debugreg(reg) ({ \
- unsigned long __val; \
- asm volatile ( "mov %%db" #reg ",%0" : "=r" (__val) ); \
- __val; \
-})
-long set_debugreg(struct vcpu *p, int reg, unsigned long value);
-
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static always_inline void rep_nop(void)
{
#include "grant_table.h"
#include "hvm/save.h"
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000009
+#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000a
/*
* NB. xen_domctl.domain is an IN/OUT parameter for this operation.
DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
+#if defined(__i386__) || defined(__x86_64__)
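+/*
+ * A single MSR entry as exchanged via the 'msrs' handle of
+ * XEN_DOMCTL_{get,set}_ext_vcpucontext. 'reserved' must be zero on input
+ * and is written as zero on output.
+ */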
+struct xen_domctl_ext_vcpu_msr {
+ uint32_t index;
+ uint32_t reserved;
+ uint64_aligned_t value;
+};
+typedef struct xen_domctl_ext_vcpu_msr xen_domctl_ext_vcpu_msr_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpu_msr_t);
+#endif
+
/* XEN_DOMCTL_set_ext_vcpucontext */
/* XEN_DOMCTL_get_ext_vcpucontext */
struct xen_domctl_ext_vcpucontext {
uint16_t sysenter_callback_cs;
uint8_t syscall32_disables_events;
uint8_t sysenter_disables_events;
+ /*
+ * For the "get" version, if msr_count is too small to cover all MSRs the
+ * hypervisor needs to save, the call returns -ENOBUFS and sets msr_count
+ * to the required (minimum) value. Furthermore, for both "get" and "set",
+ * msr_count and msrs are only looked at if the size field above covers
+ * the structure up to and including the msrs field.
+ */
+ uint16_t msr_count;
#if defined(__GNUC__)
union {
uint64_aligned_t mcg_cap;
#else
struct hvm_vmce_vcpu vmce;
#endif
+ XEN_GUEST_HANDLE_64(xen_domctl_ext_vcpu_msr_t) msrs;
#endif
};
typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
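+
+/*
+ * Illustrative "get" flow (a sketch, not part of the interface): set
+ * msr_count to the number of xen_domctl_ext_vcpu_msr_t slots supplied via
+ * 'msrs' and issue XEN_DOMCTL_get_ext_vcpucontext; on -ENOBUFS, msr_count
+ * holds the number of slots actually required, so enlarge the buffer and
+ * retry.
+ */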