if ( (is_hvm_domain(d) ||
pv_trap_callback_registered(v, TRAP_machine_check)) &&
- !test_and_set_bool(v->mce_pending) )
+ !test_and_set_bool(v->arch.mce_pending) )
{
mce_printk(MCE_VERBOSE, "MCE: inject vMCE to %pv\n", v);
vcpu_kick(v);
vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0);
break;
case APIC_MODE_NMI:
- sampling->nmi_pending = 1;
+ sampling->arch.nmi_pending = true;
break;
}
#endif
int arch_vcpu_reset(struct vcpu *v)
{
+ v->arch.async_exception_mask = 0;
+ memset(v->arch.async_exception_state, 0,
+ sizeof(v->arch.async_exception_state));
+
if ( is_pv_vcpu(v) )
{
pv_destroy_gdt(v);
switch ( cmd )
{
+ case VCPUOP_send_nmi:
+ if ( !guest_handle_is_null(arg) )
+ return -EINVAL;
+
+ if ( !test_and_set_bool(v->arch.nmi_pending) )
+ vcpu_kick(v);
+ break;
+
case VCPUOP_register_vcpu_time_memory_area:
{
struct vcpu_register_time_memory_area area;
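Note: the guest-visible interface is unchanged by the move out of common code; a guest still reaches this path through the vcpu_op hypercall with a null extra-argument handle, exactly as before. A minimal guest-side sketch, assuming a Linux-style HYPERVISOR_vcpu_op() wrapper (the wrapper name is not part of this patch):

/* Sketch only: ask Xen to inject an NMI into the given virtual CPU.
 * VCPUOP_send_nmi takes no argument structure, so NULL must be passed;
 * the handler above rejects a non-null handle with -EINVAL. */
static int send_vcpu_nmi(unsigned int vcpu_id)
{
    return HYPERVISOR_vcpu_op(VCPUOP_send_nmi, vcpu_id, NULL);
}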
{
case XEN_DOMCTL_SENDTRIGGER_NMI:
ret = 0;
- if ( !test_and_set_bool(v->nmi_pending) )
+ if ( !test_and_set_bool(v->arch.nmi_pending) )
vcpu_kick(v);
break;
*/
vlapic_sync_pir_to_irr(v);
- if ( unlikely(v->nmi_pending) )
+ if ( unlikely(v->arch.nmi_pending) )
return hvm_intack_nmi;
- if ( unlikely(v->mce_pending) )
+ if ( unlikely(v->arch.mce_pending) )
return hvm_intack_mce;
if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector)
switch ( intack.source )
{
case hvm_intsrc_nmi:
- if ( !test_and_clear_bool(v->nmi_pending) )
+ if ( !test_and_clear_bool(v->arch.nmi_pending) )
intack = hvm_intack_none;
break;
case hvm_intsrc_mce:
- if ( !test_and_clear_bool(v->mce_pending) )
+ if ( !test_and_clear_bool(v->arch.mce_pending) )
intack = hvm_intack_none;
break;
case hvm_intsrc_pic:
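Note: every site touched here follows the same single-bit producer/consumer handshake: injection paths set the flag with test_and_set_bool() and only kick the vCPU on a 0 -> 1 transition, while the delivery paths above consume it with test_and_clear_bool(), so a given event is acknowledged at most once. A stand-alone illustration of the idiom using plain C11 atomics (not Xen's test_and_*_bool implementation):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool nmi_pending;

/* Producer: returns true if this caller raised the event (0 -> 1) and
 * therefore should kick the target vCPU. */
static bool raise_nmi(void)
{
    return !atomic_exchange(&nmi_pending, true);
}

/* Consumer: returns true exactly once per raised event. */
static bool ack_nmi(void)
{
    return atomic_exchange(&nmi_pending, false);
}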
for_each_vcpu ( d, v )
if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
0, dest, dest_mode) &&
- !test_and_set_bool(v->nmi_pending) )
+ !test_and_set_bool(v->arch.nmi_pending) )
vcpu_kick(v);
break;
}
break;
case APIC_DM_NMI:
- if ( !test_and_set_bool(v->nmi_pending) )
+ if ( !test_and_set_bool(v->arch.nmi_pending) )
{
bool_t wake = 0;
domain_lock(v->domain);
if ( !hardware_domain || !(v = domain_vcpu(hardware_domain, 0)) )
return;
- pend = v->nmi_pending;
- mask = v->async_exception_mask & (1 << VCPU_TRAP_NMI);
+ pend = v->arch.nmi_pending;
+ mask = v->arch.async_exception_mask & (1 << VCPU_TRAP_NMI);
if ( pend || mask )
printk("%pv: NMI%s%s\n",
v, pend ? " pending" : "", mask ? " masked" : "");
send_guest_vcpu_virq(current, VIRQ_XENOPROF);
if ( ovf == 2 )
- current->nmi_pending = 1;
+ current->arch.nmi_pending = true;
return 1;
}
* now.
*/
if ( curr->vcpu_id == 0 && arch_get_nmi_reason(d) != 0 )
- curr->nmi_pending = 1;
+ curr->arch.nmi_pending = true;
return 0;
}
{
unsigned int trap;
- if ( !curr->async_exception_mask )
+ if ( !curr->arch.async_exception_mask )
return;
- if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
- trap = __scanbit(curr->async_exception_mask, VCPU_TRAP_NONE);
+ if ( !(curr->arch.async_exception_mask &
+ (curr->arch.async_exception_mask - 1)) )
+ trap = __scanbit(curr->arch.async_exception_mask, VCPU_TRAP_NONE);
else
for ( trap = VCPU_TRAP_NONE + 1; trap <= VCPU_TRAP_LAST; ++trap )
- if ( (curr->async_exception_mask ^
- curr->async_exception_state(trap).old_mask) == (1u << trap) )
+ if ( (curr->arch.async_exception_mask ^
+ curr->arch.async_exception_state(trap).old_mask) == (1u << trap) )
break;
if ( unlikely(trap > VCPU_TRAP_LAST) )
{
}
/* Restore previous asynchronous exception mask. */
- curr->async_exception_mask = curr->async_exception_state(trap).old_mask;
+ curr->arch.async_exception_mask =
+ curr->arch.async_exception_state(trap).old_mask;
}
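Note: the cleanup logic itself is untouched; only the field locations change. As a worked example under the definitions later in this patch (VCPU_TRAP_NMI == 1, VCPU_TRAP_MCE == 2): servicing only an NMI leaves async_exception_mask == 0x2, a single bit, so the __scanbit() path yields trap 1 directly; if an MCE was injected while the NMI was still masked, the mask is 0x6 and mce_state.old_mask is 0x2, so mask ^ old_mask == 1u << 2 selects VCPU_TRAP_MCE, and restoring old_mask leaves the NMI bit still set, as it should be. A small self-contained check of those values (illustration only, not Xen code):

#include <assert.h>

int main(void)
{
    unsigned int mask = (1u << 1) | (1u << 2);  /* NMI, then MCE, delivered */
    unsigned int mce_old_mask = 1u << 1;        /* mask saved when MCE was injected */

    assert(mask & (mask - 1));                  /* not a single bit: search path runs */
    assert((mask ^ mce_old_mask) == (1u << 2)); /* identifies VCPU_TRAP_MCE */

    mask = mce_old_mask;                        /* restore: NMI remains masked */
    assert(mask == (1u << 1));
    return 0;
}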
unsigned long do_iret(void)
if ( cmpxchgptr(v_ptr, NULL, v) )
return -EBUSY;
- if ( !test_and_set_bool(v->nmi_pending) )
+ if ( !test_and_set_bool(v->arch.nmi_pending) )
{
/* Not safe to wake up a vcpu here */
raise_softirq(NMI_SOFTIRQ);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.pv.vgc_flags);
OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
- OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
- OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
- OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
- OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
- OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
+ OFFSET(VCPU_nmi_pending, struct vcpu, arch.nmi_pending);
+ OFFSET(VCPU_mce_pending, struct vcpu, arch.mce_pending);
+ OFFSET(VCPU_nmi_old_mask, struct vcpu, arch.nmi_state.old_mask);
+ OFFSET(VCPU_mce_old_mask, struct vcpu, arch.mce_state.old_mask);
+ OFFSET(VCPU_async_exception_mask, struct vcpu, arch.async_exception_mask);
DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
v->fpu_initialised = 0;
v->fpu_dirtied = 0;
v->is_initialised = 0;
-#ifdef VCPU_TRAP_LAST
- v->async_exception_mask = 0;
- memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
-#endif
if ( v->affinity_broken & VCPU_AFFINITY_OVERRIDE )
vcpu_temporary_affinity(v, NR_CPUS, VCPU_AFFINITY_OVERRIDE);
if ( v->affinity_broken & VCPU_AFFINITY_WAIT )
break;
}
-#ifdef VCPU_TRAP_NMI
- case VCPUOP_send_nmi:
- if ( !guest_handle_is_null(arg) )
- return -EINVAL;
-
- if ( !test_and_set_bool(v->nmi_pending) )
- vcpu_kick(v);
-
- break;
-#endif
-
default:
rc = arch_do_vcpu_op(cmd, v, arg);
break;
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
#define is_domain_direct_mapped(d) ((void)(d), 0)
+#define VCPU_TRAP_NONE 0
#define VCPU_TRAP_NMI 1
#define VCPU_TRAP_MCE 2
#define VCPU_TRAP_LAST VCPU_TRAP_MCE
struct vpmu_struct vpmu;
+ struct {
+ bool pending;
+ uint8_t old_mask;
+ } async_exception_state[VCPU_TRAP_LAST];
+#define async_exception_state(t) async_exception_state[(t)-1]
+ uint8_t async_exception_mask;
+
/* Virtual Machine Extensions */
union {
struct pv_vcpu pv;
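Note: the async_exception_state(t) accessor keeps call sites written in terms of the 1-based trap numbers (VCPU_TRAP_NMI, VCPU_TRAP_MCE) while the array stays densely packed, expanding to async_exception_state[t - 1]; VCPU_TRAP_NONE (0) therefore never indexes the array and only serves as the __scanbit() fallback. A hedged sketch of how a delivery site pairs with async_exception_cleanup() shown earlier (the helper name is hypothetical; the fields are the ones added here):

/* Hypothetical helper: record that trap 't' is now being serviced, saving
 * the previous mask so async_exception_cleanup() can restore it on iret. */
static void async_exception_deliver(struct vcpu *v, unsigned int t)
{
    v->arch.async_exception_state(t).old_mask = v->arch.async_exception_mask;
    v->arch.async_exception_mask |= 1u << t;
}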
bool is_urgent;
/* VCPU must context_switch without scheduling unit. */
bool force_context_switch;
-
-#ifdef VCPU_TRAP_LAST
-#define VCPU_TRAP_NONE 0
- struct {
- bool pending;
- uint8_t old_mask;
- } async_exception_state[VCPU_TRAP_LAST];
-#define async_exception_state(t) async_exception_state[(t)-1]
- uint8_t async_exception_mask;
-#endif
-
/* Require shutdown to be deferred for some asynchronous operation? */
bool defer_shutdown;
/* VCPU is paused following shutdown request (d->is_shutting_down)? */