struct arch_domain *ad = &d->arch;
uint32_t capabilities = get_capabilities(d);
+ if ( current->domain == d ) /* no domain_pause() */
+ return -EPERM;
+
rc = xsm_vm_event_control(XSM_PRIV, d, mop->op, mop->event);
if ( rc )
return rc;
return 0;
case XEN_DOMCTL_MONITOR_OP_EMULATE_EACH_REP:
- d->arch.mem_access_emulate_each_rep = !!mop->event;
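+ /* Pause so no vCPU can sample the flag while it changes. */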
+ domain_pause(d);
+ ad->mem_access_emulate_each_rep = !!mop->event;
+ domain_unpause(d);
return 0;
}
if ( rc )
return rc;
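+ /* Pause across the whole update, including the CR3 latch below,
+  * so no vCPU runs with half-applied control-register settings. */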
+ domain_pause(d);
+
if ( mop->u.mov_to_cr.sync )
ad->monitor.write_ctrlreg_sync |= ctrlreg_bitmask;
else
ad->monitor.write_ctrlreg_sync &= ~ctrlreg_bitmask;

if ( mop->u.mov_to_cr.onchangeonly )
ad->monitor.write_ctrlreg_onchangeonly |= ctrlreg_bitmask;
else
ad->monitor.write_ctrlreg_onchangeonly &= ~ctrlreg_bitmask;
- domain_pause(d);
-
if ( !status )
ad->monitor.write_ctrlreg_enabled |= ctrlreg_bitmask;
else
ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
- domain_unpause(d);
-
if ( mop->u.mov_to_cr.index == VM_EVENT_X86_CR3 )
/* Latches new CR3 mask through CR0 code */
for_each_vcpu ( d, v )
hvm_update_guest_cr(v, 0);
+ domain_unpause(d);
+
break;
}
if ( rc )
return rc;
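+ /* Attempt MSR exit interception before pausing: on failure we
+  * bail out with the domain untouched and still running. */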
+ if ( mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE &&
+ mop->u.mov_to_msr.extended_capture &&
+ !hvm_enable_msr_exit_interception(d) )
+ return -EOPNOTSUPP;
+
+ domain_pause(d);
+
if ( mop->op == XEN_DOMCTL_MONITOR_OP_ENABLE &&
mop->u.mov_to_msr.extended_capture )
- {
- if ( hvm_enable_msr_exit_interception(d) )
ad->monitor.mov_to_msr_extended = 1;
- else
- return -EOPNOTSUPP;
- } else
+ else
ad->monitor.mov_to_msr_extended = 0;
- domain_pause(d);
ad->monitor.mov_to_msr_enabled = !status;
domain_unpause(d);
break;
if ( rc )
return rc;
- ad->monitor.guest_request_sync = mop->u.guest_request.sync;
-
domain_pause(d);
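+ /* Update sync and enabled together under pause so running vCPUs
+  * never see the two settings disagree. */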
+ ad->monitor.guest_request_sync = mop->u.guest_request.sync;
ad->monitor.guest_request_enabled = !status;
domain_unpause(d);
break;
}
case XEN_DOMCTL_soft_reset:
- if ( d == current->domain )
+ if ( d == current->domain ) /* no domain_pause() */
{
ret = -EINVAL;
break;
#ifdef CONFIG_HAS_MEM_ACCESS
case XEN_DOMCTL_set_access_required:
- if ( unlikely(current->domain == d) )
+ if ( unlikely(current->domain == d) ) /* no domain_pause() */
ret = -EPERM;
else
+ {
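+ /* Pause so the access_required change is atomic from the
+  * guest's perspective. */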
+ domain_pause(d);
p2m_get_hostp2m(d)->access_required =
op->u.access_required.access_required;
+ domain_unpause(d);
+ }
break;
#endif
}
case XEN_DOMCTL_monitor_op:
- ret = -EPERM;
- if ( current->domain == d )
- break;
-
ret = monitor_domctl(d, &op->u.monitor_op);
if ( !ret )
copyback = 1;
if ( rc )
return rc;
- if ( unlikely(d == current->domain) )
+ if ( unlikely(d == current->domain) ) /* no domain_pause() */
{
gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
return -EINVAL;
if ( p2m->pod.entry_count )
break;
+ /* domain_pause() not required here, see XSA-99 */
rc = vm_event_enable(d, vec, ved, _VPF_mem_paging,
HVM_PARAM_PAGING_RING_PFN,
mem_paging_notification);
case XEN_VM_EVENT_DISABLE:
if ( ved->ring_page )
+ {
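+ /* The ring stays live until vm_event_disable() completes;
+  * pause so no vCPU can be writing events to it mid-teardown. */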
+ domain_pause(d);
rc = vm_event_disable(d, ved);
+ domain_unpause(d);
+ }
break;
case XEN_VM_EVENT_RESUME:
switch ( vec->op )
{
case XEN_VM_EVENT_ENABLE:
+ /* domain_pause() not required here, see XSA-99 */
rc = vm_event_enable(d, vec, ved, _VPF_mem_access,
HVM_PARAM_MONITOR_RING_PFN,
monitor_notification);
case XEN_VM_EVENT_DISABLE:
if ( ved->ring_page )
+ {
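+ /* The ring stays live until vm_event_disable() completes;
+  * pause so no vCPU can be writing events to it mid-teardown. */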
+ domain_pause(d);
rc = vm_event_disable(d, ved);
+ domain_unpause(d);
+ }
break;
case XEN_VM_EVENT_RESUME:
if ( !hap_enabled(d) )
break;
+ /* domain_pause() not required here, see XSA-99 */
rc = vm_event_enable(d, vec, ved, _VPF_mem_sharing,
HVM_PARAM_SHARING_RING_PFN,
mem_sharing_notification);
case XEN_VM_EVENT_DISABLE:
if ( ved->ring_page )
+ {
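+ /* The ring stays live until vm_event_disable() completes;
+  * pause so no vCPU can be writing events to it mid-teardown. */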
+ domain_pause(d);
rc = vm_event_disable(d, ved);
+ domain_unpause(d);
+ }
break;
case XEN_VM_EVENT_RESUME: