}
#endif
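+ /* Register the calling domain as this guest's device model. */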
+ xc_set_hvm_param(xc_handle, domid, HVM_PARAM_DM_DOMAIN, DOMID_SELF);
xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
if ( op == HVMOP_set_param )
{
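+ /* Assume success; individual cases override rc on failure. */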
+ rc = 0;
+
switch ( a.index )
{
case HVM_PARAM_IOREQ_PFN:
iorp = &d->arch.hvm_domain.ioreq;
- rc = hvm_set_ioreq_page(d, iorp, a.value);
+ if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 )
+ break;
spin_lock(&iorp->lock);
- if ( (rc == 0) && (iorp->va != NULL) )
+ if ( iorp->va != NULL )
/* Initialise evtchn port info if VCPUs already created. */
for_each_vcpu ( d, v )
get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
spin_unlock(&iorp->lock);
break;
case HVM_PARAM_CALLBACK_IRQ:
hvm_set_callback_via(d, a.value);
hvm_latch_shinfo_size(d);
break;
case HVM_PARAM_TIMER_MODE:
- rc = -EINVAL;
if ( a.value > HVMPTM_one_missed_tick_pending )
- goto param_fail;
+ rc = -EINVAL;
break;
case HVM_PARAM_IDENT_PT:
rc = -EPERM;
- if ( current->domain->domain_id != 0 )
- goto param_fail;
+ if ( !IS_PRIV(current->domain) )
+ break;
rc = -EINVAL;
if ( d->arch.hvm_domain.params[a.index] != 0 )
- goto param_fail;
+ break;
+ rc = 0;
if ( !paging_mode_hap(d) )
break;
domain_pause(d);
spin_lock(&domctl_lock);
d->arch.hvm_domain.params[a.index] = a.value;
for_each_vcpu ( d, v )
paging_update_cr3(v);
spin_unlock(&domctl_lock);
domain_unpause(d);
break;
+ case HVM_PARAM_DM_DOMAIN:
+ /* Privileged domains only, as we must domain_pause(d). */
+ rc = -EPERM;
+ if ( !IS_PRIV_FOR(current->domain, d) )
+ break;
+
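+ /* Resolve DOMID_SELF now so the event channels bind to a real domid. */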
+ if ( a.value == DOMID_SELF )
+ a.value = current->domain->domain_id;
+
+ rc = 0;
+ domain_pause(d); /* safe to change per-vcpu xen_port */
+ iorp = &d->arch.hvm_domain.ioreq;
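+ /* Rebind each VCPU's ioreq event channel to the new device model. */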
+ for_each_vcpu ( d, v )
+ {
+ int old_port, new_port;
+ new_port = alloc_unbound_xen_event_channel(v, a.value);
+ if ( new_port < 0 )
+ {
+ rc = new_port;
+ break;
+ }
+ /* xchg() ensures that only we call free_xen_event_channel(). */
+ old_port = xchg(&v->arch.hvm_vcpu.xen_port, new_port);
+ free_xen_event_channel(v, old_port);
+ spin_lock(&iorp->lock);
+ if ( iorp->va != NULL )
+ get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
+ spin_unlock(&iorp->lock);
+ }
+ domain_unpause(d);
+ break;
}
- d->arch.hvm_domain.params[a.index] = a.value;
- rc = 0;
+
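+ /* Commit the new value only if the handler succeeded. */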
+ if ( rc == 0 )
+ d->arch.hvm_domain.params[a.index] = a.value;
}
else
{
if ( xsm_alloc_security_evtchn(&chn[i]) )
{
for ( j = 0; j < i; j++ )
- {
xsm_free_security_evtchn(&chn[j]);
- }
xfree(chn);
return -ENOMEM;
}
struct domain *d = local_vcpu->domain;
spin_lock(&d->evtchn_lock);
+
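+ /* A dying domain's ports are torn down by evtchn_destroy(). */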
+ if ( unlikely(d->is_dying) )
+ {
+ spin_unlock(&d->evtchn_lock);
+ return;
+ }
+
+ BUG_ON(!port_is_valid(d, port));
chn = evtchn_from_port(d, port);
BUG_ON(!chn->consumer_is_xen);
chn->consumer_is_xen = 0;
+
spin_unlock(&d->evtchn_lock);
(void)__evtchn_close(d, port);
{
xsm_free_security_evtchn(d->evtchn[i]);
xfree(d->evtchn[i]);
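+ /* Clear the bucket pointer so stale lookups cannot hit freed memory. */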
+ d->evtchn[i] = NULL;
}
spin_unlock(&d->evtchn_lock);
}
/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
#define HVM_PARAM_HPET_ENABLED 11
+
+/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
#define HVM_PARAM_IDENT_PT 12
-#define HVM_NR_PARAMS 13
+/* Device Model domain, defaults to 0. */
+#define HVM_PARAM_DM_DOMAIN 13
+
+#define HVM_NR_PARAMS 14
#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */