        return -EINVAL;
    }

+    if ( !(config->flags & XEN_DOMCTL_CDF_hvm_guest) )
+        /*
+         * It is only meaningful for XEN_DOMCTL_CDF_oos_off to be clear
+         * for HVM guests.
+         */
+        config->flags |= XEN_DOMCTL_CDF_oos_off;
+
    return 0;
}
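The net effect of the hunk above, shown as a minimal standalone sketch. Only the flag logic comes from the patch; the sanitise_config() wrapper name is illustrative. Any guest created without XEN_DOMCTL_CDF_hvm_guest gets XEN_DOMCTL_CDF_oos_off forced on, so later shadow code may treat a clear oos_off bit as implying an HVM guest.

/* Illustrative wrapper; only the flag manipulation is from the patch. */
static int sanitise_config(struct xen_domctl_createdomain *config)
{
    if ( !(config->flags & XEN_DOMCTL_CDF_hvm_guest) )
        /* Non-HVM guests never run with out-of-sync shadows. */
        config->flags |= XEN_DOMCTL_CDF_oos_off;

    return 0;
}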
/* Set up the shadow-specific parts of a domain struct at start of day.
 * Called for every domain from arch_domain_create() */
-int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
+int shadow_domain_init(struct domain *d)
{
    static const struct log_dirty_ops sh_ops = {
        .enable = sh_enable_log_dirty,
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
    d->arch.paging.shadow.oos_active = 0;
-    d->arch.paging.shadow.oos_off = domcr_flags & XEN_DOMCTL_CDF_oos_off;
#endif
    d->arch.paging.shadow.pagetable_dying_op = 0;
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
    /* We need to check that all the vcpus have paging enabled to
     * unsync PTs. */
-    if ( is_hvm_domain(d) && !d->arch.paging.shadow.oos_off )
+    if ( !(d->options & XEN_DOMCTL_CDF_oos_off) )
    {
        int pe = 1;
        struct vcpu *vptr;

+        ASSERT(is_hvm_domain(d));
+
        for_each_vcpu(d, vptr)
        {
            if ( !hvm_paging_enabled(vptr) )
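Taken together, the post-patch check reads the creation flag directly from d->options instead of the stashed oos_off copy deleted above. A hedged sketch of the resulting logic follows; the loop body past the hunk's truncation point (the pe bookkeeping and the final oos_active assignment) is an assumption about the elided context, not something the patch shows.

#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
    if ( !(d->options & XEN_DOMCTL_CDF_oos_off) )
    {
        int pe = 1;
        struct vcpu *vptr;

        /* Sanitisation forces oos_off for non-HVM guests, so a clear
         * bit implies an HVM domain. */
        ASSERT(is_hvm_domain(d));

        for_each_vcpu(d, vptr)
        {
            if ( !hvm_paging_enabled(vptr) )
            {
                /* Assumed continuation of the truncated hunk. */
                pe = 0;
                break;
            }
        }

        d->arch.paging.shadow.oos_active = pe;
    }
#endif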
    ASSERT(is_pv_domain(d));
}

-int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
+int shadow_domain_init(struct domain *d)
{
    static const struct log_dirty_ops sh_none_ops = {
        .enable = _enable_log_dirty,
/* Set up the shadow-specific parts of a domain struct at start of day.
 * Called from paging_domain_init(). */
-int shadow_domain_init(struct domain *d, unsigned int domcr_flags);
+int shadow_domain_init(struct domain *d);
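Callers shed the extra argument in the same way. A sketch of what a call site such as paging_domain_init(), named in the comment above, might look like after this change; everything other than the shadow_domain_init(d) call itself is illustrative.

/* Illustrative caller; body elided except for the updated call. */
int paging_domain_init(struct domain *d)
{
    /* ... other paging setup ... */
    return shadow_domain_init(d);
}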
/* Set up the shadow-specific parts of a vcpu struct. It is called by
 * paging_vcpu_init() in paging.c */