 void arch_domain_creation_finished(struct domain *d)
 {
+    if ( is_hvm_domain(d) )
+        hvm_domain_creation_finished(d);
 }
 
 #define xen_vcpu_guest_context vcpu_guest_context
     vmx_free_vlapic_mapping(d);
 }
 
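+/*
+ * Defer inserting the APIC access page into the p2m until the domain is
+ * fully constructed; vmx_alloc_vlapic_mapping() no longer does this itself.
+ */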
+static void domain_creation_finished(struct domain *d)
+{
+    if ( has_vlapic(d) && !mfn_eq(d->arch.hvm.vmx.apic_access_mfn, _mfn(0)) &&
+         set_mmio_p2m_entry(d, gaddr_to_gfn(APIC_DEFAULT_PHYS_BASE),
+                            d->arch.hvm.vmx.apic_access_mfn, PAGE_ORDER_4K) )
+        domain_crash(d);
+}
+
 static void vmx_init_ipt(struct vcpu *v)
 {
     unsigned int size = v->domain->vmtrace_size;
     .cpu_dead = vmx_cpu_dead,
     .domain_initialise = vmx_domain_initialise,
     .domain_relinquish_resources = vmx_domain_relinquish_resources,
+    .domain_creation_finished = domain_creation_finished,
     .vcpu_initialise = vmx_vcpu_initialise,
     .vcpu_destroy = vmx_vcpu_destroy,
     .save_cpu_ctxt = vmx_save_vmcs_ctxt,
     clear_domain_page(mfn);
     d->arch.hvm.vmx.apic_access_mfn = mfn;
 
-    return set_mmio_p2m_entry(d, gaddr_to_gfn(APIC_DEFAULT_PHYS_BASE), mfn,
-                              PAGE_ORDER_4K);
+    return 0;
 }
 
 static void vmx_free_vlapic_mapping(struct domain *d)
      * Initialise/destroy HVM domain/vcpu resources
      */
     int (*domain_initialise)(struct domain *d);
+    void (*domain_creation_finished)(struct domain *d);
     void (*domain_relinquish_resources)(struct domain *d);
     void (*domain_destroy)(struct domain *d);
     int (*vcpu_initialise)(struct vcpu *v);
     return hvm_funcs.set_descriptor_access_exiting;
 }
 
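+/*
+ * Call the optional vendor hook once the domain has been fully created
+ * (e.g. VMX uses this to insert the APIC access page into the p2m).
+ */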
+static inline void hvm_domain_creation_finished(struct domain *d)
+{
+    if ( hvm_funcs.domain_creation_finished )
+        alternative_vcall(hvm_funcs.domain_creation_finished, d);
+}
+
 static inline int
 hvm_guest_x86_mode(struct vcpu *v)
 {
     ASSERT_UNREACHABLE();
 }
 
+static inline void hvm_domain_creation_finished(struct domain *d)
+{
+    ASSERT_UNREACHABLE();
+}
+
 /*
  * Shadow code needs further cleanup to eliminate some HVM-only paths. For
  * now provide the stubs here but assert they will never be reached.