bool_t vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
+    if ( !has_vlapic(vlapic_domain(vlapic)) )
+        return 0;
+
    if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
    {
        if ( unlikely(value & MSR_IA32_APICBASE_EXTD) )
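
For context: every hunk below bails out early via has_vlapic(), whose definition is not shown in this excerpt. A minimal sketch, assuming the predicate tests a per-domain emulation flag; the flag name and field below are assumptions for illustration, not taken from this patch:

/*
 * Illustrative sketch only, not part of this patch: has_vlapic() is assumed
 * to test a per-domain emulation flag such as XEN_X86_EMU_LAPIC, so that a
 * domain built without an emulated local APIC skips all the guarded code.
 */
#define has_vlapic(d) (!!((d)->arch.emulation_flags & XEN_X86_EMU_LAPIC))

With such a flag in place, a guest created without local APIC emulation simply short-circuits each of the entry points patched below.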

    struct vcpu *v = vlapic_vcpu(vlapic);
    int i;

+    if ( !has_vlapic(v->domain) )
+        return;
+
    vlapic_set_reg(vlapic, APIC_ID, (v->vcpu_id * 2) << 24);
    vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);

    struct vlapic *s;
    int rc = 0;

+    if ( !has_vlapic(d) )
+        return 0;
+
    for_each_vcpu ( d, v )
    {
        s = vcpu_vlapic(v);

    struct vlapic *s;
    int rc = 0;

+    if ( !has_vlapic(d) )
+        return 0;
+
    for_each_vcpu ( d, v )
    {
        if ( hvm_funcs.sync_pir_to_irr )

    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;
-
+
+    if ( !has_vlapic(d) )
+        return -ENODEV;
+
    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )

    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;
-
+
+    if ( !has_vlapic(d) )
+        return -ENODEV;
+
    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);

-    if ( is_pvh_vcpu(v) )
+    if ( !has_vlapic(v->domain) )
    {
        vlapic->hw.disabled = VLAPIC_HW_DISABLED;
        return 0;

{
    struct vlapic *vlapic = vcpu_vlapic(v);

+    if ( !has_vlapic(v->domain) )
+        return;
+
    tasklet_kill(&vlapic->init_sipi.tasklet);
    TRACE_0D(TRC_HVM_EMUL_LAPIC_STOP_TIMER);
    destroy_periodic_time(&vlapic->pt);

    ASSERT(spin_is_locked(&pcidevs_lock));
    ASSERT(spin_is_locked(&d->event_lock));

+    if ( !has_vlapic(d) )
+        return -ENODEV;
+
    /*
     * xmalloc() with irq_disabled causes the failure of check_lock()
     * for xenpool->lock. So we allocate an entry beforehand.

    ASSERT(spin_is_locked(&pcidevs_lock));
    ASSERT(spin_is_locked(&d->event_lock));

+    if ( !has_vlapic(d) )
+        return;
+
    irq_desc = pirq_spin_lock_irq_desc(pirq, NULL);
    if ( !irq_desc )
        return;

void msixtbl_init(struct domain *d)
{
+    if ( !has_vlapic(d) )
+        return;
+
    INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
    spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);

    struct msixtbl_entry *entry, *temp;
    unsigned long flags;

+    if ( !has_vlapic(d) )
+        return;
+
    /* msixtbl_list_lock must be acquired with irq_disabled for check_lock() */
    local_irq_save(flags);
    spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);

        ~(SECONDARY_EXEC_ENABLE_VM_FUNCTIONS |
          SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS);

-    if ( is_pvh_domain(d) )
+    if ( !has_vlapic(d) )
    {
        /* Disable virtual apics, TPR */
        v->arch.hvm_vmx.secondary_exec_control &=
        /* In turn, disable posted interrupts. */
        __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                  vmx_pin_based_exec_control & ~PIN_BASED_POSTED_INTERRUPT);
+    }

+    if ( is_pvh_domain(d) )
+    {
        /* Unrestricted guest (real mode for EPT) */
        v->arch.hvm_vmx.secondary_exec_control &=
            ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
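
To make the intent of the construct_vmcs hunk above easier to follow, this is the shape the code takes once the patch applies; context lines elided from the hunk are marked with /* ... */ and nothing beyond the hunk is assumed:

    if ( !has_vlapic(d) )
    {
        /* Disable virtual apics, TPR */
        v->arch.hvm_vmx.secondary_exec_control &= /* ... */;

        /* In turn, disable posted interrupts. */
        __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                  vmx_pin_based_exec_control & ~PIN_BASED_POSTED_INTERRUPT);
    }

    if ( is_pvh_domain(d) )
    {
        /* Unrestricted guest (real mode for EPT) */
        v->arch.hvm_vmx.secondary_exec_control &=
            ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
        /* ... */
    }

The split keys the APIC, TPR and posted-interrupt disables to the absence of an emulated local APIC, while the unrestricted-guest disable stays keyed to PVH.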

{
    int rc;

+    if ( !has_vlapic(d) )
+        return 0;
+
    if ( (rc = vmx_alloc_vlapic_mapping(d)) != 0 )
        return rc;

static void vmx_domain_destroy(struct domain *d)
{
+    if ( !has_vlapic(d) )
+        return;
+
    vmx_free_vlapic_mapping(d);
}