     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     bool_t debug_state = v->domain->debugger_attached;
     bool_t vcpu_guestmode = 0;
+    struct vlapic *vlapic = vcpu_vlapic(v);
 
     if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
         vcpu_guestmode = 1;
         hvm_asid_flush_vcpu(v);
     }
 
-    if ( !vcpu_guestmode )
+    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) )
     {
         vintr_t intr;
 
         /* Reflect the vlapic's TPR in the hardware vtpr */
         intr = vmcb_get_vintr(vmcb);
         intr.fields.tpr =
-            (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
+            (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
         vmcb_set_vintr(vmcb, intr);
     }
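The new guard relies on vlapic_hw_disabled(). As a reminder of what that predicate tests, here is a minimal sketch assuming the flag layout of xen/include/asm-x86/hvm/vlapic.h (the header is authoritative; this is recalled for context, not part of the patch):

    #define VLAPIC_HW_DISABLED   0x1  /* APIC-enable bit cleared in the APIC base MSR */
    #define VLAPIC_SW_DISABLED   0x2  /* software-enable bit cleared in the SVR */
    #define vlapic_hw_disabled(vlapic)  ((vlapic)->hw.disabled & VLAPIC_HW_DISABLED)

A hardware-disabled vLAPIC is one the guest has switched off through the APIC base MSR, so its register page no longer holds meaningful values and the hardware vTPR must not be derived from it.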
     int inst_len, rc;
     vintr_t intr;
     bool_t vcpu_guestmode = 0;
+    struct vlapic *vlapic = vcpu_vlapic(v);
 
     hvm_invalidate_regs_fields(regs);
      * NB. We need to preserve the low bits of the TPR to make checked builds
      * of Windows work, even though they don't actually do anything.
      */
-    if ( !vcpu_guestmode ) {
+    if ( !vcpu_guestmode && !vlapic_hw_disabled(vlapic) )
+    {
         intr = vmcb_get_vintr(vmcb);
-        vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
+        vlapic_set_reg(vlapic, APIC_TASKPRI,
                        ((intr.fields.tpr & 0x0F) << 4) |
-                        (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0x0F));
+                        (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0x0F));
     }
 
     exit_reason = vmcb->exitcode;
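To make the NB comment above concrete, here is a purely illustrative round trip between the 8-bit TASKPRI register and the 4-bit vTPR field (the values are invented for the example):

    uint32_t taskpri = 0xAB;                    /* guest-visible TPR           */
    uint8_t  vtpr    = (taskpri & 0xFF) >> 4;   /* 0x0A, loaded into the VMCB  */
    uint32_t merged  = ((vtpr & 0x0F) << 4) |   /* 0xA0, read back on VM exit  */
                       (taskpri & 0x0F);        /* low nibble kept: 0xAB again */

Only the high nibble ever reaches the hardware vTPR, so the low nibble has to be carried over from the previous TASKPRI value; discarding it would let checked builds of Windows observe TPR reads that differ from what was written.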
     }
 
  out:
-    if ( vcpu_guestmode )
-        /* Don't clobber TPR of the nested guest. */
+    if ( vcpu_guestmode || vlapic_hw_disabled(vlapic) )
         return;
 
     /* The exit may have updated the TPR: reflect this in the hardware vtpr */
     intr = vmcb_get_vintr(vmcb);
     intr.fields.tpr =
-        (vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI) & 0xFF) >> 4;
+        (vlapic_get_reg(vlapic, APIC_TASKPRI) & 0xFF) >> 4;
     vmcb_set_vintr(vmcb, intr);
 }
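The entry path and this exit path now apply the same condition, once as !vcpu_guestmode && !vlapic_hw_disabled(vlapic) and once inverted at the out: label. If more sites appear, the test could be factored into a small predicate; a hypothetical sketch, not part of this patch:

    /* Hypothetical helper: should the hardware vTPR be kept in sync for this vCPU? */
    static bool_t svm_sync_vtpr(struct vcpu *v, bool_t vcpu_guestmode)
    {
        return !vcpu_guestmode && !vlapic_hw_disabled(vcpu_vlapic(v));
    }

Keeping the checks open-coded, as the patch does, leaves the nested-guest case and the disabled-vLAPIC case individually visible at each site.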
     uint64_t guest_tsc;
     struct vcpu *v = vlapic_vcpu(vlapic);
 
-    /* may need to exclude some other conditions like vlapic->hw.disabled */
+    if ( vlapic_hw_disabled(vlapic) )
+        return;
+
     if ( !vlapic_lvtt_tdt(vlapic) )
     {
         HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "ignore tsc deadline msr write");
 int vlapic_accept_pic_intr(struct vcpu *v)
 {
+    if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
+        return 0;
+
     TRACE_2D(TRC_HVM_EMUL_LAPIC_PIC_INTR,
              (v == v->domain->arch.hvm_domain.i8259_target),
              v ? __vlapic_accept_pic_intr(v) : -1);
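With the vLAPIC hardware-disabled, the function now reports 0 before any tracing or further evaluation happens. The caller-side pattern this supports looks roughly like the sketch below, where deliver_pic_vector() and pic_output_asserted are hypothetical stand-ins for the real PIC delivery path:

    /* Illustrative only: route the PIC output to a vCPU only if it will accept it. */
    if ( vlapic_accept_pic_intr(v) && pic_output_asserted )
        deliver_pic_vector(v);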
 {
     paddr_t virt_page_ma, apic_page_ma;
 
-    if ( !cpu_has_vmx_virtualize_apic_accesses )
+    if ( v->domain->arch.hvm_domain.vmx.apic_access_mfn == 0 )
         return;
 
+    ASSERT(cpu_has_vmx_virtualize_apic_accesses);
+
     virt_page_ma = page_to_maddr(vcpu_vlapic(v)->regs_page);
     apic_page_ma = v->domain->arch.hvm_domain.vmx.apic_access_mfn;
     apic_page_ma <<= PAGE_SHIFT;
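The early return treats a zero apic_access_mfn as "no APIC access page was ever set up", and the ASSERT records the expectation that a non-zero MFN is only stored on a path that already verified cpu_has_vmx_virtualize_apic_accesses. The trailing shift is the usual frame-number to machine-address conversion; illustratively, with 4K pages:

    /* Illustrative values only. */
    paddr_t mfn = 0x1234;                /* machine frame number            */
    paddr_t ma  = mfn << PAGE_SHIFT;     /* 0x1234000 when PAGE_SHIFT == 12 */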