        vlapic_EOI_set(vcpu_vlapic(v));
        break;
-    case HV_X64_MSR_ICR: {
-        u32 eax = (u32)val, edx = (u32)(val >> 32);
-        struct vlapic *vlapic = vcpu_vlapic(v);
-        eax &= ~(1 << 12);
-        edx &= 0xff000000;
-        vlapic_set_reg(vlapic, APIC_ICR2, edx);
-        vlapic_ipi(vlapic, eax, edx);
-        vlapic_set_reg(vlapic, APIC_ICR, eax);
+    case HV_X64_MSR_ICR:
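+        /*
+         * Write ICR2 first: the ICR write below is what triggers the IPI,
+         * using the destination already latched in ICR2.
+         */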
+        vlapic_reg_write(v, APIC_ICR2, val >> 32);
+        vlapic_reg_write(v, APIC_ICR, val);
        break;
-    }
+
    case HV_X64_MSR_TPR:
-        vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI, (uint8_t)val);
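+        /*
+         * No explicit truncation needed: vlapic_reg_write() applies any
+         * necessary masking to the value itself.
+         */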
+        vlapic_reg_write(v, APIC_TASKPRI, val);
        break;
    case HV_X64_MSR_VP_ASSIST_PAGE:
    }
}
-static void vlapic_reg_write(struct vcpu *v,
-                             unsigned int offset, uint32_t val)
+void vlapic_reg_write(struct vcpu *v, unsigned int reg, uint32_t val)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    memset(&vlapic->loaded, 0, sizeof(vlapic->loaded));
-    switch ( offset )
+    switch ( reg )
    {
    case APIC_ID:
        vlapic_set_reg(vlapic, APIC_ID, val);
    case APIC_LVTERR: /* LVT Error Reg */
        if ( vlapic_sw_disabled(vlapic) )
            val |= APIC_LVT_MASKED;
-        val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
-        vlapic_set_reg(vlapic, offset, val);
-        if ( offset == APIC_LVT0 )
+        val &= vlapic_lvt_mask[(reg - APIC_LVTT) >> 4];
+        vlapic_set_reg(vlapic, reg, val);
+        if ( reg == APIC_LVT0 )
        {
            vlapic_adjust_i8259_target(v->domain);
            pt_may_unmask_irq(v->domain, NULL);
        }
-        if ( (offset == APIC_LVTT) && !(val & APIC_LVT_MASKED) )
+        if ( (reg == APIC_LVTT) && !(val & APIC_LVT_MASKED) )
            pt_may_unmask_irq(NULL, &vlapic->pt);
-        if ( offset == APIC_LVTPC )
+        if ( reg == APIC_LVTPC )
            vpmu_lvtpc_update(val);
        break;
    *((uint32_t *)(&vlapic->regs->data[reg])) = val;
}
+void vlapic_reg_write(struct vcpu *v, unsigned int reg, uint32_t val);
+
bool_t is_vlapic_lvtpc_enabled(struct vlapic *vlapic);
bool vlapic_test_irq(const struct vlapic *vlapic, uint8_t vec);