/*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
}
-
-void hvm_vcpu_reset(struct vcpu *v)
-{
- vcpu_pause(v);
-
- vlapic_reset(vcpu_vlapic(v));
-
- hvm_funcs.vcpu_initialise(v);
-
- set_bit(_VPF_down, &v->pause_flags);
- clear_bit(_VPF_blocked, &v->pause_flags);
- v->fpu_initialised = 0;
- v->fpu_dirtied = 0;
- v->is_initialised = 0;
-
- vcpu_unpause(v);
-}
-
static void hvm_vcpu_down(void)
{
struct vcpu *v = current;
hvm_funcs.init_hypercall_page(d, hypercall_page);
}
-int hvm_bringup_ap(int vcpuid, int trampoline_vector)
-{
- struct domain *d = current->domain;
- struct vcpu *v;
- struct vcpu_guest_context *ctxt;
- struct segment_register reg;
-
- ASSERT(is_hvm_domain(d));
-
- if ( (v = d->vcpu[vcpuid]) == NULL )
- return -ENOENT;
-
- v->fpu_initialised = 0;
- v->arch.flags |= TF_kernel_mode;
- v->is_initialised = 1;
-
- ctxt = &v->arch.guest_context;
- memset(ctxt, 0, sizeof(*ctxt));
- ctxt->flags = VGCF_online;
- ctxt->user_regs.eflags = 2;
-
- v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
- hvm_update_guest_cr(v, 0);
-
- v->arch.hvm_vcpu.guest_cr[2] = 0;
- hvm_update_guest_cr(v, 2);
-
- v->arch.hvm_vcpu.guest_cr[3] = 0;
- hvm_update_guest_cr(v, 3);
-
- v->arch.hvm_vcpu.guest_cr[4] = 0;
- hvm_update_guest_cr(v, 4);
-
- v->arch.hvm_vcpu.guest_efer = 0;
- hvm_update_guest_efer(v);
-
- reg.sel = trampoline_vector << 8;
- reg.base = (uint32_t)reg.sel << 4;
- reg.limit = 0xffff;
- reg.attr.bytes = 0x89b;
- hvm_set_segment_register(v, x86_seg_cs, &reg);
-
- reg.sel = reg.base = 0;
- reg.limit = 0xffff;
- reg.attr.bytes = 0x893;
- hvm_set_segment_register(v, x86_seg_ds, &reg);
- hvm_set_segment_register(v, x86_seg_es, &reg);
- hvm_set_segment_register(v, x86_seg_fs, &reg);
- hvm_set_segment_register(v, x86_seg_gs, &reg);
- hvm_set_segment_register(v, x86_seg_ss, &reg);
-
- reg.attr.bytes = 0x82; /* LDT */
- hvm_set_segment_register(v, x86_seg_ldtr, &reg);
-
- reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
- hvm_set_segment_register(v, x86_seg_tr, &reg);
-
- reg.attr.bytes = 0;
- hvm_set_segment_register(v, x86_seg_gdtr, &reg);
- hvm_set_segment_register(v, x86_seg_idtr, &reg);
-
- /* Sync AP's TSC with BSP's. */
- v->arch.hvm_vcpu.cache_tsc_offset =
- v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
- hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
-
- if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
- vcpu_wake(v);
-
- gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);
- return 0;
-}
-
static int hvmop_set_pci_intx_level(
XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
return result;
}
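+/*
+ * Pause a VCPU without synchronously waiting for it to be descheduled.
+ * Returns 1 if the VCPU is paused and its execution state synchronised;
+ * returns 0 (and undoes the pause) if the VCPU is still running, in which
+ * case the caller must retry later.
+ */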
+static int vlapic_vcpu_pause_async(struct vcpu *v)
+{
+ vcpu_pause_nosync(v);
+
+ if ( v->is_running )
+ {
+ vcpu_unpause(v);
+ return 0;
+ }
+
+ sync_vcpu_execstate(v);
+ return 1;
+}
+
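+/*
+ * Tasklet action: completes an INIT-triggered reset once the target VCPU
+ * can actually be paused; reschedules itself while the VCPU is still
+ * running.
+ */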
+static void vlapic_init_action(unsigned long _vcpu)
+{
+ struct vcpu *v = (struct vcpu *)_vcpu;
+ struct domain *d = v->domain;
+
+ /* If the VCPU is not on its way down we have nothing to do. */
+ if ( !test_bit(_VPF_down, &v->pause_flags) )
+ return;
+
+ if ( !vlapic_vcpu_pause_async(v) )
+ {
+ tasklet_schedule(&vcpu_vlapic(v)->init_tasklet);
+ return;
+ }
+
+ domain_lock(d);
+
+ /* Paranoia makes us re-assert VPF_down under the domain lock. */
+ set_bit(_VPF_down, &v->pause_flags);
+ v->is_initialised = 0;
+ clear_bit(_VPF_blocked, &v->pause_flags);
+
+ vlapic_reset(vcpu_vlapic(v));
+
+ domain_unlock(d);
+
+ vcpu_unpause(v);
+}
+
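+/*
+ * INIT: the reset itself happens in tasklet context. X86EMUL_RETRY makes
+ * the sender re-execute the ICR write until the VCPU has actually been
+ * taken down.
+ */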
+static int vlapic_accept_init(struct vcpu *v)
+{
+ /* Nothing to do if the VCPU is already reset. */
+ if ( !v->is_initialised )
+ return X86EMUL_OKAY;
+
+ /* Asynchronously take the VCPU down and schedule reset work. */
+ set_bit(_VPF_down, &v->pause_flags);
+ vcpu_sleep_nosync(v);
+ tasklet_schedule(&vcpu_vlapic(v)->init_tasklet);
+ return X86EMUL_RETRY;
+}
+
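+/*
+ * SIPI: bring the VCPU up in real mode at the trampoline address derived
+ * from the SIPI vector, as the old hvm_bringup_ap() did.
+ */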
+static int vlapic_accept_sipi(struct vcpu *v, int trampoline_vector)
+{
+ struct domain *d = v->domain;
+ struct vcpu_guest_context *ctxt;
+ struct segment_register reg;
+
+ /* If the VCPU is not on its way down we have nothing to do. */
+ if ( !test_bit(_VPF_down, &v->pause_flags) )
+ return X86EMUL_OKAY;
+
+ if ( !vlapic_vcpu_pause_async(v) )
+ return X86EMUL_RETRY;
+
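+ /* Serialise against vlapic_init_action() running on another CPU. */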
+ domain_lock(d);
+
+ if ( v->is_initialised )
+ goto out;
+
+ ctxt = &v->arch.guest_context;
+ memset(ctxt, 0, sizeof(*ctxt));
+ ctxt->flags = VGCF_online;
+ ctxt->user_regs.eflags = 2;
+
+ v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
+ hvm_update_guest_cr(v, 0);
+
+ v->arch.hvm_vcpu.guest_cr[2] = 0;
+ hvm_update_guest_cr(v, 2);
+
+ v->arch.hvm_vcpu.guest_cr[3] = 0;
+ hvm_update_guest_cr(v, 3);
+
+ v->arch.hvm_vcpu.guest_cr[4] = 0;
+ hvm_update_guest_cr(v, 4);
+
+ v->arch.hvm_vcpu.guest_efer = 0;
+ hvm_update_guest_efer(v);
+
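+ /* Real-mode entry point: CS = vector << 8, i.e. physical address vector << 12. */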
+ reg.sel = trampoline_vector << 8;
+ reg.base = (uint32_t)reg.sel << 4;
+ reg.limit = 0xffff;
+ reg.attr.bytes = 0x89b;
+ hvm_set_segment_register(v, x86_seg_cs, &reg);
+
+ reg.sel = reg.base = 0;
+ reg.limit = 0xffff;
+ reg.attr.bytes = 0x893;
+ hvm_set_segment_register(v, x86_seg_ds, &reg);
+ hvm_set_segment_register(v, x86_seg_es, &reg);
+ hvm_set_segment_register(v, x86_seg_fs, &reg);
+ hvm_set_segment_register(v, x86_seg_gs, &reg);
+ hvm_set_segment_register(v, x86_seg_ss, &reg);
+
+ reg.attr.bytes = 0x82; /* LDT */
+ hvm_set_segment_register(v, x86_seg_ldtr, &reg);
+
+ reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
+ hvm_set_segment_register(v, x86_seg_tr, &reg);
+
+ reg.attr.bytes = 0;
+ hvm_set_segment_register(v, x86_seg_gdtr, &reg);
+ hvm_set_segment_register(v, x86_seg_idtr, &reg);
+
+ /* Sync AP's TSC with BSP's. */
+ v->arch.hvm_vcpu.cache_tsc_offset =
+ v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
+ hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+
+ v->arch.flags |= TF_kernel_mode;
+ v->is_initialised = 1;
+ clear_bit(_VPF_down, &v->pause_flags);
+
+ out:
+ domain_unlock(d);
+ vcpu_unpause(v);
+ return X86EMUL_OKAY;
+}
+
/* Add a pending IRQ into lapic. */
static int vlapic_accept_irq(struct vcpu *v, int delivery_mode,
int vector, int level, int trig_mode)
{
- int result = 0;
struct vlapic *vlapic = vcpu_vlapic(v);
+ int rc = X86EMUL_OKAY;
switch ( delivery_mode )
{
}
vcpu_kick(v);
-
- result = 1;
break;
case APIC_DM_REMRD:
/* No work on INIT de-assert for P4-type APIC. */
if ( trig_mode && !(level & APIC_INT_ASSERT) )
break;
- /* FIXME How to check the situation after vcpu reset? */
- if ( v->is_initialised )
- hvm_vcpu_reset(v);
- v->arch.hvm_vcpu.init_sipi_sipi_state =
- HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI;
- result = 1;
+ rc = vlapic_accept_init(v);
break;
case APIC_DM_STARTUP:
- if ( v->arch.hvm_vcpu.init_sipi_sipi_state ==
- HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM )
- break;
-
- v->arch.hvm_vcpu.init_sipi_sipi_state =
- HVM_VCPU_INIT_SIPI_SIPI_STATE_NORM;
-
- if ( v->is_initialised )
- {
- gdprintk(XENLOG_ERR, "SIPI for initialized vcpu %x\n", v->vcpu_id);
- goto exit_and_crash;
- }
-
- if ( hvm_bringup_ap(v->vcpu_id, vector) != 0 )
- result = 0;
+ rc = vlapic_accept_sipi(v, vector);
break;
default:
gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode %x\n",
delivery_mode);
- goto exit_and_crash;
+ domain_crash(v->domain);
}
- return result;
-
- exit_and_crash:
- domain_crash(v->domain);
- return 0;
+ return rc;
}
/* This function is used by both ioapic and lapic. The bitmap is for vcpu_id. */
vioapic_update_EOI(vlapic_domain(vlapic), vector);
}
-static void vlapic_ipi(struct vlapic *vlapic)
+static int vlapic_ipi(
+ struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high)
{
- uint32_t icr_low = vlapic_get_reg(vlapic, APIC_ICR);
- uint32_t icr_high = vlapic_get_reg(vlapic, APIC_ICR2);
-
unsigned int dest = GET_APIC_DEST_FIELD(icr_high);
unsigned int short_hand = icr_low & APIC_SHORT_MASK;
unsigned int trig_mode = icr_low & APIC_INT_LEVELTRIG;
struct vlapic *target;
struct vcpu *v;
uint32_t lpr_map = 0;
+ int rc = X86EMUL_OKAY;
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr_high 0x%x, icr_low 0x%x, "
"short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
if ( delivery_mode == APIC_DM_LOWEST )
__set_bit(v->vcpu_id, &lpr_map);
else
- vlapic_accept_irq(v, delivery_mode,
- vector, level, trig_mode);
+ rc = vlapic_accept_irq(v, delivery_mode,
+ vector, level, trig_mode);
}
+
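+ /* Abort delivery on X86EMUL_RETRY so the whole ICR write is retried. */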
+ if ( rc != X86EMUL_OKAY )
+ break;
}
if ( delivery_mode == APIC_DM_LOWEST )
{
target = apic_round_robin(vlapic_domain(v), vector, lpr_map);
if ( target != NULL )
- vlapic_accept_irq(vlapic_vcpu(target), delivery_mode,
- vector, level, trig_mode);
+ rc = vlapic_accept_irq(vlapic_vcpu(target), delivery_mode,
+ vector, level, trig_mode);
}
+
+ return rc;
}
static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
{
struct vlapic *vlapic = vcpu_vlapic(v);
unsigned int offset = address - vlapic_base_address(vlapic);
+ int rc = X86EMUL_OKAY;
if ( offset != 0xb0 )
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
break;
case APIC_ICR:
- /* No delay here, so we always clear the pending bit*/
- vlapic_set_reg(vlapic, APIC_ICR, val & ~(1 << 12));
- vlapic_ipi(vlapic);
+ val &= ~(1 << 12); /* always clear the pending bit */
+ rc = vlapic_ipi(vlapic, val, vlapic_get_reg(vlapic, APIC_ICR2));
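+ /* Only commit the ICR contents if the IPI was successfully delivered. */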
+ if ( rc == X86EMUL_OKAY )
+ vlapic_set_reg(vlapic, APIC_ICR, val);
break;
case APIC_ICR2:
break;
}
- return X86EMUL_OKAY;
+ return rc;
unaligned_exit_and_crash:
gdprintk(XENLOG_ERR, "Unaligned LAPIC write len=0x%lx at offset=0x%x.\n",
len, offset);
exit_and_crash:
domain_crash(v->domain);
- return X86EMUL_OKAY;
+ return rc;
}
static int vlapic_range(struct vcpu *v, unsigned long addr)
if ( v->vcpu_id == 0 )
vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;
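+ /* INIT resets are deferred to tasklet context: see vlapic_init_action(). */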
+ tasklet_init(&vlapic->init_tasklet, vlapic_init_action, (unsigned long)v);
+
return 0;
}
{
struct vlapic *vlapic = vcpu_vlapic(v);
+ tasklet_kill(&vlapic->init_tasklet);
destroy_periodic_time(&vlapic->pt);
unmap_domain_page_global(vlapic->regs);
free_domheap_page(vlapic->regs_page);