.bitmap = IOBMP_INVALID_OFFSET,
};
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
_set_tssldt_desc(
gdt + TSS_ENTRY,
(unsigned long)tss,
offsetof(struct tss_struct, __cacheline_filler) - 1,
SYS_DESC_tss_avail);
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
_set_tssldt_desc(
compat_gdt + TSS_ENTRY,
(unsigned long)tss,
offsetof(struct tss_struct, __cacheline_filler) - 1,
SYS_DESC_tss_busy);
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
lgdt(&gdtr);
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
lidt(&idtr);
ltr(TSS_ENTRY << 3);
lldt(0);
switch ( action )
{
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
rc = cpu_bank_alloc(cpu);
break;
switch (action) {
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
per_cpu(cpu_2_logical_apicid, cpu) = BAD_APICID;
if ( !cluster_cpus_spare )
cluster_cpus_spare = xzalloc(cpumask_t);
switch ( action )
{
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
rc = hvm_funcs.cpu_up_prepare(cpu);
break;
case CPU_DYING:
switch ( action )
{
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
init_timer(&per_cpu(nmi_timer, cpu), nmi_timer_fn, NULL, cpu);
set_timer(&per_cpu(nmi_timer, cpu), NOW());
break;
unsigned int cpu = info->cpu;
char *p = __per_cpu_start + __per_cpu_offset[cpu];
+ printk(" XXX freeing %d per-cpu area\n", cpu);
free_xenheap_pages(p, PERCPU_ORDER);
__per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
}
switch ( action )
{
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
rc = init_percpu_area(cpu);
break;
case CPU_UP_CANCELED:
switch ( action )
{
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
rc = psr_cpu_prepare();
break;
case CPU_STARTING:
}
}
+/* Make Dprintk() unconditionally print for this debugging run. */
+#undef Dprintk
+#define Dprintk printk
+
static void smp_callin(void)
{
unsigned int cpu = smp_processor_id();
*/
unsigned int cpu = booting_cpu;
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
/* Critical region without IDT or TSS. Any fault is deadly! */
+ printk(" XXX cpuinfo %p\n", get_cpu_info());
+
set_processor_id(cpu);
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
set_current(idle_vcpu[cpu]);
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
this_cpu(curr_vcpu) = idle_vcpu[cpu];
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
rdmsrl(MSR_EFER, this_cpu(efer));
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
init_shadow_spec_ctrl_state();
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
/*
* Just as during early bootstrap, it is convenient here to disable
* visible in cpu_online_map. Hence such a deadlock is not possible.
*/
spin_debug_disable();
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
get_cpu_info()->use_pv_cr3 = false;
get_cpu_info()->xen_cr3 = 0;
get_cpu_info()->pv_cr3 = 0;
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
+
load_system_tables();
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
/* Full exception support from here on in. */
(linear >= VMAP_VIRT_END && linear < XEN_VIRT_START) ||
(linear >= XEN_VIRT_END && linear < DIRECTMAP_VIRT_START) )
{
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
rc = -EINVAL;
goto out;
}
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
+
pl3e = map_xen_pagetable(
l4e_get_mfn(idle_pg_table[root_table_offset(linear)]));
pl3e += l3_table_offset(linear);
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
+
flags = l3e_get_flags(*pl3e);
ASSERT(flags & _PAGE_PRESENT);
if ( flags & _PAGE_PSE )
UNMAP_XEN_PAGETABLE(pl1e);
UNMAP_XEN_PAGETABLE(pl2e);
UNMAP_XEN_PAGETABLE(pl3e);
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
if ( !(root_get_flags(rpt[root_table_offset(linear)]) & _PAGE_PRESENT) )
{
ASSERT(!(l3e_get_flags(*pl3e) & _PAGE_PSE));
pl2e = map_xen_pagetable(l3e_get_mfn(*pl3e));
}
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
pl2e += l2_table_offset(linear);
}
else
l1e_write(pl1e, l1e_from_pfn(pfn, flags));
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
rc = 0;
out:
/* Install direct map page table entries for stack, IDT, and TSS. */
for ( off = rc = 0; !rc && off < STACK_SIZE; off += PAGE_SIZE )
if ( !memguard_is_stack_guard_page(off) )
+ {
+ printk(" PPP cloning cpu %u stack %p\n",
+ cpu, __va(__pa(stack_base[cpu])));
rc = clone_mapping(__va(__pa(stack_base[cpu])) + off, rpt);
+ }
if ( !rc )
rc = clone_mapping(idt_tables[cpu], rpt);
rc = setup_cpu_root_pgt(cpu);
if ( rc )
+ {
goto out;
+ }
rc = -ENOMEM;
if ( secondary_socket_cpumask == NULL &&
if ( rc )
cpu_smpboot_free(cpu, true);
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
return rc;
}
switch ( action )
{
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
rc = cpu_smpboot_alloc(cpu);
break;
case CPU_UP_CANCELED:
if ( (apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID )
return -ENODEV;
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
if ( (ret = do_boot_cpu(apicid, cpu)) != 0 )
return ret;
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
time_latch_stamps();
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
set_cpu_state(CPU_STATE_ONLINE);
while ( !cpu_online(cpu) )
cpu_relax();
process_pending_softirqs();
}
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
return 0;
}
void *__maddr_to_virt(unsigned long ma)
{
- /* XXX ??? */
+ /* XXX how can this be fixed ??? Given a valid ma, it could have
+ * two mappings: one from vmap, the other from direct map.
+ *
+ * We can try to distinguish them with system state? If the frametable
+ * has been set up, it should be safe to prefer page->virtual?
+ *
+ * Maybe I should go through this function's users to see what I can do?
+ */
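+ /* A hedged sketch of the "system state" idea above (illustrative only,
+ * an assumption rather than something this patch implements): while
+ * system_state is still below SYS_STATE_boot, only the direct map can
+ * back a valid ma, so something like
+ *
+ *     if ( system_state < SYS_STATE_boot )
+ *         return the direct-map translation below;
+ *
+ * would be safe, whereas once vmap is up a second alias may exist and
+ * resolving it would need its own reverse lookup.
+ */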
if ( pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT) )
return (void *)(DIRECTMAP_VIRT_START +
((ma & ma_va_bottom_mask) |
if ( notifier_rc != NOTIFY_DONE )
{
err = notifier_to_errno(notifier_rc);
+ printk(" BBB %d\n", err);
goto fail;
}
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
err = __cpu_up(cpu);
if ( err < 0 )
+ {
+ printk(" CCC %d\n", err);
goto fail;
+ }
+ printk(" DDD %s %d\n", __FILE__, __LINE__);
notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
BUG_ON(notifier_rc != NOTIFY_DONE);
* manner of problems elsewhere very soon, and if it is during runtime,
* then failing to allocate crash notes is not a good enough reason to
* fail the CPU_UP_PREPARE */
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
kexec_init_cpu_notes(cpu);
break;
default:
static void rcu_offline_cpu(struct rcu_data *this_rdp,
struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
+ printk(" XXX offline this cpu %d cpu %d %p timer function %p\n",
+ this_rdp->cpu,
+ rdp->cpu, &rdp->idle_timer, rdp->idle_timer.function);
kill_timer(&rdp->idle_timer);
/* If the cpu going offline owns the grace period we can block
rdp->cpu = cpu;
rdp->blimit = blimit;
init_timer(&rdp->idle_timer, rcu_idle_timer_handler, rdp, cpu);
+ printk(" XXX init cpu %d %p timer cpu %d\n", rdp->cpu, &rdp->idle_timer,
+ rdp->idle_timer.cpu);
}
static int cpu_callback(
unsigned int cpu = (unsigned long)hcpu;
struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ printk(" AAA action %lx\n", action);
+
switch ( action )
{
case CPU_UP_PREPARE:
break;
case CPU_UP_CANCELED:
case CPU_DEAD:
+ printk(" YYYY %d\n", cpu);
rcu_offline_cpu(&this_cpu(rcu_data), &rcu_ctrlblk, rdp);
break;
default:
SCHED_OP(sched, init_pdata, sd->sched_priv, cpu);
break;
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
rc = cpu_schedule_up(cpu);
break;
case CPU_DEAD:
switch ( action )
{
case CPU_UP_PREPARE:
+ printk(" UUU %s %d\n", __FILE__, __LINE__);
INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu));
INIT_LIST_HEAD(&per_cpu(softirq_tasklet_list, cpu));
break;
static bool_t active_timer(struct timer *timer)
{
+ /* Report bogus timer state before the assertions below trip. */
+ if ( timer->status < TIMER_STATUS_inactive ||
+      timer->status > TIMER_STATUS_in_list )
+     printk(" XXXX timer %p page %p status %d\n",
+            timer, virt_to_page(timer), timer->status);
ASSERT(timer->status >= TIMER_STATUS_inactive);
ASSERT(timer->status <= TIMER_STATUS_in_list);
return (timer->status >= TIMER_STATUS_in_heap);
timer->data = data;
write_atomic(&timer->cpu, cpu);
timer->status = TIMER_STATUS_inactive;
+ printk(" XXX cpu %d timer %p %p status %d\n", cpu, timer,
+ virt_to_page(timer), timer->status);
if ( !timer_lock_irqsave(timer, flags) )
BUG();
list_add(&timer->inactive, &per_cpu(timers, cpu).inactive);