From 14fdc5ed3662dd3f044acd412552b55f00b1aeb0 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Wed, 13 Mar 2019 15:21:25 +0000 Subject: [PATCH] xxx --- xen/arch/x86/cpu/common.c | 4 ++++ xen/arch/x86/cpu/mcheck/mce.c | 1 + xen/arch/x86/genapic/x2apic.c | 1 + xen/arch/x86/hvm/hvm.c | 1 + xen/arch/x86/nmi.c | 1 + xen/arch/x86/percpu.c | 2 ++ xen/arch/x86/psr.c | 1 + xen/arch/x86/smpboot.c | 35 +++++++++++++++++++++++++++++++++++ xen/arch/x86/x86_64/mm.c | 9 ++++++++- xen/common/cpu.c | 6 ++++++ xen/common/kexec.c | 1 + xen/common/rcupdate.c | 8 ++++++++ xen/common/schedule.c | 1 + xen/common/tasklet.c | 1 + xen/common/timer.c | 5 +++++ 15 files changed, 76 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c index de6c5c903f..67a677886c 100644 --- a/xen/arch/x86/cpu/common.c +++ b/xen/arch/x86/cpu/common.c @@ -762,19 +762,23 @@ void load_system_tables(void) .bitmap = IOBMP_INVALID_OFFSET, }; + printk(" DDD %s %d\n", __FILE__, __LINE__); _set_tssldt_desc( gdt + TSS_ENTRY, (unsigned long)tss, offsetof(struct tss_struct, __cacheline_filler) - 1, SYS_DESC_tss_avail); + printk(" DDD %s %d\n", __FILE__, __LINE__); _set_tssldt_desc( compat_gdt + TSS_ENTRY, (unsigned long)tss, offsetof(struct tss_struct, __cacheline_filler) - 1, SYS_DESC_tss_busy); + printk(" DDD %s %d\n", __FILE__, __LINE__); lgdt(&gdtr); + printk(" DDD %s %d\n", __FILE__, __LINE__); lidt(&idtr); ltr(TSS_ENTRY << 3); lldt(0); diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c index 30cdb06401..4fb6fedad8 100644 --- a/xen/arch/x86/cpu/mcheck/mce.c +++ b/xen/arch/x86/cpu/mcheck/mce.c @@ -726,6 +726,7 @@ static int cpu_callback( switch ( action ) { case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); rc = cpu_bank_alloc(cpu); break; diff --git a/xen/arch/x86/genapic/x2apic.c b/xen/arch/x86/genapic/x2apic.c index 334dae527e..2f8cb88ff0 100644 --- a/xen/arch/x86/genapic/x2apic.c +++ b/xen/arch/x86/genapic/x2apic.c @@ -195,6 +195,7 @@ 
static int update_clusterinfo( switch (action) { case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); per_cpu(cpu_2_logical_apicid, cpu) = BAD_APICID; if ( !cluster_cpus_spare ) cluster_cpus_spare = xzalloc(cpumask_t); diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 21944e9306..503caf1795 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -121,6 +121,7 @@ static int cpu_callback( switch ( action ) { case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); rc = hvm_funcs.cpu_up_prepare(cpu); break; case CPU_DYING: diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c index d7fce28805..2243f4dd2e 100644 --- a/xen/arch/x86/nmi.c +++ b/xen/arch/x86/nmi.c @@ -424,6 +424,7 @@ static int cpu_nmi_callback( switch ( action ) { case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); init_timer(&per_cpu(nmi_timer, cpu), nmi_timer_fn, NULL, cpu); set_timer(&per_cpu(nmi_timer, cpu), NOW()); break; diff --git a/xen/arch/x86/percpu.c b/xen/arch/x86/percpu.c index 8be4ebddf4..9d2d5e41b1 100644 --- a/xen/arch/x86/percpu.c +++ b/xen/arch/x86/percpu.c @@ -51,6 +51,7 @@ static void _free_percpu_area(struct rcu_head *head) unsigned int cpu = info->cpu; char *p = __per_cpu_start + __per_cpu_offset[cpu]; + printk(" XXX freeing %d per-cpu area\n", cpu); free_xenheap_pages(p, PERCPU_ORDER); __per_cpu_offset[cpu] = INVALID_PERCPU_AREA; } @@ -72,6 +73,7 @@ static int cpu_percpu_callback( switch ( action ) { case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); rc = init_percpu_area(cpu); break; case CPU_UP_CANCELED: diff --git a/xen/arch/x86/psr.c b/xen/arch/x86/psr.c index 5866a261e3..b44122e65b 100644 --- a/xen/arch/x86/psr.c +++ b/xen/arch/x86/psr.c @@ -1639,6 +1639,7 @@ static int cpu_callback( switch ( action ) { case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); rc = psr_cpu_prepare(); break; case CPU_STARTING: diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c index 
4df8a7e645..1421de92c5 100644 --- a/xen/arch/x86/smpboot.c +++ b/xen/arch/x86/smpboot.c @@ -171,6 +171,9 @@ static void synchronize_tsc_slave(unsigned int slave) } } +#undef Dprintk +#define Dprintk printk + static void smp_callin(void) { unsigned int cpu = smp_processor_id(); @@ -320,13 +323,21 @@ void start_secondary(void *unused) */ unsigned int cpu = booting_cpu; + printk(" DDD %s %d\n", __FILE__, __LINE__); /* Critical region without IDT or TSS. Any fault is deadly! */ + printk(" XXX cpuinfo %p\n", get_cpu_info()); + set_processor_id(cpu); + printk(" DDD %s %d\n", __FILE__, __LINE__); set_current(idle_vcpu[cpu]); + printk(" DDD %s %d\n", __FILE__, __LINE__); this_cpu(curr_vcpu) = idle_vcpu[cpu]; + printk(" DDD %s %d\n", __FILE__, __LINE__); rdmsrl(MSR_EFER, this_cpu(efer)); + printk(" DDD %s %d\n", __FILE__, __LINE__); init_shadow_spec_ctrl_state(); + printk(" DDD %s %d\n", __FILE__, __LINE__); /* * Just as during early bootstrap, it is convenient here to disable @@ -345,12 +356,16 @@ void start_secondary(void *unused) * visible in cpu_online_map. Hence such a deadlock is not possible. */ spin_debug_disable(); + printk(" DDD %s %d\n", __FILE__, __LINE__); get_cpu_info()->use_pv_cr3 = false; get_cpu_info()->xen_cr3 = 0; get_cpu_info()->pv_cr3 = 0; + printk(" DDD %s %d\n", __FILE__, __LINE__); + load_system_tables(); + printk(" DDD %s %d\n", __FILE__, __LINE__); /* Full exception support from here on in. 
*/ @@ -692,14 +707,19 @@ static int clone_mapping(const void *ptr, root_pgentry_t *rpt) (linear >= VMAP_VIRT_END && linear < XEN_VIRT_START) || (linear >= XEN_VIRT_END && linear < DIRECTMAP_VIRT_START) ) { + printk(" DDD %s %d\n", __FILE__, __LINE__); rc = -EINVAL; goto out; } + printk(" DDD %s %d\n", __FILE__, __LINE__); + pl3e = map_xen_pagetable( l4e_get_mfn(idle_pg_table[root_table_offset(linear)])); pl3e += l3_table_offset(linear); + printk(" DDD %s %d\n", __FILE__, __LINE__); + flags = l3e_get_flags(*pl3e); ASSERT(flags & _PAGE_PRESENT); if ( flags & _PAGE_PSE ) @@ -737,6 +757,7 @@ static int clone_mapping(const void *ptr, root_pgentry_t *rpt) UNMAP_XEN_PAGETABLE(pl1e); UNMAP_XEN_PAGETABLE(pl2e); UNMAP_XEN_PAGETABLE(pl3e); + printk(" DDD %s %d\n", __FILE__, __LINE__); if ( !(root_get_flags(rpt[root_table_offset(linear)]) & _PAGE_PRESENT) ) { @@ -778,6 +799,7 @@ static int clone_mapping(const void *ptr, root_pgentry_t *rpt) ASSERT(!(l3e_get_flags(*pl3e) & _PAGE_PSE)); pl2e = map_xen_pagetable(l3e_get_mfn(*pl3e)); } + printk(" DDD %s %d\n", __FILE__, __LINE__); pl2e += l2_table_offset(linear); @@ -811,6 +833,7 @@ static int clone_mapping(const void *ptr, root_pgentry_t *rpt) } else l1e_write(pl1e, l1e_from_pfn(pfn, flags)); + printk(" DDD %s %d\n", __FILE__, __LINE__); rc = 0; out: @@ -875,7 +898,11 @@ static int setup_cpu_root_pgt(unsigned int cpu) /* Install direct map page table entries for stack, IDT, and TSS. 
*/ for ( off = rc = 0; !rc && off < STACK_SIZE; off += PAGE_SIZE ) if ( !memguard_is_stack_guard_page(off) ) + { + printk(" PPP cloning cpu %u stack %p\n", + cpu, __va(__pa(stack_base[cpu]))); rc = clone_mapping(__va(__pa(stack_base[cpu])) + off, rpt); + } if ( !rc ) rc = clone_mapping(idt_tables[cpu], rpt); @@ -1091,7 +1118,9 @@ static int cpu_smpboot_alloc(unsigned int cpu) rc = setup_cpu_root_pgt(cpu); if ( rc ) + { goto out; + } rc = -ENOMEM; if ( secondary_socket_cpumask == NULL && @@ -1109,6 +1138,7 @@ static int cpu_smpboot_alloc(unsigned int cpu) if ( rc ) cpu_smpboot_free(cpu, true); + printk(" DDD %s %d\n", __FILE__, __LINE__); return rc; } @@ -1121,6 +1151,7 @@ static int cpu_smpboot_callback( switch ( action ) { case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); rc = cpu_smpboot_alloc(cpu); break; case CPU_UP_CANCELED: @@ -1391,11 +1422,14 @@ int __cpu_up(unsigned int cpu) if ( (apicid = x86_cpu_to_apicid[cpu]) == BAD_APICID ) return -ENODEV; + printk(" DDD %s %d\n", __FILE__, __LINE__); if ( (ret = do_boot_cpu(apicid, cpu)) != 0 ) return ret; + printk(" DDD %s %d\n", __FILE__, __LINE__); time_latch_stamps(); + printk(" DDD %s %d\n", __FILE__, __LINE__); set_cpu_state(CPU_STATE_ONLINE); while ( !cpu_online(cpu) ) @@ -1403,6 +1437,7 @@ int __cpu_up(unsigned int cpu) cpu_relax(); process_pending_softirqs(); } + printk(" DDD %s %d\n", __FILE__, __LINE__); return 0; } diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index 0c46293f80..e0f29bc008 100644 --- a/xen/arch/x86/x86_64/mm.c +++ b/xen/arch/x86/x86_64/mm.c @@ -1613,7 +1613,14 @@ unsigned long __virt_to_maddr(unsigned long va) void *__maddr_to_virt(unsigned long ma) { - /* XXX ??? */ + /* XXX how can this be fixed ??? Given a valid ma, it could have + * two mappings: one from vmap, the other from direct map. + * + * We can try to distinguish them with system state? If the frametable + * has been set up, it should be safe to prefer page->virtual?
+ * + * Maybe I should go through this function's users to see what I can do? + */ if ( pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT) ) return (void *)(DIRECTMAP_VIRT_START + ((ma & ma_va_bottom_mask) | diff --git a/xen/common/cpu.c b/xen/common/cpu.c index 653a56b840..21860db351 100644 --- a/xen/common/cpu.c +++ b/xen/common/cpu.c @@ -144,12 +144,18 @@ int cpu_up(unsigned int cpu) if ( notifier_rc != NOTIFY_DONE ) { err = notifier_to_errno(notifier_rc); + printk(" BBB %d\n", err); goto fail; } + printk(" DDD %s %d\n", __FILE__, __LINE__); err = __cpu_up(cpu); if ( err < 0 ) + { + printk(" CCC %d\n", err); goto fail; + } + printk(" DDD %s %d\n", __FILE__, __LINE__); notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL); BUG_ON(notifier_rc != NOTIFY_DONE); diff --git a/xen/common/kexec.c b/xen/common/kexec.c index c14cbb2b9c..b66477f855 100644 --- a/xen/common/kexec.c +++ b/xen/common/kexec.c @@ -542,6 +542,7 @@ static int cpu_callback( * manner of problems elsewhere very soon, and if it is during runtime, * then failing to allocate crash notes is not a good enough reason to * fail the CPU_UP_PREPARE */ + printk(" UUU %s %d\n", __FILE__, __LINE__); kexec_init_cpu_notes(cpu); break; default: diff --git a/xen/common/rcupdate.c b/xen/common/rcupdate.c index 3517790913..e3901aef13 100644 --- a/xen/common/rcupdate.c +++ b/xen/common/rcupdate.c @@ -533,6 +533,9 @@ static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, static void rcu_offline_cpu(struct rcu_data *this_rdp, struct rcu_ctrlblk *rcp, struct rcu_data *rdp) { + printk(" XXX offline this cpu %d cpu %d %p timer function %p\n", + this_rdp->cpu, + rdp->cpu, &rdp->idle_timer, rdp->idle_timer.function); kill_timer(&rdp->idle_timer); /* If the cpu going offline owns the grace period we can block @@ -564,6 +567,8 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, rdp->cpu = cpu; rdp->blimit = blimit; init_timer(&rdp->idle_timer, 
rcu_idle_timer_handler, rdp, cpu); + printk(" XXX init cpu %d %p timer cpu %d\n", rdp->cpu, &rdp->idle_timer, + rdp->idle_timer.cpu); } static int cpu_callback( @@ -572,6 +577,8 @@ static int cpu_callback( unsigned int cpu = (unsigned long)hcpu; struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + printk(" AAA action %lx\n", action); + switch ( action ) { case CPU_UP_PREPARE: @@ -579,6 +586,7 @@ static int cpu_callback( break; case CPU_UP_CANCELED: case CPU_DEAD: + printk(" YYYY %d\n", cpu); rcu_offline_cpu(&this_cpu(rcu_data), &rcu_ctrlblk, rdp); break; default: diff --git a/xen/common/schedule.c b/xen/common/schedule.c index fd587622f4..6ecd33825b 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -1744,6 +1744,7 @@ static int cpu_schedule_callback( SCHED_OP(sched, init_pdata, sd->sched_priv, cpu); break; case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); rc = cpu_schedule_up(cpu); break; case CPU_DEAD: diff --git a/xen/common/tasklet.c b/xen/common/tasklet.c index d4fea3151c..8627613594 100644 --- a/xen/common/tasklet.c +++ b/xen/common/tasklet.c @@ -224,6 +224,7 @@ static int cpu_callback( switch ( action ) { case CPU_UP_PREPARE: + printk(" UUU %s %d\n", __FILE__, __LINE__); INIT_LIST_HEAD(&per_cpu(tasklet_list, cpu)); INIT_LIST_HEAD(&per_cpu(softirq_tasklet_list, cpu)); break; diff --git a/xen/common/timer.c b/xen/common/timer.c index 376581bd54..b7e3b66693 100644 --- a/xen/common/timer.c +++ b/xen/common/timer.c @@ -275,6 +275,9 @@ static inline void timer_unlock(struct timer *timer) static bool_t active_timer(struct timer *timer) { + if ( timer->status < TIMER_STATUS_inactive || + timer->status > TIMER_STATUS_in_list ) + printk(" XXXX %p %p %d\n", timer, virt_to_page(timer), timer->status); ASSERT(timer->status >= TIMER_STATUS_inactive); ASSERT(timer->status <= TIMER_STATUS_in_list); return (timer->status >= TIMER_STATUS_in_heap); @@ -293,6 +296,8 @@ void init_timer( timer->data = data; write_atomic(&timer->cpu, cpu); 
timer->status = TIMER_STATUS_inactive; + printk(" XXX cpu %d timer %p %p status %d\n", cpu, timer, + virt_to_page(timer), timer->status); if ( !timer_lock_irqsave(timer, flags) ) BUG(); list_add(&timer->inactive, &per_cpu(timers, cpu).inactive); -- 2.39.5