[IA64] fix early access to per cpu area.
author    Isaku Yamahata <yamahata@valinux.co.jp>
          Tue, 21 Apr 2009 09:27:59 +0000 (18:27 +0900)
committer Isaku Yamahata <yamahata@valinux.co.jp>
          Tue, 21 Apr 2009 09:27:59 +0000 (18:27 +0900)
The following changeset broke booting xen-ia64 on some kinds of ia64 boxes.
http://xenbits.xensource.com/ext/ia64/xen-unstable.hg/rev/3fd8f9b34941

tasklet_schedule() calls raise_softirq(). Because raise_softirq() uses
per_cpu, accessing the per-cpu area before cpu_init() has run leads to
unexpected behavior.
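
To illustrate, a minimal C sketch (not this tree's exact macros) of how an
ia64-style per_cpu() access resolves: the variable's link-time address is
shifted by a per-CPU offset that per_cpu_init()/cpu_init() must have
recorded first.

  extern unsigned long __per_cpu_offset[];   /* set up by per_cpu_init() */

  #define per_cpu_sketch(var, cpu) \
          (*(__typeof__(var) *)((char *)&(var) + __per_cpu_offset[cpu]))

  static unsigned long pending_template;     /* stand-in for a per-cpu var */

  /* roughly what raising a softirq does with the per-cpu pending mask */
  static void raise_softirq_sketch(unsigned int nr)
  {
          /* Before __per_cpu_offset[0] is initialized the computed
           * address is garbage, so this store goes astray. */
          per_cpu_sketch(pending_template, 0) |= 1UL << nr;
  }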

There was a similar issue on Linux/ia64, which the following
changesets resolved:
10617bbe84628eb18ab5f723d3ba35005adde143
c459ce8b5a7d933a3bcf6915ab17ac1e036e2ac4

This patch fixes the issue following the Linux/ia64 solution:
allocate the per-cpu area for cpu0 in the .data section and
initialize it early.
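
The shape of the change, condensed into C (the identifiers are the ones used
by the patch below; the helper name is only for this sketch, and head.S does
the equivalent copy in assembly even earlier):

  #include <string.h>

  extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
  extern char __cpu0_per_cpu[];          /* PERCPU_PAGE_SIZE block in .data */
  extern unsigned long __per_cpu_offset[];

  static void cpu0_percpu_early_init(void)
  {
          /* Copy the per-cpu template into cpu0's .data-resident area,
           * then record the offset so per_cpu() works for cpu0 before
           * any allocator or cpu_init() has run. */
          memcpy(__cpu0_per_cpu, __phys_per_cpu_start,
                 __per_cpu_end - __per_cpu_start);
          __per_cpu_offset[0] =
                  (unsigned long)(__cpu0_per_cpu - __per_cpu_start);
  }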

Reported-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
--HG--
rename : xen/include/asm-ia64/linux/asm/sections.h => xen/include/asm-ia64/linux-xen/asm/sections.h

xen/arch/ia64/linux-xen/head.S
xen/arch/ia64/linux-xen/mm_contig.c
xen/arch/ia64/linux-xen/smpboot.c
xen/arch/ia64/xen/xen.lds.S
xen/include/asm-ia64/linux-xen/asm/README.origin
xen/include/asm-ia64/linux-xen/asm/sections.h [new file with mode: 0644]
xen/include/asm-ia64/linux/asm/README.origin
xen/include/asm-ia64/linux/asm/sections.h [deleted file]

diff --git a/xen/arch/ia64/linux-xen/head.S b/xen/arch/ia64/linux-xen/head.S
index 2ef757b46d8414310292e4b92ae809cc9dbae236..c1fa5ed21d5ec7249eb08ba632a2b6891cbe10f5 100644
--- a/xen/arch/ia64/linux-xen/head.S
+++ b/xen/arch/ia64/linux-xen/head.S
@@ -382,6 +382,35 @@ start_ap:
        mov ar.rsc=0            // place RSE in enforced lazy mode
        ;;
        loadrs                  // clear the dirty partition
+#ifdef XEN
+(isAP) br.few 2f
+       movl r19=__phys_per_cpu_start
+       mov r18=PERCPU_PAGE_SIZE
+#ifndef CONFIG_SMP
+       add r19=r19,r18
+       ;;
+#else
+       movl r20=__cpu0_per_cpu
+       ;;
+       shr.u r18=r18,3
+1:
+       ld8 r21=[r19],8 ;;
+       st8[r20]=r21,8
+       adds r18=-1,r18
+       ;;
+       cmp4.lt p7,p6=0,r18
+(p7)   br.cond.dptk.few 1b
+       ;;
+#endif
+       movl r18=__per_cpu_offset
+       movl r19=__cpu0_per_cpu
+       movl r20=__per_cpu_start
+       ;;
+       sub r20=r19,r20
+       ;;
+       st8 [r18]=r20
+2:
+#endif
        ;;
        mov ar.bspstore=r2      // establish the new RSE stack
        ;;
diff --git a/xen/arch/ia64/linux-xen/mm_contig.c b/xen/arch/ia64/linux-xen/mm_contig.c
index 12462dc711514b43930a198ff828fcdf0b2e2334..f2326eb3947f4ea3cd566d561b21952638be488a 100644
--- a/xen/arch/ia64/linux-xen/mm_contig.c
+++ b/xen/arch/ia64/linux-xen/mm_contig.c
@@ -183,7 +183,7 @@ void *percpu_area __initdata = NULL;
 void* __init
 per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa)
 {
-       int order = get_order(NR_CPUS * PERCPU_PAGE_SIZE);
+       int order = get_order((NR_CPUS - 1) * PERCPU_PAGE_SIZE);
        unsigned long size = 1UL << (order + PAGE_SHIFT);
        unsigned long start = ALIGN_UP((unsigned long)xen_heap_start,
                                       PERCPU_PAGE_SIZE);
@@ -226,19 +226,31 @@ per_cpu_init (void)
         */
        if (smp_processor_id() == 0) {
 #ifdef XEN
+               void *cpu0_data = __cpu0_per_cpu;
+
+               __per_cpu_offset[0] = (char *)cpu0_data - __per_cpu_start;
+               per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+
                cpu_data = get_per_cpu_area();
                if (cpu_data == NULL) 
                        panic("can't allocate per cpu area.\n");
+
+               for (cpu = 1; cpu < NR_CPUS; cpu++) {
+                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
+                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
+                       cpu_data += PERCPU_PAGE_SIZE;
+                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+               }
 #else
                cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
                                           PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-#endif
                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
                        __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
                        cpu_data += PERCPU_PAGE_SIZE;
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
                }
+#endif
        }
        return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
diff --git a/xen/arch/ia64/linux-xen/smpboot.c b/xen/arch/ia64/linux-xen/smpboot.c
index ef7f9ea2ff54fc0fe6bb64d5e535fae5d97adf34..a450dec730b47caf5a88539d85d4044d6b5c1863 100644
--- a/xen/arch/ia64/linux-xen/smpboot.c
+++ b/xen/arch/ia64/linux-xen/smpboot.c
@@ -449,8 +449,8 @@ start_secondary (void *unused)
 {
        /* Early console may use I/O ports */
        ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
-       Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
 #ifndef XEN
+       Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
        efi_map_pal_code();
 #endif
        cpu_init();
diff --git a/xen/arch/ia64/xen/xen.lds.S b/xen/arch/ia64/xen/xen.lds.S
index 96cd1ce14f2677b7470726e07d7fd7c67d381792..4daf6adfb97b71d47db11794687356da3738a0c8 100644
--- a/xen/arch/ia64/xen/xen.lds.S
+++ b/xen/arch/ia64/xen/xen.lds.S
@@ -195,7 +195,17 @@ SECTIONS
 
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
-       { *(.data) *(.data1) *(.gnu.linkonce.d*) CONSTRUCTORS }
+       {
+#ifdef CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+               __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;    /* cpu0 per-cpu space */
+#endif
+               *(.data)
+               *(.data1)
+               *(.gnu.linkonce.d*)
+               CONSTRUCTORS
+       }
 
   . = ALIGN(16);       /* gp must be 16-byte aligned for exc. table */
   .got : AT(ADDR(.got) - LOAD_OFFSET)
diff --git a/xen/include/asm-ia64/linux-xen/asm/README.origin b/xen/include/asm-ia64/linux-xen/asm/README.origin
index 4e0986fdd5bab1f17b1274ceea8486e17b8060f9..e3cc246bf9d52ed35aba81981a0a18d548fe9cd9 100644
--- a/xen/include/asm-ia64/linux-xen/asm/README.origin
+++ b/xen/include/asm-ia64/linux-xen/asm/README.origin
@@ -22,6 +22,7 @@ pgtable.h             -> linux/include/asm-ia64/pgtable.h
 processor.h            -> linux/include/asm-ia64/processor.h
 ptrace.h               -> linux/include/asm-ia64/ptrace.h
 sal.h                  -> linux/include/asm-ia64/sal.h
+sections.h             -> linux/include/asm-ia64/sections.h
 smp.h                  -> linux/include/asm-ia64/smp.h
 spinlock.h             -> linux/include/asm-ia64/spinlock.h
 system.h               -> linux/include/asm-ia64/system.h
diff --git a/xen/include/asm-ia64/linux-xen/asm/sections.h b/xen/include/asm-ia64/linux-xen/asm/sections.h
new file mode 100644
index 0000000..a6334c6
--- /dev/null
+++ b/xen/include/asm-ia64/linux-xen/asm/sections.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_IA64_SECTIONS_H
+#define _ASM_IA64_SECTIONS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <asm-generic/sections.h>
+
+extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
+#ifdef XEN
+#ifdef CONFIG_SMP
+extern char __cpu0_per_cpu[];
+#endif
+#endif
+extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
+extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
+extern char __start_gate_section[];
+extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
+extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
+extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
+extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
+extern char __start_unwind[], __end_unwind[];
+extern char __start_ivt_text[], __end_ivt_text[];
+
+#endif /* _ASM_IA64_SECTIONS_H */
+
diff --git a/xen/include/asm-ia64/linux/asm/README.origin b/xen/include/asm-ia64/linux/asm/README.origin
index 778d9a823d6e1fe3837a8a59e66c9d90c54a2a21..25e1204f3ed56a88efc10a2af4b6bb5c4fd46809 100644
--- a/xen/include/asm-ia64/linux/asm/README.origin
+++ b/xen/include/asm-ia64/linux/asm/README.origin
@@ -29,7 +29,6 @@ param.h                       -> linux/include/asm-ia64/param.h
 patch.h                        -> linux/include/asm-ia64/patch.h
 pci.h                  -> linux/include/asm-ia64/pci.h
 rse.h                  -> linux/include/asm-ia64/rse.h
-sections.h             -> linux/include/asm-ia64/sections.h
 setup.h                        -> linux/include/asm-ia64/setup.h
 string.h               -> linux/include/asm-ia64/string.h
 thread_info.h          -> linux/include/asm-ia64/thread_info.h
diff --git a/xen/include/asm-ia64/linux/asm/sections.h b/xen/include/asm-ia64/linux/asm/sections.h
deleted file mode 100644
index e9eb7f6..0000000
--- a/xen/include/asm-ia64/linux/asm/sections.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _ASM_IA64_SECTIONS_H
-#define _ASM_IA64_SECTIONS_H
-
-/*
- * Copyright (C) 1998-2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <asm-generic/sections.h>
-
-extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
-extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
-extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
-extern char __start_gate_section[];
-extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
-extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
-extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
-extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
-extern char __start_unwind[], __end_unwind[];
-extern char __start_ivt_text[], __end_ivt_text[];
-
-#endif /* _ASM_IA64_SECTIONS_H */
-