ia64/xen-unstable

changeset 18139:7affdebb7a1e

[IA64] allocate percpu area in the xen va area.

To guarantee that the percpu area is pinned down,
move its virtual address from the xen identity mapped area
to the xen va area which is pinned by DTR[IA64_TR_KERNEL].
Thus unnecessary TLB miss faults will be avoided.
Sometimes the per cpu area is accessed from a very critical
point where a TLB miss isn't allowed.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Thu Aug 07 11:47:34 2008 +0900 (2008-08-07)
parents e9706492e960
children a39913db6e51
files xen/arch/ia64/linux-xen/mm_contig.c xen/arch/ia64/xen/xensetup.c xen/include/asm-ia64/linux-xen/asm/percpu.h
line diff
     1.1 --- a/xen/arch/ia64/linux-xen/mm_contig.c	Thu Jul 31 12:25:50 2008 +0900
     1.2 +++ b/xen/arch/ia64/linux-xen/mm_contig.c	Thu Aug 07 11:47:34 2008 +0900
     1.3 @@ -175,6 +175,39 @@ find_memory (void)
     1.4  #endif
     1.5  
     1.6  #ifdef CONFIG_SMP
     1.7 +#ifdef XEN
     1.8 +#include <asm/elf.h>
     1.9 +
    1.10 +void *percpu_area __initdata = NULL;
    1.11 +
    1.12 +void* __init
    1.13 +per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa)
    1.14 +{
    1.15 +	int order = get_order(NR_CPUS * PERCPU_PAGE_SIZE);
    1.16 +	unsigned long size = 1UL << (order + PAGE_SHIFT);
    1.17 +	unsigned long start = ALIGN_UP((unsigned long)xen_heap_start,
    1.18 +				       PERCPU_PAGE_SIZE);
    1.19 +	unsigned long end = start + size;
    1.20 +
    1.21 +	if (__pa(end) < end_in_pa) {
    1.22 +		init_xenheap_pages(__pa(xen_heap_start), __pa(start));
    1.23 +		xen_heap_start = (void*)end;
    1.24 +		percpu_area = (void*)virt_to_xenva(start);
    1.25 +		printk("allocate percpu area 0x%lx@0x%lx 0x%p\n",
    1.26 +		       size, start, percpu_area);
    1.27 +	} else {
    1.28 +		panic("can't allocate percpu area. size 0x%lx\n", size);
    1.29 +	}
    1.30 +	return xen_heap_start;
    1.31 +}
    1.32 +
    1.33 +static void* __init
    1.34 +get_per_cpu_area(void)
    1.35 +{
    1.36 +	return percpu_area;
    1.37 +}
    1.38 +#endif
    1.39 +
    1.40  /**
    1.41   * per_cpu_init - setup per-cpu variables
    1.42   *
    1.43 @@ -193,13 +226,9 @@ per_cpu_init (void)
    1.44  	 */
    1.45  	if (smp_processor_id() == 0) {
    1.46  #ifdef XEN
    1.47 -		struct page_info *page;
    1.48 -		page = alloc_domheap_pages(NULL,
    1.49 -					   get_order(NR_CPUS *
    1.50 -						     PERCPU_PAGE_SIZE), 0);
    1.51 -		if (page == NULL) 
    1.52 +		cpu_data = get_per_cpu_area();
    1.53 +		if (cpu_data == NULL) 
    1.54  			panic("can't allocate per cpu area.\n");
    1.55 -		cpu_data = page_to_virt(page);
    1.56  #else
    1.57  		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
    1.58  					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
     2.1 --- a/xen/arch/ia64/xen/xensetup.c	Thu Jul 31 12:25:50 2008 +0900
     2.2 +++ b/xen/arch/ia64/xen/xensetup.c	Thu Aug 07 11:47:34 2008 +0900
     2.3 @@ -566,6 +566,13 @@ skip_move:
     2.4      if (vmx_enabled)
     2.5          xen_heap_start = vmx_init_env(xen_heap_start, xenheap_phys_end);
     2.6  
     2.7 +    /* allocate memory for percpu area
     2.8 +     * per_cpu_init() called from late_set_arch() is called after
     2.9 +     * end_boot_allocate(). It's too late to allocate memory in
    2.10 +     * xenva.
    2.11 +     */
    2.12 +    xen_heap_start = per_cpu_allocate(xen_heap_start, xenheap_phys_end);
    2.13 +
    2.14      heap_desc.xen_heap_start   = xen_heap_start;
    2.15      heap_desc.xenheap_phys_end = xenheap_phys_end;
    2.16      heap_desc.kern_md          = kern_md;
     3.1 --- a/xen/include/asm-ia64/linux-xen/asm/percpu.h	Thu Jul 31 12:25:50 2008 +0900
     3.2 +++ b/xen/include/asm-ia64/linux-xen/asm/percpu.h	Thu Aug 07 11:47:34 2008 +0900
     3.3 @@ -50,12 +50,22 @@ DECLARE_PER_CPU(unsigned long, local_per
     3.4  extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
     3.5  extern void setup_per_cpu_areas (void);
     3.6  extern void *per_cpu_init(void);
     3.7 +#ifdef XEN
     3.8 +extern void *per_cpu_allocate(void *xen_heap_start, unsigned long end_in_pa);
     3.9 +#endif
    3.10  
    3.11  #else /* ! SMP */
    3.12  
    3.13  #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
    3.14  #define __get_cpu_var(var)			per_cpu__##var
    3.15  #define per_cpu_init()				(__phys_per_cpu_start)
    3.16 +#ifdef XEN
    3.17 +static inline void *per_cpu_allocate(void *xen_heap_start,
    3.18 +				     unsigned long end_in_pa)
    3.19 +{
    3.20 +	return xen_heap_start;
    3.21 +}
    3.22 +#endif
    3.23  
    3.24  #endif	/* SMP */
    3.25