ia64/xen-unstable

changeset 9818:9b1c9d4133f8

Pull the Linux percpu interface into Xen. Implemented for
x86 and used to eliminate the percpu_ctxt struct from
arch/x86/domain.c.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Apr 21 17:35:15 2006 +0100 (2006-04-21)
parents 42a398e1daf1
children b15b33817f7b a182b0178262
files xen/arch/x86/domain.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c xen/arch/x86/x86_32/xen.lds.S xen/arch/x86/x86_64/xen.lds.S xen/include/asm-x86/current.h xen/include/asm-x86/percpu.h xen/include/xen/compiler.h xen/include/xen/percpu.h
line diff
     1.1 --- a/xen/arch/x86/domain.c	Fri Apr 21 17:19:31 2006 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Fri Apr 21 17:35:15 2006 +0100
     1.3 @@ -21,6 +21,12 @@
     1.4  #include <xen/softirq.h>
     1.5  #include <xen/grant_table.h>
     1.6  #include <xen/iocap.h>
     1.7 +#include <xen/kernel.h>
     1.8 +#include <xen/multicall.h>
     1.9 +#include <xen/irq.h>
    1.10 +#include <xen/event.h>
    1.11 +#include <xen/console.h>
    1.12 +#include <xen/percpu.h>
    1.13  #include <asm/regs.h>
    1.14  #include <asm/mc146818rtc.h>
    1.15  #include <asm/system.h>
    1.16 @@ -30,22 +36,12 @@
    1.17  #include <asm/i387.h>
    1.18  #include <asm/mpspec.h>
    1.19  #include <asm/ldt.h>
    1.20 -#include <xen/irq.h>
    1.21 -#include <xen/event.h>
    1.22  #include <asm/shadow.h>
    1.23 -#include <xen/console.h>
    1.24 -#include <xen/elf.h>
    1.25  #include <asm/hvm/hvm.h>
    1.26  #include <asm/hvm/support.h>
    1.27  #include <asm/msr.h>
    1.28 -#include <xen/kernel.h>
    1.29 -#include <xen/multicall.h>
    1.30  
    1.31 -struct percpu_ctxt {
    1.32 -    struct vcpu *curr_vcpu;
    1.33 -    unsigned int dirty_segment_mask;
    1.34 -} __cacheline_aligned;
    1.35 -static struct percpu_ctxt percpu_ctxt[NR_CPUS];
    1.36 +DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
    1.37  
    1.38  static void paravirt_ctxt_switch_from(struct vcpu *v);
    1.39  static void paravirt_ctxt_switch_to(struct vcpu *v);
    1.40 @@ -123,11 +119,6 @@ void dump_pageframe_info(struct domain *
    1.41      }
    1.42  }
    1.43  
    1.44 -void set_current_execstate(struct vcpu *v)
    1.45 -{
    1.46 -    percpu_ctxt[smp_processor_id()].curr_vcpu = v;
    1.47 -}
    1.48 -
    1.49  struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
    1.50  {
    1.51      struct vcpu *v;
    1.52 @@ -459,6 +450,7 @@ void new_thread(struct vcpu *d,
    1.53   * allowing load_segments() to avoid some expensive segment loads and
    1.54   * MSR writes.
    1.55   */
    1.56 +static DEFINE_PER_CPU(unsigned int, dirty_segment_mask);
    1.57  #define DIRTY_DS           0x01
    1.58  #define DIRTY_ES           0x02
    1.59  #define DIRTY_FS           0x04
    1.60 @@ -473,8 +465,8 @@ static void load_segments(struct vcpu *n
    1.61      unsigned int dirty_segment_mask, cpu = smp_processor_id();
    1.62  
    1.63      /* Load and clear the dirty segment mask. */
    1.64 -    dirty_segment_mask = percpu_ctxt[cpu].dirty_segment_mask;
    1.65 -    percpu_ctxt[cpu].dirty_segment_mask = 0;
    1.66 +    dirty_segment_mask = per_cpu(dirty_segment_mask, cpu);
    1.67 +    per_cpu(dirty_segment_mask, cpu) = 0;
    1.68  
    1.69      /* Either selector != 0 ==> reload. */
    1.70      if ( unlikely((dirty_segment_mask & DIRTY_DS) | nctxt->user_regs.ds) )
    1.71 @@ -601,7 +593,7 @@ static void save_segments(struct vcpu *v
    1.72          dirty_segment_mask |= DIRTY_GS_BASE_USER;
    1.73      }
    1.74  
    1.75 -    percpu_ctxt[smp_processor_id()].dirty_segment_mask = dirty_segment_mask;
    1.76 +    this_cpu(dirty_segment_mask) = dirty_segment_mask;
    1.77  }
    1.78  
    1.79  #define switch_kernel_stack(v) ((void)0)
    1.80 @@ -638,7 +630,7 @@ static void __context_switch(void)
    1.81  {
    1.82      struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    1.83      unsigned int          cpu = smp_processor_id();
    1.84 -    struct vcpu          *p = percpu_ctxt[cpu].curr_vcpu;
    1.85 +    struct vcpu          *p = per_cpu(curr_vcpu, cpu);
    1.86      struct vcpu          *n = current;
    1.87  
    1.88      ASSERT(p != n);
    1.89 @@ -692,7 +684,7 @@ static void __context_switch(void)
    1.90          cpu_clear(cpu, p->domain->domain_dirty_cpumask);
    1.91      cpu_clear(cpu, p->vcpu_dirty_cpumask);
    1.92  
    1.93 -    percpu_ctxt[cpu].curr_vcpu = n;
    1.94 +    per_cpu(curr_vcpu, cpu) = n;
    1.95  }
    1.96  
    1.97  
    1.98 @@ -716,7 +708,7 @@ void context_switch(struct vcpu *prev, s
    1.99  
   1.100      set_current(next);
   1.101  
   1.102 -    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_vcpu(next) )
   1.103 +    if ( (per_cpu(curr_vcpu, cpu) == next) || is_idle_vcpu(next) )
   1.104      {
   1.105          local_irq_enable();
   1.106      }
   1.107 @@ -758,7 +750,7 @@ int __sync_lazy_execstate(void)
   1.108  
   1.109      local_irq_save(flags);
   1.110  
   1.111 -    switch_required = (percpu_ctxt[smp_processor_id()].curr_vcpu != current);
   1.112 +    switch_required = (this_cpu(curr_vcpu) != current);
   1.113  
   1.114      if ( switch_required )
   1.115          __context_switch();
     2.1 --- a/xen/arch/x86/setup.c	Fri Apr 21 17:19:31 2006 +0100
     2.2 +++ b/xen/arch/x86/setup.c	Fri Apr 21 17:35:15 2006 +0100
     2.3 @@ -14,6 +14,7 @@
     2.4  #include <xen/domain_page.h>
     2.5  #include <xen/compile.h>
     2.6  #include <xen/gdbstub.h>
     2.7 +#include <xen/percpu.h>
     2.8  #include <public/version.h>
     2.9  #include <asm/bitops.h>
    2.10  #include <asm/smp.h>
    2.11 @@ -159,6 +160,38 @@ void discard_initial_images(void)
    2.12      init_domheap_pages(initial_images_start, initial_images_end);
    2.13  }
    2.14  
    2.15 +extern char __per_cpu_start[], __per_cpu_data_end[], __per_cpu_end[];
    2.16 +
    2.17 +static void percpu_init_areas(void)
    2.18 +{
    2.19 +    unsigned int i, data_size = __per_cpu_data_end - __per_cpu_start;
    2.20 +
    2.21 +    BUG_ON(data_size > PERCPU_SIZE);
    2.22 +
    2.23 +    for ( i = 1; i < NR_CPUS; i++ )
    2.24 +        memcpy(__per_cpu_start + (i << PERCPU_SHIFT),
    2.25 +               __per_cpu_start,
    2.26 +               data_size);
    2.27 +}
    2.28 +
    2.29 +static void percpu_free_unused_areas(void)
    2.30 +{
    2.31 +    unsigned int i, first_unused;
    2.32 +
    2.33 +    /* Find first unused CPU number. */
    2.34 +    for ( i = 0; i < NR_CPUS; i++ )
    2.35 +        if ( !cpu_online(i) )
    2.36 +            break;
    2.37 +    first_unused = i;
    2.38 +
    2.39 +    /* Check that there are no holes in cpu_online_map. */
    2.40 +    for ( ; i < NR_CPUS; i++ )
    2.41 +        BUG_ON(cpu_online(i));
    2.42 +
    2.43 +    init_xenheap_pages(__pa(__per_cpu_start) + (first_unused << PERCPU_SHIFT),
    2.44 +                       __pa(__per_cpu_end));
    2.45 +}
    2.46 +
    2.47  void __init __start_xen(multiboot_info_t *mbi)
    2.48  {
    2.49      char *cmdline;
    2.50 @@ -209,6 +242,8 @@ void __init __start_xen(multiboot_info_t
    2.51          EARLY_FAIL();
    2.52      }
    2.53  
    2.54 +    percpu_init_areas();
    2.55 +
    2.56      xenheap_phys_end = opt_xenheap_megabytes << 20;
    2.57  
    2.58      if ( mbi->flags & MBI_MEMMAP )
    2.59 @@ -405,7 +440,7 @@ void __init __start_xen(multiboot_info_t
    2.60      BUG_ON(idle_domain == NULL);
    2.61  
    2.62      set_current(idle_domain->vcpu[0]);
    2.63 -    set_current_execstate(idle_domain->vcpu[0]);
    2.64 +    this_cpu(curr_vcpu) = idle_domain->vcpu[0];
    2.65      idle_vcpu[0] = current;
    2.66  
    2.67      paging_init();
    2.68 @@ -482,6 +517,8 @@ void __init __start_xen(multiboot_info_t
    2.69      printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    2.70      smp_cpus_done(max_cpus);
    2.71  
    2.72 +    percpu_free_unused_areas();
    2.73 +
    2.74      initialise_gdb(); /* could be moved earlier */
    2.75  
    2.76      do_initcalls();
     3.1 --- a/xen/arch/x86/smpboot.c	Fri Apr 21 17:19:31 2006 +0100
     3.2 +++ b/xen/arch/x86/smpboot.c	Fri Apr 21 17:35:15 2006 +0100
     3.3 @@ -531,7 +531,7 @@ void __devinit start_secondary(void *unu
     3.4  
     3.5  	set_processor_id(cpu);
     3.6  	set_current(idle_vcpu[cpu]);
     3.7 -	set_current_execstate(idle_vcpu[cpu]);
     3.8 +	this_cpu(curr_vcpu) = idle_vcpu[cpu];
     3.9  
    3.10  	percpu_traps_init();
    3.11  
     4.1 --- a/xen/arch/x86/x86_32/xen.lds.S	Fri Apr 21 17:19:31 2006 +0100
     4.2 +++ b/xen/arch/x86/x86_32/xen.lds.S	Fri Apr 21 17:35:15 2006 +0100
     4.3 @@ -5,6 +5,7 @@
     4.4  
     4.5  #include <xen/config.h>
     4.6  #include <asm/page.h>
     4.7 +#include <asm/percpu.h>
     4.8  #undef ENTRY
     4.9  #undef ALIGN
    4.10  
    4.11 @@ -56,8 +57,15 @@ SECTIONS
    4.12    __initcall_start = .;
    4.13    .initcall.init : { *(.initcall.init) } :text
    4.14    __initcall_end = .;
    4.15 +  . = ALIGN(PAGE_SIZE);
    4.16 +  __init_end = .;
    4.17 +
    4.18 +  __per_cpu_start = .;
    4.19 +  .data.percpu : { *(.data.percpu) } :text
    4.20 +  __per_cpu_data_end = .;
    4.21 +  . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
    4.22    . = ALIGN(STACK_SIZE);
    4.23 -  __init_end = .;
    4.24 +  __per_cpu_end = .;
    4.25  
    4.26    __bss_start = .;		/* BSS */
    4.27    .bss : {
     5.1 --- a/xen/arch/x86/x86_64/xen.lds.S	Fri Apr 21 17:19:31 2006 +0100
     5.2 +++ b/xen/arch/x86/x86_64/xen.lds.S	Fri Apr 21 17:35:15 2006 +0100
     5.3 @@ -3,6 +3,7 @@
     5.4  
     5.5  #include <xen/config.h>
     5.6  #include <asm/page.h>
     5.7 +#include <asm/percpu.h>
     5.8  #undef ENTRY
     5.9  #undef ALIGN
    5.10  
    5.11 @@ -54,8 +55,15 @@ SECTIONS
    5.12    __initcall_start = .;
    5.13    .initcall.init : { *(.initcall.init) } :text
    5.14    __initcall_end = .;
    5.15 +  . = ALIGN(PAGE_SIZE);
    5.16 +  __init_end = .;
    5.17 +
    5.18 +  __per_cpu_start = .;
    5.19 +  .data.percpu : { *(.data.percpu) } :text
    5.20 +  __per_cpu_data_end = .;
    5.21 +  . = __per_cpu_start + (NR_CPUS << PERCPU_SHIFT);
    5.22    . = ALIGN(STACK_SIZE);
    5.23 -  __init_end = .;
    5.24 +  __per_cpu_end = .;
    5.25  
    5.26    __bss_start = .;		/* BSS */
    5.27    .bss : {
     6.1 --- a/xen/include/asm-x86/current.h	Fri Apr 21 17:19:31 2006 +0100
     6.2 +++ b/xen/include/asm-x86/current.h	Fri Apr 21 17:35:15 2006 +0100
     6.3 @@ -16,7 +16,7 @@ struct vcpu;
     6.4  struct cpu_info {
     6.5      struct cpu_user_regs guest_cpu_user_regs;
     6.6      unsigned int         processor_id;
     6.7 -    struct vcpu  *current_ed;
     6.8 +    struct vcpu         *current_vcpu;
     6.9  };
    6.10  
    6.11  static inline struct cpu_info *get_cpu_info(void)
    6.12 @@ -29,12 +29,12 @@ static inline struct cpu_info *get_cpu_i
    6.13      return cpu_info;
    6.14  }
    6.15  
    6.16 -#define get_current()         (get_cpu_info()->current_ed)
    6.17 -#define set_current(_ed)      (get_cpu_info()->current_ed = (_ed))
    6.18 +#define get_current()         (get_cpu_info()->current_vcpu)
    6.19 +#define set_current(vcpu)     (get_cpu_info()->current_vcpu = (vcpu))
    6.20  #define current               (get_current())
    6.21  
    6.22  #define get_processor_id()    (get_cpu_info()->processor_id)
    6.23 -#define set_processor_id(_id) (get_cpu_info()->processor_id = (_id))
    6.24 +#define set_processor_id(id)  (get_cpu_info()->processor_id = (id))
    6.25  
    6.26  #define guest_cpu_user_regs() (&get_cpu_info()->guest_cpu_user_regs)
    6.27  
    6.28 @@ -51,8 +51,14 @@ static inline struct cpu_info *get_cpu_i
    6.29          "mov %0,%%"__OP"sp; jmp "STR(__fn)      \
    6.30          : : "r" (guest_cpu_user_regs()) : "memory" )
    6.31  
    6.32 -#define schedule_tail(_ed) (((_ed)->arch.schedule_tail)(_ed))
    6.33 +#define schedule_tail(vcpu) (((vcpu)->arch.schedule_tail)(vcpu))
    6.34  
    6.35 -extern void set_current_execstate(struct vcpu *v);
    6.36 +#include <xen/percpu.h>
    6.37 +/*
    6.38 + * Which VCPU's state is currently running on each CPU?
    6.39 + * This is not necessarily the same as 'current' as a CPU may be
    6.40 + * executing a lazy state switch.
    6.41 + */
    6.42 +DECLARE_PER_CPU(struct vcpu *, curr_vcpu);
    6.43  
    6.44  #endif /* __X86_CURRENT_H__ */
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/include/asm-x86/percpu.h	Fri Apr 21 17:35:15 2006 +0100
     7.3 @@ -0,0 +1,20 @@
     7.4 +#ifndef __X86_PERCPU_H__
     7.5 +#define __X86_PERCPU_H__
     7.6 +
     7.7 +#define PERCPU_SHIFT 12
     7.8 +#define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
     7.9 +
    7.10 +/* Separate out the type, so (int[3], foo) works. */
    7.11 +#define DEFINE_PER_CPU(type, name)                      \
    7.12 +    __attribute__((__section__(".data.percpu")))        \
    7.13 +    __typeof__(type) per_cpu__##name
    7.14 +
    7.15 +/* var is in discarded region: offset to particular copy we want */
    7.16 +#define per_cpu(var, cpu)  \
    7.17 +    (*RELOC_HIDE(&per_cpu__##var, ((unsigned int)(cpu))<<PERCPU_SHIFT))
    7.18 +#define __get_cpu_var(var) \
    7.19 +    (per_cpu(var, smp_processor_id()))
    7.20 +
    7.21 +#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
    7.22 +
    7.23 +#endif /* __X86_PERCPU_H__ */
     8.1 --- a/xen/include/xen/compiler.h	Fri Apr 21 17:19:31 2006 +0100
     8.2 +++ b/xen/include/xen/compiler.h	Fri Apr 21 17:35:15 2006 +0100
     8.3 @@ -25,4 +25,17 @@
     8.4  #define __must_check
     8.5  #endif
     8.6  
     8.7 +/* This macro obfuscates arithmetic on a variable address so that gcc
     8.8 +   shouldn't recognize the original var and make assumptions about it */
     8.9 +/*
    8.10 + * Versions of the ppc64 compiler before 4.1 had a bug where use of
    8.11 + * RELOC_HIDE could trash r30. The bug can be worked around by changing
    8.12 + * the inline assembly constraint from =g to =r, in this particular
    8.13 + * case either is valid.
    8.14 + */
    8.15 +#define RELOC_HIDE(ptr, off)                    \
    8.16 +  ({ unsigned long __ptr;                       \
    8.17 +    __asm__ ("" : "=r"(__ptr) : "0"(ptr));      \
    8.18 +    (typeof(ptr)) (__ptr + (off)); })
    8.19 +
    8.20  #endif /* __LINUX_COMPILER_H */
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/xen/include/xen/percpu.h	Fri Apr 21 17:35:15 2006 +0100
     9.3 @@ -0,0 +1,15 @@
     9.4 +#ifndef __XEN_PERCPU_H__
     9.5 +#define __XEN_PERCPU_H__
     9.6 +
     9.7 +#include <xen/config.h>
     9.8 +#include <xen/smp.h>
     9.9 +#include <asm/percpu.h>
    9.10 +
    9.11 +/* Preferred on Xen. Also see arch-defined per_cpu(). */
    9.12 +#define this_cpu(var)    __get_cpu_var(var)
    9.13 +
    9.14 +/* Linux compatibility. */
    9.15 +#define get_cpu_var(var) this_cpu(var)
    9.16 +#define put_cpu_var(var)
    9.17 +
    9.18 +#endif /* __XEN_PERCPU_H__ */