ia64/xen-unstable

changeset 11014:b3dd6ceda9bc

[XEN] Assorted further PER_CPU- or read_mostly-ifications.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Aug 08 15:43:54 2006 +0100 (2006-08-08)
parents 6b821e310597
children 58a04bfedf6b
files xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/irq.c xen/arch/x86/nmi.c xen/arch/x86/x86_32/traps.c xen/common/domain.c xen/common/schedule.c
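The recurring pattern below replaces NR_CPUS-sized arrays with Xen's per-CPU variable machinery: declare with DEFINE_PER_CPU(), touch the local copy with this_cpu(), and name another CPU's copy with per_cpu(). A minimal sketch of the idiom follows; the names example_count, bump_local_count and read_count are illustrative only and not part of the patch.

    /* Illustrative sketch only -- not part of this changeset. */
    static DEFINE_PER_CPU(unsigned int, example_count);

    static void bump_local_count(void)
    {
        /* this_cpu() resolves to the calling CPU's instance. */
        this_cpu(example_count)++;
    }

    static unsigned int read_count(unsigned int cpu)
    {
        /* per_cpu() names a specific CPU's instance. */
        return per_cpu(example_count, cpu);
    }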
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 08 15:14:43 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 08 15:43:54 2006 +0100
     1.3 @@ -156,7 +156,7 @@ static void vmx_relinquish_guest_resourc
     1.4  
     1.5  #ifdef __x86_64__
     1.6  
     1.7 -static struct vmx_msr_state percpu_msr[NR_CPUS];
     1.8 +static DEFINE_PER_CPU(struct vmx_msr_state, percpu_msr);
     1.9  
    1.10  static u32 msr_data_index[VMX_MSR_COUNT] =
    1.11  {
    1.12 @@ -177,7 +177,7 @@ static void vmx_save_segments(struct vcp
    1.13   */
    1.14  static void vmx_load_msrs(void)
    1.15  {
    1.16 -    struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
    1.17 +    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
    1.18      int i;
    1.19  
    1.20      while ( host_state->flags )
    1.21 @@ -190,7 +190,7 @@ static void vmx_load_msrs(void)
    1.22  
    1.23  static void vmx_save_init_msrs(void)
    1.24  {
    1.25 -    struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
    1.26 +    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
    1.27      int i;
    1.28  
    1.29      for ( i = 0; i < VMX_MSR_COUNT; i++ )
    1.30 @@ -279,7 +279,7 @@ static inline int long_mode_do_msr_write
    1.31      u64 msr_content = regs->eax | ((u64)regs->edx << 32);
    1.32      struct vcpu *v = current;
    1.33      struct vmx_msr_state *msr = &v->arch.hvm_vmx.msr_content;
    1.34 -    struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
    1.35 +    struct vmx_msr_state *host_state = &this_cpu(percpu_msr);
    1.36  
    1.37      HVM_DBG_LOG(DBG_LEVEL_1, "msr 0x%lx msr_content 0x%"PRIx64"\n",
    1.38                  (unsigned long)regs->ecx, msr_content);
    1.39 @@ -361,7 +361,7 @@ static void vmx_restore_msrs(struct vcpu
    1.40      unsigned long guest_flags ;
    1.41  
    1.42      guest_state = &v->arch.hvm_vmx.msr_content;;
    1.43 -    host_state = &percpu_msr[smp_processor_id()];
    1.44 +    host_state = &this_cpu(percpu_msr);
    1.45  
    1.46      wrmsrl(MSR_SHADOW_GS_BASE, guest_state->shadow_gs);
    1.47      guest_flags = guest_state->flags;
     2.1 --- a/xen/arch/x86/irq.c	Tue Aug 08 15:14:43 2006 +0100
     2.2 +++ b/xen/arch/x86/irq.c	Tue Aug 08 15:43:54 2006 +0100
     2.3 @@ -673,7 +673,7 @@ static int __init setup_dump_irqs(void)
     2.4  }
     2.5  __initcall(setup_dump_irqs);
     2.6  
     2.7 -static struct timer end_irq_timer[NR_CPUS];
     2.8 +static DEFINE_PER_CPU(struct timer, end_irq_timer);
     2.9  
    2.10  /*
    2.11   * force_intack: Forcibly emit all pending EOIs on each CPU every second.
    2.12 @@ -682,22 +682,13 @@ static struct timer end_irq_timer[NR_CPU
    2.13  
    2.14  static void end_irq_timeout(void *unused)
    2.15  {
    2.16 -    int cpu = smp_processor_id();
    2.17 -
    2.18      local_irq_disable();
    2.19      flush_all_pending_eoi(NULL);
    2.20      local_irq_enable();
    2.21  
    2.22      on_selected_cpus(cpu_online_map, flush_ready_eoi, NULL, 1, 0);
    2.23  
    2.24 -    set_timer(&end_irq_timer[cpu], NOW() + MILLISECS(1000));
    2.25 -}
    2.26 -
    2.27 -static void __init __setup_irq_timeout(void *unused)
    2.28 -{
    2.29 -    int cpu = smp_processor_id();
    2.30 -    init_timer(&end_irq_timer[cpu], end_irq_timeout, NULL, cpu);
    2.31 -    set_timer(&end_irq_timer[cpu], NOW() + MILLISECS(1000));
    2.32 +    set_timer(&this_cpu(end_irq_timer), NOW() + MILLISECS(1000));
    2.33  }
    2.34  
    2.35  static int force_intack;
    2.36 @@ -705,8 +696,17 @@ boolean_param("force_intack", force_inta
    2.37  
    2.38  static int __init setup_irq_timeout(void)
    2.39  {
    2.40 -    if ( force_intack )
    2.41 -        on_each_cpu(__setup_irq_timeout, NULL, 1, 1);
    2.42 +    unsigned int cpu;
    2.43 +
    2.44 +    if ( !force_intack )
    2.45 +        return 0;
    2.46 +
    2.47 +    for_each_online_cpu ( cpu )
    2.48 +    {
    2.49 +        init_timer(&per_cpu(end_irq_timer, cpu), end_irq_timeout, NULL, cpu);
    2.50 +        set_timer(&per_cpu(end_irq_timer, cpu), NOW() + MILLISECS(1000));
    2.51 +    }
    2.52 +
    2.53      return 0;
    2.54  }
    2.55  __initcall(setup_irq_timeout);
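Beyond the data-layout change, the irq.c hunk also reworks setup: instead of broadcasting an IPI so every CPU initializes its own end_irq_timer, a single initcall on the boot CPU now walks the online CPUs and initializes each timer through per_cpu(). This works because init_timer()'s final argument binds the timer to the named CPU, so it still fires locally there. A similar per_cpu() initialization loop appears in the nmi.c and schedule.c hunks below. A generic sketch of the shape, with the illustrative names demo_timer, demo_fn and setup_demo_timers (not from the patch):

    /* Sketch of the boot-time per-CPU timer setup pattern (illustrative). */
    static DEFINE_PER_CPU(struct timer, demo_timer);

    static void demo_fn(void *unused)
    {
        /* Runs on the owning CPU; re-arm our local instance. */
        set_timer(&this_cpu(demo_timer), NOW() + MILLISECS(1000));
    }

    static int __init setup_demo_timers(void)
    {
        unsigned int cpu;

        for_each_online_cpu ( cpu )
        {
            /* The final argument binds the timer to CPU 'cpu'. */
            init_timer(&per_cpu(demo_timer, cpu), demo_fn, NULL, cpu);
            set_timer(&per_cpu(demo_timer, cpu), NOW() + MILLISECS(1000));
        }

        return 0;
    }
    __initcall(setup_demo_timers);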
     3.1 --- a/xen/arch/x86/nmi.c	Tue Aug 08 15:14:43 2006 +0100
     3.2 +++ b/xen/arch/x86/nmi.c	Tue Aug 08 15:43:54 2006 +0100
     3.3 @@ -36,8 +36,8 @@ unsigned int nmi_watchdog = NMI_NONE;
     3.4  static unsigned int nmi_hz = HZ;
     3.5  static unsigned int nmi_perfctr_msr;	/* the MSR to reset in NMI handler */
     3.6  static unsigned int nmi_p4_cccr_val;
     3.7 -static struct timer nmi_timer[NR_CPUS];
     3.8 -static unsigned int nmi_timer_ticks[NR_CPUS];
     3.9 +static DEFINE_PER_CPU(struct timer, nmi_timer);
    3.10 +static DEFINE_PER_CPU(unsigned int, nmi_timer_ticks);
    3.11  
    3.12  /*
    3.13   * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
    3.14 @@ -132,9 +132,8 @@ int __init check_nmi_watchdog (void)
    3.15  
    3.16  static void nmi_timer_fn(void *unused)
    3.17  {
    3.18 -    int cpu = smp_processor_id();
    3.19 -    nmi_timer_ticks[cpu]++;
    3.20 -    set_timer(&nmi_timer[cpu], NOW() + MILLISECS(1000));
    3.21 +    this_cpu(nmi_timer_ticks)++;
    3.22 +    set_timer(&this_cpu(nmi_timer), NOW() + MILLISECS(1000));
    3.23  }
    3.24  
    3.25  static void disable_lapic_nmi_watchdog(void)
    3.26 @@ -340,9 +339,8 @@ void __pminit setup_apic_nmi_watchdog(vo
    3.27      nmi_active = 1;
    3.28  }
    3.29  
    3.30 -static unsigned int
    3.31 -last_irq_sums [NR_CPUS],
    3.32 -    alert_counter [NR_CPUS];
    3.33 +static DEFINE_PER_CPU(unsigned int, last_irq_sums);
    3.34 +static DEFINE_PER_CPU(unsigned int, alert_counter);
    3.35  
    3.36  static atomic_t watchdog_disable_count = ATOMIC_INIT(1);
    3.37  
    3.38 @@ -366,35 +364,35 @@ void watchdog_enable(void)
    3.39       */
    3.40      for_each_online_cpu ( cpu )
    3.41      {
    3.42 -        init_timer(&nmi_timer[cpu], nmi_timer_fn, NULL, cpu);
    3.43 -        set_timer(&nmi_timer[cpu], NOW());
    3.44 +        init_timer(&per_cpu(nmi_timer, cpu), nmi_timer_fn, NULL, cpu);
    3.45 +        set_timer(&per_cpu(nmi_timer, cpu), NOW());
    3.46      }
    3.47  }
    3.48  
    3.49  void nmi_watchdog_tick(struct cpu_user_regs * regs)
    3.50  {
    3.51 -    int sum, cpu = smp_processor_id();
    3.52 +    unsigned int sum = this_cpu(nmi_timer_ticks);
    3.53  
    3.54 -    sum = nmi_timer_ticks[cpu];
    3.55 -
    3.56 -    if ( (last_irq_sums[cpu] == sum) && !atomic_read(&watchdog_disable_count) )
    3.57 +    if ( (this_cpu(last_irq_sums) == sum) &&
    3.58 +         !atomic_read(&watchdog_disable_count) )
    3.59      {
    3.60          /*
    3.61           * Ayiee, looks like this CPU is stuck ... wait a few IRQs (5 seconds) 
    3.62           * before doing the oops ...
    3.63           */
    3.64 -        alert_counter[cpu]++;
    3.65 -        if ( alert_counter[cpu] == 5*nmi_hz )
    3.66 +        this_cpu(alert_counter)++;
    3.67 +        if ( this_cpu(alert_counter) == 5*nmi_hz )
    3.68          {
    3.69              console_force_unlock();
    3.70 -            printk("Watchdog timer detects that CPU%d is stuck!\n", cpu);
    3.71 +            printk("Watchdog timer detects that CPU%d is stuck!\n",
    3.72 +                   smp_processor_id());
    3.73              fatal_trap(TRAP_nmi, regs);
    3.74          }
    3.75      } 
    3.76      else 
    3.77      {
    3.78 -        last_irq_sums[cpu] = sum;
    3.79 -        alert_counter[cpu] = 0;
    3.80 +        this_cpu(last_irq_sums) = sum;
    3.81 +        this_cpu(alert_counter) = 0;
    3.82      }
    3.83  
    3.84      if ( nmi_perfctr_msr )
     4.1 --- a/xen/arch/x86/x86_32/traps.c	Tue Aug 08 15:14:43 2006 +0100
     4.2 +++ b/xen/arch/x86/x86_32/traps.c	Tue Aug 08 15:43:54 2006 +0100
     4.3 @@ -19,7 +19,7 @@
     4.4  #include <public/callback.h>
     4.5  
     4.6  /* All CPUs have their own IDT to allow int80 direct trap. */
     4.7 -idt_entry_t *idt_tables[NR_CPUS] = { 0 };
     4.8 +idt_entry_t *idt_tables[NR_CPUS] __read_mostly;
     4.9  
    4.10  void show_registers(struct cpu_user_regs *regs)
    4.11  {
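The other half of the commit message is the read_mostly-ification, applied to idt_tables here and to idle_vcpu in domain.c below: __read_mostly places a rarely-written global in a dedicated section so it does not share cache lines with frequently-written data. A one-line illustrative sketch (example_setting is not from the patch):

    /* Illustrative: written once at boot, read on hot paths thereafter. */
    static unsigned long example_setting __read_mostly;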
     5.1 --- a/xen/common/domain.c	Tue Aug 08 15:14:43 2006 +0100
     5.2 +++ b/xen/common/domain.c	Tue Aug 08 15:43:54 2006 +0100
     5.3 @@ -33,7 +33,7 @@ struct domain *domain_list;
     5.4  
     5.5  struct domain *dom0;
     5.6  
     5.7 -struct vcpu *idle_vcpu[NR_CPUS];
     5.8 +struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
     5.9  
    5.10  struct domain *alloc_domain(domid_t domid)
    5.11  {
    5.12 @@ -245,15 +245,15 @@ void __domain_crash_synchronous(void)
    5.13  }
    5.14  
    5.15  
    5.16 -static struct domain *domain_shuttingdown[NR_CPUS];
    5.17 +static DEFINE_PER_CPU(struct domain *, domain_shuttingdown);
    5.18  
    5.19  static void domain_shutdown_finalise(void)
    5.20  {
    5.21      struct domain *d;
    5.22      struct vcpu *v;
    5.23  
    5.24 -    d = domain_shuttingdown[smp_processor_id()];
    5.25 -    domain_shuttingdown[smp_processor_id()] = NULL;
    5.26 +    d = this_cpu(domain_shuttingdown);
    5.27 +    this_cpu(domain_shuttingdown) = NULL;
    5.28  
    5.29      BUG_ON(d == NULL);
    5.30      BUG_ON(d == current->domain);
    5.31 @@ -302,7 +302,7 @@ void domain_shutdown(struct domain *d, u
    5.32          vcpu_sleep_nosync(v);
    5.33  
    5.34      get_knownalive_domain(d);
    5.35 -    domain_shuttingdown[smp_processor_id()] = d;
    5.36 +    this_cpu(domain_shuttingdown) = d;
    5.37      raise_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ);
    5.38  }
    5.39  
     6.1 --- a/xen/common/schedule.c	Tue Aug 08 15:14:43 2006 +0100
     6.2 +++ b/xen/common/schedule.c	Tue Aug 08 15:43:54 2006 +0100
     6.3 @@ -67,7 +67,7 @@ static struct scheduler ops;
     6.4            : (typeof(ops.fn(__VA_ARGS__)))0 )
     6.5  
     6.6  /* Per-CPU periodic timer sends an event to the currently-executing domain. */
     6.7 -static struct timer t_timer[NR_CPUS]; 
     6.8 +static DEFINE_PER_CPU(struct timer, t_timer);
     6.9  
    6.10  static inline void vcpu_runstate_change(
    6.11      struct vcpu *v, int new_state, s_time_t new_entry_time)
    6.12 @@ -593,10 +593,9 @@ static void s_timer_fn(void *unused)
    6.13  /* Periodic tick timer: send timer event to current domain */
    6.14  static void t_timer_fn(void *unused)
    6.15  {
    6.16 -    struct vcpu  *v   = current;
    6.17 -    unsigned int  cpu = smp_processor_id();
    6.18 +    struct vcpu *v   = current;
    6.19  
    6.20 -    per_cpu(schedule_data, cpu).tick++;
    6.21 +    this_cpu(schedule_data).tick++;
    6.22  
    6.23      if ( !is_idle_vcpu(v) )
    6.24      {
    6.25 @@ -606,9 +605,9 @@ static void t_timer_fn(void *unused)
    6.26  
    6.27      page_scrub_schedule_work();
    6.28  
    6.29 -    SCHED_OP(tick, cpu);
    6.30 +    SCHED_OP(tick, smp_processor_id());
    6.31  
    6.32 -    set_timer(&t_timer[cpu], NOW() + MILLISECS(10));
    6.33 +    set_timer(&this_cpu(t_timer), NOW() + MILLISECS(10));
    6.34  }
    6.35  
    6.36  /* Per-VCPU timer function: sends a virtual timer interrupt. */
    6.37 @@ -637,7 +636,7 @@ void __init scheduler_init(void)
    6.38      {
    6.39          spin_lock_init(&per_cpu(schedule_data, i).schedule_lock);
    6.40          init_timer(&per_cpu(schedule_data, i).s_timer, s_timer_fn, NULL, i);
    6.41 -        init_timer(&t_timer[i], t_timer_fn, NULL, i);
    6.42 +        init_timer(&per_cpu(t_timer, i), t_timer_fn, NULL, i);
    6.43      }
    6.44  
    6.45      for ( i = 0; schedulers[i] != NULL; i++ )