ia64/xen-unstable

changeset 14011:168030c8e0a9

linux: Introduce {current_,}vcpu_info.

Also consolidate time-xen.c by giving get_time_values_from_xen() a
'cpu' parameter, matching the other time functions (the consolidation
could of course also be done the other way around, but I think this
way the resulting code can be more efficient).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Mon Feb 19 16:11:56 2007 +0000 (2007-02-19)
parents 01476c7804b2
children 0b882c911b88
files linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c linux-2.6-xen-sparse/arch/x86_64/mm/fault-xen.c linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/irqflags.h linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/system.h linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/irqflags.h
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c	Mon Feb 19 16:06:03 2007 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/time-xen.c	Mon Feb 19 16:11:56 2007 +0000
     1.3 @@ -222,8 +222,7 @@ int read_current_timer(unsigned long *ti
     1.4  void init_cpu_khz(void)
     1.5  {
     1.6  	u64 __cpu_khz = 1000000ULL << 32;
     1.7 -	struct vcpu_time_info *info;
     1.8 -	info = &HYPERVISOR_shared_info->vcpu_info[0].time;
     1.9 +	struct vcpu_time_info *info = &vcpu_info(0)->time;
    1.10  	do_div(__cpu_khz, info->tsc_to_system_mul);
    1.11  	if (info->tsc_shift < 0)
    1.12  		cpu_khz = __cpu_khz << -info->tsc_shift;
    1.13 @@ -293,14 +292,13 @@ static void update_wallclock(void)
    1.14   * Reads a consistent set of time-base values from Xen, into a shadow data
    1.15   * area.
    1.16   */
    1.17 -static void get_time_values_from_xen(void)
    1.18 +static void get_time_values_from_xen(int cpu)
    1.19  {
    1.20 -	shared_info_t           *s = HYPERVISOR_shared_info;
    1.21  	struct vcpu_time_info   *src;
    1.22  	struct shadow_time_info *dst;
    1.23  
    1.24 -	src = &s->vcpu_info[smp_processor_id()].time;
    1.25 -	dst = &per_cpu(shadow_time, smp_processor_id());
    1.26 +	src = &vcpu_info(cpu)->time;
    1.27 +	dst = &per_cpu(shadow_time, cpu);
    1.28  
    1.29  	do {
    1.30  		dst->version = src->version;
    1.31 @@ -320,7 +318,7 @@ static inline int time_values_up_to_date
    1.32  	struct vcpu_time_info   *src;
    1.33  	struct shadow_time_info *dst;
    1.34  
    1.35 -	src = &HYPERVISOR_shared_info->vcpu_info[cpu].time;
    1.36 +	src = &vcpu_info(cpu)->time;
    1.37  	dst = &per_cpu(shadow_time, cpu);
    1.38  
    1.39  	rmb();
    1.40 @@ -412,7 +410,7 @@ void do_gettimeofday(struct timeval *tv)
    1.41  			 * overflowed). Detect that and recalculate
    1.42  			 * with fresh values.
    1.43  			 */
    1.44 -			get_time_values_from_xen();
    1.45 +			get_time_values_from_xen(cpu);
    1.46  			continue;
    1.47  		}
    1.48  	} while (read_seqretry(&xtime_lock, seq) ||
    1.49 @@ -456,7 +454,7 @@ int do_settimeofday(struct timespec *tv)
    1.50  		nsec = tv->tv_nsec - get_nsec_offset(shadow);
    1.51  		if (time_values_up_to_date(cpu))
    1.52  			break;
    1.53 -		get_time_values_from_xen();
    1.54 +		get_time_values_from_xen(cpu);
    1.55  	}
    1.56  	sec = tv->tv_sec;
    1.57  	__normalize_time(&sec, &nsec);
    1.58 @@ -551,7 +549,7 @@ unsigned long long monotonic_clock(void)
    1.59  		barrier();
    1.60  		time = shadow->system_timestamp + get_nsec_offset(shadow);
    1.61  		if (!time_values_up_to_date(cpu))
    1.62 -			get_time_values_from_xen();
    1.63 +			get_time_values_from_xen(cpu);
    1.64  		barrier();
    1.65  	} while (local_time_version != shadow->version);
    1.66  
    1.67 @@ -621,7 +619,7 @@ irqreturn_t timer_interrupt(int irq, voi
    1.68  	write_seqlock(&xtime_lock);
    1.69  
    1.70  	do {
    1.71 -		get_time_values_from_xen();
    1.72 +		get_time_values_from_xen(cpu);
    1.73  
    1.74  		/* Obtain a consistent snapshot of elapsed wallclock cycles. */
    1.75  		delta = delta_cpu =
    1.76 @@ -921,7 +919,7 @@ void __init time_init(void)
    1.77  		return;
    1.78  	}
    1.79  #endif
    1.80 -	get_time_values_from_xen();
    1.81 +	get_time_values_from_xen(0);
    1.82  
    1.83  	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
    1.84  	per_cpu(processed_system_time, 0) = processed_system_time;
    1.85 @@ -1029,7 +1027,7 @@ void time_resume(void)
    1.86  {
    1.87  	init_cpu_khz();
    1.88  
    1.89 -	get_time_values_from_xen();
    1.90 +	get_time_values_from_xen(0);
    1.91  
    1.92  	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
    1.93  	per_cpu(processed_system_time, 0) = processed_system_time;
     2.1 --- a/linux-2.6-xen-sparse/arch/x86_64/mm/fault-xen.c	Mon Feb 19 16:06:03 2007 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/x86_64/mm/fault-xen.c	Mon Feb 19 16:11:56 2007 +0000
     2.3 @@ -411,8 +411,7 @@ asmlinkage void __kprobes do_page_fault(
     2.4  	prefetchw(&mm->mmap_sem);
     2.5  
     2.6  	/* get the address */
     2.7 -	address = HYPERVISOR_shared_info->vcpu_info[
     2.8 -		smp_processor_id()].arch.cr2;
     2.9 +	address = current_vcpu_info()->arch.cr2;
    2.10  
    2.11  	info.si_code = SEGV_MAPERR;
    2.12  
     3.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h	Mon Feb 19 16:06:03 2007 +0000
     3.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/hypervisor.h	Mon Feb 19 16:11:56 2007 +0000
     3.3 @@ -46,16 +46,12 @@
     3.4  #include <xen/interface/nmi.h>
     3.5  #include <asm/ptrace.h>
     3.6  #include <asm/page.h>
     3.7 -#if defined(__i386__)
     3.8 -#  ifdef CONFIG_X86_PAE
     3.9 -#   include <asm-generic/pgtable-nopud.h>
    3.10 -#  else
    3.11 -#   include <asm-generic/pgtable-nopmd.h>
    3.12 -#  endif
    3.13 -#endif
    3.14  
    3.15  extern shared_info_t *HYPERVISOR_shared_info;
    3.16  
    3.17 +#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
    3.18 +#define current_vcpu_info() vcpu_info(smp_processor_id())
    3.19 +
    3.20  #ifdef CONFIG_X86_32
    3.21  extern unsigned long hypervisor_virt_start;
    3.22  #endif
     4.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/irqflags.h	Mon Feb 19 16:06:03 2007 +0000
     4.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/irqflags.h	Mon Feb 19 16:11:56 2007 +0000
     4.3 @@ -12,12 +12,6 @@
     4.4  
     4.5  #ifndef __ASSEMBLY__
     4.6  
     4.7 -#ifdef CONFIG_SMP
     4.8 -#define __vcpu_id smp_processor_id()
     4.9 -#else
    4.10 -#define __vcpu_id 0
    4.11 -#endif
    4.12 -
    4.13  /* 
    4.14   * The use of 'barrier' in the following reflects their use as local-lock
    4.15   * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
    4.16 @@ -26,8 +20,7 @@
    4.17   * includes these barriers, for example.
    4.18   */
    4.19  
    4.20 -#define __raw_local_save_flags()					\
    4.21 -	(&HYPERVISOR_shared_info->vcpu_info[__vcpu_id])->evtchn_upcall_mask;
    4.22 +#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
    4.23  
    4.24  #define raw_local_save_flags(flags) \
    4.25  		do { (flags) = __raw_local_save_flags(); } while (0)
    4.26 @@ -36,7 +29,7 @@
    4.27  do {									\
    4.28  	vcpu_info_t *_vcpu;						\
    4.29  	barrier();							\
    4.30 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
    4.31 +	_vcpu = current_vcpu_info();					\
    4.32  	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
    4.33  		barrier(); /* unmask then check (avoid races) */	\
    4.34  		if (unlikely(_vcpu->evtchn_upcall_pending))		\
    4.35 @@ -46,9 +39,7 @@ do {									\
    4.36  
    4.37  #define raw_local_irq_disable()						\
    4.38  do {									\
    4.39 -	vcpu_info_t *_vcpu;						\
    4.40 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
    4.41 -	_vcpu->evtchn_upcall_mask = 1;					\
    4.42 +	current_vcpu_info()->evtchn_upcall_mask = 1;			\
    4.43  	barrier();							\
    4.44  } while (0)
    4.45  
    4.46 @@ -56,7 +47,7 @@ do {									\
    4.47  do {									\
    4.48  	vcpu_info_t *_vcpu;						\
    4.49  	barrier();							\
    4.50 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
    4.51 +	_vcpu = current_vcpu_info();					\
    4.52  	_vcpu->evtchn_upcall_mask = 0;					\
    4.53  	barrier(); /* unmask then check (avoid races) */		\
    4.54  	if (unlikely(_vcpu->evtchn_upcall_pending))			\
     5.1 --- a/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/system.h	Mon Feb 19 16:06:03 2007 +0000
     5.2 +++ b/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/system.h	Mon Feb 19 16:11:56 2007 +0000
     5.3 @@ -100,8 +100,7 @@ extern struct task_struct * FASTCALL(__s
     5.4  #define write_cr0(x) \
     5.5  	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
     5.6  
     5.7 -#define read_cr2() \
     5.8 -	(HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
     5.9 +#define read_cr2() (current_vcpu_info()->arch.cr2)
    5.10  #define write_cr2(x) \
    5.11  	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
    5.12  
     6.1 --- a/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/irqflags.h	Mon Feb 19 16:06:03 2007 +0000
     6.2 +++ b/linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/irqflags.h	Mon Feb 19 16:11:56 2007 +0000
     6.3 @@ -15,12 +15,6 @@
     6.4   * Interrupt control:
     6.5   */
     6.6  
     6.7 -#ifdef CONFIG_SMP
     6.8 -#define __vcpu_id smp_processor_id()
     6.9 -#else
    6.10 -#define __vcpu_id 0
    6.11 -#endif
    6.12 -
    6.13  /*
    6.14   * The use of 'barrier' in the following reflects their use as local-lock
    6.15   * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
    6.16 @@ -29,8 +23,7 @@
    6.17   * includes these barriers, for example.
    6.18   */
    6.19  
    6.20 -#define __raw_local_save_flags()					\
    6.21 -	(&HYPERVISOR_shared_info->vcpu_info[__vcpu_id])->evtchn_upcall_mask;
    6.22 +#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
    6.23  
    6.24  #define raw_local_save_flags(flags) \
    6.25  		do { (flags) = __raw_local_save_flags(); } while (0)
    6.26 @@ -39,7 +32,7 @@
    6.27  do {									\
    6.28  	vcpu_info_t *_vcpu;						\
    6.29  	barrier();							\
    6.30 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
    6.31 +	_vcpu = current_vcpu_info();		\
    6.32  	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
    6.33  		barrier(); /* unmask then check (avoid races) */	\
    6.34  		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
    6.35 @@ -76,9 +69,7 @@ static inline int raw_irqs_disabled_flag
    6.36  
    6.37  #define raw_local_irq_disable()						\
    6.38  do {									\
    6.39 -	vcpu_info_t *_vcpu;						\
    6.40 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
    6.41 -	_vcpu->evtchn_upcall_mask = 1;					\
    6.42 +	current_vcpu_info()->evtchn_upcall_mask = 1;					\
    6.43  	barrier();							\
    6.44  } while (0)
    6.45  
    6.46 @@ -86,7 +77,7 @@ do {									\
    6.47  do {									\
    6.48  	vcpu_info_t *_vcpu;						\
    6.49  	barrier();							\
    6.50 -	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
    6.51 +	_vcpu = current_vcpu_info();		\
    6.52  	_vcpu->evtchn_upcall_mask = 0;					\
    6.53  	barrier(); /* unmask then check (avoid races) */		\
    6.54  	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\