ia64/xen-unstable

changeset 5600:0b5fab43ff59

bitkeeper revision 1.1768 (42c18d2259NPELcGV7ohyZNh72ufSw)

Based on Keir's suggestion yesterday, I fixed the segment-switching bug in
xenlinux: the outgoing task's user segment registers are now saved and
cleared in __prepare_arch_switch(), before the cr3/LDT reload, so that
__switch_to() only has to load the selectors the incoming task actually
uses. The LTP test cases now pass in domU; as a sanity test I ran LTP in
domU alongside an infinite loop of "make clean; make -j4" in dom0.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Jun 28 17:47:14 2005 +0000 (2005-06-28)
parents 348b774a8f97
children 41a5181f74df
files linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h
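
Context for the diff below: on x86_64 xenlinux the outgoing task's segment
selectors must be saved (and cleared) while its page tables and LDT are
still installed, which is why the save moves out of __switch_to() and into
a new __prepare_arch_switch() hook. A rough sketch of the scheduler
ordering the patch relies on, condensed from 2.6-era kernel/sched.c
(illustrative only; rq and context_switch() belong to the stock scheduler,
not to this patch):

    spin_lock_irq(&rq->lock);
    /* ... pick next ... */
    prepare_arch_switch(rq, next);          /* now __prepare_arch_switch():
                                             * save and clear %ds/%es/%fs/%gs
                                             * while the old page tables and
                                             * LDT are still live            */
    prev = context_switch(rq, prev, next);  /* switch_mm() reloads cr3 and
                                             * the LDT; __switch_to() then
                                             * loads only the selectors the
                                             * incoming task uses            */
    finish_arch_switch(rq, prev);           /* per the macro in the diff:
                                             * spin_unlock_irq(&rq->lock)    */
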
diff
--- a/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c	Tue Jun 28 17:42:53 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/x86_64/kernel/process.c	Tue Jun 28 17:47:14 2005 +0000
@@ -474,51 +474,26 @@ struct task_struct *__switch_to(struct t
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
-	if (unlikely(next->es | prev->es))
+	if (unlikely(next->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
-	if (unlikely(next->ds | prev->ds))
+	if (unlikely(next->ds))
 		loadsegment(ds, next->ds);
 
 	/*
 	 * Switch FS and GS.
 	 */
-	{
-		unsigned fsindex;
-		asm volatile("movl %%fs,%0" : "=g" (fsindex));
-		/* segment register != 0 always requires a reload.
-		   also reload when it has changed.
-		   when prev process used 64bit base always reload
-		   to avoid an information leak. */
-		if (unlikely(fsindex | next->fsindex | prev->fs)) {
-			loadsegment(fs, next->fsindex);
-			/* check if the user used a selector != 0
-	                 * if yes clear 64bit base, since overloaded base
-                         * is always mapped to the Null selector
-                         */
-			if (fsindex)
-			prev->fs = 0;
-		}
-		/* when next process has a 64bit base use it */
-		if (next->fs) {
-                        HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
-                }
-		prev->fsindex = fsindex;
-	}
-	{
-		unsigned gsindex;
-		asm volatile("movl %%gs,%0" : "=g" (gsindex));
-		if (unlikely(gsindex | next->gsindex | prev->gs)) {
-			load_gs_index(next->gsindex);
-			if (gsindex)
-			prev->gs = 0;
-		}
-		if (next->gs)
-                    HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
-		prev->gsindex = gsindex;
-	}
+	if (unlikely(next->fsindex))
+		loadsegment(fs, next->fsindex);
+
+	if (next->fs)
+		HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs);
+
+	if (unlikely(next->gsindex))
+		load_gs_index(next->gsindex);
+
+	if (next->gs)
+		HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs);
 
 	/*
 	 * Switch the PDA context.
@@ -660,7 +635,6 @@ long do_arch_prctl(struct task_struct *t
 			if (doit) {
 		load_gs_index(0);
                 ret = HYPERVISOR_set_segment_base(SEGBASE_GS_USER, addr);
-                printk("do_arch_prctl: SET_SET: addr = %lx\n", addr);
 			}
 		}
 		put_cpu();
@@ -700,7 +674,6 @@ long do_arch_prctl(struct task_struct *t
 			base = read_32bit_tls(task, FS_TLS);
 		else if (doit) {
 			rdmsrl(MSR_FS_BASE, base);
-                        printk("do_arch_prctl: GET_FS: addr = %lx\n", addr);
 		} else
 			base = task->thread.fs;
 		ret = put_user(base, (unsigned long __user *)addr);
@@ -712,7 +685,6 @@ long do_arch_prctl(struct task_struct *t
 			base = read_32bit_tls(task, GS_TLS);
 		else if (doit) {
 			rdmsrl(MSR_KERNEL_GS_BASE, base);
-                        printk("do_arch_prctl: GET_GS: addr = %lx\n", addr);
 		} else
 			base = task->thread.gs;
 		ret = put_user(base, (unsigned long __user *)addr);
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h	Tue Jun 28 17:42:53 2005 +0000
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h	Tue Jun 28 17:47:14 2005 +0000
@@ -16,18 +16,48 @@
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
-#ifdef CONFIG_SMP
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
+#if 0 /*  XEN: no lazy tlb */
 	if (read_pda(mmu_state) == TLBSTATE_OK)
 		write_pda(mmu_state, TLBSTATE_LAZY);
+#endif
 }
-#else
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+
+#define prepare_arch_switch(rq,next)	__prepare_arch_switch()
+#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
+#define task_running(rq, p)		((rq)->curr == (p))
+
+static inline void __prepare_arch_switch(void)
 {
+	/*
+	 * Save away %es, %ds, %fs and %gs. Must happen before reload
+	 * of cr3/ldt (i.e., not in __switch_to).
+	 */
+	__asm__ __volatile__ (
+		"movl %%es,%0 ; movl %%ds,%1 ; movl %%fs,%2 ; movl %%gs,%3"
+		: "=m" (current->thread.es),
+		  "=m" (current->thread.ds),
+		  "=m" (current->thread.fsindex),
+		  "=m" (current->thread.gsindex) );
+
+	if (current->thread.ds)
+		__asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
+
+	if (current->thread.es)
+		__asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
+
+	if (current->thread.fsindex) {
+		__asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
+		current->thread.fs = 0;
+	}
+
+	if (current->thread.gsindex) {
+		load_gs_index(0);
+		current->thread.gs = 0;
+	}
 }
-#endif
+
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
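
A design note on the macro block added to mmu_context.h above: 2.6-era
kernel/sched.c provides fallback definitions for these hooks only when the
architecture has not defined prepare_arch_switch itself, roughly (condensed
from stock kernel/sched.c; not part of this patch):

    #ifndef prepare_arch_switch
    # define prepare_arch_switch(rq, next)	do { } while (0)
    # define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
    # define task_running(rq, p)		((rq)->curr == (p))
    #endif

Because one #ifndef guards all three defaults, a header that overrides
prepare_arch_switch() must also supply finish_arch_switch() and
task_running(); the patch therefore repeats the stock definitions of the
latter two verbatim.
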