direct-io.hg

changeset 4355:41b4061f42cb

bitkeeper revision 1.1159.258.72 (4248442au44vPuBRKPCu7xAuIZfS3Q)

Upgrade MTRR support to that from Linux 2.6.11.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Mar 28 17:51:38 2005 +0000 (2005-03-28)
parents d1189200b017
children cc2c40950d59 f620c41a1fef df27b2234149
files xen/arch/x86/mtrr/generic.c xen/arch/x86/mtrr/main.c xen/arch/x86/smpboot.c
line diff
     1.1 --- a/xen/arch/x86/mtrr/generic.c	Sat Mar 26 01:25:46 2005 +0000
     1.2 +++ b/xen/arch/x86/mtrr/generic.c	Mon Mar 28 17:51:38 2005 +0000
     1.3 @@ -8,7 +8,6 @@
     1.4  #include <asm/msr.h>
     1.5  #include <asm/system.h>
     1.6  #include <asm/cpufeature.h>
     1.7 -//#include <asm/tlbflush.h>
     1.8  #include "mtrr.h"
     1.9  
    1.10  struct mtrr_state {
    1.11 @@ -232,6 +231,13 @@ static unsigned long cr4 = 0;
    1.12  static u32 deftype_lo, deftype_hi;
    1.13  static spinlock_t set_atomicity_lock = SPIN_LOCK_UNLOCKED;
    1.14  
    1.15 +/*
    1.16 + * Since we are disabling the cache don't allow any interrupts - they
    1.17 + * would run extremely slow and would only increase the pain.  The caller must
    1.18 + * ensure that local interrupts are disabled and are reenabled after post_set()
    1.19 + * has been called.
    1.20 + */
    1.21 +
    1.22  static void prepare_set(void)
    1.23  {
    1.24  	unsigned long cr0;
    1.25 @@ -239,18 +245,18 @@ static void prepare_set(void)
    1.26  	/*  Note that this is not ideal, since the cache is only flushed/disabled
    1.27  	   for this CPU while the MTRRs are changed, but changing this requires
    1.28  	   more invasive changes to the way the kernel boots  */
    1.29 +
    1.30  	spin_lock(&set_atomicity_lock);
    1.31  
    1.32  	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    1.33  	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
    1.34 -	wbinvd();
    1.35  	write_cr0(cr0);
    1.36  	wbinvd();
    1.37  
    1.38  	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
    1.39  	if ( cpu_has_pge ) {
    1.40  		cr4 = read_cr4();
    1.41 -		write_cr4(cr4 & (unsigned char) ~(1 << 7));
    1.42 +		write_cr4(cr4 & ~X86_CR4_PGE);
    1.43  	}
    1.44  
    1.45  	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
    1.46 @@ -265,8 +271,7 @@ static void prepare_set(void)
    1.47  
    1.48  static void post_set(void)
    1.49  {
    1.50 -	/*  Flush caches and TLBs  */
    1.51 -	wbinvd();
    1.52 +	/*  Flush TLBs (no need to flush caches - they are disabled)  */
    1.53  	__flush_tlb();
    1.54  
    1.55  	/* Intel (P6) standard MTRRs */
    1.56 @@ -284,13 +289,16 @@ static void post_set(void)
    1.57  static void generic_set_all(void)
    1.58  {
    1.59  	unsigned long mask, count;
    1.60 +	unsigned long flags;
    1.61  
    1.62 +	local_irq_save(flags);
    1.63  	prepare_set();
    1.64  
    1.65  	/* Actually set the state */
    1.66  	mask = set_mtrr_state(deftype_lo,deftype_hi);
    1.67  
    1.68  	post_set();
    1.69 +	local_irq_restore(flags);
    1.70  
    1.71  	/*  Use the atomic bitops to update the global mask  */
    1.72  	for (count = 0; count < sizeof mask * 8; ++count) {
    1.73 @@ -313,6 +321,9 @@ static void generic_set_mtrr(unsigned in
    1.74      [RETURNS] Nothing.
    1.75  */
    1.76  {
    1.77 +	unsigned long flags;
    1.78 +
    1.79 +	local_irq_save(flags);
    1.80  	prepare_set();
    1.81  
    1.82  	if (size == 0) {
    1.83 @@ -327,6 +338,7 @@ static void generic_set_mtrr(unsigned in
    1.84  	}
    1.85  
    1.86  	post_set();
    1.87 +	local_irq_restore(flags);
    1.88  }
    1.89  
    1.90  int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
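The generic.c hunks above reorder the cache-disable sequence (CR0.CD is now set before wbinvd() rather than flushing first and then writing CR0), replace the hand-rolled bit-7 mask with X86_CR4_PGE, drop the extra wbinvd() from post_set(), and push interrupt disabling out to the callers of prepare_set()/post_set(). A minimal sketch of the resulting call pattern is below; the helper name write_mtrr_msrs() is hypothetical, everything else mirrors the calls visible in the diff.

	static void mtrr_update_sketch(void)
	{
		unsigned long flags;

		/* Contract introduced by this patch: interrupts stay off for
		 * the whole prepare_set()/post_set() window, because the
		 * caches are disabled and any handler would run uncached. */
		local_irq_save(flags);

		/* prepare_set(): take set_atomicity_lock, enter no-fill mode
		 * (CR0.CD=1, NW=0), wbinvd(), clear CR4.PGE, flush the TLB
		 * and disable the MTRR default type. */
		prepare_set();

		write_mtrr_msrs();	/* hypothetical: program the MTRR MSRs */

		/* post_set(): flush the TLB only (the caches are still
		 * disabled and empty), restore the default MTRR type, CR0
		 * and CR4, and release the lock. */
		post_set();

		local_irq_restore(flags);
	}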
     2.1 --- a/xen/arch/x86/mtrr/main.c	Sat Mar 26 01:25:46 2005 +0000
     2.2 +++ b/xen/arch/x86/mtrr/main.c	Mon Mar 28 17:51:38 2005 +0000
     2.3 @@ -167,10 +167,8 @@ static void ipi_handler(void *info)
     2.4  	local_irq_save(flags);
     2.5  
     2.6  	atomic_dec(&data->count);
     2.7 -	while(!atomic_read(&data->gate)) {
     2.8 +	while(!atomic_read(&data->gate))
     2.9  		cpu_relax();
    2.10 -		barrier();
    2.11 -	}
    2.12  
    2.13  	/*  The master has cleared me to execute  */
    2.14  	if (data->smp_reg != ~0U) 
    2.15 @@ -180,10 +178,9 @@ static void ipi_handler(void *info)
    2.16  		mtrr_if->set_all();
    2.17  
    2.18  	atomic_dec(&data->count);
    2.19 -	while(atomic_read(&data->gate)) {
    2.20 +	while(atomic_read(&data->gate))
    2.21  		cpu_relax();
    2.22 -		barrier();
    2.23 -	}
    2.24 +
    2.25  	atomic_dec(&data->count);
    2.26  	local_irq_restore(flags);
    2.27  }
    2.28 @@ -248,10 +245,9 @@ static void set_mtrr(unsigned int reg, u
    2.29  
    2.30  	local_irq_save(flags);
    2.31  
    2.32 -	while(atomic_read(&data.count)) {
    2.33 +	while(atomic_read(&data.count))
    2.34  		cpu_relax();
    2.35 -		barrier();
    2.36 -	}
    2.37 +
    2.38  	/* ok, reset count and toggle gate */
    2.39  	atomic_set(&data.count, num_booting_cpus() - 1);
    2.40  	atomic_set(&data.gate,1);
    2.41 @@ -268,10 +264,9 @@ static void set_mtrr(unsigned int reg, u
    2.42  		mtrr_if->set(reg,base,size,type);
    2.43  
    2.44  	/* wait for the others */
    2.45 -	while(atomic_read(&data.count)) {
    2.46 +	while(atomic_read(&data.count))
    2.47  		cpu_relax();
    2.48 -		barrier();
    2.49 -	}
    2.50 +
    2.51  	atomic_set(&data.count, num_booting_cpus() - 1);
    2.52  	atomic_set(&data.gate,0);
    2.53  
    2.54 @@ -279,10 +274,9 @@ static void set_mtrr(unsigned int reg, u
    2.55  	 * Wait here for everyone to have seen the gate change
    2.56  	 * So we're the last ones to touch 'data'
    2.57  	 */
    2.58 -	while(atomic_read(&data.count)) {
    2.59 +	while(atomic_read(&data.count))
    2.60  		cpu_relax();
    2.61 -		barrier();
    2.62 -	}
    2.63 +
    2.64  	local_irq_restore(flags);
    2.65  }
    2.66  
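The main.c hunks only simplify the rendezvous spin loops in ipi_handler() and set_mtrr(): the explicit barrier() after cpu_relax() was redundant, since on x86 cpu_relax() expands to rep;nop with a "memory" clobber and so already acts as a compiler barrier, forcing the atomic count and gate to be re-read on every iteration. A sketch of the idiom, with a hypothetical helper name:

	/* Spin until an atomic counter reaches zero.  cpu_relax() keeps the
	 * pipeline friendly (PAUSE) and is itself a compiler barrier, so no
	 * separate barrier() is needed for the value to be re-read. */
	static inline void wait_for_count_zero(atomic_t *count)
	{
		while (atomic_read(count))
			cpu_relax();
	}

The master/AP handshake itself is unchanged: set_mtrr() waits for data.count to drain, resets it and toggles data.gate, while each CPU in ipi_handler() decrements the count and spins on the gate before and after applying the new setting.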
     3.1 --- a/xen/arch/x86/smpboot.c	Sat Mar 26 01:25:46 2005 +0000
     3.2 +++ b/xen/arch/x86/smpboot.c	Mon Mar 28 17:51:38 2005 +0000
     3.3 @@ -351,13 +351,6 @@ void __init smp_callin(void)
     3.4  
     3.5      __sti();
     3.6  
     3.7 -#ifdef CONFIG_MTRR
     3.8 -    /*
     3.9 -     * Must be done before calibration delay is computed
    3.10 -     */
    3.11 -    mtrr_init_secondary_cpu ();
    3.12 -#endif
    3.13 -
    3.14      Dprintk("Stack at about %p\n",&cpuid);
    3.15  
    3.16      /*
    3.17 @@ -771,10 +764,6 @@ void __init smp_boot_cpus(void)
    3.18  {
    3.19      int apicid, bit;
    3.20  
    3.21 -#ifdef CONFIG_MTRR
    3.22 -    /*  Must be done before other processors booted  */
    3.23 -    mtrr_init_boot_cpu ();
    3.24 -#endif
    3.25      /* Initialize the logical to physical CPU number mapping */
    3.26      init_cpu_to_apicid();
    3.27