ia64/xen-unstable

changeset 16257:05f257f4f3c7

x86: Replace the FLUSH_LEVEL() parameter to flush_area() with the
rather clearer FLUSH_ORDER(). Also remove a bogus assertion.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Fri Oct 26 16:06:49 2007 +0100
parents 537b8edb1efa
children aa56bb2fe7d9
files xen/arch/x86/flushtlb.c xen/arch/x86/mm.c xen/include/asm-x86/flushtlb.h
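
The point of the new encoding is that FLUSH_ORDER(x) stores x+1 in the low byte of the flags, so a caller that passes no FLUSH_ORDER() at all leaves the field as zero, and the decode in flush_area_local(), (flags - 1) & FLUSH_ORDER_MASK, yields 0xff, which the flush code treats as "flush the entire address space". A standalone sketch of the decoding (the three #defines are copied from the flushtlb.h hunk below; the rest is illustrative scaffolding, not part of the patch):

/*
 * Sketch of the FLUSH_ORDER() encoding added by this changeset.
 */
#include <stdio.h>

#define FLUSH_ORDER_MASK 0xff
#define FLUSH_ORDER(x)   ((x)+1)
#define FLUSH_TLB        0x100

static unsigned int decode_order(unsigned int flags)
{
    /* Mirrors flush_area_local(): undo the +1 applied by FLUSH_ORDER(). */
    return (flags - 1) & FLUSH_ORDER_MASK;
}

int main(void)
{
    /* Order 0: a single 4kB page. */
    printf("%u\n", decode_order(FLUSH_TLB | FLUSH_ORDER(0)));  /* 0 */

    /* Order 9 (PAGETABLE_ORDER on x86-64): a 2MB superpage region. */
    printf("%u\n", decode_order(FLUSH_TLB | FLUSH_ORDER(9)));  /* 9 */

    /*
     * No FLUSH_ORDER() given at all: the field decodes to 0xff, which
     * the flush code treats as "flush the entire address space".
     */
    printf("%u\n", decode_order(FLUSH_TLB));                   /* 255 */

    return 0;
}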
--- a/xen/arch/x86/flushtlb.c	Fri Oct 26 15:14:38 2007 +0100
+++ b/xen/arch/x86/flushtlb.c	Fri Oct 26 16:06:49 2007 +0100
@@ -98,17 +98,15 @@ void write_cr3(unsigned long cr3)
 void flush_area_local(const void *va, unsigned int flags)
 {
     const struct cpuinfo_x86 *c = &current_cpu_data;
-    unsigned int level = flags & FLUSH_LEVEL_MASK;
+    unsigned int order = (flags - 1) & FLUSH_ORDER_MASK;
     unsigned long irqfl;
 
-    ASSERT(level < CONFIG_PAGING_LEVELS);
-
     /* This non-reentrant function is sometimes called in interrupt context. */
     local_irq_save(irqfl);
 
     if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) )
     {
-        if ( level == 1 )
+        if ( order == 0 )
         {
             /*
              * We don't INVLPG multi-page regions because the 2M/4M/1G
@@ -146,14 +144,14 @@ void flush_area_local(const void *va, un
 
     if ( flags & FLUSH_CACHE )
     {
-        unsigned long i, sz;
-
-        sz = level ? (1UL << ((level - 1) * PAGETABLE_ORDER)) : ULONG_MAX;
+        unsigned long i, sz = 0;
 
-        if ( c->x86_clflush_size && c->x86_cache_size &&
-             (sz < (c->x86_cache_size >> (PAGE_SHIFT - 10))) )
+        if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) )
+            sz = 1UL << (order + PAGE_SHIFT);
+
+        if ( c->x86_clflush_size && c->x86_cache_size && sz &&
+             ((sz >> 10) < c->x86_cache_size) )
         {
-            sz <<= PAGE_SHIFT;
             va = (const void *)((unsigned long)va & ~(sz - 1));
             for ( i = 0; i < sz; i += c->x86_clflush_size )
                  asm volatile ( "clflush %0"
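
With orders instead of levels, the cache-flush path computes the region size directly as 2^(order + PAGE_SHIFT) bytes, guarding the shift so that out-of-range orders, including the 0xff "whole address space" decoding, leave sz == 0 and skip the CLFLUSH loop. It also replaces the old ULONG_MAX sentinel and compares like units: (sz >> 10) is KiB against x86_cache_size in KiB, where the old code compared page counts. A standalone sketch of the new sizing (not the Xen code; PAGE_SHIFT == 12 is assumed, and clflush_size stands in for c->x86_clflush_size):

/*
 * Sketch of the new region-sizing logic in flush_area_local().
 */
#include <stdio.h>

#define PAGE_SHIFT    12
#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))

static void cache_flush_extent(unsigned long va, unsigned int order,
                               unsigned long clflush_size)
{
    unsigned long sz = 0;

    /*
     * Guard the shift: orders at or above BITS_PER_LONG - PAGE_SHIFT - 1,
     * including the 0xff "entire address space" decoding, leave sz == 0
     * and so never reach the CLFLUSH loop.
     */
    if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) )
        sz = 1UL << (order + PAGE_SHIFT);

    if ( sz )
    {
        unsigned long base = va & ~(sz - 1);   /* align down to the region */
        printf("order %u: clflush %lu lines starting at %#lx\n",
               order, sz / clflush_size, base);
    }
    else
        printf("order %u: region too large, no CLFLUSH\n", order);
}

int main(void)
{
    cache_flush_extent(0x201234UL, 0, 64);    /* one 4kB page  -> 64 lines    */
    cache_flush_extent(0x201234UL, 9, 64);    /* one 2MB region -> 32768 lines */
    cache_flush_extent(0x201234UL, 0xff, 64); /* "entire address space"        */
    return 0;
}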
--- a/xen/arch/x86/mm.c	Fri Oct 26 15:14:38 2007 +0100
+++ b/xen/arch/x86/mm.c	Fri Oct 26 16:06:49 2007 +0100
@@ -3582,7 +3582,8 @@ int map_pages_to_xen(
 
             if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
             {
-                unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(2);
+                unsigned int flush_flags =
+                    FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER);
 
                 if ( l2e_get_flags(ol2e) & _PAGE_PSE )
                 {
@@ -3627,7 +3628,8 @@ int map_pages_to_xen(
             }
             else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
             {
-                unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(2);
+                unsigned int flush_flags =
+                    FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER);
 
                 /* Skip this PTE if there is no change. */
                 if ( (((l2e_get_pfn(*pl2e) & ~(L1_PAGETABLE_ENTRIES - 1)) +
@@ -3663,7 +3665,7 @@ int map_pages_to_xen(
             l1e_write_atomic(pl1e, l1e_from_pfn(mfn, flags));
             if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
             {
-                unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(1);
+                unsigned int flush_flags = FLUSH_TLB | FLUSH_ORDER(0);
                 if ( l1e_get_flags(ol1e) & _PAGE_GLOBAL )
                     flush_flags |= FLUSH_TLB_GLOBAL;
                 if ( (l1e_get_flags(ol1e) ^ flags) & PAGE_CACHE_ATTRS )
@@ -3692,7 +3694,8 @@ int map_pages_to_xen(
                     ol2e = *pl2e;
                     l2e_write_atomic(pl2e, l2e_from_pfn(base_mfn,
                                                         l1f_to_l2f(flags)));
-                    flush_area(virt, FLUSH_TLB_GLOBAL | FLUSH_LEVEL(2));
+                    flush_area(virt, (FLUSH_TLB_GLOBAL |
+                                      FLUSH_ORDER(PAGETABLE_ORDER)));
                     free_xen_pagetable(l2e_to_l1e(ol2e));
                 }
             }
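
The mm.c conversion is mechanical: flushes that covered a level-2 mapping (the old FLUSH_LEVEL(2)) now request 2^PAGETABLE_ORDER pages, and level-1 flushes (FLUSH_LEVEL(1)) become order 0, a single page. A hypothetical extract of the resulting flag values, assuming PAGETABLE_ORDER == 9 as on x86-64:

/* Hypothetical flag values mirroring the call sites above. */
unsigned int l1_flush = FLUSH_TLB | FLUSH_ORDER(0);               /* 2^0 pages = one 4kB page      */
unsigned int l2_flush = FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER); /* 2^9 pages = one 2MB superpage */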
--- a/xen/include/asm-x86/flushtlb.h	Fri Oct 26 15:14:38 2007 +0100
+++ b/xen/include/asm-x86/flushtlb.h	Fri Oct 26 16:06:49 2007 +0100
@@ -73,21 +73,17 @@ void write_cr3(unsigned long cr3);
 
 /* flush_* flag fields: */
  /*
-  * Area to flush:
-  *  0 -> flush entire address space
-  *  1 -> 4kB area containing specified virtual address
-  *  2 -> 4MB/2MB area containing specified virtual address
-  *  3 -> 1GB area containing specified virtual address (x86/64 only)
+  * Area to flush: 2^flush_order pages. Default is flush entire address space.
   * NB. Multi-page areas do not need to have been mapped with a superpage.
   */
-#define FLUSH_LEVEL_MASK 0x0f
-#define FLUSH_LEVEL(x)   (x)
+#define FLUSH_ORDER_MASK 0xff
+#define FLUSH_ORDER(x)   ((x)+1)
 /* Flush TLBs (or parts thereof) */
-#define FLUSH_TLB        0x10
+#define FLUSH_TLB        0x100
 /* Flush TLBs (or parts thereof) including global mappings */
-#define FLUSH_TLB_GLOBAL 0x20
+#define FLUSH_TLB_GLOBAL 0x200
 /* Flush data caches */
-#define FLUSH_CACHE      0x40
+#define FLUSH_CACHE      0x400
 
 /* Flush local TLBs/caches. */
 void flush_area_local(const void *va, unsigned int flags);
@@ -105,13 +101,13 @@ void flush_area_mask(cpumask_t, const vo
 #define flush_tlb_local()                       \
     flush_local(FLUSH_TLB)
 #define flush_tlb_one_local(v)                  \
-    flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_LEVEL(1))
+    flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
 
 /* Flush specified CPUs' TLBs */
 #define flush_tlb_mask(mask)                    \
     flush_mask(mask, FLUSH_TLB)
 #define flush_tlb_one_mask(mask,v)              \
-    flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_LEVEL(1))
+    flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
 
 /* Flush all CPUs' TLBs */
 #define flush_tlb_all()                         \
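
Widening the order field to a full byte is why the operation bits shift from 0x10/0x20/0x40 up to 0x100/0x200/0x400. A sketch of the resulting flag layout and of what the updated single-page macros now expand to (all values taken from the hunk above):

/*
 * Flag layout after this change:
 *
 *   bits 0-7 : flush order + 1 (0 == no FLUSH_ORDER() given, which
 *              decodes to 0xff -> flush entire address space)
 *   bit  8   : FLUSH_TLB        (0x100)
 *   bit  9   : FLUSH_TLB_GLOBAL (0x200)
 *   bit  10  : FLUSH_CACHE      (0x400)
 *
 * So flush_tlb_one_local(v) now expands to
 *   flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
 * i.e. flags == 0x101, from which flush_area_local() recovers
 * (0x101 - 1) & 0xff == 0: a single 4kB page.
 */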