x86: Replace FLUSH_LEVEL() with the clearer FLUSH_ORDER(). Also remove a bogus assertion.
Signed-off-by: Keir Fraser <keir@xensource.com>
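
The new encoding biases the order by one: a caller that specifies no order
leaves the low byte zero, and the decode (flags - 1) & FLUSH_ORDER_MASK then
yields 0xff, larger than any real order, which means "flush the entire
address space". A minimal sketch of the round trip, assuming 4kB pages and
the FLUSH_* definitions from the end of this patch (illustrative only):

    unsigned int flags = FLUSH_TLB | FLUSH_ORDER(0);      /* one 4kB page */
    unsigned int order = (flags - 1) & FLUSH_ORDER_MASK;  /* -> 0 */

    flags = FLUSH_TLB;                                    /* no order given */
    order = (flags - 1) & FLUSH_ORDER_MASK;               /* -> 0xff: full flush */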
void flush_area_local(const void *va, unsigned int flags)
{
const struct cpuinfo_x86 *c = &current_cpu_data;
- unsigned int level = flags & FLUSH_LEVEL_MASK;
+ unsigned int order = (flags - 1) & FLUSH_ORDER_MASK;
unsigned long irqfl;
- ASSERT(level < CONFIG_PAGING_LEVELS);
-
/* This non-reentrant function is sometimes called in interrupt context. */
local_irq_save(irqfl);
if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) )
{
- if ( level == 1 )
+ if ( order == 0 )
{
/*
* We don't INVLPG multi-page regions because the 2M/4M/1G
* region may not have been mapped with a superpage. Also there
* are various errata surrounding INVLPG usage on superpages,
* and a full flush is in any case not *that* expensive.
*/
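
For reference, the order == 0 path boils down to a single INVLPG. A minimal
sketch of such a single-page flush (illustrative, not the exact statement
elided here):

    asm volatile ( "invlpg %0"
                   : : "m" (*(const char *)va) : "memory" );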
if ( flags & FLUSH_CACHE )
{
- unsigned long i, sz;
+ unsigned long i, sz = 0;
- sz = level ? (1UL << ((level - 1) * PAGETABLE_ORDER)) : ULONG_MAX;
+ if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) )
+ sz = 1UL << (order + PAGE_SHIFT);
- if ( c->x86_clflush_size && c->x86_cache_size &&
- (sz < (c->x86_cache_size >> (PAGE_SHIFT - 10))) )
+ if ( c->x86_clflush_size && c->x86_cache_size && sz &&
+ ((sz >> 10) < c->x86_cache_size) )
{
- sz <<= PAGE_SHIFT;
va = (const void *)((unsigned long)va & ~(sz - 1));
for ( i = 0; i < sz; i += c->x86_clflush_size )
asm volatile ( "clflush %0"
               : : "m" (((const char *)va)[i]) );
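
Note the unit change: sz used to count pages (with ULONG_MAX as the "whole
cache" sentinel) and was scaled to bytes only just before the clflush loop;
it now counts bytes from the start, with 0 as the overflow sentinel. A sketch
of the new computation, assuming 4kB pages (illustrative only):

    static unsigned long flush_size_bytes(unsigned int order)
    {
        unsigned long sz = 0;
        if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) )  /* no shift overflow */
            sz = 1UL << (order + PAGE_SHIFT);            /* order 9 -> 2MB */
        return sz;  /* 0 => too large to clflush piecewise: full-cache path */
    }

The comparison then becomes direct: sz >> 10 converts bytes to kB, matching
x86_cache_size, which is kept in kB.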
if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
{
- unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(2);
+ unsigned int flush_flags =
+ FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER);
if ( l2e_get_flags(ol2e) & _PAGE_PSE )
{
}
else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
{
- unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(2);
+ unsigned int flush_flags =
+ FLUSH_TLB | FLUSH_ORDER(PAGETABLE_ORDER);
/* Skip this PTE if there is no change. */
if ( (((l2e_get_pfn(*pl2e) & ~(L1_PAGETABLE_ENTRIES - 1)) +
l1e_write_atomic(pl1e, l1e_from_pfn(mfn, flags));
if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
{
- unsigned int flush_flags = FLUSH_TLB | FLUSH_LEVEL(1);
+ unsigned int flush_flags = FLUSH_TLB | FLUSH_ORDER(0);
if ( l1e_get_flags(ol1e) & _PAGE_GLOBAL )
flush_flags |= FLUSH_TLB_GLOBAL;
if ( (l1e_get_flags(ol1e) ^ flags) & PAGE_CACHE_ATTRS )
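
Global (_PAGE_GLOBAL) entries survive a plain CR3 reload, hence the
escalation to FLUSH_TLB_GLOBAL above. A minimal sketch of how such a flush is
done on x86, assuming the usual CR4 accessors (illustrative, not this patch's
code):

    unsigned long cr4 = read_cr4();
    write_cr4(cr4 & ~X86_CR4_PGE);  /* clearing PGE also flushes global entries */
    write_cr4(cr4);                 /* restore PGE */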
ol2e = *pl2e;
l2e_write_atomic(pl2e, l2e_from_pfn(base_mfn,
l1f_to_l2f(flags)));
- flush_area(virt, FLUSH_TLB_GLOBAL | FLUSH_LEVEL(2));
+ flush_area(virt, (FLUSH_TLB_GLOBAL |
+ FLUSH_ORDER(PAGETABLE_ORDER)));
free_xen_pagetable(l2e_to_l1e(ol2e));
}
}
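
The old FLUSH_LEVEL(2) call sites translate mechanically: with 4kB pages and
PAGETABLE_ORDER == 9 (PAE/long mode), FLUSH_ORDER(PAGETABLE_ORDER) requests
2^9 = 512 pages, exactly the 2MB span covered by one L2 superpage entry. An
illustrative compile-time check of that equivalence:

    BUILD_BUG_ON(PAGETABLE_ORDER + PAGE_SHIFT != L2_PAGETABLE_SHIFT);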
/* flush_* flag fields: */
/*
- * Area to flush:
- * 0 -> flush entire address space
- * 1 -> 4kB area containing specified virtual address
- * 2 -> 4MB/2MB area containing specified virtual address
- * 3 -> 1GB area containing specified virtual address (x86/64 only)
+ * Area to flush: 2^flush_order pages. Default is flush entire address space.
* NB. Multi-page areas do not need to have been mapped with a superpage.
*/
-#define FLUSH_LEVEL_MASK 0x0f
-#define FLUSH_LEVEL(x) (x)
+#define FLUSH_ORDER_MASK 0xff
+#define FLUSH_ORDER(x) ((x)+1)
/* Flush TLBs (or parts thereof) */
-#define FLUSH_TLB 0x10
+#define FLUSH_TLB 0x100
/* Flush TLBs (or parts thereof) including global mappings */
-#define FLUSH_TLB_GLOBAL 0x20
+#define FLUSH_TLB_GLOBAL 0x200
/* Flush data caches */
-#define FLUSH_CACHE 0x40
+#define FLUSH_CACHE 0x400
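
With the order field widened from a nibble to a full byte, the command bits
move up from 0x10/0x20/0x40 to 0x100/0x200/0x400 so the two fields cannot
collide. A typical composed request then looks like this (illustrative):

    /* Flush the 2MB region containing va, including global mappings,
     * on the local CPU. */
    flush_area_local(va, FLUSH_TLB_GLOBAL | FLUSH_ORDER(PAGETABLE_ORDER));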
/* Flush local TLBs/caches. */
void flush_area_local(const void *va, unsigned int flags);
#define flush_tlb_local() \
flush_local(FLUSH_TLB)
#define flush_tlb_one_local(v) \
- flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_LEVEL(1))
+ flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
/* Flush specified CPUs' TLBs */
#define flush_tlb_mask(mask) \
flush_mask(mask, FLUSH_TLB)
#define flush_tlb_one_mask(mask,v) \
- flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_LEVEL(1))
+ flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))
/* Flush all CPUs' TLBs */
#define flush_tlb_all() \