ia64/xen-unstable

view xen/include/asm-x86/flushtlb.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 822ea2bf0c54
children
/******************************************************************************
 * flushtlb.h
 *
 * TLB flushes are timestamped using a global virtual 'clock' which ticks
 * on any TLB flush on any processor.
 *
 * Copyright (c) 2003-2004, K A Fraser
 */

#ifndef __FLUSHTLB_H__
#define __FLUSHTLB_H__

#include <xen/config.h>
#include <xen/percpu.h>
#include <xen/smp.h>
#include <xen/types.h>

/* The current time as shown by the virtual TLB clock. */
extern u32 tlbflush_clock;

/* Time at which each CPU's TLB was last flushed. */
DECLARE_PER_CPU(u32, tlbflush_time);

#define tlbflush_current_time() tlbflush_clock
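
/*
 * Illustrative sketch, not part of the original header: the protocol the
 * declarations above support. A hypothetical caller stamps a page with the
 * clock value when the page is unmapped; a CPU whose tlbflush_time is newer
 * than that stamp has flushed since the unmap. The simple comparison below
 * ignores clock wrap, which NEED_FLUSH() further down handles properly.
 */
static inline u32 example_stamp_unmapped_page(void)
{
    /* Value to store alongside the page being unmapped/freed. */
    return tlbflush_current_time();
}

static inline int example_cpu_flushed_since(unsigned int cpu, u32 page_stamp)
{
    /* True if @cpu flushed its TLB after @page_stamp was taken (no wrap). */
    return per_cpu(tlbflush_time, cpu) > page_stamp;
}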

/*
 * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
 * @lastuse_stamp is a timestamp taken when the PFN we are testing was last
 * used for a purpose that may have caused the CPU's TLB to become tainted.
 */
static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
{
    u32 curr_time = tlbflush_current_time();
    /*
     * Two cases:
     *  1. During a wrap, the clock ticks over to 0 while CPUs catch up. For
     *     safety during this period, we force a flush if @curr_time == 0.
     *  2. Otherwise, we look to see if @cpu_stamp <= @lastuse_stamp.
     *     To detect false positives because @cpu_stamp has wrapped, we
     *     also check @curr_time. If less than @lastuse_stamp we definitely
     *     wrapped, so there's no need for a flush (one is forced every wrap).
     */
    return ((curr_time == 0) ||
            ((cpu_stamp <= lastuse_stamp) &&
             (lastuse_stamp <= curr_time)));
}
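
/*
 * Worked examples for NEED_FLUSH() (added illustration; values hypothetical):
 *  - curr_time == 9, cpu_stamp == 5, lastuse_stamp == 7:
 *    5 <= 7 and 7 <= 9, so flush: the CPU last flushed before the page's
 *    last use, so a stale translation may remain.
 *  - curr_time == 9, cpu_stamp == 8, lastuse_stamp == 7:
 *    8 <= 7 fails, so no flush: the CPU flushed after the last use.
 *  - curr_time == 3 (cpu_stamp == 5, lastuse_stamp == 7, clock has wrapped):
 *    7 <= 3 fails, so no flush: @lastuse_stamp predates the wrap, and a
 *    full flush is forced on every wrap anyway.
 *  - curr_time == 0: flush unconditionally; the wrap is still in progress.
 */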

/*
 * Filter the given set of CPUs, removing those that definitely flushed their
 * TLB since @page_timestamp.
 */
#define tlbflush_filter(mask, page_timestamp)                           \
do {                                                                    \
    unsigned int cpu;                                                   \
    for_each_cpu_mask ( cpu, mask )                                     \
        if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu), page_timestamp) ) \
            cpu_clear(cpu, mask);                                       \
} while ( 0 )
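
/*
 * Usage sketch, not part of the original header: tlbflush_filter() narrows
 * a candidate mask in place. E.g. with CPUs 0-3 online, if only CPU 2's
 * tlbflush_time predates @page_timestamp, the mask shrinks to just {2}.
 * The wrapper below is hypothetical and exists only for illustration.
 */
static inline void example_filter_stale(cpumask_t *mask, u32 page_timestamp)
{
    /* Remove every CPU that has definitely flushed since @page_timestamp. */
    tlbflush_filter(*mask, page_timestamp);
}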

void new_tlbflush_clock_period(void);

/* Read pagetable base. */
static inline unsigned long read_cr3(void)
{
    unsigned long cr3;
    __asm__ __volatile__ (
        "mov %%cr3, %0" : "=r" (cr3) : );
    return cr3;
}

/* Write pagetable base and implicitly tick the tlbflush clock. */
void write_cr3(unsigned long cr3);
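
/*
 * Sketch of why write_cr3() lives out of line (an assumption-laden
 * illustration, not the real flushtlb.c code): reloading CR3 flushes the
 * local non-global TLB entries, so the real function can tick the virtual
 * clock around the register write and then publish this CPU's new
 * timestamp. The real implementation updates the clock atomically and
 * handles wrap via new_tlbflush_clock_period().
 */
static inline void example_write_cr3(unsigned long cr3)
{
    u32 t = tlbflush_clock + 1;        /* tick the clock (not atomic here) */
    tlbflush_clock = t;
    __asm__ __volatile__ ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
    this_cpu(tlbflush_time) = t;       /* this CPU is clean as of time @t  */
}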

/* flush_* flag fields: */
/*
 * Area to flush: 2^flush_order pages. Default is flush entire address space.
 * NB. Multi-page areas do not need to have been mapped with a superpage.
 */
#define FLUSH_ORDER_MASK 0xff
#define FLUSH_ORDER(x)   ((x)+1)
/* Flush TLBs (or parts thereof) */
#define FLUSH_TLB        0x100
/* Flush TLBs (or parts thereof) including global mappings */
#define FLUSH_TLB_GLOBAL 0x200
/* Flush data caches */
#define FLUSH_CACHE      0x400
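
/*
 * Flag composition examples (added illustration; the functions referenced
 * are declared just below). FLUSH_ORDER(x) stores x+1 in the low byte so
 * that a zero order field keeps its default meaning of "flush the entire
 * address space":
 *
 *   flush_area_local(va, FLUSH_TLB | FLUSH_ORDER(0));          one 4kB page
 *   flush_area_local(va, FLUSH_TLB_GLOBAL | FLUSH_ORDER(9));   2MB region,
 *                                                              incl. globals
 *   flush_local(FLUSH_TLB_GLOBAL | FLUSH_CACHE);               everything
 */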

/* Flush local TLBs/caches. */
void flush_area_local(const void *va, unsigned int flags);
#define flush_local(flags) flush_area_local(NULL, flags)

/* Flush specified CPUs' TLBs/caches */
void flush_area_mask(const cpumask_t *, const void *va, unsigned int flags);
#define flush_mask(mask, flags) flush_area_mask(mask, NULL, flags)

/* Flush all CPUs' TLBs/caches */
#define flush_area_all(va, flags) flush_area_mask(&cpu_online_map, va, flags)
#define flush_all(flags) flush_mask(&cpu_online_map, flags)

/* Flush local TLBs */
#define flush_tlb_local()                       \
    flush_local(FLUSH_TLB)
#define flush_tlb_one_local(v)                  \
    flush_area_local((const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))

/* Flush specified CPUs' TLBs */
#define flush_tlb_mask(mask)                    \
    flush_mask(mask, FLUSH_TLB)
#define flush_tlb_one_mask(mask,v)              \
    flush_area_mask(mask, (const void *)(v), FLUSH_TLB|FLUSH_ORDER(0))

/* Flush all CPUs' TLBs */
#define flush_tlb_all()                         \
    flush_tlb_mask(&cpu_online_map)
#define flush_tlb_one_all(v)                    \
    flush_tlb_one_mask(&cpu_online_map, v)
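
/*
 * End-to-end sketch, not part of the original header: a typical
 * invalidation path when a page whose last use was recorded at
 * @page_timestamp is retired. Only CPUs that have not flushed since that
 * time are interrupted. The helper is hypothetical; every identifier it
 * uses is declared above.
 */
static inline void example_invalidate_page(unsigned long va, u32 page_timestamp)
{
    cpumask_t mask = cpu_online_map;        /* candidates: all online CPUs  */
    tlbflush_filter(mask, page_timestamp);  /* drop already-flushed CPUs    */
    if ( !cpus_empty(mask) )
        flush_tlb_one_mask(&mask, va);      /* IPI only the remainder       */
}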

#endif /* __FLUSHTLB_H__ */