ia64/xen-unstable: xen/arch/x86/flushtlb.c @ 18806:ed8524f4a044

x86: Re-initialise HPET on resume from S3

Signed-off-by: Guanqun Lu <guanqun.lu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

Author:  Keir Fraser <keir.fraser@citrix.com>
Date:    Tue Nov 18 15:55:14 2008 +0000
Parent:  4034317507de

/******************************************************************************
 * flushtlb.c
 *
 * TLB flushes are timestamped using a global virtual 'clock' which ticks
 * on any TLB flush on any processor.
 *
 * Copyright (c) 2003-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/softirq.h>
#include <asm/flushtlb.h>
#include <asm/page.h>

/* Debug builds: Wrap frequently to stress-test the wrap logic. */
#ifdef NDEBUG
#define WRAP_MASK (0xFFFFFFFFU)
#else
#define WRAP_MASK (0x000003FFU)
#endif

u32 tlbflush_clock = 1U;
DEFINE_PER_CPU(u32, tlbflush_time);
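
/*
 * Illustrative sketch only (not this tree's definition): the clock/stamp
 * pair above is meant to be consumed by a staleness check such as the
 * NEED_FLUSH() test referenced below (it lives in asm/flushtlb.h), roughly
 * along these lines. The helper name is hypothetical; a wrapped (zero)
 * clock is treated conservatively as "always needs flush".
 */
static inline int example_need_flush(u32 cpu_stamp, u32 lastuse_stamp)
{
    u32 curr_time = tlbflush_clock;

    return ( (curr_time == 0) ||
             ((cpu_stamp <= lastuse_stamp) && (lastuse_stamp <= curr_time)) );
}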

/*
 * pre_flush(): Increment the virtual TLB-flush clock. Returns new clock value.
 *
 * This must happen *before* we flush the TLB. If we do it after, we race other
 * CPUs invalidating PTEs. For example, a page invalidated after the flush
 * might get the old timestamp, but this CPU can speculatively fetch the
 * mapping into its TLB after the flush but before inc'ing the clock.
 */
static u32 pre_flush(void)
{
    u32 t, t1, t2;

    t = tlbflush_clock;
    do {
        t1 = t2 = t;
        /* Clock wrapped: someone else is leading a global TLB shootdown. */
        if ( unlikely(t1 == 0) )
            goto skip_clocktick;
        t2 = (t + 1) & WRAP_MASK;
    }
    while ( unlikely((t = cmpxchg(&tlbflush_clock, t1, t2)) != t1) );

    /* Clock wrapped: we will lead a global TLB shootdown. */
    if ( unlikely(t2 == 0) )
        raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);

 skip_clocktick:
    return t2;
}
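
/*
 * Note on the wrap case above: the NEW_TLBFLUSH_CLOCK_PERIOD softirq handler
 * is defined elsewhere in the hypervisor, not in this file. Conceptually, the
 * CPU that ticked the clock to zero leads a global TLB shootdown of the other
 * online CPUs and then restarts the clock at 1; until that happens, a zero
 * clock makes every staleness check report "flush needed". The sketch below
 * is a hypothetical restatement of that idea, not this tree's exact handler:
 *
 *     void example_new_tlbflush_clock_period(void)
 *     {
 *         // Assumed helper: flush the TLB of every other online CPU.
 *         flush_all_other_cpus(FLUSH_TLB);
 *         // Only the wrap leader restarts the clock, so no atomics needed.
 *         tlbflush_clock = 1;
 *     }
 */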

/*
 * post_flush(): Update this CPU's timestamp with specified clock value.
 *
 * Note that this happens *after* flushing the TLB, as otherwise we can race a
 * NEED_FLUSH() test on another CPU. (e.g., other CPU sees the updated CPU
 * stamp and so does not force a synchronous TLB flush, but the flush in this
 * function hasn't yet occurred and so the TLB might be stale). The ordering
 * would only actually matter if this function were interruptible, and
 * something that abuses the stale mapping could exist in an interrupt
 * handler. In fact neither of these is the case, so really we are being ultra
 * paranoid.
 */
static void post_flush(u32 t)
{
    this_cpu(tlbflush_time) = t;
}

void write_cr3(unsigned long cr3)
{
    unsigned long flags;
    u32 t;

    /* This non-reentrant function is sometimes called in interrupt context. */
    local_irq_save(flags);

    t = pre_flush();
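
    /*
     * Presumably invalidates the HVM layer's tagged guest-TLB state
     * (ASIDs/VPIDs) so that HVM guests observe this flush as well; the
     * actual mechanism lives in the HVM code, not in this file.
     */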
    hvm_flush_guest_tlbs();
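
    /*
     * A plain CR3 reload flushes only non-global TLB entries. When user
     * mappings are marked global (_PAGE_GLOBAL) they survive the reload,
     * so CR4.PGE is toggled off and back on around the CR3 write to
     * discard the global entries too.
     */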
#ifdef USER_MAPPINGS_ARE_GLOBAL
    {
        unsigned long cr4 = read_cr4();
        write_cr4(cr4 & ~X86_CR4_PGE);
        asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
        write_cr4(cr4);
    }
#else
    asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
#endif

    post_flush(t);

    local_irq_restore(flags);
}

void flush_area_local(const void *va, unsigned int flags)
{
    const struct cpuinfo_x86 *c = &current_cpu_data;
    unsigned int order = (flags - 1) & FLUSH_ORDER_MASK;
    unsigned long irqfl;

    /* This non-reentrant function is sometimes called in interrupt context. */
    local_irq_save(irqfl);

    if ( flags & (FLUSH_TLB|FLUSH_TLB_GLOBAL) )
    {
        if ( order == 0 )
        {
            /*
             * We don't INVLPG multi-page regions because the 2M/4M/1G
             * region may not have been mapped with a superpage. Also there
             * are various errata surrounding INVLPG usage on superpages, and
             * a full flush is in any case not *that* expensive.
             */
            asm volatile ( "invlpg %0"
                           : : "m" (*(const char *)(va)) : "memory" );
        }
        else
        {
            u32 t = pre_flush();

            hvm_flush_guest_tlbs();

#ifndef USER_MAPPINGS_ARE_GLOBAL
            if ( !(flags & FLUSH_TLB_GLOBAL) || !(read_cr4() & X86_CR4_PGE) )
            {
                asm volatile ( "mov %0, %%cr3"
                               : : "r" (read_cr3()) : "memory" );
            }
            else
#endif
            {
                unsigned long cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
                barrier();
                write_cr4(cr4);
            }

            post_flush(t);
        }
    }
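
    /*
     * Cache-flush heuristic: for a bounded region smaller than the CPU's
     * reported cache size (x86_cache_size is in KiB, hence the >> 10 below),
     * CLFLUSH each line of the size-aligned region; otherwise fall back to
     * WBINVD, which writes back and invalidates the entire cache.
     */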
    if ( flags & FLUSH_CACHE )
    {
        unsigned long i, sz = 0;

        if ( order < (BITS_PER_LONG - PAGE_SHIFT - 1) )
            sz = 1UL << (order + PAGE_SHIFT);

        if ( c->x86_clflush_size && c->x86_cache_size && sz &&
             ((sz >> 10) < c->x86_cache_size) )
        {
            va = (const void *)((unsigned long)va & ~(sz - 1));
            for ( i = 0; i < sz; i += c->x86_clflush_size )
                asm volatile ( "clflush %0"
                               : : "m" (((const char *)va)[i]) );
        }
        else
        {
            wbinvd();
        }
    }

    local_irq_restore(irqfl);
}
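
/*
 * Usage note (illustrative, not part of this file): callers are assumed to
 * build 'flags' from FLUSH_TLB / FLUSH_TLB_GLOBAL / FLUSH_CACHE plus an
 * encoded flush order, which this file decodes as (flags - 1) &
 * FLUSH_ORDER_MASK, i.e. the order appears to be stored biased by one so
 * that "no order supplied" is distinguishable from order 0. A hypothetical
 * single-page local flush would then look like:
 *
 *     flush_area_local(va, FLUSH_TLB | FLUSH_ORDER(0));
 *
 * where FLUSH_ORDER() is taken to be the matching encoding macro from
 * asm/flushtlb.h.
 */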