ia64/xen-unstable

view xen/include/asm-x86/flushtlb.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 6374af16a8a3
children 9488d3166553
line source
1 /******************************************************************************
2 * flushtlb.h
3 *
4 * TLB flushes are timestamped using a global virtual 'clock' which ticks
5 * on any TLB flush on any processor.
6 *
7 * Copyright (c) 2003-2004, K A Fraser
8 */
#ifndef __FLUSHTLB_H__
#define __FLUSHTLB_H__

#include <xen/config.h>
#include <xen/percpu.h>
#include <xen/smp.h>
#include <xen/types.h>

/* The current time as shown by the virtual TLB clock. */
extern u32 tlbflush_clock;

/* Time at which each CPU's TLB was last flushed. */
DECLARE_PER_CPU(u32, tlbflush_time);

/* Snapshot of the virtual TLB clock; compared against the per-CPU
 * tlbflush_time stamps by NEED_FLUSH() below. */
#define tlbflush_current_time() tlbflush_clock
26 /*
27 * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
28 * @lastuse_stamp is a timestamp taken when the PFN we are testing was last
29 * used for a purpose that may have caused the CPU's TLB to become tainted.
30 */
31 static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
32 {
33 u32 curr_time = tlbflush_current_time();
34 /*
35 * Two cases:
36 * 1. During a wrap, the clock ticks over to 0 while CPUs catch up. For
37 * safety during this period, we force a flush if @curr_time == 0.
38 * 2. Otherwise, we look to see if @cpu_stamp <= @lastuse_stamp.
39 * To detect false positives because @cpu_stamp has wrapped, we
40 * also check @curr_time. If less than @lastuse_stamp we definitely
41 * wrapped, so there's no need for a flush (one is forced every wrap).
42 */
43 return ((curr_time == 0) ||
44 ((cpu_stamp <= lastuse_stamp) &&
45 (lastuse_stamp <= curr_time)));
46 }
/*
 * Filter the given set of CPUs, removing those that definitely flushed their
 * TLB since @page_timestamp.
 */
#define tlbflush_filter(mask, page_timestamp)                           \
    do {                                                                \
        unsigned int cpu_iter;                                          \
        for_each_cpu_mask ( cpu_iter, mask )                            \
            if ( !NEED_FLUSH(per_cpu(tlbflush_time, cpu_iter),          \
                             page_timestamp) )                          \
                cpu_clear(cpu_iter, mask);                              \
    } while ( 0 )

/* Begin a new period of the virtual TLB clock. */
extern void new_tlbflush_clock_period(void);
/* Read pagetable base. */
static inline unsigned long read_cr3(void)
{
    unsigned long val;

    /* CR3 holds the physical base of the current top-level pagetable. */
    __asm__ __volatile__ ( "mov %%cr3, %0" : "=r" (val) );
    return val;
}
/* Write pagetable base and implicitly tick the tlbflush clock. */
extern void write_cr3(unsigned long cr3);

/* Flush guest mappings from the TLB and implicitly tick the tlbflush clock. */
extern void local_flush_tlb(void);

/*
 * Flush the local TLB including global (PGE) entries: global pages survive
 * an ordinary CR3-reload flush, so CR4.PGE is toggled off and back on
 * around the flush to invalidate them too.
 */
#define local_flush_tlb_pge()                                           \
    do {                                                                \
        __pge_off();                                                    \
        local_flush_tlb();                                              \
        __pge_on();                                                     \
    } while ( 0 )

/* Invalidate the single TLB entry mapping @__addr on this CPU. */
#define local_flush_tlb_one(__addr) \
    __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))

/* Flush the TLBs of all online CPUs. */
#define flush_tlb_all() flush_tlb_mask(cpu_online_map)
#ifndef CONFIG_SMP

/*
 * Uniprocessor build: the CPU mask can only ever name the local CPU, so
 * cross-CPU flush requests degenerate to purely local operations.
 */
#define flush_tlb_all_pge()        local_flush_tlb_pge()
#define flush_tlb_mask(mask)       local_flush_tlb()
/*
 * Bug fix: the expansion previously referenced '_v' although the macro
 * parameter is named 'v', so any UP user of flush_tlb_one_mask() failed
 * to compile (or silently picked up an unrelated '_v' at the call site).
 */
#define flush_tlb_one_mask(mask,v) local_flush_tlb_one(v)

#else

#include <xen/smp.h>

/* Sentinel 'virtual address' meaning "flush everything, not one page". */
#define FLUSHVA_ALL (~0UL)

extern void flush_tlb_all_pge(void);
extern void __flush_tlb_mask(cpumask_t mask, unsigned long va);

#define flush_tlb_mask(mask)       __flush_tlb_mask(mask,FLUSHVA_ALL)
#define flush_tlb_one_mask(mask,v) __flush_tlb_mask(mask,(unsigned long)(v))

#endif
102 #endif /* __FLUSHTLB_H__ */