ia64/xen-unstable

xen/arch/ia64/xen/vhpt.c @ 10929:7cde0d938ef4

[IA64] convert more privop_stat to perfc

Convert most privop stats to perfc.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Fri Aug 04 09:02:43 2006 -0600 (2006-08-04)
parents 86e5d8458c08
children 7c79d49033c6

/*
 * Initialize VHPT support.
 *
 * Copyright (C) 2004 Hewlett-Packard Co
 *      Dan Magenheimer <dan.magenheimer@hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>

/* Defined in tlb.c  */
extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);

extern long running_on_sim;

DEFINE_PER_CPU (unsigned long, vhpt_paddr);
DEFINE_PER_CPU (unsigned long, vhpt_pend);
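
/* Mark every entry of this CPU's VHPT invalid by writing INVALID_TI_TAG.  */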
void vhpt_flush(void)
{
        struct vhpt_lf_entry *v = __va(__ia64_per_cpu_var(vhpt_paddr));
        int i;

        for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
                v->ti_tag = INVALID_TI_TAG;
}
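
/* Clear all fields of the VHPT mapped at VHPT_ADDR and tag every entry
   invalid.  */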
static void vhpt_erase(void)
{
        struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR;
        int i;

        for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
                v->itir = 0;
                v->CChain = 0;
                v->page_flags = 0;
                v->ti_tag = INVALID_TI_TAG;
        }
        // initialize cache too???
}
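
/* Pin the VHPT mapping at VHPT_ADDR with the IA64_TR_VHPT translation
   register; PSR.ic is cleared around the insertion and the instruction
   stream is serialized afterwards.  */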
static void vhpt_map(unsigned long pte)
{
        unsigned long psr;

        psr = ia64_clear_ic();
        ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
        ia64_set_psr(psr);
        ia64_srlz_i();
}
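
/* Fill the long-format VHPT entry that vadr hashes to: page size (itir),
   page flags with the present bit forced, and the tag for vadr.  */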
void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
{
        struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
        unsigned long tag = ia64_ttag (vadr);

        /* No need to first disable the entry, since VHPT is per LP
           and VHPT is TR mapped.  */
        vlfe->itir = logps;
        vlfe->page_flags = pte | _PAGE_P;
        vlfe->ti_tag = tag;
}
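
/* Insert a mapping larger than PAGE_SIZE by creating one VHPT entry for
   each PAGE_SIZE increment of the size-aligned address.  */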
void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
{
        unsigned long mask = (1L << logps) - 1;
        int i;

        if (logps-PAGE_SHIFT > 10 && !running_on_sim) {
                // if this happens, we may want to revisit this algorithm
                panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
        }
        if (logps-PAGE_SHIFT > 2) {
                // FIXME: Should add counter here to see how often this
                // happens (e.g. for 16MB pages!) and determine if it
                // is a performance problem.  On a quick look, it takes
                // about 39000 instrs for a 16MB page and it seems to occur
                // only a few times/second, so OK for now.
                // An alternate solution would be to just insert the one
                // 16KB in the vhpt (but with the full mapping)?
                //printf("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
                //"va=%p, pa=%p, pa-masked=%p\n",
                //logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
                //(pte&_PFN_MASK)&~mask);
        }
        vaddr &= ~mask;
        pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
        for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
                vhpt_insert(vaddr, pte, logps << 2);
                vaddr += PAGE_SIZE;
        }
}
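
/* Allocate this CPU's VHPT from the domain heap, record its physical
   range, map it with a pinned TR and point the PTA register at it.  */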
void vhpt_init(void)
{
        unsigned long paddr, pte;
        struct page_info *page;
#if !VHPT_ENABLED
        return;
#endif
        /* This allocation only holds true if the VHPT table is unique for
         * all domains.  Otherwise a new VHPT table should later be allocated
         * from the domain heap when each domain is created.  Assume the xen
         * buddy allocator can provide naturally aligned pages by order?
         */
        page = alloc_domheap_pages(NULL, VHPT_SIZE_LOG2 - PAGE_SHIFT, 0);
        if (!page)
                panic("vhpt_init: can't allocate VHPT!\n");
        paddr = page_to_maddr(page);
        if (paddr & ((1 << VHPT_SIZE_LOG2) - 1))
                panic("vhpt_init: bad VHPT alignment!\n");
        __get_cpu_var(vhpt_paddr) = paddr;
        __get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1;
        printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
               paddr, __get_cpu_var(vhpt_pend));
        pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
        vhpt_map(pte);
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                     VHPT_ENABLED);
        vhpt_erase();
}
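
/* Flush all translations cached for a VCPU: its saved dtlb/itlb entries,
   the per-CPU VHPT and the machine TLB.  */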
void vcpu_flush_vtlb_all(struct vcpu *v)
{
        /* First VCPU tlb.  */
        vcpu_purge_tr_entry(&PSCBX(v,dtlb));
        vcpu_purge_tr_entry(&PSCBX(v,itlb));

        /* Then VHPT.  */
        vhpt_flush ();

        /* Then mTLB.  */
        local_flush_tlb_all ();

        /* We could clear the bit in d->domain_dirty_cpumask only if domain d
           is not running on this processor.  There is currently no easy way
           to check this.  */
}
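
/* void * wrapper so vcpu_flush_vtlb_all can be used with
   smp_call_function_single.  */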
static void __vcpu_flush_vtlb_all(void *vcpu)
{
        vcpu_flush_vtlb_all((struct vcpu*)vcpu);
}
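
/* Flush the virtual TLB of every initialised VCPU of the current domain,
   locally where possible and via smp_call_function_single otherwise.  */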
void domain_flush_vtlb_all (void)
{
        int cpu = smp_processor_id ();
        struct vcpu *v;

        for_each_vcpu (current->domain, v) {
                if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
                        continue;

                if (v->processor == cpu)
                        vcpu_flush_vtlb_all(v);
                else
                        smp_call_function_single(v->processor,
                                                 __vcpu_flush_vtlb_all,
                                                 v, 1, 1);
        }
}
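
/* Invalidate, in the given CPU's VHPT, the entry hashed to by each page
   of the range [vadr, vadr + addr_range).  */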
static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range)
{
        void *vhpt_base = __va(per_cpu(vhpt_paddr, cpu));

        while ((long)addr_range > 0) {
                /* Get the VHPT entry.  */
                unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
                volatile struct vhpt_lf_entry *v;
                v = vhpt_base + off;
                v->ti_tag = INVALID_TI_TAG;
                addr_range -= PAGE_SIZE;
                vadr += PAGE_SIZE;
        }
}
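
/* Invalidate the local VHPT entries for a range and purge the matching
   machine TLB entries with ptc.l.  */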
void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range)
{
        cpu_flush_vhpt_range (current->processor, vadr, 1UL << log_range);
        ia64_ptcl(vadr, log_range << 2);
        ia64_srlz_i();
}
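
/* Flush an address range for a whole domain: purge each initialised VCPU's
   saved dtlb/itlb entries, invalidate the VHPT entries on the processors
   the VCPUs run on, then issue a global ptc.ga purge.  */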
void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
{
        struct vcpu *v;

#if 0
        // this only seems to occur at shutdown, but it does occur
        if ((!addr_range) || addr_range & (addr_range - 1)) {
                printf("vhpt_flush_address: weird range, spinning...\n");
                while(1);
        }
#endif

        for_each_vcpu (d, v) {
                if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
                        continue;

                /* Purge TC entries.
                   FIXME: clear only if match.  */
                vcpu_purge_tr_entry(&PSCBX(v,dtlb));
                vcpu_purge_tr_entry(&PSCBX(v,itlb));
        }
        smp_mb();

        for_each_vcpu (d, v) {
                if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
                        continue;

                /* Invalidate VHPT entries.  */
                cpu_flush_vhpt_range (v->processor, vadr, addr_range);
        }
        // ptc.ga has release semantics.

        /* ptc.ga */
        ia64_global_tlb_purge(vadr, vadr + addr_range, PAGE_SHIFT);
}
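
/* Flush the local VHPT and the local machine TLB.  */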
static void flush_tlb_vhpt_all (struct domain *d)
{
        /* First VHPT.  */
        vhpt_flush ();

        /* Then mTLB.  */
        local_flush_tlb_all ();
}
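
/* Flush the VHPT and machine TLB on every CPU, then clear the domain's
   dirty cpumask.  */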
void domain_flush_tlb_vhpt(struct domain *d)
{
        /* Very heavy...  */
        on_each_cpu ((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
        cpus_clear (d->domain_dirty_cpumask);
}
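
/* Flush the VHPT and machine TLB on every CPU in the mask: the local CPU
   directly, the remote ones via smp_call_function_single.  */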
void flush_tlb_mask(cpumask_t mask)
{
        int cpu;

        cpu = smp_processor_id();
        if (cpu_isset (cpu, mask)) {
                cpu_clear(cpu, mask);
                flush_tlb_vhpt_all (NULL);
        }

        if (cpus_empty(mask))
                return;

        for_each_cpu_mask (cpu, mask)
                smp_call_function_single
                        (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
}
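
/* Report, per present CPU, how many VHPT entries currently hold a valid
   tag.  */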
int dump_vhpt_stats(char *buf)
{
        int i, cpu;
        char *s = buf;

        s += sprintf(s, "VHPT usage (%ld entries):\n",
                     (unsigned long) VHPT_NUM_ENTRIES);

        for_each_present_cpu (cpu) {
                struct vhpt_lf_entry *v = __va(per_cpu(vhpt_paddr, cpu));
                unsigned long vhpt_valid = 0;

                for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
                        if (!(v->ti_tag & INVALID_TI_TAG))
                                vhpt_valid++;
                s += sprintf(s, " cpu %d: %ld\n", cpu, vhpt_valid);
        }

        return s - buf;
}