ia64/xen-unstable

xen/arch/ia64/xen/vhpt.c @ 10443:0e5635d68de3

[IA64] per cpu vhpt stats

dump_vhpt_stats: display usage per CPU.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Wed Jun 21 11:17:08 2006 -0600 (2006-06-21)
parents ea306829506c
children 6d8136d0b302
/*
 * Initialize VHPT support.
 *
 * Copyright (C) 2004 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
/* Defined in tlb.c */
extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);

extern long running_on_sim;

DEFINE_PER_CPU (unsigned long, vhpt_paddr);
DEFINE_PER_CPU (unsigned long, vhpt_pend);
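/*
 * Invalidate every entry of the current CPU's VHPT.  Only the tag word
 * is rewritten: INVALID_TI_TAG is enough for the hardware walker to
 * skip the entry, so the stale itir/page_flags can be left in place.
 */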
static void vhpt_flush(void)
{
	struct vhpt_lf_entry *v = __va(__ia64_per_cpu_var(vhpt_paddr));
	int i;

	for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
		v->ti_tag = INVALID_TI_TAG;
}
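/*
 * Clear the whole table (tags and contents) through its virtual mapping
 * at VHPT_ADDR.  Called once from vhpt_init() after the table has been
 * TR-mapped; vhpt_flush() is the cheaper tag-only variant used at run time.
 */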
static void vhpt_erase(void)
{
	struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR;
	int i;

	for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
		v->itir = 0;
		v->CChain = 0;
		v->page_flags = 0;
		v->ti_tag = INVALID_TI_TAG;
	}
	// initialize cache too???
}
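/*
 * Pin the VHPT at VHPT_ADDR with a translation register so it can never
 * be faulted out.  The 0x2 target appears to select the data TR (the
 * usual ia64_itr() convention); PSR.ic is cleared around the insert and
 * the instruction stream is serialized afterwards.
 */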
static void vhpt_map(unsigned long pte)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
	ia64_set_psr(psr);
	ia64_srlz_i();
}
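/*
 * Write a single long-format entry: ia64_thash() yields the entry's
 * address in the VHPT for vadr, ia64_ttag() the tag the walker compares
 * against.  The tag is stored last, presumably so the entry only
 * becomes valid once itir and page_flags are already in place.
 */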
void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
{
	struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
	unsigned long tag = ia64_ttag (vadr);

	/* No need to first disable the entry, since VHPT is per LP
	   and VHPT is TR mapped. */
	vlfe->itir = logps;
	vlfe->page_flags = pte | _PAGE_P;
	vlfe->ti_tag = tag;
}
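/*
 * Insert a mapping whose page size (logps) exceeds PAGE_SIZE.  One VHPT
 * entry is written per PAGE_SIZE chunk of the region, but every entry
 * carries the full translation (the aligned frame base and the large
 * page size in itir), so a lookup hashed from any page in the range
 * finds the whole mapping.
 */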
void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
{
	unsigned long mask = (1L << logps) - 1;
	int i;

	if (logps-PAGE_SHIFT > 10 && !running_on_sim) {
		// if this happens, we may want to revisit this algorithm
		panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
	}
	if (logps-PAGE_SHIFT > 2) {
		// FIXME: Should add counter here to see how often this
		// happens (e.g. for 16MB pages!) and determine if it
		// is a performance problem.  On a quick look, it takes
		// about 39000 instrs for a 16MB page and it seems to occur
		// only a few times/second, so OK for now.
		// An alternate solution would be to just insert the one
		// 16KB in the vhpt (but with the full mapping)?
		//printf("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
		//"va=%p, pa=%p, pa-masked=%p\n",
		//logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
		//(pte&_PFN_MASK)&~mask);
	}
	vaddr &= ~mask;
	pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
	for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
		vhpt_insert(vaddr,pte,logps<<2);
		vaddr += PAGE_SIZE;
	}
}
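/*
 * Per-cpu VHPT setup: allocate a naturally aligned table from the domain
 * heap, record its physical start/end in vhpt_paddr/vhpt_pend, pin it
 * with vhpt_map() and point the PTA register at it.  In the PTA value,
 * VHPT_ENABLED is the enable bit, VHPT_SIZE_LOG2 << 2 the size field,
 * and bit 8 appears to be the long-format (vf) bit.
 */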
void vhpt_init(void)
{
	unsigned long paddr, pte;
	struct page_info *page;
#if !VHPT_ENABLED
	return;
#endif
	/* This allocation only holds true if vhpt table is unique for
	 * all domains. Or else later new vhpt table should be allocated
	 * from domain heap when each domain is created. Assume xen buddy
	 * allocator can provide natural aligned page by order?
	 */
	page = alloc_domheap_pages(NULL, VHPT_SIZE_LOG2 - PAGE_SHIFT, 0);
	if (!page)
		panic("vhpt_init: can't allocate VHPT!\n");
	paddr = page_to_maddr(page);
	if (paddr & ((1 << VHPT_SIZE_LOG2) - 1))
		panic("vhpt_init: bad VHPT alignment!\n");
	__get_cpu_var(vhpt_paddr) = paddr;
	__get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1;
	printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
	       paddr, __get_cpu_var(vhpt_pend));
	pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
	vhpt_map(pte);
	ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
		     VHPT_ENABLED);
	vhpt_erase();
}
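/*
 * Flush everything the current vcpu may have cached, in order: the
 * vcpu's cached dtlb/itlb entries, this CPU's VHPT, and finally the
 * machine TLB.
 */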
void vcpu_flush_vtlb_all (void)
{
	struct vcpu *v = current;

	/* First VCPU tlb. */
	vcpu_purge_tr_entry(&PSCBX(v,dtlb));
	vcpu_purge_tr_entry(&PSCBX(v,itlb));

	/* Then VHPT. */
	vhpt_flush ();

	/* Then mTLB. */
	local_flush_tlb_all ();

	/* We could clear bit in d->domain_dirty_cpumask only if domain d is
	   not running on this processor.  There is currently no easy way to
	   check this. */
}
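/*
 * Flush the virtual TLB of every initialised vcpu of the current domain:
 * directly for a vcpu assigned to this CPU, via an IPI
 * (smp_call_function_single) for vcpus assigned elsewhere.
 */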
void domain_flush_vtlb_all (void)
{
	int cpu = smp_processor_id ();
	struct vcpu *v;

	for_each_vcpu (current->domain, v) {
		if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
			continue;

		if (v->processor == cpu)
			vcpu_flush_vtlb_all ();
		else
			smp_call_function_single
				(v->processor,
				 (void(*)(void *))vcpu_flush_vtlb_all,
				 NULL,1,1);
	}
}
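/*
 * Invalidate the VHPT entries covering [vadr, vadr + addr_range) in the
 * table of an arbitrary cpu.  The hash offset is computed with the local
 * ia64_thash(), which works because every CPU programs PTA with the same
 * base and size.
 */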
static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range)
{
	void *vhpt_base = __va(per_cpu(vhpt_paddr, cpu));

	while ((long)addr_range > 0) {
		/* Get the VHPT entry. */
		unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
		volatile struct vhpt_lf_entry *v;
		v = vhpt_base + off;
		v->ti_tag = INVALID_TI_TAG;
		addr_range -= PAGE_SIZE;
		vadr += PAGE_SIZE;
	}
}
void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range)
{
	cpu_flush_vhpt_range (current->processor, vadr, 1UL << log_range);
	ia64_ptcl(vadr, log_range << 2);
	ia64_srlz_i();
}
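/*
 * Range flush for a whole domain: purge each vcpu's cached dtlb/itlb
 * entries, then invalidate the matching VHPT entries on each vcpu's
 * processor, and finally purge the machine TLBs of all CPUs with ptc.ga.
 */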
void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
{
	struct vcpu *v;

#if 0
	// this only seems to occur at shutdown, but it does occur
	if ((!addr_range) || addr_range & (addr_range - 1)) {
		printf("vhpt_flush_address: weird range, spinning...\n");
		while(1);
	}
#endif

	for_each_vcpu (d, v) {
		if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
			continue;

		/* Purge TC entries.
		   FIXME: clear only if match. */
		vcpu_purge_tr_entry(&PSCBX(v,dtlb));
		vcpu_purge_tr_entry(&PSCBX(v,itlb));
	}
	smp_mb();

	for_each_vcpu (d, v) {
		if (!test_bit(_VCPUF_initialised, &v->vcpu_flags))
			continue;

		/* Invalidate VHPT entries. */
		cpu_flush_vhpt_range (v->processor, vadr, addr_range);
	}
	// ptc.ga has release semantics.

	/* ptc.ga */
	ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
}
static void flush_tlb_vhpt_all (struct domain *d)
{
	/* First VHPT. */
	vhpt_flush ();

	/* Then mTLB. */
	local_flush_tlb_all ();
}
void domain_flush_destroy (struct domain *d)
{
	/* Very heavy... */
	on_each_cpu ((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
	cpus_clear (d->domain_dirty_cpumask);
}
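/*
 * Flush VHPT and machine TLB on every CPU in the mask: the local CPU is
 * handled directly (and removed from the mask) before IPIs are sent to
 * the remaining CPUs.
 */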
void flush_tlb_mask(cpumask_t mask)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu_isset (cpu, mask)) {
		cpu_clear(cpu, mask);
		flush_tlb_vhpt_all (NULL);
	}

	if (cpus_empty(mask))
		return;

	for_each_cpu_mask (cpu, mask)
		smp_call_function_single
			(cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
}
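/*
 * Statistics support: dump_vhpt_stats() walks each present CPU's VHPT
 * and reports how many entries currently carry a valid tag.  There is no
 * separate counter state, so zero_vhpt_stats() is a no-op.
 */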
void zero_vhpt_stats(void)
{
	return;
}
int dump_vhpt_stats(char *buf)
{
	int i, cpu;
	char *s = buf;

	s += sprintf(s,"VHPT usage (%ld entries):\n",
		     (unsigned long) VHPT_NUM_ENTRIES);

	for_each_present_cpu (cpu) {
		struct vhpt_lf_entry *v = __va(per_cpu(vhpt_paddr, cpu));
		unsigned long vhpt_valid = 0;

		for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
			if (!(v->ti_tag & INVALID_TI_TAG))
				vhpt_valid++;
		s += sprintf(s," cpu %d: %ld\n", cpu, vhpt_valid);
	}

	return s - buf;
}