ia64/xen-unstable

view xen/arch/ia64/xen/vhpt.c @ 10016:998aa66b6502

[IA64] Fix VTi domain destroy bug

This patch fixes a VTi domain destroy bug. On the current tip, destroying a
VTi domain would sometimes fail. The failure was caused by using the wrong
vhpt base address for the VTi domain in the vhpt_flush function.
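
For context, a minimal sketch of the kind of change described above, assuming the
pre-patch vhpt_flush() walked the table through the fixed VHPT_ADDR mapping (the
patched function appears in the source below):

    static void vhpt_flush(void)
    {
            /* assumed pre-patch form:
             *   struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR;
             * patched form: take the base from this CPU's vhpt_paddr so the
             * correct per-CPU table is flushed when a VTi domain is destroyed. */
            struct vhpt_lf_entry *v = __va(__ia64_per_cpu_var(vhpt_paddr));
            int i;

            for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
                    v->ti_tag = INVALID_TI_TAG;
    }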

Signed-off-by: Zhang xiantao <xiantao.zhang@intel.com>
author awilliam@xenbuild.aw
date Wed May 17 15:52:10 2006 -0600 (2006-05-17)
parents 77ccce98ddef
children 7da52d016bcc
line source
/*
 * Initialize VHPT support.
 *
 * Copyright (C) 2004 Hewlett-Packard Co
 *      Dan Magenheimer <dan.magenheimer@hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>

/* Defined in tlb.c  */
extern void ia64_global_tlb_purge(UINT64 start, UINT64 end, UINT64 nbits);

extern long running_on_sim;

DEFINE_PER_CPU (unsigned long, vhpt_paddr);
DEFINE_PER_CPU (unsigned long, vhpt_pend);
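
/* Invalidate every entry of this physical CPU's VHPT by marking its tag invalid. */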
static void vhpt_flush(void)
{
        struct vhpt_lf_entry *v = __va(__ia64_per_cpu_var(vhpt_paddr));
        int i;

        for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
                v->ti_tag = INVALID_TI_TAG;
}
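
/* Fully clear the VHPT mapped at VHPT_ADDR; used once at initialization. */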
static void vhpt_erase(void)
{
        struct vhpt_lf_entry *v = (struct vhpt_lf_entry *)VHPT_ADDR;
        int i;

        for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
                v->itir = 0;
                v->CChain = 0;
                v->page_flags = 0;
                v->ti_tag = INVALID_TI_TAG;
        }
        // initialize cache too???
}
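
/* Pin the VHPT mapping at VHPT_ADDR with a data translation register. */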
static void vhpt_map(unsigned long pte)
{
        unsigned long psr;

        psr = ia64_clear_ic();
        ia64_itr(0x2, IA64_TR_VHPT, VHPT_ADDR, pte, VHPT_SIZE_LOG2);
        ia64_set_psr(psr);
        ia64_srlz_i();
}
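
/* Insert one translation into the local VHPT, at the slot selected by thash(vadr). */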
void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
{
        struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
        unsigned long tag = ia64_ttag (vadr);

        /* No need to first disable the entry, since VHPT is per LP
           and VHPT is TR mapped.  */
        vlfe->itir = logps;
        vlfe->page_flags = pte | _PAGE_P;
        vlfe->ti_tag = tag;
}
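
/* Insert a large (logps) translation as a series of PAGE_SIZE-sized VHPT entries. */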
void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
{
        unsigned long mask = (1L << logps) - 1;
        int i;

        if (logps-PAGE_SHIFT > 10 && !running_on_sim) {
                // if this happens, we may want to revisit this algorithm
                panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
        }
        if (logps-PAGE_SHIFT > 2) {
                // FIXME: Should add counter here to see how often this
                //  happens (e.g. for 16MB pages!) and determine if it
                //  is a performance problem.  On a quick look, it takes
                //  about 39000 instrs for a 16MB page and it seems to occur
                //  only a few times/second, so OK for now.
                //  An alternate solution would be to just insert the one
                //  16KB in the vhpt (but with the full mapping)?
                //printf("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
                //      "va=%p, pa=%p, pa-masked=%p\n",
                //      logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
                //      (pte&_PFN_MASK)&~mask);
        }
        vaddr &= ~mask;
        pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
        for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
                vhpt_insert(vaddr,pte,logps<<2);
                vaddr += PAGE_SIZE;
        }
}
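
/* Per-CPU VHPT setup: allocate the table, pin its mapping, enable it via the PTA and clear it. */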
void vhpt_init(void)
{
        unsigned long paddr, pte;
        struct page_info *page;
#if !VHPT_ENABLED
        return;
#endif
        /* This allocation only holds true if the vhpt table is unique for
         * all domains.  Otherwise a new vhpt table should later be allocated
         * from the domain heap when each domain is created.  Assume the xen
         * buddy allocator can provide a naturally aligned page by order?
         */
        page = alloc_domheap_pages(NULL, VHPT_SIZE_LOG2 - PAGE_SHIFT, 0);
        if (!page)
                panic("vhpt_init: can't allocate VHPT!\n");
        paddr = page_to_maddr(page);
        if (paddr & ((1 << VHPT_SIZE_LOG2) - 1))
                panic("vhpt_init: bad VHPT alignment!\n");
        __get_cpu_var(vhpt_paddr) = paddr;
        __get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1;
        printf("vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
               paddr, __get_cpu_var(vhpt_pend));
        pte = pte_val(pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
        vhpt_map(pte);
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                     VHPT_ENABLED);
        vhpt_erase();
}
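
/* Flush the whole virtual TLB of the current VCPU: its cached itlb/dtlb entries,
   the per-CPU VHPT and the machine TLB. */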
void vcpu_flush_vtlb_all (void)
{
        struct vcpu *v = current;

        /* First VCPU tlb.  */
        vcpu_purge_tr_entry(&PSCBX(v,dtlb));
        vcpu_purge_tr_entry(&PSCBX(v,itlb));

        /* Then VHPT.  */
        vhpt_flush ();

        /* Then mTLB.  */
        local_flush_tlb_all ();

        /* We could clear the bit in d->domain_dirty_cpumask only if domain d is
           not running on this processor.  There is currently no easy way to
           check this.  */
}
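
/* Flush the virtual TLB of every VCPU of the current domain, locally or by IPI. */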
void domain_flush_vtlb_all (void)
{
        int cpu = smp_processor_id ();
        struct vcpu *v;

        for_each_vcpu (current->domain, v)
                if (v->processor == cpu)
                        vcpu_flush_vtlb_all ();
                else
                        smp_call_function_single
                                (v->processor,
                                 (void(*)(void *))vcpu_flush_vtlb_all,
                                 NULL,1,1);
}
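
/* Invalidate the VHPT entries of the given CPU covering [vadr, vadr + addr_range). */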
static void cpu_flush_vhpt_range (int cpu, u64 vadr, u64 addr_range)
{
        void *vhpt_base = __va(per_cpu(vhpt_paddr, cpu));

        while ((long)addr_range > 0) {
                /* Get the VHPT entry.  */
                unsigned int off = ia64_thash(vadr) - VHPT_ADDR;
                volatile struct vhpt_lf_entry *v;
                v = vhpt_base + off;
                v->ti_tag = INVALID_TI_TAG;
                addr_range -= PAGE_SIZE;
                vadr += PAGE_SIZE;
        }
}
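
/* Flush a virtual address range from the local VHPT and the local machine TLB (ptc.l). */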
void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range)
{
        cpu_flush_vhpt_range (current->processor, vadr, 1UL << log_range);
        ia64_ptcl(vadr, log_range << 2);
        ia64_srlz_i();
}
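
/* Flush a virtual address range for every VCPU of a domain, then purge it globally (ptc.ga). */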
void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
{
        struct vcpu *v;

#if 0
        // this only seems to occur at shutdown, but it does occur
        if ((!addr_range) || addr_range & (addr_range - 1)) {
                printf("vhpt_flush_address: weird range, spinning...\n");
                while(1);
        }
#endif

        for_each_vcpu (d, v) {
                /* Purge TC entries.
                   FIXME: clear only if match.  */
                vcpu_purge_tr_entry(&PSCBX(v,dtlb));
                vcpu_purge_tr_entry(&PSCBX(v,itlb));

                /* Invalidate VHPT entries.  */
                cpu_flush_vhpt_range (v->processor, vadr, addr_range);
        }

        /* ptc.ga  */
        ia64_global_tlb_purge(vadr,vadr+addr_range,PAGE_SHIFT);
}
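
/* Flush the local VHPT and machine TLB; the domain argument is currently unused. */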
static void flush_tlb_vhpt_all (struct domain *d)
{
        /* First VHPT.  */
        vhpt_flush ();

        /* Then mTLB.  */
        local_flush_tlb_all ();
}
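
/* Domain destruction path: flush the VHPT and TLB on every CPU. */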
void domain_flush_destroy (struct domain *d)
{
        /* Very heavy...  */
        on_each_cpu ((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
        cpus_clear (d->domain_dirty_cpumask);
}
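
/* Flush the VHPT and TLB on every CPU in the mask, handling the local CPU first. */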
void flush_tlb_mask(cpumask_t mask)
{
        int cpu;

        cpu = smp_processor_id();
        if (cpu_isset (cpu, mask)) {
                cpu_clear(cpu, mask);
                flush_tlb_vhpt_all (NULL);
        }

        if (cpus_empty(mask))
                return;

        for_each_cpu_mask (cpu, mask)
                smp_call_function_single
                        (cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
}
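
/* VHPT statistics: report the number of valid entries and collision chains in the local VHPT. */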
void zero_vhpt_stats(void)
{
        return;
}

int dump_vhpt_stats(char *buf)
{
        int i;
        char *s = buf;
        struct vhpt_lf_entry *v = (void *)VHPT_ADDR;
        unsigned long vhpt_valid = 0, vhpt_chains = 0;

        for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++) {
                if (!(v->ti_tag & INVALID_TI_TAG)) vhpt_valid++;
                if (v->CChain) vhpt_chains++;
        }
        s += sprintf(s,"VHPT usage: %ld/%ld (%ld collision chains)\n",
                     vhpt_valid, (unsigned long) VHPT_NUM_ENTRIES, vhpt_chains);
        return s - buf;
}