ia64/xen-unstable

view xen/arch/ia64/xen/regionreg.c @ 10434:c78f750a264c

[IA64] Fix for ptc.ga emulation

If we switch to the idle domain after vhpt_purge in ptc.ga emulation,
we must not switch rr7 and the PTA; only disabling the VHPT is needed.
Otherwise SMP VTi will hang the whole system due to a fault.

Signed-off-by: Zhang xiantao <xiantao.zhang@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@lappy
date Tue Jun 20 16:05:17 2006 -0600 (2006-06-20)
parents ea306829506c
children e448723613ab
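
The behavior described in the commit message is implemented by load_region_reg7_and_pta() at the very end of the listing below. The following is a condensed, hedged restatement of that logic for readers of this page; the helper name is hypothetical and does not exist in the tree, but every call it makes is taken from the function itself:

/*
 * Hypothetical condensed paraphrase of load_region_reg7_and_pta()
 * (see the end of the file); not a separate function in the Xen tree.
 */
static void reload_rr7_and_pta_sketch(struct vcpu *v)
{
    if (!is_idle_domain(v->domain)) {
        /* Real domain: install its PTA (VHPT enabled) and its rr7. */
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                     VHPT_ENABLED);
        if (!set_one_rr(0xe000000000000000L, VCPU(v, rrs[7])))
            panic_domain(0, "%s: can't set!\n", __func__);
    } else {
        /* Idle domain: leave rr7 and the PTA alone, just turn the VHPT off. */
        ia64_set_pta(ia64_get_pta() & ~VHPT_ENABLED);
    }
}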
line source
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *     Bret Mckee (bret.mckee@hp.com)
 *
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
/* Defined in xenasm.S */
extern void ia64_new_rr7(unsigned long rid, void *shared_info,
                         void *shared_arch_info, unsigned long shared_info_va,
                         unsigned long p_vhpt);
/* The RID virtualization mechanism is really simple: domains have fewer rid
   bits than the host and the host rid space is shared among the domains.
   (Values in parentheses are the usual default values.)

   The host rid space is partitioned into MAX_RID_BLOCKS (= 64)
   blocks of 2**IA64_MIN_IMPL_RID_BITS (= 18) rids. The first block is also
   partitioned into MAX_RID_BLOCKS small blocks. Small blocks are used for
   metaphysical rids. Small block 0 can't be allocated and is reserved for
   Xen's own rids during boot.

   Blocks and small blocks are allocated together and a domain may
   have one or more consecutive blocks (and small blocks).
*/
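
/* Illustrative worked example (not part of the original file): with
 * IA64_MAX_IMPL_RID_BITS = 24 and IA64_MIN_IMPL_RID_BITS = 18, the host rid
 * space splits into 1 << (24 - 18) = 64 blocks of 1 << 18 = 262144 rids each.
 * A domain given 19 rid bits therefore owns 1 << (19 - 18) = 2 consecutive
 * blocks, and the number of such domains is bounded by 64 / 2 - 1 = 31
 * (block 0 being reserved for metaphysical rids and Xen).
 */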
/* Minimum number of RID bits for a domain. The current value is 18, which is
   the minimum defined by the Itanium architecture, but it can be lowered
   to increase the number of domains. */
#define IA64_MIN_IMPL_RID_BITS (IA64_MIN_IMPL_RID_MSB+1)
/* Maximum number of RID bits. This is definitely 24. */
#define IA64_MAX_IMPL_RID_BITS 24

/* Maximum number of blocks. */
#define MAX_RID_BLOCKS (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))

/* Default number of rid bits for domains. */
static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
integer_param("dom_rid_bits", domain_rid_bits_default);
#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
    unsigned long r;
    __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
    return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
    __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif
static unsigned long allocate_metaphysical_rr(struct domain *d, int n)
{
    ia64_rr rrv;

    rrv.rrval = 0;    // Or else may see reserved bit fault
    rrv.rid = d->arch.starting_mp_rid + n;
    rrv.ps = PAGE_SHIFT;
    rrv.ve = 0;
    /* Mangle metaphysical rid */
    rrv.rrval = vmMangleRID(rrv.rrval);
    return rrv.rrval;
}
/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static int mp_rid_shift;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
void init_rid_allocator (void)
{
    int log_blocks;
    pal_vm_info_2_u_t vm_info_2;

    /* Get machine rid_size. */
    BUG_ON (ia64_pal_vm_summary (NULL, &vm_info_2) != 0);
    implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;

    /* We need at least some rid space... */
    BUG_ON (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);

    /* ...but cap any excess space. */
    if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
        implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;

    /* Due to RID mangling, we expect 24 RID bits!
       This test should be removed if RID mangling is removed/modified. */
    if (implemented_rid_bits != 24) {
        printf ("RID mangling expected 24 RID bits, got only %d!\n",
                implemented_rid_bits);
        BUG();
    }

    /* Allow the creation of at least domain 0. */
    if (domain_rid_bits_default > implemented_rid_bits - 1)
        domain_rid_bits_default = implemented_rid_bits - 1;

    /* Check for too small values. */
    if (domain_rid_bits_default < IA64_MIN_IMPL_RID_BITS) {
        printf ("Default domain rid bits %d is too small, use %d\n",
                domain_rid_bits_default, IA64_MIN_IMPL_RID_BITS);
        domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
    }

    log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);

    printf ("Maximum number of domains: %d; %d RID bits per domain\n",
            (1 << (implemented_rid_bits - domain_rid_bits_default)) - 1,
            domain_rid_bits_default);

    mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
    BUG_ON (mp_rid_shift < 3);
}
/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 * "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
    int i, j, n_rid_blocks;

    if (ridbits == 0)
        ridbits = domain_rid_bits_default;

    if (ridbits >= IA64_MAX_IMPL_RID_BITS)
        ridbits = IA64_MAX_IMPL_RID_BITS - 1;

    if (ridbits < IA64_MIN_IMPL_RID_BITS)
        ridbits = IA64_MIN_IMPL_RID_BITS;

    // convert to rid_blocks and find one
    n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

    // skip over block 0, reserved for "meta-physical mappings (and Xen)"
    for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
        if (ridblock_owner[i] == NULL) {
            for (j = i; j < i + n_rid_blocks; ++j) {
                if (ridblock_owner[j])
                    break;
            }
            if (ridblock_owner[j] == NULL)
                break;
        }
    }

    if (i >= MAX_RID_BLOCKS)
        return 0;

    // found an unused block:
    // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
    // mark this block as owned
    for (j = i; j < i + n_rid_blocks; ++j)
        ridblock_owner[j] = d;

    // setup domain struct
    d->arch.rid_bits = ridbits;
    d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
    d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;

    d->arch.starting_mp_rid = i << mp_rid_shift;
    d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;

    d->arch.metaphysical_rr0 = allocate_metaphysical_rr(d, 0);
    d->arch.metaphysical_rr4 = allocate_metaphysical_rr(d, 1);

    printf("### domain %p: rid=%x-%x mp_rid=%x\n",
           d, d->arch.starting_rid, d->arch.ending_rid,
           d->arch.starting_mp_rid);

    return 1;
}
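
/* Illustrative example (not part of the original file): with the default
 * 18 rid bits a domain occupies a single block.  If that block is i = 2,
 * the domain gets host rids 2 << 18 .. (3 << 18) - 1, metaphysical rids
 * 2 << mp_rid_shift .. (3 << mp_rid_shift) - 1 (mp_rid_shift is 12 when
 * 24 rid bits are implemented), and metaphysical_rr0/rr4 are built from
 * the first two of those metaphysical rids by allocate_metaphysical_rr().
 */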
int deallocate_rid_range(struct domain *d)
{
    int i;
    int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
    int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

    /* Sanity check. */
    if (d->arch.rid_bits == 0)
        return 1;

    for (i = rid_block_start; i < rid_block_end; ++i) {
        ASSERT(ridblock_owner[i] == d);
        ridblock_owner[i] = NULL;
    }

    d->arch.rid_bits = 0;
    d->arch.starting_rid = 0;
    d->arch.ending_rid = 0;
    d->arch.starting_mp_rid = 0;
    d->arch.ending_mp_rid = 0;
    return 1;
}
static void
set_rr(unsigned long rr, unsigned long rrval)
{
    ia64_set_rr(rr, vmMangleRID(rrval));
    ia64_srlz_d();
}
// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
    struct vcpu *v = current;
    unsigned long rreg = REGION_NUMBER(rr);
    ia64_rr rrv, newrrv, memrrv;
    unsigned long newrid;

    if (val == -1) return 1;

    rrv.rrval = val;
    newrrv.rrval = 0;
    newrid = v->arch.starting_rid + rrv.rid;

    if (newrid > v->arch.ending_rid) {
        printk("can't set rr%d to %lx, starting_rid=%x,"
               "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
               v->arch.starting_rid,v->arch.ending_rid,val);
        return 0;
    }

    memrrv.rrval = rrv.rrval;
    newrrv.rid = newrid;
    newrrv.ve = 1; // VHPT now enabled for region 7!!
    newrrv.ps = PAGE_SHIFT;

    if (rreg == 0) {
        v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
        if (!PSCB(v,metaphysical_mode))
            set_rr(rr,newrrv.rrval);
    } else if (rreg == 7) {
        ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
                     v->arch.privregs, v->domain->arch.shared_info_va,
                     __get_cpu_var(vhpt_paddr));
    } else {
        set_rr(rr,newrrv.rrval);
    }
    return 1;
}
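
/* Illustrative example (not part of the original file): if a domain owns
 * host rids 0x80000-0xbffff (starting_rid = 2 << 18, ending_rid = 3 << 18)
 * and the guest writes a region register value whose rid field is 5, the
 * host rid installed is 0x80000 + 5 = 0x80005; the resulting value is then
 * passed through vmMangleRID() before reaching the physical register.
 */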
// set rr0 to the passed rid (for metaphysical mode, so don't use domain offset)
int set_metaphysical_rr0(void)
{
    struct vcpu *v = current;
    // ia64_rr rrv;

    // rrv.ve = 1;  FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
    ia64_set_rr(0,v->arch.metaphysical_rr0);
    ia64_srlz_d();
    return 1;
}
void init_all_rr(struct vcpu *v)
{
    ia64_rr rrv;

    rrv.rrval = 0;
    //rrv.rrval = v->domain->arch.metaphysical_rr0;
    rrv.ps = PAGE_SHIFT;
    rrv.ve = 1;
    if (!v->vcpu_info) { panic("Stopping in init_all_rr\n"); }
    VCPU(v,rrs[0]) = -1;
    VCPU(v,rrs[1]) = rrv.rrval;
    VCPU(v,rrs[2]) = rrv.rrval;
    VCPU(v,rrs[3]) = rrv.rrval;
    VCPU(v,rrs[4]) = rrv.rrval;
    VCPU(v,rrs[5]) = rrv.rrval;
    rrv.ve = 0;
    VCPU(v,rrs[6]) = rrv.rrval;
    // v->shared_info->arch.rrs[7] = rrv.rrval;
}
/* XEN/ia64 INTERNAL ROUTINES */

// loads a thread's region register (0-7) state into
// the real physical region registers.  rr7 is handled by
// ia64_new_rr7() via set_one_rr() (we have to go to assembly and
// physical mode to change rr7).  Panics the domain if any
// region register cannot be set.
//
void load_region_regs(struct vcpu *v)
{
    unsigned long rr0, rr1,rr2, rr3, rr4, rr5, rr6, rr7;
    // TODO: These probably should be validated
    unsigned long bad = 0;

    if (VCPU(v,metaphysical_mode)) {
        rr0 = v->domain->arch.metaphysical_rr0;
        ia64_set_rr(0x0000000000000000L, rr0);
        ia64_srlz_d();
    }
    else {
        rr0 = VCPU(v,rrs[0]);
        if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
    }
    rr1 = VCPU(v,rrs[1]);
    rr2 = VCPU(v,rrs[2]);
    rr3 = VCPU(v,rrs[3]);
    rr4 = VCPU(v,rrs[4]);
    rr5 = VCPU(v,rrs[5]);
    rr6 = VCPU(v,rrs[6]);
    rr7 = VCPU(v,rrs[7]);
    if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
    if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
    if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
    if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
    if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
    if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
    if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
    if (bad) {
        panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
    }
}
void load_region_reg7_and_pta(struct vcpu *v)
{
    unsigned long rr7, pta;

    if (!is_idle_domain(v->domain)) {
        ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) |
                     VHPT_ENABLED);

        // TODO: These probably should be validated
        rr7 = VCPU(v,rrs[7]);
        if (!set_one_rr(0xe000000000000000L, rr7))
            panic_domain(0, "%s: can't set!\n", __func__);
    }
    else {
        pta = ia64_get_pta();
        ia64_set_pta(pta & ~VHPT_ENABLED);
    }
}