ia64/xen-unstable

view xen/arch/ia64/xen/regionreg.c @ 9741:2de95fd92e74

[IA64] dom_rid_bits

dom_rid_bits command line parameter added (sets the default number of domain rid bits).
Comments added and cleanups in regionreg.c
panic_domain declared in domain.h

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Wed Apr 19 10:26:50 2006 -0600 (2006-04-19)
parents 4e8a64d8bd0e
children fcfc614d3713
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *	Bret Mckee (bret.mckee@hp.com)
 *
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
/* Defined in xenasm.S */
extern void ia64_new_rr7(unsigned long rid, void *shared_info,
                         void *shared_arch_info, unsigned long p_vhpt,
                         unsigned long v_pal);

extern void *pal_vaddr;
/* The RID virtualization mechanism is really simple: domains have fewer
   rid bits than the host, and the host rid space is shared among the
   domains.  (Values in parentheses are the usual default values.)

   The host rid space is partitioned into MAX_RID_BLOCKS (= 64)
   blocks of 2**IA64_MIN_IMPL_RID_BITS (= 18) rids.  The first block is also
   partitioned into MAX_RID_BLOCKS small blocks.  Small blocks are used for
   metaphysical rids.  Small block 0 can't be allocated and is reserved for
   Xen's own rids during boot.

   Blocks and small blocks are allocated together and a domain may
   have one or more consecutive blocks (and small blocks).  */
/* Minimum number of RID bits for a domain.  The current value is 18, which is
   the minimum defined by the Itanium architecture, but it can be lowered
   to increase the number of domains.  */
#define IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)

/* Maximum number of RID bits.  This is definitely 24.  */
#define IA64_MAX_IMPL_RID_BITS	24
/* Maximum number of blocks.  */
#define MAX_RID_BLOCKS	(1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))

/* Default number of rid bits for domains.  */
static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
integer_param("dom_rid_bits", domain_rid_bits_default);
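/* Illustrative sketch (not part of the original file): integer_param() wires
   domain_rid_bits_default to the hypervisor boot command line, so passing
   e.g. "dom_rid_bits=19" would give new domains 2^19 rids (two consecutive
   blocks) instead of one.  The compiled-out helpers below only restate the
   block arithmetic described in the comment above, using the macros defined
   in this file; the helper names are made up for the example.  */
#if 0
static inline unsigned int rid_to_block(unsigned long host_rid)
{
        /* Index of the host rid block (0 .. MAX_RID_BLOCKS-1) a rid falls into. */
        return host_rid >> IA64_MIN_IMPL_RID_BITS;
}

static inline unsigned int blocks_for_rid_bits(unsigned int rid_bits)
{
        /* Number of consecutive blocks a domain with rid_bits rid bits needs:
           1 block for 18 bits, 2 for 19, 4 for 20, ...  */
        return 1U << (rid_bits - IA64_MIN_IMPL_RID_BITS);
}
#endif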
#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
        unsigned long r;
        __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
        return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
        __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif
static unsigned long allocate_metaphysical_rr(struct domain *d, int n)
{
        ia64_rr rrv;

        rrv.rrval = 0;	// Or else may see reserved bit fault
        rrv.rid = d->arch.starting_mp_rid + n;
        rrv.ps = PAGE_SHIFT;
        rrv.ve = 0;
        /* Mangle metaphysical rid */
        rrv.rrval = vmMangleRID(rrv.rrval);
        return rrv.rrval;
}
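/* Reference note (added): in the architected region register format the
   fields written above land at ve = bit 0, ps = bits 7:2 and rid = bits 31:8,
   so the value returned here is vmMangleRID() applied to
   ((d->arch.starting_mp_rid + n) << 8) | (PAGE_SHIFT << 2).  */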
/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static int mp_rid_shift;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
void init_rid_allocator (void)
{
        int log_blocks;
        pal_vm_info_2_u_t vm_info_2;

        /* Get machine rid_size.  */
        BUG_ON (ia64_pal_vm_summary (NULL, &vm_info_2) != 0);
        implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;

        /* We need at least a little spare space...  */
        BUG_ON (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);

        /* ...but we cannot use more than IA64_MAX_IMPL_RID_BITS.  */
        if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
                implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;

        /* Due to RID mangling, we expect 24 RID bits!
           This test should be removed if RID mangling is removed/modified.  */
        if (implemented_rid_bits != 24) {
                printf ("RID mangling expected 24 RID bits, got only %d!\n",
                        implemented_rid_bits);
                BUG();
        }

        /* Allow the creation of at least domain 0.  */
        if (domain_rid_bits_default > implemented_rid_bits - 1)
                domain_rid_bits_default = implemented_rid_bits - 1;

        /* Check for too small values.  */
        if (domain_rid_bits_default < IA64_MIN_IMPL_RID_BITS) {
                printf ("Default domain rid bits %d is too small, use %d\n",
                        domain_rid_bits_default, IA64_MIN_IMPL_RID_BITS);
                domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
        }

        log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);

        printf ("Maximum number of domains: %d; %d RID bits per domain\n",
                (1 << (implemented_rid_bits - domain_rid_bits_default)) - 1,
                domain_rid_bits_default);

        mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
        BUG_ON (mp_rid_shift < 3);
}
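/* Worked example (added; assumes the usual values mentioned above): with
   implemented_rid_bits = 24 and IA64_MIN_IMPL_RID_BITS = 18, log_blocks = 6,
   so block 0 is carved into 64 small blocks of 2^(18 - 6) = 4096 metaphysical
   rids each and mp_rid_shift = 12; with the default of 18 RID bits per domain
   the banner above reports (1 << (24 - 18)) - 1 = 63 domains maximum.  */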
/*
 *  Allocate a power-of-two-sized chunk of region id space -- one or more
 *  "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
        int i, j, n_rid_blocks;

        if (ridbits == 0)
                ridbits = domain_rid_bits_default;

        if (ridbits >= IA64_MAX_IMPL_RID_BITS)
                ridbits = IA64_MAX_IMPL_RID_BITS - 1;

        if (ridbits < IA64_MIN_IMPL_RID_BITS)
                ridbits = IA64_MIN_IMPL_RID_BITS;

        // convert to rid_blocks and find one
        n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

        // skip over block 0, reserved for "meta-physical mappings (and Xen)"
        for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
                if (ridblock_owner[i] == NULL) {
                        for (j = i; j < i + n_rid_blocks; ++j) {
                                if (ridblock_owner[j])
                                        break;
                        }
                        /* The whole range is free iff the inner loop ran to
                           completion; testing ridblock_owner[j] here would
                           read one entry past the candidate range.  */
                        if (j == i + n_rid_blocks)
                                break;
                }
        }
        if (i >= MAX_RID_BLOCKS)
                return 0;

        // found an unused block:
        // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
        // mark this block as owned
        for (j = i; j < i + n_rid_blocks; ++j)
                ridblock_owner[j] = d;

        // setup domain struct
        d->arch.rid_bits = ridbits;
        d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
        d->arch.ending_rid = (i + n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;

        d->arch.starting_mp_rid = i << mp_rid_shift;
        d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;

        d->arch.metaphysical_rr0 = allocate_metaphysical_rr(d, 0);
        d->arch.metaphysical_rr4 = allocate_metaphysical_rr(d, 1);

        printf("### domain %p: rid=%x-%x mp_rid=%x\n",
               d, d->arch.starting_rid, d->arch.ending_rid,
               d->arch.starting_mp_rid);

        return 1;
}
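/* Worked example (added, assuming the defaults above and an empty allocator):
   the first domain asking for the default 18 RID bits gets block 1, i.e.
   starting_rid = 1 << 18 = 0x40000, ending_rid = 2 << 18 = 0x80000,
   starting_mp_rid = 1 << 12 = 0x1000 and ending_mp_rid = 2 << 12 = 0x2000;
   a later domain created with ridbits = 19 would take blocks 2 and 3.  */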
int deallocate_rid_range(struct domain *d)
{
        int i;
        int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
        int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

        /* Sanity check.  */
        if (d->arch.rid_bits == 0)
                return 1;

        for (i = rid_block_start; i < rid_block_end; ++i) {
                ASSERT(ridblock_owner[i] == d);
                ridblock_owner[i] = NULL;
        }

        d->arch.rid_bits = 0;
        d->arch.starting_rid = 0;
        d->arch.ending_rid = 0;
        d->arch.starting_mp_rid = 0;
        d->arch.ending_mp_rid = 0;
        return 1;
}
static void
set_rr(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
        ia64_srlz_d();
}
// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
        struct vcpu *v = current;
        unsigned long rreg = REGION_NUMBER(rr);
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;

        if (val == -1) return 1;

        rrv.rrval = val;
        newrrv.rrval = 0;
        newrid = v->arch.starting_rid + rrv.rid;

        if (newrid > v->arch.ending_rid) {
                printk("can't set rr%d to %lx, starting_rid=%x, "
                       "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
                       v->arch.starting_rid, v->arch.ending_rid, val);
                return 0;
        }
#if 0
        memrrv.rrval = rrv.rrval;
        if (rreg == 7) {
                newrrv.rid = newrid;
                newrrv.ve = VHPT_ENABLED_REGION_7;
                newrrv.ps = IA64_GRANULE_SHIFT;
                ia64_new_rr7(vmMangleRID(newrrv.rrval), v->vcpu_info,
                             v->arch.privregs);
        }
        else {
                newrrv.rid = newrid;
                // FIXME? region 6 needs to be uncached for EFI to work
                if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
                else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
                newrrv.ps = PAGE_SHIFT;
                if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
                set_rr(rr, newrrv.rrval);
        }
#else
        memrrv.rrval = rrv.rrval;
        newrrv.rid = newrid;
        newrrv.ve = 1; // VHPT now enabled for region 7!!
        newrrv.ps = PAGE_SHIFT;

        if (rreg == 0) {
                v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
                if (!PSCB(v, metaphysical_mode))
                        set_rr(rr, newrrv.rrval);
        } else if (rreg == 7) {
                ia64_new_rr7(vmMangleRID(newrrv.rrval), v->vcpu_info,
                             v->arch.privregs, __get_cpu_var(vhpt_paddr),
                             (unsigned long) pal_vaddr);
        } else {
                set_rr(rr, newrrv.rrval);
        }
#endif
        return 1;
}
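/* Illustrative arithmetic (added): a guest writing virtual rid 5 into a
   region register of a domain whose block starts at host rid 0x40000 ends up
   with newrid = 0x40000 + 5 = 0x40005; set_rr() or ia64_new_rr7() then apply
   vmMangleRID() before the value reaches the hardware register.  */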
// set rr0 to the domain's metaphysical rid
// (metaphysical mode, so don't apply the domain offset)
int set_metaphysical_rr0(void)
{
        struct vcpu *v = current;
//      ia64_rr rrv;

//      rrv.ve = 1;     FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
        ia64_set_rr(0, v->arch.metaphysical_rr0);
        ia64_srlz_d();
        return 1;
}
void init_all_rr(struct vcpu *v)
{
        ia64_rr rrv;

        rrv.rrval = 0;
        //rrv.rrval = v->domain->arch.metaphysical_rr0;
        rrv.ps = PAGE_SHIFT;
        rrv.ve = 1;
        if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
        VCPU(v,rrs[0]) = -1;
        VCPU(v,rrs[1]) = rrv.rrval;
        VCPU(v,rrs[2]) = rrv.rrval;
        VCPU(v,rrs[3]) = rrv.rrval;
        VCPU(v,rrs[4]) = rrv.rrval;
        VCPU(v,rrs[5]) = rrv.rrval;
        rrv.ve = 0;
        VCPU(v,rrs[6]) = rrv.rrval;
        // v->shared_info->arch.rrs[7] = rrv.rrval;
}
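/* Added note: rrs[0] is initialised to -1, which set_one_rr() above treats as
   a successful no-op, so physical rr0 is left alone (presumably until the
   guest loads its own value or metaphysical mode installs metaphysical_rr0). */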
/* XEN/ia64 INTERNAL ROUTINES */

// loads a vcpu's region register (rr0-rr7) state into
// the real physical region registers.  rr7 is handled through
// set_one_rr()/ia64_new_rr7(), because changing rr7 means
// going to assembly and physical mode.
//
void load_region_regs(struct vcpu *v)
{
        unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
        // TODO: These probably should be validated
        unsigned long bad = 0;

        if (VCPU(v,metaphysical_mode)) {
                rr0 = v->domain->arch.metaphysical_rr0;
                ia64_set_rr(0x0000000000000000L, rr0);
                ia64_srlz_d();
        }
        else {
                rr0 = VCPU(v,rrs[0]);
                if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
        }
        rr1 = VCPU(v,rrs[1]);
        rr2 = VCPU(v,rrs[2]);
        rr3 = VCPU(v,rrs[3]);
        rr4 = VCPU(v,rrs[4]);
        rr5 = VCPU(v,rrs[5]);
        rr6 = VCPU(v,rrs[6]);
        rr7 = VCPU(v,rrs[7]);
        if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
        if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
        if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
        if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
        if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
        if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
        if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
        if (bad) {
                panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
        }
}
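/* Added note: the literal addresses above are just the bases of the eight
   2^61-byte regions -- bits 63:61 of a virtual address select the region
   register, so REGION_NUMBER(0x2000000000000000L) is 1,
   REGION_NUMBER(0x4000000000000000L) is 2, and so on up to region 7.  */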