ia64/xen-unstable

view xen/arch/ia64/regionreg.c @ 4146:f2d61710e4d9

bitkeeper revision 1.1236.25.24 (42366e9aQ71LQ8uCB-Y1IwVNqx5eqA)

Merge djm@kirby.fc.hp.com://home/djm/src/xen/xeno-unstable-ia64.bk
into sportsman.spdomain:/home/djm/xeno-unstable-ia64.bk
author djm@sportsman.spdomain
date Tue Mar 15 05:11:54 2005 +0000 (2005-03-15)
parents f8026d38aa87 0c846e77cca4
children 74080d40b2e9 58f33dec606f
line source
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *     Bret Mckee (bret.mckee@hp.com)
 *
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>

#define IA64_MIN_IMPL_RID_BITS  (IA64_MIN_IMPL_RID_MSB+1)
#define IA64_MAX_IMPL_RID_BITS  24

#define MIN_RIDS        (1 << IA64_MIN_IMPL_RID_BITS)
#define MIN_RID_MAX     (MIN_RIDS - 1)
#define MIN_RID_MASK    (MIN_RIDS - 1)
#define MAX_RIDS        (1 << (IA64_MAX_IMPL_RID_BITS))
#define MAX_RID         (MAX_RIDS - 1)
#define MAX_RID_BLOCKS  (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
#define RIDS_PER_RIDBLOCK MIN_RIDS
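
// Worked example of the block arithmetic above, assuming the architectural
// minimum of 18 implemented RID bits (i.e. IA64_MIN_IMPL_RID_MSB == 17):
// a "rid block" then holds MIN_RIDS = 2^18 region ids, and the full 24-bit
// space splits into MAX_RID_BLOCKS = 2^(24-18) = 64 such blocks, which is
// the granularity at which RID ranges are handed out to domains below.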

// This is the one global memory representation of the default Xen region reg
ia64_rr xen_rr;

#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
    unsigned long r;
    __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
    return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
    __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif

// use this to allocate a rid out of the "Xen reserved rid block"
unsigned long allocate_reserved_rid(void)
{
    static unsigned long currentrid = XEN_DEFAULT_RID;
    unsigned long t = currentrid;

    unsigned long max = RIDS_PER_RIDBLOCK;

    if (++currentrid >= max) return(-1UL);
    return t;
}

// returns -1 if none available
unsigned long allocate_metaphysical_rid(void)
{
    unsigned long rid = allocate_reserved_rid();
    return rid;
}

int deallocate_metaphysical_rid(unsigned long rid)
{
    // fix this when the increment allocation mechanism is fixed.
    return 1;
}

void init_rr(void)
{
    xen_rr.rrval = 0;
    xen_rr.ve = 0;
    xen_rr.rid = allocate_reserved_rid();
    xen_rr.ps = PAGE_SHIFT;

    printf("initialized xen_rr.rid=0x%lx\n", xen_rr.rid);
}

/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
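
// ridblock_owner[i] records which domain currently owns rid block i;
// block 0 is skipped by allocate_rid_range() below, being reserved for
// meta-physical mappings and Xen itself.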

void get_impl_rid_bits(void)
{
    // FIXME (call PAL)
//#ifdef CONFIG_MCKINLEY
    implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
//#else
//#error "rid ranges won't work on Merced"
//#endif
    if (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS ||
        implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
            BUG();
}

/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 * "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
    int i, j, n_rid_blocks;

    if (implemented_rid_bits == 0) get_impl_rid_bits();

    if (ridbits >= IA64_MAX_IMPL_RID_BITS)
        ridbits = IA64_MAX_IMPL_RID_BITS - 1;

    if (ridbits < IA64_MIN_IMPL_RID_BITS)
        ridbits = IA64_MIN_IMPL_RID_BITS;

    // convert to rid_blocks and find one
    // (a range of 2^ridbits rids spans 2^(ridbits - IA64_MIN_IMPL_RID_BITS)
    //  minimum-sized blocks)
    n_rid_blocks = 1 << (ridbits - IA64_MIN_IMPL_RID_BITS);

    // skip over block 0, reserved for "meta-physical mappings (and Xen)"
    for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
        if (ridblock_owner[i] == NULL) {
            for (j = i; j < i + n_rid_blocks; ++j) {
                if (ridblock_owner[j]) break;
            }
            if (ridblock_owner[j] == NULL) break;
        }
    }

    if (i >= MAX_RID_BLOCKS) return 0;

    // found an unused block:
    // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
    // mark this block as owned
    for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;

    // setup domain struct
    d->rid_bits = ridbits;
    d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
    d->ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
    printf("###allocating rid_range, domain %p: starting_rid=%lx, ending_rid=%lx\n",
           d, d->starting_rid, d->ending_rid);

    return 1;
}

int deallocate_rid_range(struct domain *d)
{
    int i;
    int rid_block_end = d->ending_rid >> IA64_MIN_IMPL_RID_BITS;
    int rid_block_start = d->starting_rid >> IA64_MIN_IMPL_RID_BITS;

    return 1;  // KLUDGE ALERT
    //
    // not all domains will have allocated RIDs (physical mode loaders for instance)
    //
    if (d->rid_bits == 0) return 1;

#ifdef DEBUG
    for (i = rid_block_start; i < rid_block_end; ++i) {
        ASSERT(ridblock_owner[i] == d);
    }
#endif

    for (i = rid_block_start; i < rid_block_end; ++i)
        ridblock_owner[i] = NULL;

    d->rid_bits = 0;
    d->starting_rid = 0;
    d->ending_rid = 0;
    return 1;
}

// This function is purely for performance... apparently scrambling
// bits in the region id makes for better hashing, which means better
// use of the VHPT, which means better performance.
// Note that the only time a RID should be mangled is when it is stored in
// a region register; anytime it is "viewable" outside of this module,
// it should be unmangled.
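//
// Concretely (assuming the usual little-endian byte order), vmMangleRID
// below swaps bytes 1 and 3 of the 64-bit rr value -- i.e. the low and
// high bytes of the 24-bit rid field -- so a rid of 0xAABBCC is stored
// in the region register as 0xCCBBAA, and applying the same swap again
// restores the original value.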

// This appears to work in Xen... turn it on later so no complications yet
//#define CONFIG_MANGLE_RIDS
#ifdef CONFIG_MANGLE_RIDS
static inline unsigned long
vmMangleRID(unsigned long RIDVal)
{
    union bits64 { unsigned char bytes[4]; unsigned long uint; };

    union bits64 t;
    unsigned char tmp;

    t.uint = RIDVal;
    tmp = t.bytes[1];
    t.bytes[1] = t.bytes[3];
    t.bytes[3] = tmp;

    return t.uint;
}

// since vmMangleRID is symmetric, use it for unmangling also
#define vmUnmangleRID(x) vmMangleRID(x)
#else
// no mangling/unmangling
#define vmMangleRID(x) (x)
#define vmUnmangleRID(x) (x)
#endif

static inline void
set_rr_no_srlz(unsigned long rr, unsigned long rrval)
{
    ia64_set_rr(rr, vmMangleRID(rrval));
}

void
set_rr(unsigned long rr, unsigned long rrval)
{
    ia64_set_rr(rr, vmMangleRID(rrval));
    ia64_srlz_d();
}

unsigned long
get_rr(unsigned long rr)
{
    return vmUnmangleRID(ia64_get_rr(rr));
}
static inline int validate_page_size(unsigned long ps)
{
    switch (ps) {
        case 12: case 13: case 14: case 16: case 18:
        case 20: case 22: case 24: case 26: case 28:
            return 1;
        default:
            return 0;
    }
}

// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
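//
// (For reference: in the architected region register layout assumed here,
// ve is bit 0, ps occupies bits 2-7 and rid occupies bits 8-31, which is
// what the ia64_rr bitfields map onto.)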
int set_one_rr(unsigned long rr, unsigned long val)
{
    struct exec_domain *ed = current;
    unsigned long rreg = REGION_NUMBER(rr);
    ia64_rr rrv, newrrv, memrrv;
    unsigned long newrid;

    if (val == -1) return 1;

    rrv.rrval = val;
    newrrv.rrval = 0;
    newrid = ed->domain->starting_rid + rrv.rid;

    if (newrid > ed->domain->ending_rid) {
        printk("can't set rr%ld to %lx, starting_rid=%lx, "
               "ending_rid=%lx, val=%lx\n", rreg, newrid,
               ed->domain->starting_rid, ed->domain->ending_rid, val);
        return 0;
    }

    memrrv.rrval = rrv.rrval;
    if (rreg == 7) {
        newrrv.rid = newrid;
        newrrv.ve = VHPT_ENABLED_REGION_7;
        newrrv.ps = IA64_GRANULE_SHIFT;
        ia64_new_rr7(vmMangleRID(newrrv.rrval), ed->vcpu_info);
    }
    else {
        newrrv.rid = newrid;
        // FIXME? region 6 needs to be uncached for EFI to work
        if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
        else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
        newrrv.ps = PAGE_SHIFT;
        set_rr(rr, newrrv.rrval);
    }
    return 1;
}

// set rr0 to the passed rid (for metaphysical mode so don't use domain offset)
int set_metaphysical_rr(unsigned long rr, unsigned long rid)
{
    ia64_rr rrv;

    rrv.rrval = 0;
    rrv.rid = rid;
    rrv.ps = PAGE_SHIFT;
//    rrv.ve = 1;     FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
    rrv.ve = 0;
    set_rr(rr, rrv.rrval);
    return 1;
}

// validates/changes region registers 0-7 in the currently executing domain
// Note that this is the one and only SP API (other than executing a privop)
// for a domain to use to change region registers
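//
// (The literal addresses below are the base of each 2^61-byte region;
// bits 63:61 of a virtual address select which region register applies,
// which is what REGION_NUMBER() extracts in set_one_rr above.)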
int set_all_rr( u64 rr0, u64 rr1, u64 rr2, u64 rr3,
                u64 rr4, u64 rr5, u64 rr6, u64 rr7)
{
    if (!set_one_rr(0x0000000000000000L, rr0)) return 0;
    if (!set_one_rr(0x2000000000000000L, rr1)) return 0;
    if (!set_one_rr(0x4000000000000000L, rr2)) return 0;
    if (!set_one_rr(0x6000000000000000L, rr3)) return 0;
    if (!set_one_rr(0x8000000000000000L, rr4)) return 0;
    if (!set_one_rr(0xa000000000000000L, rr5)) return 0;
    if (!set_one_rr(0xc000000000000000L, rr6)) return 0;
    if (!set_one_rr(0xe000000000000000L, rr7)) return 0;
    return 1;
}

void init_all_rr(struct exec_domain *ed)
{
    ia64_rr rrv;

    rrv.rrval = 0;
    rrv.rid = ed->domain->metaphysical_rid;
    rrv.ps = PAGE_SHIFT;
    rrv.ve = 1;
    if (!ed->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
    ed->vcpu_info->arch.rrs[0] = -1;
    ed->vcpu_info->arch.rrs[1] = rrv.rrval;
    ed->vcpu_info->arch.rrs[2] = rrv.rrval;
    ed->vcpu_info->arch.rrs[3] = rrv.rrval;
    ed->vcpu_info->arch.rrs[4] = rrv.rrval;
    ed->vcpu_info->arch.rrs[5] = rrv.rrval;
    rrv.ve = 0;
    ed->vcpu_info->arch.rrs[6] = rrv.rrval;
//    ed->shared_info->arch.rrs[7] = rrv.rrval;
}

/* XEN/ia64 INTERNAL ROUTINES */
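
// physicalize_rid maps a domain-visible rid onto the machine rid actually
// used in hardware by adding the domain's starting_rid offset;
// virtualize_rid is its inverse.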
unsigned long physicalize_rid(struct exec_domain *ed, unsigned long rrval)
{
    ia64_rr rrv;

    rrv.rrval = rrval;
    rrv.rid += ed->domain->starting_rid;
    return rrv.rrval;
}

unsigned long
virtualize_rid(struct exec_domain *ed, unsigned long rrval)
{
    ia64_rr rrv;

    rrv.rrval = rrval;
    rrv.rid -= ed->domain->starting_rid;
    return rrv.rrval;
}

// loads a thread's region register (0-6) state into
// the real physical region registers.  Returns the
// (possibly mangled) bits to store into rr7
// iff it is different than what is currently in physical
// rr7 (because we have to go to assembly and physical mode
// to change rr7).  If no change to rr7 is required, returns 0.
//
unsigned long load_region_regs(struct exec_domain *ed)
{
    unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
    // TODO: These probably should be validated
    unsigned long bad = 0;

    if (ed->vcpu_info->arch.metaphysical_mode) {
        ia64_rr rrv;

        rrv.rrval = 0;
        rrv.rid = ed->domain->metaphysical_rid;
        rrv.ps = PAGE_SHIFT;
        rrv.ve = 1;
        rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = rrv.rrval;
        rrv.ve = 0;
        rr6 = rrv.rrval;
        set_rr_no_srlz(0x0000000000000000L, rr0);
        set_rr_no_srlz(0x2000000000000000L, rr1);
        set_rr_no_srlz(0x4000000000000000L, rr2);
        set_rr_no_srlz(0x6000000000000000L, rr3);
        set_rr_no_srlz(0x8000000000000000L, rr4);
        set_rr_no_srlz(0xa000000000000000L, rr5);
        set_rr_no_srlz(0xc000000000000000L, rr6);
        // skip rr7 when in metaphysical mode
    }
    else {
        rr0 = ed->vcpu_info->arch.rrs[0];
        rr1 = ed->vcpu_info->arch.rrs[1];
        rr2 = ed->vcpu_info->arch.rrs[2];
        rr3 = ed->vcpu_info->arch.rrs[3];
        rr4 = ed->vcpu_info->arch.rrs[4];
        rr5 = ed->vcpu_info->arch.rrs[5];
        rr6 = ed->vcpu_info->arch.rrs[6];
        rr7 = ed->vcpu_info->arch.rrs[7];
        if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
        if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
        if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
        if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
        if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
        if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
        if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
        if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
    }
    ia64_srlz_d();
    if (bad) {
        panic_domain(0, "load_region_regs: can't set! bad=%lx\n", bad);
    }
    // rr7 has either been skipped (metaphysical mode) or already updated
    // via set_one_rr above, so no deferred rr7 value needs to be returned.
    return 0;
}