ia64/xen-unstable

view xen/arch/ia64/regionreg.c @ 3108:85d6a1145160

bitkeeper revision 1.1159.187.7 (41a4e12eWWEz6Rwd4YlbRFZKcBjaMQ)

Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno/BK/xen-2.0-testing.bk
into arcadians.cl.cam.ac.uk:/local/scratch-2/cl349/xen-2.0-testing.bk
author cl349@arcadians.cl.cam.ac.uk
date Wed Nov 24 19:29:50 2004 +0000 (2004-11-24)
parents b7cbbc4c7a3e
children 7ef582b6c9c4
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *      Dan Magenheimer (dan.magenheimer@hp.com)
 *      Bret Mckee (bret.mckee@hp.com)
 *
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>

#define IA64_MIN_IMPL_RID_BITS  (IA64_MIN_IMPL_RID_MSB+1)
#define IA64_MAX_IMPL_RID_BITS  24

#define MIN_RIDS        (1 << IA64_MIN_IMPL_RID_BITS)
#define MIN_RID_MAX     (MIN_RIDS - 1)
#define MIN_RID_MASK    (MIN_RIDS - 1)
#define MAX_RIDS        (1 << (IA64_MAX_IMPL_RID_BITS))
#define MAX_RID         (MAX_RIDS - 1)
#define MAX_RID_BLOCKS  (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
#define RIDS_PER_RIDBLOCK MIN_RIDS
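/*
 * Worked example of the sizing above (a sketch: the actual value of
 * IA64_MIN_IMPL_RID_MSB lives in asm/regionreg.h; assuming it is 17, so
 * that IA64_MIN_IMPL_RID_BITS is 18):
 *   MIN_RIDS       = 1 << 18 = 262144 rids per rid block
 *   MAX_RIDS       = 1 << 24 = 16777216 rids in the architected 24-bit space
 *   MAX_RID_BLOCKS = 1 << (24 - 18) = 64 rid blocks
 * so allocate_rid_range() below carves the rid space out of 64 equal blocks
 * of RIDS_PER_RIDBLOCK rids each, with block 0 reserved for meta-physical
 * mappings and Xen itself.
 */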
// This is the one global memory representation of the default Xen region reg
ia64_rr xen_rr;

#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
        unsigned long r;
        __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
        return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
        __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif

// use this to allocate a rid out of the "Xen reserved rid block"
unsigned long allocate_reserved_rid(void)
{
        static unsigned long currentrid = XEN_DEFAULT_RID;
        unsigned long t = currentrid;

        unsigned long max = RIDS_PER_RIDBLOCK;

        if (++currentrid >= max) return(-1UL);
        return t;
}
// returns -1 if none available
unsigned long allocate_metaphysical_rid(void)
{
        unsigned long rid = allocate_reserved_rid();

        return rid;
}
int deallocate_metaphysical_rid(unsigned long rid)
{
        // fix this when the increment allocation mechanism is fixed.
        return 1;
}

void init_rr(void)
{
        xen_rr.rrval = 0;
        xen_rr.ve = 0;
        xen_rr.rid = allocate_reserved_rid();
        xen_rr.ps = PAGE_SHIFT;

        printf("initialized xen_rr.rid=0x%lx\n", xen_rr.rid);
}

/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };

void get_impl_rid_bits(void)
{
        // FIXME (call PAL)
//#ifdef CONFIG_MCKINLEY
        implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
//#else
//#error "rid ranges won't work on Merced"
//#endif
        if (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS ||
            implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
                BUG();
}
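/*
 * Sketch of how the "FIXME (call PAL)" above might eventually be resolved,
 * using the PAL_VM_SUMMARY wrapper and rid_size field declared in the ia64
 * <asm/pal.h> headers (illustrative only -- the function above currently
 * just assumes the McKinley maximum):
 *
 *      pal_vm_info_1_u_t vm_info_1;
 *      pal_vm_info_2_u_t vm_info_2;
 *
 *      if (ia64_pal_vm_summary(&vm_info_1, &vm_info_2) == 0)
 *              implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;
 *      else
 *              implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
 */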
/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 * "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
        int i, j, n_rid_blocks;

        if (implemented_rid_bits == 0) get_impl_rid_bits();

        if (ridbits >= IA64_MAX_IMPL_RID_BITS)
                ridbits = IA64_MAX_IMPL_RID_BITS - 1;

        if (ridbits < IA64_MIN_IMPL_RID_BITS)
                ridbits = IA64_MIN_IMPL_RID_BITS;

        // convert to rid_blocks and find one
        n_rid_blocks = 1 << (ridbits - IA64_MIN_IMPL_RID_BITS);

        // skip over block 0, reserved for "meta-physical mappings (and Xen)"
        for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
                if (ridblock_owner[i] == NULL) {
                        for (j = i; j < i + n_rid_blocks; ++j) {
                                if (ridblock_owner[j]) break;
                        }
                        // the whole run [i, i + n_rid_blocks) is free
                        if (j == i + n_rid_blocks) break;
                }
        }

        if (i >= MAX_RID_BLOCKS) return 0;

        // found an unused block:
        // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
        // mark this block as owned
        for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;

        // setup domain struct
        d->rid_bits = ridbits;
        d->starting_rid = i << IA64_MIN_IMPL_RID_BITS;
        d->ending_rid = (i + n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;

        return 1;
}
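/*
 * Example allocation (assuming 18-bit minimum rid blocks and an otherwise
 * empty ridblock_owner[]): allocate_rid_range(d, 20) needs
 * 1 << (20 - 18) = 4 blocks; the scan starts at block 4 (block 0 is
 * reserved), finds blocks 4-7 free, and leaves the domain with
 *   d->starting_rid = 4 << 18 = 0x100000
 *   d->ending_rid   = 8 << 18 = 0x200000
 */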
int deallocate_rid_range(struct domain *d)
{
        int i;
        int rid_block_end = d->ending_rid >> IA64_MIN_IMPL_RID_BITS;
        int rid_block_start = d->starting_rid >> IA64_MIN_IMPL_RID_BITS;

        return 1;  // KLUDGE ALERT
        //
        // not all domains will have allocated RIDs (physical mode loaders for instance)
        //
        if (d->rid_bits == 0) return 1;

#ifdef DEBUG
        for (i = rid_block_start; i < rid_block_end; ++i) {
                ASSERT(ridblock_owner[i] == d);
        }
#endif

        for (i = rid_block_start; i < rid_block_end; ++i)
                ridblock_owner[i] = NULL;

        d->rid_bits = 0;
        d->starting_rid = 0;
        d->ending_rid = 0;
        return 1;
}

// This function is purely for performance... apparently scrambling
// bits in the region id makes for better hashing, which means better
// use of the VHPT, which means better performance
// Note that the only time a RID should be mangled is when it is stored in
// a region register; anytime it is "viewable" outside of this module,
// it should be unmangled

// This appears to work in Xen... turn it on later so no complications yet
//#define CONFIG_MANGLE_RIDS
#ifdef CONFIG_MANGLE_RIDS
static inline unsigned long
vmMangleRID(unsigned long RIDVal)
{
        union bits64 { unsigned char bytes[4]; unsigned long uint; };

        union bits64 t;
        unsigned char tmp;

        t.uint = RIDVal;
        tmp = t.bytes[1];
        t.bytes[1] = t.bytes[3];
        t.bytes[3] = tmp;

        return t.uint;
}

// since vmMangleRID is symmetric, use it for unmangling also
#define vmUnmangleRID(x)        vmMangleRID(x)
#else
// no mangling/unmangling
#define vmMangleRID(x)          (x)
#define vmUnmangleRID(x)        (x)
#endif
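/*
 * Worked example of the (currently disabled) mangling: the rid occupies
 * bits 8..31 of the rr value, so bytes[1] and bytes[3] are the low and high
 * bytes of the rid.  For a rid of 0x123456, bytes[1] = 0x56 and
 * bytes[3] = 0x12; swapping them yields rid 0x563412.  Because the swap is
 * its own inverse, vmUnmangleRID() can simply reuse vmMangleRID().
 */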
static inline void
set_rr_no_srlz(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
}

void
set_rr(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
        ia64_srlz_d();
}

unsigned long
get_rr(unsigned long rr)
{
        return vmUnmangleRID(ia64_get_rr(rr));
}

static inline int validate_page_size(unsigned long ps)
{
        switch (ps) {
        case 12: case 13: case 14: case 16: case 18:
        case 20: case 22: case 24: case 26: case 28:
                return 1;
        default:
                return 0;
        }
}

// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
        struct domain *d = current;
        unsigned long rreg = REGION_NUMBER(rr);
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;

        if (val == -1) return 1;

        rrv.rrval = val;
        newrrv.rrval = 0;
        newrid = d->starting_rid + rrv.rid;

        if (newrid > d->ending_rid) return 0;

        memrrv.rrval = rrv.rrval;
        if (rreg == 7) {
                newrrv.rid = newrid;
                newrrv.ve = VHPT_ENABLED_REGION_7;
                newrrv.ps = IA64_GRANULE_SHIFT;
                ia64_new_rr7(vmMangleRID(newrrv.rrval));
        }
        else {
                newrrv.rid = newrid;
                // FIXME? region 6 needs to be uncached for EFI to work
                if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
                else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
                newrrv.ps = PAGE_SHIFT;
                set_rr(rr, newrrv.rrval);
        }
        return 1;
}
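/*
 * Example of the rid translation above, with made-up numbers: a domain
 * given starting_rid 0x100000 and ending_rid 0x200000 by allocate_rid_range()
 * that writes a virtual rid of 0x17 ends up with machine rid 0x100017 in the
 * physical region register; a virtual rid large enough to push the sum past
 * ending_rid makes set_one_rr() fail with 0.
 */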
// set rr0 to the passed rid (for metaphysical mode so don't use domain offset)
int set_metaphysical_rr(unsigned long rr, unsigned long rid)
{
        ia64_rr rrv;

        rrv.rrval = 0;
        rrv.rid = rid;
        rrv.ps = PAGE_SHIFT;
        // rrv.ve = 1;  FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
        rrv.ve = 0;
        set_rr(rr, rrv.rrval);
        return 1;
}
// validates/changes region registers 0-7 in the currently executing domain
// Note that this is the one and only SP API (other than executing a privop)
// for a domain to use to change region registers
int set_all_rr( u64 rr0, u64 rr1, u64 rr2, u64 rr3,
                u64 rr4, u64 rr5, u64 rr6, u64 rr7)
{
        if (!set_one_rr(0x0000000000000000L, rr0)) return 0;
        if (!set_one_rr(0x2000000000000000L, rr1)) return 0;
        if (!set_one_rr(0x4000000000000000L, rr2)) return 0;
        if (!set_one_rr(0x6000000000000000L, rr3)) return 0;
        if (!set_one_rr(0x8000000000000000L, rr4)) return 0;
        if (!set_one_rr(0xa000000000000000L, rr5)) return 0;
        if (!set_one_rr(0xc000000000000000L, rr6)) return 0;
        if (!set_one_rr(0xe000000000000000L, rr7)) return 0;
        return 1;
}
void init_all_rr(struct domain *d)
{
        ia64_rr rrv;

        rrv.rrval = 0;
        rrv.rid = d->metaphysical_rid;
        rrv.ps = PAGE_SHIFT;
        rrv.ve = 1;
        d->shared_info->arch.rrs[0] = -1;
        d->shared_info->arch.rrs[1] = rrv.rrval;
        d->shared_info->arch.rrs[2] = rrv.rrval;
        d->shared_info->arch.rrs[3] = rrv.rrval;
        d->shared_info->arch.rrs[4] = rrv.rrval;
        d->shared_info->arch.rrs[5] = rrv.rrval;
        d->shared_info->arch.rrs[6] = rrv.rrval;
//      d->shared_info->arch.rrs[7] = rrv.rrval;
}

/* XEN/ia64 INTERNAL ROUTINES */

unsigned long physicalize_rid(struct domain *d, unsigned long rid)
{
        ia64_rr rrv;

        rrv.rrval = rid;
        rrv.rid += d->starting_rid;
        return rrv.rrval;
}

unsigned long
virtualize_rid(struct domain *d, unsigned long rid)
{
        ia64_rr rrv;

        rrv.rrval = rid;
        rrv.rid -= d->starting_rid;
        return rrv.rrval;
}
// loads a thread's region register (0-6) state into
// the real physical region registers.  Returns the
// (possibly mangled) bits to store into rr7
// iff it is different than what is currently in physical
// rr7 (because we have to go to assembly and physical mode
// to change rr7).  If no change to rr7 is required, returns 0.
//
unsigned long load_region_regs(struct domain *d)
{
        unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6;
        unsigned long oldrr7, newrr7;
        // TODO: These probably should be validated

        if (d->metaphysical_mode) {
                ia64_rr rrv;

                rrv.rrval = 0;
                rrv.rid = d->metaphysical_rid;
                rrv.ps = PAGE_SHIFT;
                rrv.ve = 1;
                rr0 = rr1 = rr2 = rr3 = rr4 = rr5 = rr6 = newrr7 = rrv.rrval;
        }
        else {
                rr0 = physicalize_rid(d, d->shared_info->arch.rrs[0]);
                rr1 = physicalize_rid(d, d->shared_info->arch.rrs[1]);
                rr2 = physicalize_rid(d, d->shared_info->arch.rrs[2]);
                rr3 = physicalize_rid(d, d->shared_info->arch.rrs[3]);
                rr4 = physicalize_rid(d, d->shared_info->arch.rrs[4]);
                rr5 = physicalize_rid(d, d->shared_info->arch.rrs[5]);
                rr6 = physicalize_rid(d, d->shared_info->arch.rrs[6]);
                newrr7 = physicalize_rid(d, d->shared_info->arch.rrs[7]);
        }

        set_rr_no_srlz(0x0000000000000000L, rr0);
        set_rr_no_srlz(0x2000000000000000L, rr1);
        set_rr_no_srlz(0x4000000000000000L, rr2);
        set_rr_no_srlz(0x6000000000000000L, rr3);
        set_rr_no_srlz(0x8000000000000000L, rr4);
        set_rr_no_srlz(0xa000000000000000L, rr5);
        set_rr_no_srlz(0xc000000000000000L, rr6);
        ia64_srlz_d();
        oldrr7 = get_rr(0xe000000000000000L);
        if (oldrr7 != newrr7) {
                newrr7 = (newrr7 & ~0xff) | (PAGE_SHIFT << 2) | 1;
                return vmMangleRID(newrr7);
        }
        else return 0;
}
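/*
 * Sketch of how a caller might consume the return value (the real context
 * switch path lives elsewhere in xen/arch/ia64; ia64_new_rr7() is the same
 * helper used by set_one_rr() above):
 *
 *      unsigned long mangled_rr7 = load_region_regs(next);
 *      if (mangled_rr7)
 *              ia64_new_rr7(mangled_rr7);  // drops to physical mode to switch rr7
 */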