ia64/xen-unstable

xen/arch/ia64/xen/regionreg.c @ 6462:af3750d1ec53

Bug fixes from Kevin (x2) and Anthony
Missing prototypes (Kevin)
Bad n_rid_blocks computation (Anthony)
Bad pte when single-entry dtlb lookup is successful (Kevin)
author djm@kirby.fc.hp.com
date Fri Sep 02 11:59:08 2005 -0600 (2005-09-02)
parents 3ca4ca7a9cc2
children bf3fdeeba48b
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *	Bret Mckee (bret.mckee@hp.com)
 *
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info);
#define IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
#define IA64_MAX_IMPL_RID_BITS	24

#define MIN_RIDS	(1 << IA64_MIN_IMPL_RID_BITS)
#define MIN_RID_MAX	(MIN_RIDS - 1)
#define MIN_RID_MASK	(MIN_RIDS - 1)
#define MAX_RIDS	(1 << (IA64_MAX_IMPL_RID_BITS))
#define MAX_RID		(MAX_RIDS - 1)
#define MAX_RID_BLOCKS	(1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
#define RIDS_PER_RIDBLOCK MIN_RIDS
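
/*
 * Commentary added here (not in the original file): the 24-bit RID space
 * is carved into MAX_RID_BLOCKS blocks of RIDS_PER_RIDBLOCK (== MIN_RIDS)
 * RIDs each.  Block 0 is reserved for Xen itself and for metaphysical
 * mappings; guest domains receive naturally aligned runs of whole blocks
 * from allocate_rid_range() below.
 */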
#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
        unsigned long r;
        __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
        return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
        __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif
// use this to allocate a rid out of the "Xen reserved rid block"
unsigned long allocate_reserved_rid(void)
{
        static unsigned long currentrid = XEN_DEFAULT_RID;
        unsigned long t = currentrid;

        unsigned long max = RIDS_PER_RIDBLOCK;

        if (++currentrid >= max) return(-1UL);
        return t;
}
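
/*
 * Commentary (not in the original file): reserved RIDs are handed out
 * monotonically starting at XEN_DEFAULT_RID within block 0 and are never
 * recycled; once the block is exhausted the function returns -1UL.
 */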
// returns -1 if none available
unsigned long allocate_metaphysical_rr(void)
{
        ia64_rr rrv;

        rrv.rid = allocate_reserved_rid();
        rrv.ps = PAGE_SHIFT;
        rrv.ve = 0;
        return rrv.rrval;
}
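
/*
 * Commentary (not in the original file): ia64_rr is the usual ia64
 * region-register overlay -- rrval is the raw 64-bit value and ve/ps/rid
 * are bitfields within it -- so the assignments above build a complete
 * region register value with the VHPT walker disabled (ve = 0) for
 * metaphysical mode.
 */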
int deallocate_metaphysical_rid(unsigned long rid)
{
        // fix this when the increment allocation mechanism is fixed.
        return 1;
}
/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
void get_impl_rid_bits(void)
{
        // FIXME (call PAL)
//#ifdef CONFIG_MCKINLEY
        implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
//#else
//#error "rid ranges won't work on Merced"
//#endif
        if (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS ||
            implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
                BUG();
}
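
/*
 * Commentary (not in the original file): the number of implemented RID
 * bits should eventually be queried from firmware (the "FIXME (call PAL)"
 * above); until then the McKinley maximum of IA64_MAX_IMPL_RID_BITS is
 * assumed, and the BUG() check rejects any value outside the supported
 * range.
 */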
/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 * "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
        int i, j, n_rid_blocks;

        if (implemented_rid_bits == 0) get_impl_rid_bits();

        if (ridbits >= IA64_MAX_IMPL_RID_BITS)
                ridbits = IA64_MAX_IMPL_RID_BITS - 1;

        if (ridbits < IA64_MIN_IMPL_RID_BITS)
                ridbits = IA64_MIN_IMPL_RID_BITS;

        // convert to rid_blocks and find one
        n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

        // skip over block 0, reserved for "meta-physical mappings (and Xen)"
        for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
                if (ridblock_owner[i] == NULL) {
                        for (j = i; j < i + n_rid_blocks; ++j) {
                                if (ridblock_owner[j]) break;
                        }
                        if (ridblock_owner[j] == NULL) break;
                }
        }

        if (i >= MAX_RID_BLOCKS) return 0;

        // found an unused block:
        // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
        // mark this block as owned
        for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;

        // setup domain struct
        d->arch.rid_bits = ridbits;
        d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
        d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
        printf("###allocating rid_range, domain %p: starting_rid=%lx, ending_rid=%lx\n",
               d, d->arch.starting_rid, d->arch.ending_rid);

        return 1;
}
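
/*
 * Commentary (not in the original file): because the search above strides
 * in units of n_rid_blocks and marks whole blocks, a successful
 * allocation always yields a naturally aligned, power-of-two run of RIDs:
 *	starting_rid = i << IA64_MIN_IMPL_RID_BITS
 *	ending_rid   = (i + n_rid_blocks) << IA64_MIN_IMPL_RID_BITS
 * A guest RID is then translated to a machine RID simply by adding
 * starting_rid (see physicalize_rid() below).
 */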
int deallocate_rid_range(struct domain *d)
{
        int i;
        int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
        int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

        return 1;  // KLUDGE ALERT
        //
        // not all domains will have allocated RIDs (physical mode loaders for instance)
        //
        if (d->arch.rid_bits == 0) return 1;

#ifdef DEBUG
        for (i = rid_block_start; i < rid_block_end; ++i) {
                ASSERT(ridblock_owner[i] == d);
        }
#endif

        for (i = rid_block_start; i < rid_block_end; ++i)
                ridblock_owner[i] = NULL;

        d->arch.rid_bits = 0;
        d->arch.starting_rid = 0;
        d->arch.ending_rid = 0;
        return 1;
}
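
/*
 * Commentary (not in the original file): the early "KLUDGE ALERT" return
 * above means the ownership table is never actually cleared yet; the
 * unreachable code below shows the intended cleanup once RID deallocation
 * is re-enabled.
 */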
static inline void
set_rr_no_srlz(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
}

void
set_rr(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
        ia64_srlz_d();
}

unsigned long
get_rr(unsigned long rr)
{
        return vmUnmangleRID(ia64_get_rr(rr));
}
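
/*
 * Commentary (not in the original file): all region register accesses
 * funnel through these wrappers so that RID values pass through
 * vmMangleRID() on the way to hardware and vmUnmangleRID() on the way
 * back.  set_rr() also issues a data serialize (ia64_srlz_d()); the
 * _no_srlz variant leaves serialization to the caller, as
 * load_region_regs() does for rr0 in metaphysical mode.
 */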
static inline int validate_page_size(unsigned long ps)
{
        switch(ps) {
            case 12: case 13: case 14: case 16: case 18:
            case 20: case 22: case 24: case 26: case 28:
                return 1;
            default:
                return 0;
        }
}
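
/*
 * Commentary (not in the original file): the accepted values are the
 * architected ia64 page sizes, 4KB (2^12) through 256MB (2^28), which is
 * why 2^15 and 2^17 are missing from the list.
 */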
// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
        struct vcpu *v = current;
        unsigned long rreg = REGION_NUMBER(rr);
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;

        if (val == -1) return 1;

        rrv.rrval = val;
        newrrv.rrval = 0;
        newrid = v->arch.starting_rid + rrv.rid;

        if (newrid > v->arch.ending_rid) {
                printk("can't set rr%ld to %lx, starting_rid=%lx,"
                        "ending_rid=%lx, val=%lx\n", rreg, newrid,
                        v->arch.starting_rid, v->arch.ending_rid, val);
                return 0;
        }

#ifdef CONFIG_VTI
        memrrv.rrval = rrv.rrval;
        if (rreg == 7) {
                newrrv.rid = newrid;
                newrrv.ve = VHPT_ENABLED_REGION_7;
                newrrv.ps = IA64_GRANULE_SHIFT;
                ia64_new_rr7(vmMangleRID(newrrv.rrval), v->vcpu_info,
                             v->vcpu_info->arch.privregs);
        }
        else {
                newrrv.rid = newrid;
                // FIXME? region 6 needs to be uncached for EFI to work
                if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
                else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
                newrrv.ps = PAGE_SHIFT;
                if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
                set_rr(rr, newrrv.rrval);
        }
#else
        memrrv.rrval = rrv.rrval;
        newrrv.rid = newrid;
        newrrv.ve = 1; // VHPT now enabled for region 7!!
        newrrv.ps = PAGE_SHIFT;
        if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
        if (rreg == 7) ia64_new_rr7(vmMangleRID(newrrv.rrval), v->vcpu_info,
                                    v->vcpu_info->arch.privregs);
        else set_rr(rr, newrrv.rrval);
#endif
        return 1;
}
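
/*
 * Commentary (not in the original file): the guest supplies a small
 * "virtual" RID in val; it is offset by the domain's starting_rid and
 * range-checked against ending_rid before being programmed.  Region 7 is
 * special -- it is the region Xen itself runs in, so the switch is done
 * by ia64_new_rr7() in assembly rather than by a plain rr[7] write; the
 * value installed for region 0 is also remembered in
 * metaphysical_saved_rr0 for later metaphysical-mode switches.
 */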
// set rr0 to the passed rid (for metaphysical mode so don't use domain offset)
int set_metaphysical_rr0(void)
{
        struct vcpu *v = current;
        ia64_rr rrv;

        // rrv.ve = 1; 	FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
        set_rr(0, v->arch.metaphysical_rr0);
}
// validates/changes region registers 0-6 in the currently executing domain
// Note that this is the one and only SP API (other than executing a privop)
// for a domain to use to change region registers
int set_all_rr( u64 rr0, u64 rr1, u64 rr2, u64 rr3,
                u64 rr4, u64 rr5, u64 rr6, u64 rr7)
{
        if (!set_one_rr(0x0000000000000000L, rr0)) return 0;
        if (!set_one_rr(0x2000000000000000L, rr1)) return 0;
        if (!set_one_rr(0x4000000000000000L, rr2)) return 0;
        if (!set_one_rr(0x6000000000000000L, rr3)) return 0;
        if (!set_one_rr(0x8000000000000000L, rr4)) return 0;
        if (!set_one_rr(0xa000000000000000L, rr5)) return 0;
        if (!set_one_rr(0xc000000000000000L, rr6)) return 0;
        if (!set_one_rr(0xe000000000000000L, rr7)) return 0;
        return 1;
}
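
/*
 * Commentary (not in the original file): the literal constants are the
 * base addresses of the eight 2^61-byte virtual regions --
 * REGION_NUMBER() extracts bits 63:61, so 0x2000000000000000 selects
 * region 1, 0x4000000000000000 region 2, and so on up to
 * 0xe000000000000000 for region 7.
 */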
void init_all_rr(struct vcpu *v)
{
        ia64_rr rrv;

        rrv.rrval = 0;
        rrv.rrval = v->domain->arch.metaphysical_rr0;
        rrv.ps = PAGE_SHIFT;
        rrv.ve = 1;
        if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
        VCPU(v,rrs[0]) = -1;
        VCPU(v,rrs[1]) = rrv.rrval;
        VCPU(v,rrs[2]) = rrv.rrval;
        VCPU(v,rrs[3]) = rrv.rrval;
        VCPU(v,rrs[4]) = rrv.rrval;
        VCPU(v,rrs[5]) = rrv.rrval;
        rrv.ve = 0;
        VCPU(v,rrs[6]) = rrv.rrval;
//      v->shared_info->arch.rrs[7] = rrv.rrval;
}
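
/*
 * Commentary (not in the original file): the vcpu's shadow rrs[] array is
 * seeded from the domain's metaphysical rr0 value; rrs[0] gets the -1
 * sentinel (which set_one_rr() treats as a successful no-op), region 6
 * has the VHPT walker disabled (ve = 0), and rr7 is left to be installed
 * elsewhere (note the commented-out assignment).
 */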
/* XEN/ia64 INTERNAL ROUTINES */

unsigned long physicalize_rid(struct vcpu *v, unsigned long rrval)
{
        ia64_rr rrv;

        rrv.rrval = rrval;
        rrv.rid += v->arch.starting_rid;
        return rrv.rrval;
}

unsigned long
virtualize_rid(struct vcpu *v, unsigned long rrval)
{
        ia64_rr rrv;

        rrv.rrval = rrval;
        rrv.rid -= v->arch.starting_rid;
        return rrv.rrval;
}
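
/*
 * Commentary (not in the original file): physicalize_rid() and
 * virtualize_rid() are inverses -- the first turns a domain-visible RID
 * into a machine RID by adding the domain's starting_rid offset, the
 * second maps a machine RID back into the domain's view, leaving the ps
 * and ve fields untouched.
 */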
// loads a thread's region register (0-6) state into
// the real physical region registers.  Returns the
// (possibly mangled) bits to store into rr7
// iff it is different than what is currently in physical
// rr7 (because we have to go to assembly and physical mode
// to change rr7).  If no change to rr7 is required, returns 0.
//
unsigned long load_region_regs(struct vcpu *v)
{
        unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
        // TODO: These probably should be validated
        unsigned long bad = 0;

        if (VCPU(v,metaphysical_mode)) {
                ia64_rr rrv;

                rrv.rrval = 0;
                rrv.rid = v->domain->arch.metaphysical_rr0;
                rrv.ps = PAGE_SHIFT;
                rrv.ve = 1;
                rr0 = rrv.rrval;
                set_rr_no_srlz(0x0000000000000000L, rr0);
                ia64_srlz_d();
        }
        else {
                rr0 = VCPU(v,rrs[0]);
                if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
        }
        rr1 = VCPU(v,rrs[1]);
        rr2 = VCPU(v,rrs[2]);
        rr3 = VCPU(v,rrs[3]);
        rr4 = VCPU(v,rrs[4]);
        rr5 = VCPU(v,rrs[5]);
        rr6 = VCPU(v,rrs[6]);
        rr7 = VCPU(v,rrs[7]);
        if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
        if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
        if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
        if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
        if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
        if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
        if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
        if (bad) {
                panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
        }
        return 0;
}
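
/*
 * Commentary (not in the original file): as written the function always
 * returns 0 and reports failures through the per-region 'bad' bitmask
 * and panic_domain(), despite the head comment describing an rr7 return
 * value.
 */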