ia64/xen-unstable

xen/arch/ia64/xen/regionreg.c @ 9405:29dfadcc5029

[IA64] Follow-up to xen time cleanup

Clean up the xen time handler. Tristan had #if 0'd some code because it
seemed redundant; that code is actually the problematic logic behind an
intermittent timer oops in dom0, so delete it outright now.

Also remove vcpu_wake, since waking the current vcpu accomplishes nothing
and simply wastes CPU cycles.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Mon Mar 27 15:32:08 2006 -0700 (2006-03-27)
parents edc63b5dd71d
children cc94ab1e0de0
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *     Bret Mckee (bret.mckee@hp.com)
 *
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long p_vhpt, unsigned long v_pal);
extern void *pal_vaddr;

/* FIXME: where should these declarations live? */
extern void panic_domain(struct pt_regs *, const char *, ...);

#define IA64_MIN_IMPL_RID_BITS (IA64_MIN_IMPL_RID_MSB+1)
#define IA64_MAX_IMPL_RID_BITS 24

#define MIN_RIDS (1 << IA64_MIN_IMPL_RID_BITS)
#define MIN_RID_MAX (MIN_RIDS - 1)
#define MIN_RID_MASK (MIN_RIDS - 1)
#define MAX_RIDS (1 << (IA64_MAX_IMPL_RID_BITS))
#define MAX_RID (MAX_RIDS - 1)
#define MAX_RID_BLOCKS (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
#define RIDS_PER_RIDBLOCK MIN_RIDS
#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
    unsigned long r;
    __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
    return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
    __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif
// use this to allocate a rid out of the "Xen reserved rid block"
unsigned long allocate_reserved_rid(void)
{
    static unsigned long currentrid = XEN_DEFAULT_RID+1;
    unsigned long t = currentrid;

    unsigned long max = RIDS_PER_RIDBLOCK;

    if (++currentrid >= max) return(-1UL);
    return t;
}
// returns -1 if none available
unsigned long allocate_metaphysical_rr(void)
{
    ia64_rr rrv;

    rrv.rrval = 0;    // Or else may see reserved bit fault
    rrv.rid = allocate_reserved_rid();
    rrv.ps = PAGE_SHIFT;
    rrv.ve = 0;
    /* Mangle metaphysical rid */
    rrv.rrval = vmMangleRID(rrv.rrval);
    return rrv.rrval;
}
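// Deallocating reserved (metaphysical) rids is not implemented yet:
// allocate_reserved_rid() only ever counts upward, so for now this
// simply reports success.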
int deallocate_metaphysical_rid(unsigned long rid)
{
    // fix this when the increment allocation mechanism is fixed.
    return 1;
}

/*************************************
  Region Block setup/management
*************************************/
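// ridblock_owner[] records which domain currently owns each block of
// MIN_RIDS region ids; a NULL entry means the block is free.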
static int implemented_rid_bits = 0;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };

void get_impl_rid_bits(void)
{
    // FIXME (call PAL)
//#ifdef CONFIG_MCKINLEY
    implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;
//#else
//#error "rid ranges won't work on Merced"
//#endif
    if (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS ||
        implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
        BUG();
}

/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 * "rid blocks"
 */
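/*
 * Purely illustrative (values assumed, not taken from this source): if
 * IA64_MIN_IMPL_RID_BITS were 18 and a domain asked for ridbits == 20,
 * n_rid_blocks below would be 1 << (20 - 18) == 4, and the search would
 * probe ridblock_owner[] at indices 4, 8, 12, ... looking for a run of
 * four consecutive free blocks to hand to the domain.
 */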
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
    int i, j, n_rid_blocks;

    if (implemented_rid_bits == 0) get_impl_rid_bits();

    if (ridbits >= IA64_MAX_IMPL_RID_BITS)
        ridbits = IA64_MAX_IMPL_RID_BITS - 1;

    if (ridbits < IA64_MIN_IMPL_RID_BITS)
        ridbits = IA64_MIN_IMPL_RID_BITS;

    // convert to rid_blocks and find one
    n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

    // skip over block 0, reserved for "meta-physical mappings (and Xen)"
    for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
        if (ridblock_owner[i] == NULL) {
            for (j = i; j < i + n_rid_blocks; ++j) {
                if (ridblock_owner[j]) break;
            }
            // all n_rid_blocks entries starting at i are free
            // (checked this way to avoid reading one past the array)
            if (j == i + n_rid_blocks) break;
        }
    }

    if (i >= MAX_RID_BLOCKS) return 0;

    // found an unused block:
    // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
    // mark this block as owned
    for (j = i; j < i + n_rid_blocks; ++j) ridblock_owner[j] = d;

    // setup domain struct
    d->arch.rid_bits = ridbits;
    d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
    d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;
    printf("###allocating rid_range, domain %p: starting_rid=%x, ending_rid=%x\n",
           d, d->arch.starting_rid, d->arch.ending_rid);

    return 1;
}
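// Return a domain's rid blocks to the free pool and clear the rid range
// recorded in its arch state.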
int deallocate_rid_range(struct domain *d)
{
    int i;
    int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
    int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

    //
    // not all domains will have allocated RIDs (physical mode loaders for instance)
    //
    if (d->arch.rid_bits == 0) return 1;

#ifdef DEBUG
    for (i = rid_block_start; i < rid_block_end; ++i) {
        ASSERT(ridblock_owner[i] == d);
    }
#endif

    for (i = rid_block_start; i < rid_block_end; ++i)
        ridblock_owner[i] = NULL;

    d->arch.rid_bits = 0;
    d->arch.starting_rid = 0;
    d->arch.ending_rid = 0;
    return 1;
}
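// Program a region register with the Xen-mangled value. set_rr() issues a
// data serialization afterwards; set_rr_no_srlz() leaves that to the caller.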
static inline void
set_rr_no_srlz(unsigned long rr, unsigned long rrval)
{
    ia64_set_rr(rr, vmMangleRID(rrval));
}

void
set_rr(unsigned long rr, unsigned long rrval)
{
    ia64_set_rr(rr, vmMangleRID(rrval));
    ia64_srlz_d();
}
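// Accept only the page-size shifts this code permits in a region register's
// .ps field (4KB up through 256MB); anything else is rejected.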
static inline int validate_page_size(unsigned long ps)
{
    switch (ps) {
    case 12: case 13: case 14: case 16: case 18:
    case 20: case 22: case 24: case 26: case 28:
        return 1;
    default:
        return 0;
    }
}
// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
    struct vcpu *v = current;
    unsigned long rreg = REGION_NUMBER(rr);
    ia64_rr rrv, newrrv, memrrv;
    unsigned long newrid;

    if (val == -1) return 1;

    rrv.rrval = val;
    newrrv.rrval = 0;
    newrid = v->arch.starting_rid + rrv.rid;

    if (newrid > v->arch.ending_rid) {
        printk("can't set rr%d to %lx, starting_rid=%x,"
               "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
               v->arch.starting_rid, v->arch.ending_rid, val);
        return 0;
    }

#if 0
    memrrv.rrval = rrv.rrval;
    if (rreg == 7) {
        newrrv.rid = newrid;
        newrrv.ve = VHPT_ENABLED_REGION_7;
        newrrv.ps = IA64_GRANULE_SHIFT;
        ia64_new_rr7(vmMangleRID(newrrv.rrval), v->vcpu_info,
                     v->arch.privregs);
    }
    else {
        newrrv.rid = newrid;
        // FIXME? region 6 needs to be uncached for EFI to work
        if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
        else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
        newrrv.ps = PAGE_SHIFT;
        if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
        set_rr(rr, newrrv.rrval);
    }
#else
    memrrv.rrval = rrv.rrval;
    newrrv.rid = newrid;
    newrrv.ve = 1; // VHPT now enabled for region 7!!
    newrrv.ps = PAGE_SHIFT;

    if (rreg == 0) {
        v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
        if (!PSCB(v, metaphysical_mode))
            set_rr(rr, newrrv.rrval);
    } else if (rreg == 7) {
        ia64_new_rr7(vmMangleRID(newrrv.rrval), v->vcpu_info,
                     v->arch.privregs, __get_cpu_var(vhpt_paddr),
                     (unsigned long) pal_vaddr);
    } else {
        set_rr(rr, newrrv.rrval);
    }
#endif
    return 1;
}
// set rr0 to the passed rid (for metaphysical mode, so don't use domain offset)
int set_metaphysical_rr0(void)
{
    struct vcpu *v = current;
//  ia64_rr rrv;

//  rrv.ve = 1;  FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
    ia64_set_rr(0, v->arch.metaphysical_rr0);
    ia64_srlz_d();
    return 1;
}
// validates/changes region registers 0-6 in the currently executing domain
// Note that this is the one and only SP API (other than executing a privop)
// for a domain to use to change region registers
int set_all_rr(u64 rr0, u64 rr1, u64 rr2, u64 rr3,
               u64 rr4, u64 rr5, u64 rr6, u64 rr7)
{
    if (!set_one_rr(0x0000000000000000L, rr0)) return 0;
    if (!set_one_rr(0x2000000000000000L, rr1)) return 0;
    if (!set_one_rr(0x4000000000000000L, rr2)) return 0;
    if (!set_one_rr(0x6000000000000000L, rr3)) return 0;
    if (!set_one_rr(0x8000000000000000L, rr4)) return 0;
    if (!set_one_rr(0xa000000000000000L, rr5)) return 0;
    if (!set_one_rr(0xc000000000000000L, rr6)) return 0;
    if (!set_one_rr(0xe000000000000000L, rr7)) return 0;
    return 1;
}
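// Initialize a vcpu's shadow region-register array: rr0 is left unset (-1),
// rr1-rr5 get the default value with the VHPT enabled, and rr6 gets it with
// the VHPT disabled.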
void init_all_rr(struct vcpu *v)
{
    ia64_rr rrv;

    rrv.rrval = 0;
    //rrv.rrval = v->domain->arch.metaphysical_rr0;
    rrv.ps = PAGE_SHIFT;
    rrv.ve = 1;
    if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
    VCPU(v, rrs[0]) = -1;
    VCPU(v, rrs[1]) = rrv.rrval;
    VCPU(v, rrs[2]) = rrv.rrval;
    VCPU(v, rrs[3]) = rrv.rrval;
    VCPU(v, rrs[4]) = rrv.rrval;
    VCPU(v, rrs[5]) = rrv.rrval;
    rrv.ve = 0;
    VCPU(v, rrs[6]) = rrv.rrval;
//  v->shared_info->arch.rrs[7] = rrv.rrval;
}
/* XEN/ia64 INTERNAL ROUTINES */
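// physicalize_rid()/virtualize_rid() translate between the rid a guest
// believes it is using and the machine rid actually programmed into the
// region registers, by adding or subtracting the domain's starting_rid.
// Illustration only (the offset is assigned at domain creation): with
// starting_rid == 0x1000, a guest rid of 5 becomes machine rid 0x1005.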
unsigned long physicalize_rid(struct vcpu *v, unsigned long rrval)
{
    ia64_rr rrv;

    rrv.rrval = rrval;
    rrv.rid += v->arch.starting_rid;
    return rrv.rrval;
}

unsigned long
virtualize_rid(struct vcpu *v, unsigned long rrval)
{
    ia64_rr rrv;

    rrv.rrval = rrval;
    rrv.rid -= v->arch.starting_rid;
    return rrv.rrval;
}
// loads a thread's region register (0-6) state into
// the real physical region registers. Returns the
// (possibly mangled) bits to store into rr7
// iff it is different than what is currently in physical
// rr7 (because we have to go to assembly and physical mode
// to change rr7). If no change to rr7 is required, returns 0.
//
void load_region_regs(struct vcpu *v)
{
    unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
    // TODO: These probably should be validated
    unsigned long bad = 0;

    if (VCPU(v, metaphysical_mode)) {
        rr0 = v->domain->arch.metaphysical_rr0;
        ia64_set_rr(0x0000000000000000L, rr0);
        ia64_srlz_d();
    }
    else {
        rr0 = VCPU(v, rrs[0]);
        if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
    }
    rr1 = VCPU(v, rrs[1]);
    rr2 = VCPU(v, rrs[2]);
    rr3 = VCPU(v, rrs[3]);
    rr4 = VCPU(v, rrs[4]);
    rr5 = VCPU(v, rrs[5]);
    rr6 = VCPU(v, rrs[6]);
    rr7 = VCPU(v, rrs[7]);
    if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
    if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
    if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
    if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
    if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
    if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
    if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
    if (bad) {
        panic_domain(0, "load_region_regs: can't set! bad=%lx\n", bad);
    }
}
378 }