ia64/xen-unstable

view xen/arch/ia64/xen/regionreg.c @ 9685:4e8a64d8bd0e

[IA64] regionreg.c: deallocate metaphysical rids

allocate_rid_range also allocates metaphysical rids.
deallocate_rid_range also deallocates mp rids.
init_rid_allocator() added.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Fri Apr 14 14:13:13 2006 -0600 (2006-04-14)
parents cc94ab1e0de0
children 2de95fd92e74
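
The change pairs normal-rid and metaphysical-rid bookkeeping in the allocate/deallocate paths. A rough sketch of how the three routines fit together (illustrative only: the helper name below is hypothetical, and the real call sites live in the arch boot and domain create/destroy code, not in this file):

/* hypothetical caller, for illustration */
void example_rid_lifecycle(struct domain *d)
{
        init_rid_allocator();            /* once at boot: sizes rid blocks from PAL */
        if (!allocate_rid_range(d, 0))   /* claims a block; also sets the mp rids   */
                panic("out of rid blocks\n");
        /* ... domain runs ... */
        deallocate_rid_range(d);         /* frees the block and clears the mp rids  */
}
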
line source
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *      Dan Magenheimer (dan.magenheimer@hp.com)
 *      Bret Mckee (bret.mckee@hp.com)
 *
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long p_vhpt, unsigned long v_pal);
extern void *pal_vaddr;

/* FIXME: where should these declarations live? */
extern void panic_domain(struct pt_regs *, const char *, ...);

#define DOMAIN_RID_BITS_DEFAULT 18

#define IA64_MIN_IMPL_RID_BITS (IA64_MIN_IMPL_RID_MSB+1)
#define IA64_MAX_IMPL_RID_BITS 24

#define MIN_RIDS (1 << IA64_MIN_IMPL_RID_BITS)
#define MIN_RID_MAX (MIN_RIDS - 1)
#define MIN_RID_MASK (MIN_RIDS - 1)
#define MAX_RIDS (1 << (IA64_MAX_IMPL_RID_BITS))
#define MAX_RID (MAX_RIDS - 1)
#define MAX_RID_BLOCKS (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))
#define RIDS_PER_RIDBLOCK MIN_RIDS

#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
        unsigned long r;
        __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
        return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
        __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif

static unsigned long allocate_metaphysical_rr(struct domain *d, int n)
{
        ia64_rr rrv;

        rrv.rrval = 0;  // Or else may see reserved bit fault
        rrv.rid = d->arch.starting_mp_rid + n;
        rrv.ps = PAGE_SHIFT;
        rrv.ve = 0;
        /* Mangle metaphysical rid */
        rrv.rrval = vmMangleRID(rrv.rrval);
        return rrv.rrval;
}
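
/*
 * Illustration (hypothetical values): if a domain owns rid block 5 and
 * mp_rid_shift is 12, starting_mp_rid is 5 << 12 = 0x5000, so n = 0 yields
 * rid 0x5000 (used for rr0) and n = 1 yields rid 0x5001 (used for rr4).
 * The raw value is scrambled by vmMangleRID() before it is ever programmed
 * into a region register.
 */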

/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static int mp_rid_shift;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };

void init_rid_allocator (void)
{
        int log_blocks;
        pal_vm_info_2_u_t vm_info_2;

        /* Get machine rid_size.  */
        BUG_ON (ia64_pal_vm_summary (NULL, &vm_info_2) != 0);
        implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;

        /* We need at least a minimum amount of rid space.  */
        BUG_ON (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);

        /* But we cannot use more space than we can manage.  */
        if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
                implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;

        log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);

        printf ("Maximum number of simultaneous domains: %d\n",
                (1 << log_blocks) - 1);

        mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
        BUG_ON (mp_rid_shift < 3);
}
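
/*
 * Worked example (illustrative; the real numbers come from PAL on the
 * running machine, and assume IA64_MIN_IMPL_RID_BITS is 18):
 *   rid_size = 24  ->  implemented_rid_bits capped at 24
 *   log_blocks   = 24 - 18 = 6, i.e. at most 63 simultaneous domains
 *   mp_rid_shift = 18 - 6  = 12, so each domain gets a 4096-rid
 *   metaphysical window at (block index << 12), which always falls inside
 *   rid block 0, the block reserved for Xen / metaphysical mappings.
 */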

/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 *      "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
        int i, j, n_rid_blocks;

        if (ridbits == 0)
                ridbits = DOMAIN_RID_BITS_DEFAULT;

        if (ridbits >= IA64_MAX_IMPL_RID_BITS)
                ridbits = IA64_MAX_IMPL_RID_BITS - 1;

        if (ridbits < IA64_MIN_IMPL_RID_BITS)
                ridbits = IA64_MIN_IMPL_RID_BITS;

        // convert to rid_blocks and find one
        n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

        // skip over block 0, reserved for "meta-physical mappings (and Xen)"
        for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
                if (ridblock_owner[i] == NULL) {
                        for (j = i; j < i + n_rid_blocks; ++j) {
                                if (ridblock_owner[j])
                                        break;
                        }
                        /* Inner scan ran to completion, so all n_rid_blocks
                           blocks starting at i are free.  (Testing
                           ridblock_owner[j] here would read one entry past
                           the candidate range.)  */
                        if (j == i + n_rid_blocks)
                                break;
                }
        }

        if (i >= MAX_RID_BLOCKS)
                return 0;

        // found an unused block:
        // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
        // mark this block as owned
        for (j = i; j < i + n_rid_blocks; ++j)
                ridblock_owner[j] = d;

        // setup domain struct
        d->arch.rid_bits = ridbits;
        d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
        d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;

        d->arch.starting_mp_rid = i << mp_rid_shift;
        d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;

        d->arch.metaphysical_rr0 = allocate_metaphysical_rr(d, 0);
        d->arch.metaphysical_rr4 = allocate_metaphysical_rr(d, 1);

        printf("###allocating rid_range, domain %p: rid=%x-%x mp_rid=%x\n",
               d, d->arch.starting_rid, d->arch.ending_rid,
               d->arch.starting_mp_rid);

        return 1;
}
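
/*
 * Layout example (hypothetical numbers, assuming IA64_MIN_IMPL_RID_BITS = 18
 * and mp_rid_shift = 12): a domain given block i = 3 with the default 18
 * ridbits owns machine rids 3 << 18 .. (4 << 18) - 1 and metaphysical rids
 * 3 << 12 .. (4 << 12) - 1; the latter always land in block 0, which the
 * search above deliberately skips.
 */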

int deallocate_rid_range(struct domain *d)
{
        int i;
        int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
        int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

        //
        // not all domains will have allocated RIDs (physical mode loaders for instance)
        //
        if (d->arch.rid_bits == 0) return 1;

#ifdef DEBUG
        for (i = rid_block_start; i < rid_block_end; ++i) {
                ASSERT(ridblock_owner[i] == d);
        }
#endif

        for (i = rid_block_start; i < rid_block_end; ++i)
                ridblock_owner[i] = NULL;

        d->arch.rid_bits = 0;
        d->arch.starting_rid = 0;
        d->arch.ending_rid = 0;
        d->arch.starting_mp_rid = 0;
        d->arch.ending_mp_rid = 0;
        return 1;
}

static void
set_rr(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
        ia64_srlz_d();
}

// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
        struct vcpu *v = current;
        unsigned long rreg = REGION_NUMBER(rr);
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;

        if (val == -1) return 1;

        rrv.rrval = val;
        newrrv.rrval = 0;
        newrid = v->arch.starting_rid + rrv.rid;

        if (newrid >= v->arch.ending_rid) {     /* ending_rid is exclusive */
                printk("can't set rr%d to %lx, starting_rid=%x,"
                        "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
                        v->arch.starting_rid,v->arch.ending_rid,val);
                return 0;
        }

#if 0
        memrrv.rrval = rrv.rrval;
        if (rreg == 7) {
                newrrv.rid = newrid;
                newrrv.ve = VHPT_ENABLED_REGION_7;
                newrrv.ps = IA64_GRANULE_SHIFT;
                ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
                             v->arch.privregs);
        }
        else {
                newrrv.rid = newrid;
                // FIXME? region 6 needs to be uncached for EFI to work
                if (rreg == 6) newrrv.ve = VHPT_ENABLED_REGION_7;
                else newrrv.ve = VHPT_ENABLED_REGION_0_TO_6;
                newrrv.ps = PAGE_SHIFT;
                if (rreg == 0) v->arch.metaphysical_saved_rr0 = newrrv.rrval;
                set_rr(rr,newrrv.rrval);
        }
#else
        memrrv.rrval = rrv.rrval;
        newrrv.rid = newrid;
        newrrv.ve = 1; // VHPT now enabled for region 7!!
        newrrv.ps = PAGE_SHIFT;

        if (rreg == 0) {
                v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
                if (!PSCB(v,metaphysical_mode))
                        set_rr(rr,newrrv.rrval);
        } else if (rreg == 7) {
                ia64_new_rr7(vmMangleRID(newrrv.rrval),v->vcpu_info,
                             v->arch.privregs, __get_cpu_var(vhpt_paddr),
                             (unsigned long) pal_vaddr);
        } else {
                set_rr(rr,newrrv.rrval);
        }
#endif
        return 1;
}
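
/*
 * Translation example (hypothetical values): if the domain's starting_rid is
 * 0xc0000 and the guest writes a region register whose rid field is 0x42,
 * the machine rid actually installed is 0xc0042 (in its vmMangleRID()d form),
 * so two domains reusing the same guest rid never collide in the machine
 * TLB/VHPT, since each domain owns a disjoint machine rid range.
 */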

// set rr0 to this domain's metaphysical rid (metaphysical mode, so the
// usual starting_rid offset is not applied)
int set_metaphysical_rr0(void)
{
        struct vcpu *v = current;
//      ia64_rr rrv;

//      rrv.ve = 1;     FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
        ia64_set_rr(0,v->arch.metaphysical_rr0);
        ia64_srlz_d();
        return 1;
}

void init_all_rr(struct vcpu *v)
{
        ia64_rr rrv;

        rrv.rrval = 0;
        //rrv.rrval = v->domain->arch.metaphysical_rr0;
        rrv.ps = PAGE_SHIFT;
        rrv.ve = 1;
        if (!v->vcpu_info) { printf("Stopping in init_all_rr\n"); dummy(); }
        VCPU(v,rrs[0]) = -1;
        VCPU(v,rrs[1]) = rrv.rrval;
        VCPU(v,rrs[2]) = rrv.rrval;
        VCPU(v,rrs[3]) = rrv.rrval;
        VCPU(v,rrs[4]) = rrv.rrval;
        VCPU(v,rrs[5]) = rrv.rrval;
        rrv.ve = 0;
        VCPU(v,rrs[6]) = rrv.rrval;
//      v->shared_info->arch.rrs[7] = rrv.rrval;
}

/* XEN/ia64 INTERNAL ROUTINES */

// loads a thread's region register (0-7) state into
// the real physical region registers.  rr7 needs special handling
// (ia64_new_rr7) because we have to go to assembly and physical mode
// to change it.  Panics the domain if any of the registers cannot
// be set.
//
void load_region_regs(struct vcpu *v)
{
        unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
        // TODO: These probably should be validated
        unsigned long bad = 0;

        if (VCPU(v,metaphysical_mode)) {
                rr0 = v->domain->arch.metaphysical_rr0;
                ia64_set_rr(0x0000000000000000L, rr0);
                ia64_srlz_d();
        }
        else {
                rr0 = VCPU(v,rrs[0]);
                if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
        }
        rr1 = VCPU(v,rrs[1]);
        rr2 = VCPU(v,rrs[2]);
        rr3 = VCPU(v,rrs[3]);
        rr4 = VCPU(v,rrs[4]);
        rr5 = VCPU(v,rrs[5]);
        rr6 = VCPU(v,rrs[6]);
        rr7 = VCPU(v,rrs[7]);
        if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
        if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
        if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
        if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
        if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
        if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
        if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
        if (bad) {
                panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
        }
}
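
/*
 * Usage sketch (illustrative only -- the real caller is the arch
 * context-switch path outside this file, and this helper name is
 * hypothetical):
 */
#if 0   /* example, not built */
static void example_context_switch_in(struct vcpu *next)
{
        /* Reload rr0-rr7 from the vcpu's saved rrs[]; load_region_regs()
         * itself substitutes the metaphysical rr0 when the vcpu is in
         * metaphysical (physical-addressing) mode.  */
        load_region_regs(next);
}
#endif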