ia64/xen-unstable

view xen/arch/ia64/xen/regionreg.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 166bf3b04495
children 0b72d16e794b
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *     Bret Mckee (bret.mckee@hp.com)
 *
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
/* Defined in xenasm.S */
extern void ia64_new_rr7(unsigned long rid, void *shared_info, void *shared_arch_info, unsigned long shared_info_va, unsigned long va_vhpt);
/* The RID virtualization mechanism is really simple: domains have fewer rid
   bits than the host and the host rid space is shared among the domains.
   (Values in parentheses are the usual default values.)

   The host rid space is partitioned into MAX_RID_BLOCKS (= 64)
   blocks of 2**IA64_MIN_IMPL_RID_BITS (= 18) rids.  The first block is also
   partitioned into MAX_RID_BLOCKS small blocks.  Small blocks are used for
   metaphysical rids.  Small block 0 can't be allocated and is reserved for
   Xen's own rids during boot.

   Blocks and small blocks are allocated together and a domain may
   have one or more consecutive blocks (and small blocks).
*/
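
/* Worked example with the defaults above: 24 implemented rid bits give
   2^(24-18) = 64 blocks of 2^18 = 262144 rids each.  A domain using the
   minimum 18 rid bits owns exactly one block; a domain configured for
   19 rid bits owns two consecutive blocks, and so on. */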
/* Minimum number of RID bits for a domain.  The current value is 18, which is
   the minimum defined by the Itanium architecture, but it can be lowered
   to increase the number of domains. */
#define IA64_MIN_IMPL_RID_BITS (IA64_MIN_IMPL_RID_MSB+1)

/* Maximum number of RID bits.  This is definitely 24. */
#define IA64_MAX_IMPL_RID_BITS 24

/* Maximum number of blocks. */
#define MAX_RID_BLOCKS (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))

/* Default number of rid bits for domains. */
static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
integer_param("dom_rid_bits", domain_rid_bits_default);
#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
        unsigned long r;
        __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
        return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
        __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif
static unsigned long allocate_metaphysical_rr(struct domain *d, int n)
{
        ia64_rr rrv;

        rrv.rrval = 0;  // Or else may see reserved bit fault
        rrv.rid = d->arch.starting_mp_rid + n;
        rrv.ps = PAGE_SHIFT; // only used at domain creation
        rrv.ve = 0;
        /* Mangle metaphysical rid */
        rrv.rrval = vmMangleRID(rrv.rrval);
        return rrv.rrval;
}
/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static int mp_rid_shift;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };
void __init init_rid_allocator (void)
{
        int log_blocks;
        pal_vm_info_2_u_t vm_info_2;

        /* Get machine rid_size. */
        BUG_ON (ia64_pal_vm_summary (NULL, &vm_info_2) != 0);
        implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;

        /* We need at least a little headroom... */
        BUG_ON (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);

        /* ...but we cannot use more rid bits than the maximum. */
        if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
                implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;

        /* Due to RID mangling, we expect 24 RID bits!
           This test should be removed if RID mangling is removed/modified. */
        if (implemented_rid_bits != 24) {
                printk ("RID mangling expected 24 RID bits, got only %d!\n",
                        implemented_rid_bits);
                BUG();
        }

        /* Allow the creation of at least domain 0. */
        if (domain_rid_bits_default > implemented_rid_bits - 1)
                domain_rid_bits_default = implemented_rid_bits - 1;

        /* Check for too small values. */
        if (domain_rid_bits_default < IA64_MIN_IMPL_RID_BITS) {
                printk ("Default domain rid bits %d is too small, use %d\n",
                        domain_rid_bits_default, IA64_MIN_IMPL_RID_BITS);
                domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
        }

        log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);

        printk ("Maximum number of domains: %d; %d RID bits per domain\n",
                (1 << (implemented_rid_bits - domain_rid_bits_default)) - 1,
                domain_rid_bits_default);
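
        /* Metaphysical rids are carved out of block 0: the domain owning rid
           block i gets small block i, i.e. rids in
           [i << mp_rid_shift, (i + 1) << mp_rid_shift).  With 24 implemented
           rid bits, log_blocks is 6 and mp_rid_shift is 12. */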
        mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
        BUG_ON (mp_rid_shift < 3);
}
/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 * "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
        int i, j, n_rid_blocks;

        if (ridbits == 0)
                ridbits = domain_rid_bits_default;

        if (ridbits >= IA64_MAX_IMPL_RID_BITS)
                ridbits = IA64_MAX_IMPL_RID_BITS - 1;

        if (ridbits < IA64_MIN_IMPL_RID_BITS)
                ridbits = IA64_MIN_IMPL_RID_BITS;

        // convert to rid_blocks and find one
        n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

        // skip over block 0, reserved for "meta-physical mappings (and Xen)"
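        // The outer loop visits candidate start blocks aligned to
        // n_rid_blocks; the inner loop checks whether all n_rid_blocks
        // consecutive blocks are free (j ends on an owned block when the
        // candidate range is taken, or on its free last block when it is not).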
        for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
                if (ridblock_owner[i] == NULL) {
                        for (j = i; j < i + n_rid_blocks; ++j) {
                                if (ridblock_owner[j]) {
                                        ++j;
                                        break;
                                }
                        }
                        --j;
                        if (ridblock_owner[j] == NULL)
                                break;
                }
        }

        if (i >= MAX_RID_BLOCKS)
                return 0;

        // found an unused block:
        // (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
        // mark this block as owned
        for (j = i; j < i + n_rid_blocks; ++j)
                ridblock_owner[j] = d;

        // setup domain struct
        d->arch.rid_bits = ridbits;
        d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
        d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;

        d->arch.starting_mp_rid = i << mp_rid_shift;
        d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;

        d->arch.metaphysical_rid_dt = allocate_metaphysical_rr(d, 0);
        d->arch.metaphysical_rid_d = allocate_metaphysical_rr(d, 1);

        dprintk(XENLOG_DEBUG, "### domain %p: rid=%x-%x mp_rid=%x\n",
                d, d->arch.starting_rid, d->arch.ending_rid,
                d->arch.starting_mp_rid);

        return 1;
}
int deallocate_rid_range(struct domain *d)
{
        int i;
        int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
        int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

        /* Sanity check. */
        if (d->arch.rid_bits == 0)
                return 1;

        for (i = rid_block_start; i < rid_block_end; ++i) {
                ASSERT(ridblock_owner[i] == d);
                ridblock_owner[i] = NULL;
        }

        d->arch.rid_bits = 0;
        d->arch.starting_rid = 0;
        d->arch.ending_rid = 0;
        d->arch.starting_mp_rid = 0;
        d->arch.ending_mp_rid = 0;
        return 1;
}
static void
set_rr(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
        ia64_srlz_d();
}
// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
        struct vcpu *v = current;
        unsigned long rreg = REGION_NUMBER(rr);
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;

        rrv.rrval = val;
        newrrv.rrval = 0;
        newrid = v->arch.starting_rid + rrv.rid;

        // avoid reserved register/field fault
        if (unlikely(is_reserved_rr_field(v, val))) {
                printk("can't set rr%d to %lx, starting_rid=%x,"
                       "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
                       v->arch.starting_rid,v->arch.ending_rid,val);
                return 0;
        }

        memrrv.rrval = rrv.rrval;
        newrrv.rid = newrid;
        newrrv.ve = 1; // VHPT now enabled for region 7!!
        newrrv.ps = v->arch.vhpt_pg_shift;
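
        /*
         * rr0 and rr7 need special handling: the mangled rr0 value is saved
         * and only loaded into hardware when the vcpu is not in metaphysical
         * mode, while rr7 is switched via the assembly stub ia64_new_rr7,
         * since changing rr7 requires dropping to assembly and physical mode
         * (Xen's own mappings live in region 7).
         */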
        if (rreg == 0) {
                v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
                if (!PSCB(v,metaphysical_mode))
                        set_rr(rr,newrrv.rrval);
        } else if (rreg == 7) {
                ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
                             v->arch.privregs, v->domain->arch.shared_info_va,
                             __va_ul(vcpu_vhpt_maddr(v)));
        } else {
                set_rr(rr,newrrv.rrval);
        }
        return 1;
}
void set_virtual_rr0(void)
{
        struct vcpu *v = current;

        ia64_set_rr(0, v->arch.metaphysical_saved_rr0);
        ia64_srlz_d();
}
// set rr0 to the vcpu's metaphysical rid (metaphysical mode, so the domain
// rid offset is not applied)
void set_metaphysical_rr0(void)
{
        struct vcpu *v = current;
        // ia64_rr rrv;

        // rrv.ve = 1;  FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
        ia64_set_rr(0, v->arch.metaphysical_rid_dt);
        ia64_srlz_d();
}
void init_all_rr(struct vcpu *v)
{
        ia64_rr rrv;

        rrv.rrval = 0;
        rrv.ps = v->arch.vhpt_pg_shift;
        rrv.ve = 1;
        if (!v->vcpu_info)
                panic("Stopping in init_all_rr\n");
        VCPU(v,rrs[0]) = rrv.rrval;
        VCPU(v,rrs[1]) = rrv.rrval;
        VCPU(v,rrs[2]) = rrv.rrval;
        VCPU(v,rrs[3]) = rrv.rrval;
        VCPU(v,rrs[4]) = rrv.rrval;
        VCPU(v,rrs[5]) = rrv.rrval;
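        /* Leave the VHPT walker disabled (ve = 0) for regions 6 and 7. */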
        rrv.ve = 0;
        VCPU(v,rrs[6]) = rrv.rrval;
        VCPU(v,rrs[7]) = rrv.rrval;
}
/* XEN/ia64 INTERNAL ROUTINES */

// loads a vcpu's region register (0-7) state into
// the real physical region registers.  Changing rr7 requires
// dropping to assembly and physical mode, which set_one_rr
// does via ia64_new_rr7.
//
void load_region_regs(struct vcpu *v)
{
        unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
        // TODO: These probably should be validated
        unsigned long bad = 0;

        if (VCPU(v,metaphysical_mode)) {
                rr0 = v->domain->arch.metaphysical_rid_dt;
                ia64_set_rr(0x0000000000000000L, rr0);
                ia64_srlz_d();
        }
        else {
                rr0 = VCPU(v,rrs[0]);
                if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
        }
        rr1 = VCPU(v,rrs[1]);
        rr2 = VCPU(v,rrs[2]);
        rr3 = VCPU(v,rrs[3]);
        rr4 = VCPU(v,rrs[4]);
        rr5 = VCPU(v,rrs[5]);
        rr6 = VCPU(v,rrs[6]);
        rr7 = VCPU(v,rrs[7]);
        if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
        if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
        if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
        if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
        if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
        if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
        if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
        if (bad) {
                panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
        }
}