ia64/xen-unstable

view xen/arch/ia64/xen/regionreg.c @ 18359:de736eefb6e1

[IA64] add one BUG_ON() to allocate_metaphysical_rr()

add one BUG_ON() to allocate_metaphysical_rr() to make sure
that the rid used for metaphysical addresses does not collide.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Aug 25 19:04:37 2008 +0900 (2008-08-25)
parents e9706492e960
children 6607624285b2
line source
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *	Bret Mckee (bret.mckee@hp.com)
 *
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
#include <asm/percpu.h>
#include <asm/pal.h>

/* Defined in xenasm.S */
extern void ia64_new_rr7(unsigned long rid, void *shared_info,
                         void *shared_arch_info, unsigned long shared_info_va,
                         unsigned long va_vhpt);
extern void ia64_new_rr7_efi(unsigned long rid, unsigned long repin_percpu,
                             unsigned long vpd);

/* The RID virtualization mechanism is really simple: domains have fewer rid
   bits than the host, and the host rid space is shared among the domains.
   (Values in parentheses are the usual default values.)

   The host rid space is partitioned into MAX_RID_BLOCKS (= 64)
   blocks of 2**IA64_MIN_IMPL_RID_BITS (= 2**18) rids each.  The first block
   is itself partitioned into MAX_RID_BLOCKS small blocks.  Small blocks are
   used for metaphysical rids.  Small block 0 can't be allocated and is
   reserved for Xen's own rids during boot.

   Blocks and small blocks are allocated together and a domain may
   have one or more consecutive blocks (and small blocks).  */
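
/* For illustration, assuming the usual defaults computed in
   init_rid_allocator() below (24 implemented rid bits, 18-bit blocks and
   mp_rid_shift = 12):
       block i       covers rids              [i << 18, (i + 1) << 18)
       small block i covers metaphysical rids [i << 12, (i + 1) << 12)
   A domain owning block i uses both ranges with the same index i.  */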

/* Minimum number of RID bits for a domain.  The current value is 18, which
   is the minimum defined by the Itanium architecture, but it can be lowered
   to increase the number of domains.  */
#define IA64_MIN_IMPL_RID_BITS	(IA64_MIN_IMPL_RID_MSB+1)
/* Maximum number of RID bits.  This is definitely 24.  */
#define IA64_MAX_IMPL_RID_BITS	24

/* Maximum number of blocks.  */
#define MAX_RID_BLOCKS	(1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))

/* Default number of rid bits for domains.  */
static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
integer_param("dom_rid_bits", domain_rid_bits_default);

DEFINE_PER_CPU(unsigned long, domain_shared_info);
DEFINE_PER_CPU(unsigned long, inserted_vhpt);
DEFINE_PER_CPU(unsigned long, inserted_shared_info);
DEFINE_PER_CPU(unsigned long, inserted_mapped_regs);
DEFINE_PER_CPU(unsigned long, inserted_vpd);

#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
        unsigned long r;
        __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
        return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
        __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif

static unsigned long allocate_metaphysical_rr(struct domain *d, int n)
{
        ia64_rr rrv;

        BUG_ON(d->arch.starting_mp_rid + n >= d->arch.ending_mp_rid);

        rrv.rrval = 0;	// Or else may see reserved bit fault
        rrv.rid = d->arch.starting_mp_rid + n;
        rrv.ps = PAGE_SHIFT; // only used at domain creation
        rrv.ve = 0;
        /* Mangle metaphysical rid */
        rrv.rrval = vmMangleRID(rrv.rrval);
        return rrv.rrval;
}
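
/*
 * For illustration: with the usual mp_rid_shift of 12 (computed in
 * init_rid_allocator() below), each domain owns 2**12 metaphysical rids,
 * and allocate_rid_range() below only ever passes n = 0 and n = 1.  The
 * BUG_ON() above (added by this changeset) therefore only fires if the
 * starting_mp_rid/ending_mp_rid range was set up inconsistently, i.e. if
 * a metaphysical rid would collide with a neighbouring range.
 */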

/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static int mp_rid_shift;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };

void __init init_rid_allocator (void)
{
        int log_blocks;
        pal_vm_info_2_u_t vm_info_2;

        /* Get machine rid_size.  */
        BUG_ON (ia64_pal_vm_summary (NULL, &vm_info_2) != 0);
        implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;

        /* We need at least some headroom above the minimum...  */
        BUG_ON (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);

        /* ...but we cannot use more than IA64_MAX_IMPL_RID_BITS.  */
        if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
                implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;

        /* Due to RID mangling, we expect 24 RID bits!
           This test should be removed if RID mangling is removed/modified.  */
        if (implemented_rid_bits != 24) {
                printk ("RID mangling expected 24 RID bits, got only %d!\n",
                        implemented_rid_bits);
                BUG();
        }

        /* Allow the creation of at least domain 0.  */
        if (domain_rid_bits_default > implemented_rid_bits - 1)
                domain_rid_bits_default = implemented_rid_bits - 1;

        /* Check for too small values.  */
        if (domain_rid_bits_default < IA64_MIN_IMPL_RID_BITS) {
                printk ("Default domain rid bits %d is too small, use %d\n",
                        domain_rid_bits_default, IA64_MIN_IMPL_RID_BITS);
                domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
        }

        log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);

        printk ("Maximum number of domains: %d; %d RID bits per domain\n",
                (1 << (implemented_rid_bits - domain_rid_bits_default)) - 1,
                domain_rid_bits_default);

        mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
        BUG_ON (mp_rid_shift < 3);
}
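
/*
 * Worked example, assuming the expected 24 implemented RID bits and the
 * default of 18 RID bits per domain:
 *     log_blocks   = 24 - 18 = 6         (MAX_RID_BLOCKS = 64 blocks)
 *     max domains  = (1 << 6) - 1 = 63   (block 0 is reserved)
 *     mp_rid_shift = 18 - 6 = 12         (2**12 metaphysical rids per domain)
 */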

/*
 * Allocate a power-of-two-sized chunk of region id space -- one or more
 * "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
        int i, j, n_rid_blocks;

        if (ridbits == 0)
                ridbits = domain_rid_bits_default;

        if (ridbits >= IA64_MAX_IMPL_RID_BITS)
                ridbits = IA64_MAX_IMPL_RID_BITS - 1;

        if (ridbits < IA64_MIN_IMPL_RID_BITS)
                ridbits = IA64_MIN_IMPL_RID_BITS;

        // convert to rid_blocks and find one
        n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

        // skip over block 0, reserved for "meta-physical mappings (and Xen)"
        for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
                if (ridblock_owner[i] == NULL) {
                        for (j = i; j < i + n_rid_blocks; ++j) {
                                if (ridblock_owner[j]) {
                                        ++j;
                                        break;
                                }
                        }
                        --j;
                        if (ridblock_owner[j] == NULL)
                                break;
                }
        }

        if (i >= MAX_RID_BLOCKS)
                return 0;

        // found an unused block:
        //   (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
        // mark this block as owned
        for (j = i; j < i + n_rid_blocks; ++j)
                ridblock_owner[j] = d;

        // setup domain struct
        d->arch.rid_bits = ridbits;
        d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
        d->arch.ending_rid = (i + n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;

        d->arch.starting_mp_rid = i << mp_rid_shift;
        d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;

        d->arch.metaphysical_rid_dt = allocate_metaphysical_rr(d, 0);
        d->arch.metaphysical_rid_d = allocate_metaphysical_rr(d, 1);

        dprintk(XENLOG_DEBUG, "### domain %p: rid=%x-%x mp_rid=%x\n",
                d, d->arch.starting_rid, d->arch.ending_rid,
                d->arch.starting_mp_rid);

        return 1;
}
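
/*
 * Worked example: the first domain asking for the default 18 rid bits gets
 * block i = 1 (n_rid_blocks = 1), so with mp_rid_shift = 12:
 *     starting_rid    = 1 << 18 = 0x40000,   ending_rid    = 0x80000
 *     starting_mp_rid = 1 << 12 = 0x1000,    ending_mp_rid = 0x2000
 * and its metaphysical region values are built from mp rids 0x1000 (n = 0)
 * and 0x1001 (n = 1), both mangled by vmMangleRID().
 */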

int deallocate_rid_range(struct domain *d)
{
        int i;
        int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
        int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

        /* Sanity check.  */
        if (d->arch.rid_bits == 0)
                return 1;

        for (i = rid_block_start; i < rid_block_end; ++i) {
                ASSERT(ridblock_owner[i] == d);
                ridblock_owner[i] = NULL;
        }

        d->arch.rid_bits = 0;
        d->arch.starting_rid = 0;
        d->arch.ending_rid = 0;
        d->arch.starting_mp_rid = 0;
        d->arch.ending_mp_rid = 0;
        return 1;
}

static void
set_rr(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
        ia64_srlz_d();
}

// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
        struct vcpu *v = current;
        unsigned long rreg = REGION_NUMBER(rr);
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;

        rrv.rrval = val;
        newrrv.rrval = 0;
        newrid = v->arch.starting_rid + rrv.rid;

        // avoid reserved register/field fault
        if (unlikely(is_reserved_rr_field(v, val))) {
                printk("can't set rr%d to %lx, starting_rid=%x, "
                       "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
                       v->arch.starting_rid, v->arch.ending_rid, val);
                return 0;
        }

        memrrv.rrval = rrv.rrval;
        newrrv.rid = newrid;
        newrrv.ve = 1;  // VHPT now enabled for region 7!!
        newrrv.ps = v->arch.vhpt_pg_shift;

        if (rreg == 0) {
                v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
                if (!PSCB(v, metaphysical_mode))
                        set_rr(rr, newrrv.rrval);
        } else if (rreg == 7) {
                __get_cpu_var(domain_shared_info) =
                                (unsigned long)v->domain->shared_info;
#if VHPT_ENABLED
                __get_cpu_var(inserted_vhpt) = __va_ul(vcpu_vhpt_maddr(v));
#endif
                __get_cpu_var(inserted_shared_info) =
                                v->domain->arch.shared_info_va;
                __get_cpu_var(inserted_mapped_regs) =
                                v->domain->arch.shared_info_va +
                                XMAPPEDREGS_OFS;
                ia64_new_rr7(vmMangleRID(newrrv.rrval), v->domain->shared_info,
                             v->arch.privregs, v->domain->arch.shared_info_va,
                             __va_ul(vcpu_vhpt_maddr(v)));
        } else {
                set_rr(rr, newrrv.rrval);
        }
        return 1;
}
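
/*
 * Example: if a domain with starting_rid = 0x40000 sets rr1 with guest
 * rid 5, the physical register ends up carrying rid 0x40000 + 5 = 0x40005
 * (mangled by vmMangleRID() in set_rr()).  rr7 is special: it is switched
 * through the ia64_new_rr7() assembly stub so that the shared_info,
 * mapped-regs and VHPT mappings are re-inserted at the same time.
 */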

int set_one_rr_efi(unsigned long rr, unsigned long val)
{
        unsigned long rreg = REGION_NUMBER(rr);
        unsigned long vpd = 0UL;

        BUG_ON(rreg != 6 && rreg != 7);

        if (rreg == 6) {
                ia64_set_rr(rr, val);
                ia64_srlz_d();
        }
        else {
                if (current && VMX_DOMAIN(current))
                        vpd = __get_cpu_var(inserted_vpd);
                ia64_new_rr7_efi(val, cpu_isset(smp_processor_id(),
                                                percpu_set), vpd);
        }

        return 1;
}

void set_virtual_rr0(void)
{
        struct vcpu *v = current;

        ia64_set_rr(0, v->arch.metaphysical_saved_rr0);
        ia64_srlz_d();
}

// set rr0 to the domain's metaphysical rid
// (metaphysical mode, so the domain's rid offset is not applied)
void set_metaphysical_rr0(void)
{
        struct vcpu *v = current;
//      ia64_rr rrv;

//      rrv.ve = 1;     FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
        ia64_set_rr(0, v->arch.metaphysical_rid_dt);
        ia64_srlz_d();
}

void init_all_rr(struct vcpu *v)
{
        ia64_rr rrv;

        rrv.rrval = 0;
        rrv.ps = v->arch.vhpt_pg_shift;
        rrv.ve = 1;
        if (!v->vcpu_info)
                panic("Stopping in init_all_rr\n");
        VCPU(v, rrs[0]) = rrv.rrval;
        VCPU(v, rrs[1]) = rrv.rrval;
        VCPU(v, rrs[2]) = rrv.rrval;
        VCPU(v, rrs[3]) = rrv.rrval;
        VCPU(v, rrs[4]) = rrv.rrval;
        VCPU(v, rrs[5]) = rrv.rrval;
        rrv.ve = 0;
        VCPU(v, rrs[6]) = rrv.rrval;
        VCPU(v, rrs[7]) = rrv.rrval;
}

/* XEN/ia64 INTERNAL ROUTINES */

// Loads a vcpu's region register (0-7) state into the real physical
// region registers.  rr7 is handled through set_one_rr(), which has to go
// to assembly and physical mode to change it.
void load_region_regs(struct vcpu *v)
{
        unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
        // TODO: These probably should be validated
        unsigned long bad = 0;

        if (VCPU(v, metaphysical_mode)) {
                rr0 = v->domain->arch.metaphysical_rid_dt;
                ia64_set_rr(0x0000000000000000L, rr0);
                ia64_srlz_d();
        }
        else {
                rr0 = VCPU(v, rrs[0]);
                if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
        }
        rr1 = VCPU(v, rrs[1]);
        rr2 = VCPU(v, rrs[2]);
        rr3 = VCPU(v, rrs[3]);
        rr4 = VCPU(v, rrs[4]);
        rr5 = VCPU(v, rrs[5]);
        rr6 = VCPU(v, rrs[6]);
        rr7 = VCPU(v, rrs[7]);
        if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
        if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
        if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
        if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
        if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
        if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
        if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
        if (bad) {
                panic_domain(0, "load_region_regs: can't set! bad=%lx\n", bad);
        }
}