ia64/xen-unstable

view xen/arch/ia64/xen/regionreg.c @ 18085:4f0428e4dd15

[IA64] kexec: Unpin shared_info, mapped_regs and VPD TR in ia64_do_tlb_purge

Unpinning of shared_info, mapped_regs and the VPD appears to be missing
from ia64_do_tlb_purge and is needed for kexec.

As with the VHPT, each pinned value is recorded in a per-cpu variable
so that the correct value can be unpinned.

Cc: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Simon Horman <horms@verge.net.au>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jul 22 12:15:02 2008 +0900 (2008-07-22)
parents 0b72d16e794b
children 246a179ebb6d
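
The change records each pinned address in a per-cpu variable; below is a minimal,
hypothetical C-flavoured sketch of how such a record could be consumed (the real
ia64_do_tlb_purge is assembly, the helper name unpin_recorded is invented here, and
the ia64_ptrd/ia64_srlz_i intrinsics and __get_cpu_var are assumed to be available
as elsewhere in the ia64 tree):

/* Illustration only: unpin one recorded mapping on the current cpu,
 * assuming it was pinned as a data translation of the given log2 size. */
static void unpin_recorded(unsigned long *slot, unsigned long log_page_size)
{
        unsigned long va = *slot;

        if (va == 0)
                return;                 /* nothing pinned on this cpu */
        /* ptr.d expects the page size in bits 7:2 of its second operand */
        ia64_ptrd(va, log_page_size << 2);
        ia64_srlz_i();
        *slot = 0;                      /* drop the stale record */
}

/* e.g. (page size chosen for illustration):
 *      unpin_recorded(&__get_cpu_var(inserted_shared_info), PAGE_SHIFT);
 */
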
line source
/*
 * Region register and region id management
 *
 * Copyright (C) 2001-2004 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *     Bret Mckee (bret.mckee@hp.com)
 *
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/regionreg.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
#include <asm/percpu.h>

/* Defined in xenasm.S */
extern void ia64_new_rr7(unsigned long rid, void *shared_info,
                         void *shared_arch_info, unsigned long shared_info_va,
                         unsigned long va_vhpt);

/* RID virtualization mechanism is really simple: domains have fewer rid bits
   than the host, and the host rid space is shared among the domains.  (Values
   in parentheses are the usual default values.)

   The host rid space is partitioned into MAX_RID_BLOCKS (= 64)
   blocks of 2**IA64_MIN_IMPL_RID_BITS (= 18) rids.  The first block is also
   partitioned into MAX_RID_BLOCKS small blocks.  Small blocks are used for
   metaphysical rids.  Small block 0 can't be allocated and is reserved for
   Xen's own rids during boot.

   Blocks and small blocks are allocated together, and a domain may
   have one or more consecutive blocks (and small blocks).  */
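/* Example: with the 24 implemented RID bits that init_rid_allocator()
   insists on, there are 2**(24-18) = 64 blocks of 2**18 rids each,
   log_blocks = 6 and mp_rid_shift = 18 - 6 = 12, so each small block
   holds 2**12 metaphysical rids.  */
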
/* Minimum number of RID bits for a domain.  The current value is 18, which is
   the minimum defined by the itanium architecture, but it can be lowered
   to increase the number of domains.  */
#define IA64_MIN_IMPL_RID_BITS (IA64_MIN_IMPL_RID_MSB+1)
/* Maximum number of RID bits.  This is definitely 24.  */
#define IA64_MAX_IMPL_RID_BITS 24

/* Maximum number of blocks.  */
#define MAX_RID_BLOCKS (1 << (IA64_MAX_IMPL_RID_BITS-IA64_MIN_IMPL_RID_BITS))

/* Default number of rid bits for domains.  */
static unsigned int domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
integer_param("dom_rid_bits", domain_rid_bits_default);

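/* Per-cpu record of the virtual addresses pinned for this cpu's current
 * context (VHPT, shared_info, mapped_regs, VPD), kept so that the correct
 * values can later be unpinned, e.g. by ia64_do_tlb_purge for kexec.  */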
DEFINE_PER_CPU(unsigned long, inserted_vhpt);
DEFINE_PER_CPU(unsigned long, inserted_shared_info);
DEFINE_PER_CPU(unsigned long, inserted_mapped_regs);
DEFINE_PER_CPU(unsigned long, inserted_vpd);

#if 0
// following already defined in include/asm-ia64/gcc_intrin.h
// it should probably be ifdef'd out from there to ensure all region
// register usage is encapsulated in this file
static inline unsigned long
ia64_get_rr (unsigned long rr)
{
        unsigned long r;
        __asm__ __volatile__ (";;mov %0=rr[%1];;":"=r"(r):"r"(rr):"memory");
        return r;
}

static inline void
ia64_set_rr (unsigned long rr, unsigned long rrv)
{
        __asm__ __volatile__ (";;mov rr[%0]=%1;;"::"r"(rr),"r"(rrv):"memory");
}
#endif

static unsigned long allocate_metaphysical_rr(struct domain *d, int n)
{
        ia64_rr rrv;

        rrv.rrval = 0;          // Or else may see reserved bit fault
        rrv.rid = d->arch.starting_mp_rid + n;
        rrv.ps = PAGE_SHIFT;    // only used at domain creation
        rrv.ve = 0;
        /* Mangle metaphysical rid */
        rrv.rrval = vmMangleRID(rrv.rrval);
        return rrv.rrval;
}

/*************************************
  Region Block setup/management
*************************************/

static int implemented_rid_bits = 0;
static int mp_rid_shift;
static struct domain *ridblock_owner[MAX_RID_BLOCKS] = { 0 };

void __init init_rid_allocator (void)
{
        int log_blocks;
        pal_vm_info_2_u_t vm_info_2;

        /* Get machine rid_size.  */
        BUG_ON (ia64_pal_vm_summary (NULL, &vm_info_2) != 0);
        implemented_rid_bits = vm_info_2.pal_vm_info_2_s.rid_size;

        /* We need strictly more than the minimum number of RID bits.  */
        BUG_ON (implemented_rid_bits <= IA64_MIN_IMPL_RID_BITS);

        /* Too much space is fine: just cap it.  */
        if (implemented_rid_bits > IA64_MAX_IMPL_RID_BITS)
                implemented_rid_bits = IA64_MAX_IMPL_RID_BITS;

        /* Due to RID mangling, we expect 24 RID bits!
           This test should be removed if RID mangling is removed/modified.  */
        if (implemented_rid_bits != 24) {
                printk ("RID mangling expected 24 RID bits, got only %d!\n",
                        implemented_rid_bits);
                BUG();
        }

        /* Allow the creation of at least domain 0.  */
        if (domain_rid_bits_default > implemented_rid_bits - 1)
                domain_rid_bits_default = implemented_rid_bits - 1;

        /* Check for too small values.  */
        if (domain_rid_bits_default < IA64_MIN_IMPL_RID_BITS) {
                printk ("Default domain rid bits %d is too small, use %d\n",
                        domain_rid_bits_default, IA64_MIN_IMPL_RID_BITS);
                domain_rid_bits_default = IA64_MIN_IMPL_RID_BITS;
        }

        log_blocks = (implemented_rid_bits - IA64_MIN_IMPL_RID_BITS);

        printk ("Maximum number of domains: %d; %d RID bits per domain\n",
                (1 << (implemented_rid_bits - domain_rid_bits_default)) - 1,
                domain_rid_bits_default);

        mp_rid_shift = IA64_MIN_IMPL_RID_BITS - log_blocks;
        BUG_ON (mp_rid_shift < 3);
}

/*
 *      Allocate a power-of-two-sized chunk of region id space -- one or more
 *        "rid blocks"
 */
int allocate_rid_range(struct domain *d, unsigned long ridbits)
{
        int i, j, n_rid_blocks;

        if (ridbits == 0)
                ridbits = domain_rid_bits_default;

        if (ridbits >= IA64_MAX_IMPL_RID_BITS)
                ridbits = IA64_MAX_IMPL_RID_BITS - 1;

        if (ridbits < IA64_MIN_IMPL_RID_BITS)
                ridbits = IA64_MIN_IMPL_RID_BITS;

        // convert to rid_blocks and find one
        n_rid_blocks = 1UL << (ridbits - IA64_MIN_IMPL_RID_BITS);

        // skip over block 0, reserved for "meta-physical mappings (and Xen)"
        for (i = n_rid_blocks; i < MAX_RID_BLOCKS; i += n_rid_blocks) {
                if (ridblock_owner[i] == NULL) {
                        // scan the n_rid_blocks blocks starting at i; on exit
                        // j is either the first owned block found or the last
                        // block of the range, so the whole range is free iff
                        // ridblock_owner[j] == NULL
                        for (j = i; j < i + n_rid_blocks; ++j) {
                                if (ridblock_owner[j]) {
                                        ++j;
                                        break;
                                }
                        }
                        --j;
                        if (ridblock_owner[j] == NULL)
                                break;
                }
        }

        if (i >= MAX_RID_BLOCKS)
                return 0;

        // found an unused block:
        //   (i << min_rid_bits) <= rid < ((i + n) << min_rid_bits)
        // mark this block as owned
        for (j = i; j < i + n_rid_blocks; ++j)
                ridblock_owner[j] = d;

        // setup domain struct
        d->arch.rid_bits = ridbits;
        d->arch.starting_rid = i << IA64_MIN_IMPL_RID_BITS;
        d->arch.ending_rid = (i+n_rid_blocks) << IA64_MIN_IMPL_RID_BITS;

        d->arch.starting_mp_rid = i << mp_rid_shift;
        d->arch.ending_mp_rid = (i + 1) << mp_rid_shift;

        d->arch.metaphysical_rid_dt = allocate_metaphysical_rr(d, 0);
        d->arch.metaphysical_rid_d = allocate_metaphysical_rr(d, 1);

        dprintk(XENLOG_DEBUG, "### domain %p: rid=%x-%x mp_rid=%x\n",
                d, d->arch.starting_rid, d->arch.ending_rid,
                d->arch.starting_mp_rid);

        return 1;
}

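/* Example: a request for ridbits = 19 needs n_rid_blocks = 2**(19-18) = 2
 * consecutive free blocks {i, i+1}, giving the domain host rids
 * [i << 18, (i+2) << 18) and metaphysical rids
 * [i << mp_rid_shift, (i+1) << mp_rid_shift).  */
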
int deallocate_rid_range(struct domain *d)
{
        int i;
        int rid_block_end = d->arch.ending_rid >> IA64_MIN_IMPL_RID_BITS;
        int rid_block_start = d->arch.starting_rid >> IA64_MIN_IMPL_RID_BITS;

        /* Sanity check.  */
        if (d->arch.rid_bits == 0)
                return 1;

        for (i = rid_block_start; i < rid_block_end; ++i) {
                ASSERT(ridblock_owner[i] == d);
                ridblock_owner[i] = NULL;
        }

        d->arch.rid_bits = 0;
        d->arch.starting_rid = 0;
        d->arch.ending_rid = 0;
        d->arch.starting_mp_rid = 0;
        d->arch.ending_mp_rid = 0;
        return 1;
}

static void
set_rr(unsigned long rr, unsigned long rrval)
{
        ia64_set_rr(rr, vmMangleRID(rrval));
        ia64_srlz_d();
}

// validates and changes a single region register
// in the currently executing domain
// Passing a value of -1 is a (successful) no-op
// NOTE: DOES NOT SET VCPU's rrs[x] value!!
int set_one_rr(unsigned long rr, unsigned long val)
{
        struct vcpu *v = current;
        unsigned long rreg = REGION_NUMBER(rr);
        ia64_rr rrv, newrrv, memrrv;
        unsigned long newrid;

        rrv.rrval = val;
        newrrv.rrval = 0;
        newrid = v->arch.starting_rid + rrv.rid;

        // avoid reserved register/field fault
        if (unlikely(is_reserved_rr_field(v, val))) {
                printk("can't set rr%d to %lx, starting_rid=%x,"
                       "ending_rid=%x, val=%lx\n", (int) rreg, newrid,
                       v->arch.starting_rid,v->arch.ending_rid,val);
                return 0;
        }

        memrrv.rrval = rrv.rrval;
        newrrv.rid = newrid;
        newrrv.ve = 1;  // VHPT now enabled for region 7!!
        newrrv.ps = v->arch.vhpt_pg_shift;

        if (rreg == 0) {
                v->arch.metaphysical_saved_rr0 = vmMangleRID(newrrv.rrval);
                if (!PSCB(v,metaphysical_mode))
                        set_rr(rr,newrrv.rrval);
        } else if (rreg == 7) {
#if VHPT_ENABLED
                __get_cpu_var(inserted_vhpt) = __va_ul(vcpu_vhpt_maddr(v));
#endif
                __get_cpu_var(inserted_shared_info) =
                                        v->domain->arch.shared_info_va;
                __get_cpu_var(inserted_mapped_regs) =
                                        v->domain->arch.shared_info_va +
                                        XMAPPEDREGS_OFS;
                ia64_new_rr7(vmMangleRID(newrrv.rrval),v->domain->shared_info,
                             v->arch.privregs, v->domain->arch.shared_info_va,
                             __va_ul(vcpu_vhpt_maddr(v)));
        } else {
                set_rr(rr,newrrv.rrval);
        }
        return 1;
}

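/* Example of the rid virtualization above: if a guest writes rr[5] with
 * rid 3, the value actually loaded uses rid = starting_rid + 3,
 * ps = vhpt_pg_shift and ve = 1, and is passed through vmMangleRID()
 * before reaching the hardware register.  */
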
void set_virtual_rr0(void)
{
        struct vcpu *v = current;

        ia64_set_rr(0, v->arch.metaphysical_saved_rr0);
        ia64_srlz_d();
}

// set rr0 to the vcpu's metaphysical rid (metaphysical mode, so the domain
// rid offset is not applied)
void set_metaphysical_rr0(void)
{
        struct vcpu *v = current;
//      ia64_rr rrv;

//      rrv.ve = 1;     FIXME: TURN ME BACK ON WHEN VHPT IS WORKING
        ia64_set_rr(0, v->arch.metaphysical_rid_dt);
        ia64_srlz_d();
}

void init_all_rr(struct vcpu *v)
{
        ia64_rr rrv;

        rrv.rrval = 0;
        rrv.ps = v->arch.vhpt_pg_shift;
        rrv.ve = 1;
        if (!v->vcpu_info)
                panic("Stopping in init_all_rr\n");
        VCPU(v,rrs[0]) = rrv.rrval;
        VCPU(v,rrs[1]) = rrv.rrval;
        VCPU(v,rrs[2]) = rrv.rrval;
        VCPU(v,rrs[3]) = rrv.rrval;
        VCPU(v,rrs[4]) = rrv.rrval;
        VCPU(v,rrs[5]) = rrv.rrval;
        rrv.ve = 0;
        VCPU(v,rrs[6]) = rrv.rrval;
        VCPU(v,rrs[7]) = rrv.rrval;
}

/* XEN/ia64 INTERNAL ROUTINES */

// loads a vcpu's region register (0-7) state into
// the real physical region registers (the rr7 update happens in assembly
// and physical mode, via set_one_rr -> ia64_new_rr7).
//
void load_region_regs(struct vcpu *v)
{
        unsigned long rr0, rr1, rr2, rr3, rr4, rr5, rr6, rr7;
        // TODO: These probably should be validated
        unsigned long bad = 0;

        if (VCPU(v,metaphysical_mode)) {
                rr0 = v->domain->arch.metaphysical_rid_dt;
                ia64_set_rr(0x0000000000000000L, rr0);
                ia64_srlz_d();
        }
        else {
                rr0 = VCPU(v,rrs[0]);
                if (!set_one_rr(0x0000000000000000L, rr0)) bad |= 1;
        }
        rr1 = VCPU(v,rrs[1]);
        rr2 = VCPU(v,rrs[2]);
        rr3 = VCPU(v,rrs[3]);
        rr4 = VCPU(v,rrs[4]);
        rr5 = VCPU(v,rrs[5]);
        rr6 = VCPU(v,rrs[6]);
        rr7 = VCPU(v,rrs[7]);
        if (!set_one_rr(0x2000000000000000L, rr1)) bad |= 2;
        if (!set_one_rr(0x4000000000000000L, rr2)) bad |= 4;
        if (!set_one_rr(0x6000000000000000L, rr3)) bad |= 8;
        if (!set_one_rr(0x8000000000000000L, rr4)) bad |= 0x10;
        if (!set_one_rr(0xa000000000000000L, rr5)) bad |= 0x20;
        if (!set_one_rr(0xc000000000000000L, rr6)) bad |= 0x40;
        if (!set_one_rr(0xe000000000000000L, rr7)) bad |= 0x80;
        if (bad) {
                panic_domain(0,"load_region_regs: can't set! bad=%lx\n",bad);
        }
}