ia64/xen-unstable

view xen/arch/x86/x86_64/mm.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use vcpu::max_vcpus instead.
The changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 67a0ffade665
children
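
The changeset description above applies to ia64 code rather than to the x86_64 file shown below, but the shape of the replacement it describes is the same everywhere: loops and bounds checks that used the compile-time MAX_VCPUS constant switch to the per-domain limit. A minimal, self-contained sketch of that pattern follows; the struct layout and function names are illustrative stand-ins, not the actual Xen definitions.

#include <stddef.h>

/* Illustrative stand-ins (assumption, not the real Xen structures):
 * a domain carries its own vcpu limit and vcpu pointer array. */
struct vcpu { int vcpu_id; };
struct domain {
    unsigned int max_vcpus;   /* per-domain limit introduced by 2f9e1348aa98 */
    struct vcpu **vcpu;       /* array with max_vcpus entries */
};

/* Old style: bound by a hypervisor-wide compile-time constant. */
#define MAX_VCPUS 64

static struct vcpu *lookup_vcpu_old(struct domain *d, unsigned int id)
{
    if ( id >= MAX_VCPUS )     /* bound unrelated to this particular domain */
        return NULL;
    return d->vcpu[id];        /* may overrun if d has fewer than MAX_VCPUS */
}

/* New style, as the changeset describes: bound by d->max_vcpus. */
static struct vcpu *lookup_vcpu_new(struct domain *d, unsigned int id)
{
    if ( id >= d->max_vcpus )  /* per-domain limit */
        return NULL;
    return d->vcpu[id];
}

Bounding by d->max_vcpus keeps each guest's vcpu limit independent of the hypervisor-wide compile-time maximum, which is what allows "more vcpus per guest".
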
line source
/******************************************************************************
 * arch/x86/x86_64/mm.c
 *
 * Modifications to Linux original are copyright (c) 2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/guest_access.h>
#include <asm/current.h>
#include <asm/asm_defns.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
#include <asm/fixmap.h>
#include <asm/hypercall.h>
#include <asm/msr.h>
#include <asm/numa.h>
#include <public/memory.h>

#ifdef CONFIG_COMPAT
unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
#endif

DEFINE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);

/* Top-level master (and idle-domain) page directory. */
l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
    idle_pg_table[L4_PAGETABLE_ENTRIES];

/* Enough page directories to map bottom 4GB of the memory map. */
l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
    l3_identmap[L3_PAGETABLE_ENTRIES];
l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
    l2_identmap[4*L2_PAGETABLE_ENTRIES];

/* Enough page directories to map the Xen text and static data. */
l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
    l3_xenmap[L3_PAGETABLE_ENTRIES];
l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
    l2_xenmap[L2_PAGETABLE_ENTRIES];

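/*
 * Allocate one page for use as a Xen page table. During early boot the page
 * comes from the boot allocator; afterwards an anonymous domheap page is used.
 */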
void *alloc_xen_pagetable(void)
{
    extern int early_boot;
    unsigned long mfn;

    if ( !early_boot )
    {
        struct page_info *pg = alloc_domheap_page(NULL, 0);
        BUG_ON(pg == NULL);
        return page_to_virt(pg);
    }

    mfn = alloc_boot_pages(1, 1);
    BUG_ON(mfn == 0);
    return mfn_to_virt(mfn);
}

l3_pgentry_t *virt_to_xen_l3e(unsigned long v)
{
    l4_pgentry_t *pl4e;

    pl4e = &idle_pg_table[l4_table_offset(v)];
    if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
    {
        l3_pgentry_t *pl3e = alloc_xen_pagetable();
        clear_page(pl3e);
        l4e_write(pl4e, l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
    }

    return l4e_to_l3e(*pl4e) + l3_table_offset(v);
}

l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
{
    l3_pgentry_t *pl3e;

    pl3e = virt_to_xen_l3e(v);
    if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
    {
        l2_pgentry_t *pl2e = alloc_xen_pagetable();
        clear_page(pl2e);
        l3e_write(pl3e, l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR));
    }

    BUG_ON(l3e_get_flags(*pl3e) & _PAGE_PSE);
    return l3e_to_l2e(*pl3e) + l2_table_offset(v);
}

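/*
 * Walk the page tables of (PV) vcpu v and return a Xen-virtual pointer to the
 * byte at guest virtual address addr, or NULL if the address is not mapped or
 * v is an HVM vcpu. 2MB and 1GB superpage mappings are handled.
 */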
void *do_page_walk(struct vcpu *v, unsigned long addr)
{
    unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
    l4_pgentry_t l4e, *l4t;
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;

    if ( is_hvm_vcpu(v) )
        return NULL;

    l4t = mfn_to_virt(mfn);
    l4e = l4t[l4_table_offset(addr)];
    mfn = l4e_get_pfn(l4e);
    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
        return NULL;

    l3t = mfn_to_virt(mfn);
    l3e = l3t[l3_table_offset(addr)];
    mfn = l3e_get_pfn(l3e);
    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
        return NULL;
    if ( (l3e_get_flags(l3e) & _PAGE_PSE) )
        return mfn_to_virt(mfn) + (addr & ((1UL << L3_PAGETABLE_SHIFT) - 1));

    l2t = mfn_to_virt(mfn);
    l2e = l2t[l2_table_offset(addr)];
    mfn = l2e_get_pfn(l2e);
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
        return NULL;
    if ( (l2e_get_flags(l2e) & _PAGE_PSE) )
        return mfn_to_virt(mfn) + (addr & ((1UL << L2_PAGETABLE_SHIFT) - 1));

    l1t = mfn_to_virt(mfn);
    l1e = l1t[l1_table_offset(addr)];
    mfn = l1e_get_pfn(l1e);
    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(mfn) )
        return NULL;

    return mfn_to_virt(mfn) + (addr & ~PAGE_MASK);
}

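/*
 * Build the read/write and read-only (guest-visible) mappings of the
 * machine-to-phys table, including the compatibility (32-on-64) copy, and
 * install the linear page table mapping.
 */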
void __init paging_init(void)
{
    unsigned long i, mpt_size, va;
    unsigned int memflags;
    l3_pgentry_t *l3_ro_mpt;
    l2_pgentry_t *l2_ro_mpt = NULL;
    struct page_info *l1_pg, *l2_pg, *l3_pg;

    /* Create user-accessible L2 directory to map the MPT for guests. */
    if ( (l3_pg = alloc_domheap_page(NULL, 0)) == NULL )
        goto nomem;
    l3_ro_mpt = page_to_virt(l3_pg);
    clear_page(l3_ro_mpt);
    l4e_write(&idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)],
              l4e_from_page(l3_pg, __PAGE_HYPERVISOR | _PAGE_USER));

    /*
     * Allocate and map the machine-to-phys table.
     * This also ensures L3 is present for fixmaps.
     */
    mpt_size  = (max_page * BYTES_PER_LONG) + (1UL << L2_PAGETABLE_SHIFT) - 1;
    mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
    {
        BUILD_BUG_ON(RO_MPT_VIRT_START & ((1UL << L3_PAGETABLE_SHIFT) - 1));
        va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
        memflags = MEMF_node(phys_to_nid(i <<
            (L2_PAGETABLE_SHIFT - 3 + PAGE_SHIFT)));

        if ( cpu_has_page1gb &&
             !((unsigned long)l2_ro_mpt & ~PAGE_MASK) &&
             (mpt_size >> L3_PAGETABLE_SHIFT) > (i >> PAGETABLE_ORDER) &&
             (l1_pg = alloc_domheap_pages(NULL, 2 * PAGETABLE_ORDER,
                                          memflags)) != NULL )
        {
            map_pages_to_xen(
                RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT),
                page_to_mfn(l1_pg),
                1UL << (2 * PAGETABLE_ORDER),
                PAGE_HYPERVISOR);
            memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)),
                   0x77, 1UL << L3_PAGETABLE_SHIFT);

            ASSERT(!l2_table_offset(va));
            /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
            l3e_write(&l3_ro_mpt[l3_table_offset(va)],
                l3e_from_page(l1_pg,
                    /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
            i += (1UL << PAGETABLE_ORDER) - 1;
            continue;
        }

        if ( (l1_pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER,
                                          memflags)) == NULL )
            goto nomem;
        map_pages_to_xen(
            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT),
            page_to_mfn(l1_pg),
            1UL << PAGETABLE_ORDER,
            PAGE_HYPERVISOR);
        memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0x55,
               1UL << L2_PAGETABLE_SHIFT);
        if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
        {
            if ( (l2_pg = alloc_domheap_page(NULL, memflags)) == NULL )
                goto nomem;
            l2_ro_mpt = page_to_virt(l2_pg);
            clear_page(l2_ro_mpt);
            l3e_write(&l3_ro_mpt[l3_table_offset(va)],
                      l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
            ASSERT(!l2_table_offset(va));
        }
        /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
        l2e_write(l2_ro_mpt, l2e_from_page(
            l1_pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
        l2_ro_mpt++;
    }

    /* Create user-accessible L2 directory to map the MPT for compat guests. */
    BUILD_BUG_ON(l4_table_offset(RDWR_MPT_VIRT_START) !=
                 l4_table_offset(HIRO_COMPAT_MPT_VIRT_START));
    l3_ro_mpt = l4e_to_l3e(idle_pg_table[l4_table_offset(
        HIRO_COMPAT_MPT_VIRT_START)]);
    if ( (l2_pg = alloc_domheap_page(NULL, 0)) == NULL )
        goto nomem;
    compat_idle_pg_table_l2 = l2_ro_mpt = page_to_virt(l2_pg);
    clear_page(l2_ro_mpt);
    l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
              l3e_from_page(l2_pg, __PAGE_HYPERVISOR));
    l2_ro_mpt += l2_table_offset(HIRO_COMPAT_MPT_VIRT_START);
    /* Allocate and map the compatibility mode machine-to-phys table. */
    mpt_size = (mpt_size >> 1) + (1UL << (L2_PAGETABLE_SHIFT - 1));
    if ( mpt_size > RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START )
        mpt_size = RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START;
    mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
    if ( (m2p_compat_vstart + mpt_size) < MACH2PHYS_COMPAT_VIRT_END )
        m2p_compat_vstart = MACH2PHYS_COMPAT_VIRT_END - mpt_size;
    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
    {
        memflags = MEMF_node(phys_to_nid(i <<
            (L2_PAGETABLE_SHIFT - 2 + PAGE_SHIFT)));
        if ( (l1_pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER,
                                          memflags)) == NULL )
            goto nomem;
        map_pages_to_xen(
            RDWR_COMPAT_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT),
            page_to_mfn(l1_pg),
            1UL << PAGETABLE_ORDER,
            PAGE_HYPERVISOR);
        memset((void *)(RDWR_COMPAT_MPT_VIRT_START +
                        (i << L2_PAGETABLE_SHIFT)),
               0x55,
               1UL << L2_PAGETABLE_SHIFT);
        /* NB. Cannot be GLOBAL as the ptes get copied into per-VM space. */
        l2e_write(l2_ro_mpt, l2e_from_page(l1_pg, _PAGE_PSE|_PAGE_PRESENT));
        l2_ro_mpt++;
    }

    /* Set up linear page table mapping. */
    l4e_write(&idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)],
              l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR));
    return;

 nomem:
    panic("Not enough memory for m2p table\n");
}

void __init setup_idle_pagetable(void)
{
    /* Install per-domain mappings for idle domain. */
    l4e_write(&idle_pg_table[l4_table_offset(PERDOMAIN_VIRT_START)],
              l4e_from_page(
                  virt_to_page(idle_vcpu[0]->domain->arch.mm_perdomain_l3),
                  __PAGE_HYPERVISOR));
}

void __init zap_low_mappings(void)
{
    BUG_ON(num_online_cpus() != 1);

    /* Remove aliased mapping of first 1:1 PML4 entry. */
    l4e_write(&idle_pg_table[0], l4e_empty());
    flush_local(FLUSH_TLB_GLOBAL);

    /* Replace with mapping of the boot trampoline only. */
    map_pages_to_xen(BOOT_TRAMPOLINE, BOOT_TRAMPOLINE >> PAGE_SHIFT,
                     0x10, __PAGE_HYPERVISOR);
}

void __init subarch_init_memory(void)
{
    unsigned long i, n, v, m2p_start_mfn;
    l3_pgentry_t l3e;
    l2_pgentry_t l2e;

    BUILD_BUG_ON(RDWR_MPT_VIRT_START & ((1UL << L3_PAGETABLE_SHIFT) - 1));
    BUILD_BUG_ON(RDWR_MPT_VIRT_END & ((1UL << L3_PAGETABLE_SHIFT) - 1));
    /* M2P table is mappable read-only by privileged domains. */
    for ( v = RDWR_MPT_VIRT_START;
          v != RDWR_MPT_VIRT_END;
          v += n << PAGE_SHIFT )
    {
        n = L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES;
        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
            l3_table_offset(v)];
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
            continue;
        if ( !(l3e_get_flags(l3e) & _PAGE_PSE) )
        {
            n = L1_PAGETABLE_ENTRIES;
            l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
            if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
                continue;
            m2p_start_mfn = l2e_get_pfn(l2e);
        }
        else
        {
            m2p_start_mfn = l3e_get_pfn(l3e);
        }

        for ( i = 0; i < n; i++ )
        {
            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
        }
    }

    for ( v = RDWR_COMPAT_MPT_VIRT_START;
          v != RDWR_COMPAT_MPT_VIRT_END;
          v += 1 << L2_PAGETABLE_SHIFT )
    {
        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
            l3_table_offset(v)];
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
            continue;
        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
            continue;
        m2p_start_mfn = l2e_get_pfn(l2e);

        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
        {
            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
        }
    }
}

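/*
 * x86_64-specific memory_op handler. XENMEM_machphys_mfn_list returns the
 * MFNs backing the read-only M2P table, one per 2MB-aligned chunk.
 */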
long subarch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    struct xen_machphys_mfn_list xmml;
    l3_pgentry_t l3e;
    l2_pgentry_t l2e;
    unsigned long v;
    xen_pfn_t mfn;
    unsigned int i;
    long rc = 0;

    switch ( op )
    {
    case XENMEM_machphys_mfn_list:
        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

        BUILD_BUG_ON(RDWR_MPT_VIRT_START & ((1UL << L3_PAGETABLE_SHIFT) - 1));
        BUILD_BUG_ON(RDWR_MPT_VIRT_END & ((1UL << L3_PAGETABLE_SHIFT) - 1));
        for ( i = 0, v = RDWR_MPT_VIRT_START;
              (i != xmml.max_extents) && (v != RDWR_MPT_VIRT_END);
              i++, v += 1UL << L2_PAGETABLE_SHIFT )
        {
            l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
                l3_table_offset(v)];
            if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
                break;
            if ( !(l3e_get_flags(l3e) & _PAGE_PSE) )
            {
                l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
                if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
                    break;
                mfn = l2e_get_pfn(l2e);
            }
            else
            {
                mfn = l3e_get_pfn(l3e)
                    + (l2_table_offset(v) << PAGETABLE_ORDER);
            }
            ASSERT(!l1_table_offset(v));
            if ( copy_to_guest_offset(xmml.extent_start, i, &mfn, 1) )
                return -EFAULT;
        }

        xmml.nr_extents = i;
        if ( copy_to_guest(arg, &xmml, 1) )
            return -EFAULT;

        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

long do_stack_switch(unsigned long ss, unsigned long esp)
{
    fixup_guest_stack_selector(current->domain, ss);
    current->arch.guest_context.kernel_ss = ss;
    current->arch.guest_context.kernel_sp = esp;
    return 0;
}

long do_set_segment_base(unsigned int which, unsigned long base)
{
    struct vcpu *v = current;
    long ret = 0;

    switch ( which )
    {
    case SEGBASE_FS:
        if ( wrmsr_safe(MSR_FS_BASE, base, base>>32) )
            ret = -EFAULT;
        else
            v->arch.guest_context.fs_base = base;
        break;

    case SEGBASE_GS_USER:
        if ( wrmsr_safe(MSR_SHADOW_GS_BASE, base, base>>32) )
            ret = -EFAULT;
        else
            v->arch.guest_context.gs_base_user = base;
        break;

    case SEGBASE_GS_KERNEL:
        if ( wrmsr_safe(MSR_GS_BASE, base, base>>32) )
            ret = -EFAULT;
        else
            v->arch.guest_context.gs_base_kernel = base;
        break;

    case SEGBASE_GS_USER_SEL:
        __asm__ __volatile__ (
            "     swapgs              \n"
            "1:   movl %k0,%%gs       \n"
            "    "safe_swapgs"        \n"
            ".section .fixup,\"ax\"   \n"
            "2:   xorl %k0,%k0        \n"
            "     jmp  1b             \n"
            ".previous                \n"
            ".section __ex_table,\"a\"\n"
            "    .align 8             \n"
            "    .quad 1b,2b          \n"
            ".previous                  "
            : : "r" (base&0xffff) );
        break;

    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}

/* Returns TRUE if given descriptor is valid for GDT or LDT. */
int check_descriptor(const struct domain *dom, struct desc_struct *d)
{
    u32 a = d->a, b = d->b;
    u16 cs;
    unsigned int dpl;

    /* A not-present descriptor will always fault, so is safe. */
    if ( !(b & _SEGMENT_P) )
        goto good;

    /* Check and fix up the DPL. */
    dpl = (b >> 13) & 3;
    __fixup_guest_selector(dom, dpl);
    b = (b & ~_SEGMENT_DPL) | (dpl << 13);

    /* All code and data segments are okay. No base/limit checking. */
    if ( (b & _SEGMENT_S) )
    {
        if ( is_pv_32bit_domain(dom) )
        {
            unsigned long base, limit;

            if ( b & _SEGMENT_L )
                goto bad;

            /*
             * Older PAE Linux guests use segments which are limited to
             * 0xf6800000. Extend these to allow access to the larger read-only
             * M2P table available in 32on64 mode.
             */
            base = (b & (0xff << 24)) | ((b & 0xff) << 16) | (a >> 16);

            limit = (b & 0xf0000) | (a & 0xffff);
            limit++; /* We add one because limit is inclusive. */

            if ( (b & _SEGMENT_G) )
                limit <<= 12;

            if ( (base == 0) && (limit > HYPERVISOR_COMPAT_VIRT_START(dom)) )
            {
                a |= 0x0000ffff;
                b |= 0x000f0000;
            }
        }

        goto good;
    }

    /* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */
    if ( (b & _SEGMENT_TYPE) == 0x000 )
        goto good;

    /* Everything but a call gate is discarded here. */
    if ( (b & _SEGMENT_TYPE) != 0xc00 )
        goto bad;

    /* Validate the target code selector. */
    cs = a >> 16;
    if ( !guest_gate_selector_okay(dom, cs) )
        goto bad;
    /*
     * Force DPL to zero, causing a GP fault with its error code indicating
     * the gate in use, allowing emulation. This is necessary because with
     * native guests (kernel in ring 3) call gates cannot be used directly
     * to transition from user to kernel mode (and whether a gate is used
     * to enter the kernel can only be determined when the gate is being
     * used), and with compat guests call gates cannot be used at all as
     * there are only 64-bit ones.
     * Store the original DPL in the selector's RPL field.
     */
    b &= ~_SEGMENT_DPL;
    cs = (cs & ~3) | dpl;
    a = (a & 0xffffU) | (cs << 16);

    /* Reserved bits must be zero. */
    if ( b & (is_pv_32bit_domain(dom) ? 0xe0 : 0xff) )
        goto bad;

 good:
    d->a = a;
    d->b = b;
    return 1;
 bad:
    return 0;
}

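/*
 * For 32-on-64 PV guests whose compatibility M2P window covers less than the
 * whole machine, record the widest physical address the guest can actually
 * map in its p2m space, so allocations can be clamped to that width below.
 */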
void domain_set_alloc_bitsize(struct domain *d)
{
    if ( !is_pv_32on64_domain(d) ||
         (MACH2PHYS_COMPAT_NR_ENTRIES(d) >= max_page) ||
         d->arch.physaddr_bitsize > 0 )
        return;
    d->arch.physaddr_bitsize =
        /* 2^n entries can be contained in guest's p2m mapping space */
        fls(MACH2PHYS_COMPAT_NR_ENTRIES(d)) - 1
        /* 2^n pages -> 2^(n+PAGE_SHIFT) bits */
        + PAGE_SHIFT;
}

unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
{
    if ( (d == NULL) || (d->arch.physaddr_bitsize == 0) )
        return bits;
    return min(d->arch.physaddr_bitsize, bits);
}

#include "compat/mm.c"

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */