ia64/xen-unstable
changeset 12414:69388eba4c03
[XEN] x86-64: Since all memory is visible to Xen on x86-64, there is
no need to allocate from the special Xen heap for allocations specific
to this subarch.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
| author | kfraser@localhost.localdomain |
|---|---|
| date | Mon Nov 13 13:50:14 2006 +0000 (2006-11-13) |
| parents | 02899109a3ac |
| children | bfe3f8f35e87 |
| files | xen/arch/x86/domain.c xen/arch/x86/x86_64/mm.c |
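For context on the idiom the patch switches to: on x86-64, Xen's 1:1 direct map covers all of RAM, so a page taken from the domain heap can be used immediately through `page_to_virt()`; the xenheap is only a hard requirement on x86-32, where Xen's virtual address space cannot map all memory. A minimal sketch of the pattern, not part of the patch (the helper name is hypothetical; `clear_page()` returning the virtual address matches its use in the diffs below):

```c
/* Hypothetical helper illustrating the allocation idiom the changeset
 * adopts (not in the patch).  On x86-64 every domheap page lies in Xen's
 * direct map, so page_to_virt() yields a usable mapping right away. */
static void *alloc_cleared_page(void)
{
    struct page_info *pg = alloc_domheap_page(NULL); /* anonymous page */

    if ( pg == NULL )
        return NULL;

    /* In this tree, clear_page() zeroes the page and returns its VA. */
    return clear_page(page_to_virt(pg));
}
```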
line diff
```diff
--- a/xen/arch/x86/domain.c	Mon Nov 13 13:40:21 2006 +0000
+++ b/xen/arch/x86/domain.c	Mon Nov 13 13:50:14 2006 +0000
@@ -166,6 +166,9 @@ void vcpu_destroy(struct vcpu *v)
 
 int arch_domain_create(struct domain *d)
 {
+#ifdef __x86_64__
+    struct page_info *pg;
+#endif
     l1_pgentry_t gdt_l1e;
     int vcpuid, pdpt_order;
     int i, rc = -ENOMEM;
@@ -194,19 +197,17 @@ int arch_domain_create(struct domain *d)
 
 #else /* __x86_64__ */
 
-    d->arch.mm_perdomain_l2 = alloc_xenheap_page();
-    d->arch.mm_perdomain_l3 = alloc_xenheap_page();
-    if ( (d->arch.mm_perdomain_l2 == NULL) ||
-         (d->arch.mm_perdomain_l3 == NULL) )
+    if ( (pg = alloc_domheap_page(NULL)) == NULL )
        goto fail;
-
-    memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
+    d->arch.mm_perdomain_l2 = clear_page(page_to_virt(pg));
     for ( i = 0; i < (1 << pdpt_order); i++ )
         d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
             l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
                           __PAGE_HYPERVISOR);
 
-    memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
+    if ( (pg = alloc_domheap_page(NULL)) == NULL )
+        goto fail;
+    d->arch.mm_perdomain_l3 = clear_page(page_to_virt(pg));
     d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
         l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
                       __PAGE_HYPERVISOR);
@@ -240,8 +241,8 @@ int arch_domain_create(struct domain *d)
  fail:
     free_xenheap_page(d->shared_info);
 #ifdef __x86_64__
-    free_xenheap_page(d->arch.mm_perdomain_l2);
-    free_xenheap_page(d->arch.mm_perdomain_l3);
+    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
+    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
 #endif
     free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
     return rc;
@@ -265,8 +266,8 @@ void arch_domain_destroy(struct domain *
         get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)));
 
 #ifdef __x86_64__
-    free_xenheap_page(d->arch.mm_perdomain_l2);
-    free_xenheap_page(d->arch.mm_perdomain_l3);
+    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
+    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
 #endif
 
     free_xenheap_page(d->shared_info);
```
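The teardown hunks above invert the mapping with `virt_to_page()` before returning pages to the domain heap. Since `free_xenheap_page()` tolerates a NULL argument, a NULL-safe wrapper would preserve that behaviour for the domheap case; a hedged sketch (hypothetical helper, not in the patch):

```c
/* Hypothetical NULL-safe free wrapper (not in the patch): converts a
 * virtual address obtained via page_to_virt() back to its page_info
 * before handing it to the domheap, and skips NULL like the xenheap
 * free routine does. */
static void free_perdomain_page(void *va)
{
    if ( va != NULL )
        free_domheap_page(virt_to_page(va));
}
```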
```diff
--- a/xen/arch/x86/x86_64/mm.c	Mon Nov 13 13:40:21 2006 +0000
+++ b/xen/arch/x86/x86_64/mm.c	Mon Nov 13 13:50:14 2006 +0000
@@ -76,17 +76,17 @@ l2_pgentry_t *virt_to_xen_l2e(unsigned l
 
 void __init paging_init(void)
 {
-    unsigned long i, mpt_size;
+    unsigned long i, mpt_size, va;
     l3_pgentry_t *l3_ro_mpt;
     l2_pgentry_t *l2_ro_mpt = NULL;
-    struct page_info *pg;
+    struct page_info *l1_pg, *l2_pg;
 
     /* Create user-accessible L2 directory to map the MPT for guests. */
-    l3_ro_mpt = alloc_xenheap_page();
-    clear_page(l3_ro_mpt);
+    if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
+        goto nomem;
+    l3_ro_mpt = clear_page(page_to_virt(l2_pg));
     idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
-        l4e_from_page(
-            virt_to_page(l3_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
+        l4e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER);
 
     /*
      * Allocate and map the machine-to-phys table.
@@ -96,33 +96,37 @@ void __init paging_init(void)
     mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
     for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
     {
-        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
-            panic("Not enough memory for m2p table\n");
+        if ( (l1_pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
+            goto nomem;
         map_pages_to_xen(
-            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT), page_to_mfn(pg),
+            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT),
+            page_to_mfn(l1_pg),
             1UL << PAGETABLE_ORDER,
             PAGE_HYPERVISOR);
         memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)), 0x55,
                1UL << L2_PAGETABLE_SHIFT);
         if ( !((unsigned long)l2_ro_mpt & ~PAGE_MASK) )
         {
-            unsigned long va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
-
-            l2_ro_mpt = alloc_xenheap_page();
-            clear_page(l2_ro_mpt);
+            if ( (l2_pg = alloc_domheap_page(NULL)) == NULL )
+                goto nomem;
+            va = RO_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT);
+            l2_ro_mpt = clear_page(page_to_virt(l2_pg));
             l3_ro_mpt[l3_table_offset(va)] =
-                l3e_from_page(
-                    virt_to_page(l2_ro_mpt), __PAGE_HYPERVISOR | _PAGE_USER);
+                l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER);
             l2_ro_mpt += l2_table_offset(va);
         }
         /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
         *l2_ro_mpt++ = l2e_from_page(
-            pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
+            l1_pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
     }
 
     /* Set up linear page table mapping. */
     idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
+    return;
+
+ nomem:
+    panic("Not enough memory for m2p table\n");
 }
 
 void __init setup_idle_pagetable(void)
```
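For orientation on the loop in `paging_init()`: each iteration backs one 2MiB slot of the machine-to-phys table with a single order-`PAGETABLE_ORDER` allocation and maps it with one PSE L2 entry. A standalone arithmetic check, assuming the usual x86-64 constants (`PAGE_SHIFT` 12, `PAGETABLE_ORDER` 9, `L2_PAGETABLE_SHIFT` 21); this is illustrative code, not Xen source:

```c
#include <assert.h>
#include <stdio.h>

/* Standalone check with assumed constants: an order-9 run of 4KiB pages
 * is exactly the 2MiB that one PSE L2 entry maps, which is why the loop
 * pairs one alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0) call with one
 * l2e_from_page() per iteration. */
int main(void)
{
    const unsigned long page_shift = 12;  /* PAGE_SHIFT: 4KiB pages */
    const unsigned long pt_order   = 9;   /* PAGETABLE_ORDER */
    const unsigned long l2_shift   = 21;  /* L2_PAGETABLE_SHIFT: 2MiB */

    assert((1UL << (page_shift + pt_order)) == (1UL << l2_shift));
    printf("order-%lu allocation = %lu bytes = one 2MiB PSE mapping\n",
           pt_order, 1UL << (page_shift + pt_order));
    return 0;
}
```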