ia64/xen-unstable
changeset 16548:cd5e1e76d0bc
32-on-64: Fix domain address-size clamping, implement
copy-on-grant-transfer, and eliminate 166GB memory limit for x86/64
Xen.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
| author | Keir Fraser <keir.fraser@citrix.com> |
|---|---|
| date | Thu Dec 06 13:39:19 2007 +0000 (2007-12-06) |
| parents | 3221dff4b460 |
| children | baf90ee3c1da |
| files | xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/e820.c xen/arch/x86/x86_64/mm.c xen/common/grant_table.c xen/common/memory.c xen/common/page_alloc.c |
line diff
--- a/xen/arch/x86/domain.c	Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/arch/x86/domain.c	Thu Dec 06 13:39:19 2007 +0000
@@ -319,8 +319,6 @@ int switch_native(struct domain *d)
         release_compat_l4(d->vcpu[vcpuid]);
     }
 
-    d->arch.physaddr_bitsize = 64;
-
     return 0;
 }
 
--- a/xen/arch/x86/domain_build.c	Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/arch/x86/domain_build.c	Thu Dec 06 13:39:19 2007 +0000
@@ -367,7 +367,7 @@ int __init construct_dom0(
 #ifdef CONFIG_COMPAT
         HYPERVISOR_COMPAT_VIRT_START(d) =
             max_t(unsigned int, m2p_compat_vstart, value);
-        d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
+        d->arch.physaddr_bitsize =
             fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
             + (PAGE_SHIFT - 2);
         if ( value > (!is_pv_32on64_domain(d) ?
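For concreteness, the arithmetic on the new line can be evaluated outside Xen. The sketch below is a minimal stand-alone version for an LP64 build, assuming the usual compat-area base of that era (0xF5800000, not taken from this diff) and a userland reimplementation of fls(); under those assumptions the formula yields 37 bits, i.e. a 128GiB reachable range for the 32-on-64 guest.

```c
/* Hedged sketch: evaluates the physaddr_bitsize formula from the hunk
 * above, outside of Xen.  The compat-area base 0xF5800000 is an
 * assumption (the usual default of that era), not taken from this diff. */
#include <stdio.h>

/* Minimal stand-in for Xen's fls(): position of the most significant
 * set bit, 1-based; fls(0) == 0. */
static int fls(unsigned long x)
{
    int r = 0;
    while ( x != 0 )
    {
        x >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    const unsigned long compat_virt_start = 0xF5800000UL; /* assumed default */
    const int PAGE_SHIFT = 12;

    /* Same arithmetic as the new line in the diff: log2 of the compat
     * M2P window in bytes, then +10 because each 4-byte M2P entry
     * describes one 4KiB page (PAGE_SHIFT - 2 == 10). */
    int bitsize = fls((1UL << 32) - compat_virt_start) - 1 + (PAGE_SHIFT - 2);

    printf("physaddr_bitsize = %d (=> %lu GiB addressable)\n",
           bitsize, 1UL << (bitsize - 30));  /* 37 bits => 128 GiB */
    return 0;
}
```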
--- a/xen/arch/x86/e820.c	Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/arch/x86/e820.c	Thu Dec 06 13:39:19 2007 +0000
@@ -370,13 +370,6 @@ static void __init machine_specific_memo
                   "can be accessed by Xen in 32-bit mode.");
 #endif
 
-#ifdef __x86_64__
-    clip_to_limit((uint64_t)(MACH2PHYS_COMPAT_VIRT_END -
-                             __HYPERVISOR_COMPAT_VIRT_START) << 10,
-                  "Only the first %u GB of the physical memory map "
-                  "can be accessed by 32-on-64 guests.");
-#endif
-
     reserve_dmi_region();
 
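The limit this deleted clip enforced can be recomputed from the default constants of the time; the sketch below (constants are an assumption, not taken from this diff) arrives at the 166GB figure named in the commit message. With the clip gone, the hypervisor-wide cap disappears and only the per-domain clamping in domain_clamp_alloc_bitsize() remains.

```c
/* Hedged sketch: reproduces the limit the removed clip_to_limit() call
 * enforced.  Both constants are assumed to be the defaults of that era. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint64_t compat_m2p_end    = 0xFFE00000ULL; /* assumed default */
    const uint64_t compat_virt_start = 0xF5800000ULL; /* assumed default */

    /* Each 4-byte compat M2P entry maps a 4KiB page: a factor of 1024,
     * hence the << 10 from virtual-address span to physical limit. */
    uint64_t limit = (compat_m2p_end - compat_virt_start) << 10;

    printf("old clip limit = %llu GiB\n",
           (unsigned long long)(limit >> 30));  /* prints 166 */
    return 0;
}
```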
--- a/xen/arch/x86/x86_64/mm.c	Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/arch/x86/x86_64/mm.c	Thu Dec 06 13:39:19 2007 +0000
@@ -442,7 +442,7 @@ int check_descriptor(const struct domain
 
 unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
 {
-    if ( d == NULL )
+    if ( (d == NULL) || !is_pv_32on64_domain(d) )
        return bits;
    return min(d->arch.physaddr_bitsize, bits);
 }
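The new predicate means a native 64-bit domain is never clamped, even if a stale d->arch.physaddr_bitsize survives; this is why the domain.c hunk above can stop resetting the field to 64 in switch_native(). A minimal stand-alone sketch of that rule, with stub types standing in for Xen's struct domain:

```c
/* Hedged sketch of the clamping rule after this change.  The struct and
 * field names here are scaffolding; only the predicate logic mirrors
 * the diff. */
#include <stdio.h>
#include <stdbool.h>

struct domain {
    bool is_32on64;                 /* stand-in for is_pv_32on64_domain() */
    unsigned int physaddr_bitsize;  /* stand-in for d->arch.physaddr_bitsize */
};

static unsigned int min(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

static unsigned int domain_clamp_alloc_bitsize(struct domain *d,
                                               unsigned int bits)
{
    /* Post-fix behaviour: only 32-on-64 domains are ever clamped, so a
     * stale physaddr_bitsize on a native 64-bit domain is harmless. */
    if ( (d == NULL) || !d->is_32on64 )
        return bits;
    return min(d->physaddr_bitsize, bits);
}

int main(void)
{
    struct domain compat = { .is_32on64 = true,  .physaddr_bitsize = 37 };
    struct domain native = { .is_32on64 = false, .physaddr_bitsize = 37 };

    printf("compat: %u\n", domain_clamp_alloc_bitsize(&compat, 64)); /* 37 */
    printf("native: %u\n", domain_clamp_alloc_bitsize(&native, 64)); /* 64 */
    return 0;
}
```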
--- a/xen/common/grant_table.c	Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/common/grant_table.c	Thu Dec 06 13:39:19 2007 +0000
@@ -1081,11 +1081,36 @@ gnttab_transfer(
 
         if ( xsm_grant_transfer(d, e) )
         {
+    unlock_and_copyback:
             rcu_unlock_domain(e);
+            page->count_info &= ~(PGC_count_mask|PGC_allocated);
+            free_domheap_page(page);
             gop.status = GNTST_permission_denied;
             goto copyback;
         }
 
+        if ( (1UL << domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)) <= mfn )
+        {
+            struct page_info *new_page;
+            void *sp, *dp;
+
+            new_page = alloc_domheap_pages(
+                NULL, 0,
+                MEMF_bits(domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)));
+            if ( new_page == NULL )
+                goto unlock_and_copyback;
+
+            sp = map_domain_page(mfn);
+            dp = map_domain_page(page_to_mfn(new_page));
+            memcpy(dp, sp, PAGE_SIZE);
+            unmap_domain_page(dp);
+            unmap_domain_page(sp);
+
+            page->count_info &= ~(PGC_count_mask|PGC_allocated);
+            free_domheap_page(page);
+            page = new_page;
+        }
+
         spin_lock(&e->page_alloc_lock);
 
         /*
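This fallback is the "copy-on-grant-transfer" of the commit message: if the transferred page's MFN lies above what the receiving domain can address, a fresh page is allocated within the domain's reachable range, the contents are copied across, and the original page is freed. Below is a hedged userspace sketch of the same pattern; reachable() and the malloc()/memcpy()/free() calls are illustrative stand-ins for Xen's alloc_domheap_pages(), map_domain_page() and free_domheap_page().

```c
/* Hedged userspace sketch of the copy-on-grant-transfer fallback above:
 * if a buffer is "unreachable" by the receiver, replace it with a
 * reachable copy.  Everything here is a stand-in for the Xen calls in
 * the hunk, not the hypervisor's actual API. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Toy stand-in for the (1UL << bitsize) <= mfn test in the hunk. */
static int reachable(const void *p, uintptr_t limit)
{
    return (uintptr_t)p < limit;
}

static void *transfer_page(void *page, uintptr_t limit)
{
    if ( !reachable(page, limit) )
    {
        void *new_page = malloc(PAGE_SIZE);   /* alloc_domheap_pages() */
        if ( new_page == NULL )
        {
            free(page);   /* the hunk also frees the original on failure */
            return NULL;  /* i.e. goto unlock_and_copyback */
        }
        memcpy(new_page, page, PAGE_SIZE);    /* map + memcpy + unmap */
        free(page);                           /* free_domheap_page() */
        page = new_page;
    }
    return page;
}

int main(void)
{
    void *page = malloc(PAGE_SIZE);
    memset(page, 0xAA, PAGE_SIZE);

    /* A limit of 0 forces the copy path, purely for demonstration. */
    page = transfer_page(page, 0);
    printf("transfer %s\n", page ? "ok" : "failed");
    free(page);
    return 0;
}
```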
--- a/xen/common/memory.c	Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/common/memory.c	Thu Dec 06 13:39:19 2007 +0000
@@ -319,18 +319,6 @@ static long memory_exchange(XEN_GUEST_HA
         goto fail_early;
     }
 
-    if ( (exch.out.address_bits != 0) &&
-         (exch.out.address_bits <
-          (get_order_from_pages(max_page) + PAGE_SHIFT)) )
-    {
-        if ( exch.out.address_bits <= PAGE_SHIFT )
-        {
-            rc = -ENOMEM;
-            goto fail_early;
-        }
-        memflags = MEMF_bits(exch.out.address_bits);
-    }
-
     if ( exch.in.extent_order <= exch.out.extent_order )
     {
         in_chunk_order = exch.out.extent_order - exch.in.extent_order;
@@ -353,6 +341,9 @@ static long memory_exchange(XEN_GUEST_HA
     }
     d = current->domain;
 
+    memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
+        d, exch.out.address_bits ? : BITS_PER_LONG));
+
     cpu = select_local_cpu(d);
 
     for ( i = (exch.nr_exchanged >> in_chunk_order);
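Both this hunk and the page_alloc.c hunk below lean on the GNU C conditional with an omitted middle operand, `a ? : b`, which yields a when a is non-zero and b otherwise, evaluating a only once. A small demo (gcc/clang extension, not ISO C):

```c
/* Demo of the GNU C "a ? : b" form used in this hunk and the
 * page_alloc.c hunk below.  Requires gcc or clang. */
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

int main(void)
{
    unsigned int address_bits = 0;     /* caller did not request a width */
    unsigned int bits = address_bits ? : BITS_PER_LONG;
    printf("bits = %u\n", bits);       /* 64 on LP64: no clamp requested */

    address_bits = 32;                 /* caller wants memory below 4GiB */
    bits = address_bits ? : BITS_PER_LONG;
    printf("bits = %u\n", bits);       /* 32 */
    return 0;
}
```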
--- a/xen/common/page_alloc.c	Thu Dec 06 12:13:12 2007 +0000
+++ b/xen/common/page_alloc.c	Thu Dec 06 13:39:19 2007 +0000
@@ -786,15 +786,13 @@ struct page_info *__alloc_domheap_pages(
 
     ASSERT(!in_irq());
 
-    if ( bits )
-    {
-        bits = domain_clamp_alloc_bitsize(d, bits);
-        if ( bits <= (PAGE_SHIFT + 1) )
-            return NULL;
-        bits -= PAGE_SHIFT + 1;
-        if ( bits < zone_hi )
-            zone_hi = bits;
-    }
+    bits = domain_clamp_alloc_bitsize(d, bits ? : BITS_PER_LONG);
+    if ( bits <= (PAGE_SHIFT + 1) )
+        return NULL;
+
+    bits -= PAGE_SHIFT + 1;
+    if ( bits < zone_hi )
+        zone_hi = bits;
 
     if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
     {
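Several hunks funnel the clamped width to the allocator through MEMF_bits(). The sketch below illustrates the pack/unpack idea behind that flag; the shift value of 24 is an assumption made for illustration, not taken from this changeset.

```c
/* Hedged sketch of the MEMF_bits() plumbing the hunks above rely on:
 * an address width is packed into the high bits of a memflags word and
 * unpacked on the allocator side.  The shift of 24 is an assumption
 * about Xen's encoding, used here only for illustration. */
#include <stdio.h>

#define _MEMF_bits 24
#define MEMF_bits(n) ((n) << _MEMF_bits)
#define memflags_to_bits(f) (((f) >> _MEMF_bits) & 0xff)

int main(void)
{
    unsigned int memflags = 0;

    /* As in memory_exchange(): fold a clamped width into memflags... */
    memflags |= MEMF_bits(37);

    /* ...and recover it on the allocator side, as __alloc_domheap_pages()
     * effectively does before converting it to a zone bound. */
    printf("allocator sees %u address bits\n", memflags_to_bits(memflags));
    return 0;
}
```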