xenbits.xensource.com Git - people/liuw/xen.git/commitdiff
32-on-64: Fix domain address-size clamping, implement
author: Keir Fraser <keir.fraser@citrix.com>
Thu, 6 Dec 2007 13:39:19 +0000 (13:39 +0000)
committer: Keir Fraser <keir.fraser@citrix.com>
Thu, 6 Dec 2007 13:39:19 +0000 (13:39 +0000)
copy-on-grant-transfer, and eliminate 166GB memory limit for x86/64
Xen.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/domain.c
xen/arch/x86/domain_build.c
xen/arch/x86/e820.c
xen/arch/x86/x86_64/mm.c
xen/common/grant_table.c
xen/common/memory.c
xen/common/page_alloc.c

index 4da77927d5cbe9e3a98dcbd79ded06f876d96cb0..588a7fb1f74fc89bea255b8dbcad1fbb83792a92 100644 (file)
@@ -319,8 +319,6 @@ int switch_native(struct domain *d)
             release_compat_l4(d->vcpu[vcpuid]);
     }
 
-    d->arch.physaddr_bitsize = 64;
-
     return 0;
 }
 
index 7279f20075474563f5902685e3a5b550f1d4587e..e0fb1967e20477ecb55b09f2cba650fb40bd2108 100644 (file)
@@ -367,7 +367,7 @@ int __init construct_dom0(
 #ifdef CONFIG_COMPAT
         HYPERVISOR_COMPAT_VIRT_START(d) =
             max_t(unsigned int, m2p_compat_vstart, value);
-        d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
+        d->arch.physaddr_bitsize =
             fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
             + (PAGE_SIZE - 2);
         if ( value > (!is_pv_32on64_domain(d) ?
index 2fbfa8716070528e90a3880e7b7cedcafa2f49e3..f09a48aed7efb249bec4c42b1953653a61de77cc 100644 (file)
@@ -370,13 +370,6 @@ static void __init machine_specific_memory_setup(
                   "can be accessed by Xen in 32-bit mode.");
 #endif
 
-#ifdef __x86_64__
-    clip_to_limit((uint64_t)(MACH2PHYS_COMPAT_VIRT_END -
-                             __HYPERVISOR_COMPAT_VIRT_START) << 10,
-                  "Only the first %u GB of the physical memory map "
-                  "can be accessed by 32-on-64 guests.");
-#endif
-
     reserve_dmi_region();
 }
 
index 5bf706e04b6ada3ef01303627d6e1ec32f1cb753..5d9490a4fef08cf5d032af42e6b33fdd307426c2 100644 (file)
@@ -442,7 +442,7 @@ int check_descriptor(const struct domain *dom, struct desc_struct *d)
 
 unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits)
 {
-    if ( d == NULL )
+    if ( (d == NULL) || !is_pv_32on64_domain(d) )
         return bits;
     return min(d->arch.physaddr_bitsize, bits);
 }
index 5e5b353ed92d755cd7601b990819c805d244791d..9ba4ed11ad74c37d6a4c957c99fe20b2664c9779 100644 (file)
@@ -1081,11 +1081,36 @@ gnttab_transfer(
 
         if ( xsm_grant_transfer(d, e) )
         {
+        unlock_and_copyback:
             rcu_unlock_domain(e);
+            page->count_info &= ~(PGC_count_mask|PGC_allocated);
+            free_domheap_page(page);
             gop.status = GNTST_permission_denied;
             goto copyback;
         }
 
+        if ( (1UL << domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)) <= mfn )
+        {
+            struct page_info *new_page;
+            void *sp, *dp;
+
+            new_page = alloc_domheap_pages(
+                NULL, 0, 
+                MEMF_bits(domain_clamp_alloc_bitsize(e, BITS_PER_LONG-1)));
+            if ( new_page == NULL )
+                goto unlock_and_copyback;
+
+            sp = map_domain_page(mfn);
+            dp = map_domain_page(page_to_mfn(new_page));
+            memcpy(dp, sp, PAGE_SIZE);
+            unmap_domain_page(dp);
+            unmap_domain_page(sp);
+
+            page->count_info &= ~(PGC_count_mask|PGC_allocated);
+            free_domheap_page(page);
+            page = new_page;
+        }
+
         spin_lock(&e->page_alloc_lock);
 
         /*
index 704497944af79f30ae1879c3cca29027781617d4..90ff8a982767c1a6aa17f6c5a35e296b8e2a23e4 100644 (file)
@@ -319,18 +319,6 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
         goto fail_early;
     }
 
-    if ( (exch.out.address_bits != 0) &&
-         (exch.out.address_bits <
-          (get_order_from_pages(max_page) + PAGE_SHIFT)) )
-    {
-        if ( exch.out.address_bits <= PAGE_SHIFT )
-        {
-            rc = -ENOMEM;
-            goto fail_early;
-        }
-        memflags = MEMF_bits(exch.out.address_bits);
-    }
-
     if ( exch.in.extent_order <= exch.out.extent_order )
     {
         in_chunk_order  = exch.out.extent_order - exch.in.extent_order;
@@ -353,6 +341,9 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
     }
     d = current->domain;
 
+    memflags |= MEMF_bits(domain_clamp_alloc_bitsize(
+        d, exch.out.address_bits ? : BITS_PER_LONG));
+
     cpu = select_local_cpu(d);
 
     for ( i = (exch.nr_exchanged >> in_chunk_order);
index 65639c1f183dab122f444e4bb06789d4095c6a16..9de3f31ebe53d4118ef7172c35ba7fa6e5dd90db 100644 (file)
@@ -786,15 +786,13 @@ struct page_info *__alloc_domheap_pages(
 
     ASSERT(!in_irq());
 
-    if ( bits )
-    {
-        bits = domain_clamp_alloc_bitsize(d, bits);
-        if ( bits <= (PAGE_SHIFT + 1) )
-            return NULL;
-        bits -= PAGE_SHIFT + 1;
-        if ( bits < zone_hi )
-            zone_hi = bits;
-    }
+    bits = domain_clamp_alloc_bitsize(d, bits ? : BITS_PER_LONG);
+    if ( bits <= (PAGE_SHIFT + 1) )
+        return NULL;
+
+    bits -= PAGE_SHIFT + 1;
+    if ( bits < zone_hi )
+        zone_hi = bits;
 
     if ( (zone_hi + PAGE_SHIFT) >= dma_bitsize )
     {