x86/hvm: Use get_page_from_gfn() instead of get_gfn()/put_gfn.
author    Tim Deegan <tim@xen.org>
date      Thu, 17 May 2012 09:24:54 +0000 (10:24 +0100)
committer Tim Deegan <tim@xen.org>
date      Thu, 17 May 2012 09:24:54 +0000 (10:24 +0100)
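
Instead of looking up an mfn with get_gfn(), taking a separate page
reference, and then dropping the gfn lock with put_gfn(), callers now
use get_page_from_gfn(), which returns a referenced struct page_info
(or NULL) and leaves no p2m lock held. The page reference alone keeps
the memory live, so the p2m lock is no longer held across the I/O or
mapping, avoiding potential deadlock (e.g. against the event channel
lock, as noted in the comment removed from hvmemul_do_io()).

The calling convention changes roughly as in the sketch below. This is
an illustrative fragment, not part of the patch: the surrounding
function and the -EINVAL error value are placeholders.

    /* Old pattern: the p2m entry is locked from get_gfn() until
     * put_gfn(), with a separate get_page() to keep the page live
     * after the lock is dropped. */
    mfn = mfn_x(get_gfn(d, gfn, &p2mt));
    if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
         !get_page(mfn_to_page(mfn), d) )
    {
        put_gfn(d, gfn);
        return -EINVAL;
    }
    put_gfn(d, gfn);
    /* ... use the page ... */
    put_page(mfn_to_page(mfn));

    /* New pattern: a single call returns a referenced page, or NULL
     * on error.  Pass P2M_UNSHARE instead of P2M_ALLOC when a
     * writable, unshared page is needed. */
    page = get_page_from_gfn(d, gfn, &p2mt, P2M_ALLOC);
    if ( !page )
        return -EINVAL;
    /* ... use the page ... */
    put_page(page);
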
Signed-off-by: Tim Deegan <tim@xen.org>
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
xen/arch/x86/hvm/emulate.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/stdvga.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/viridian.c
xen/arch/x86/hvm/vmx/vmx.c

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 3a7fe952937de9fb70144ea3f15ad06ba2c03ed1..2979aa2d466f9cde8817cbdd4eba273e7636b5cf 100644
@@ -60,34 +60,25 @@ static int hvmemul_do_io(
     ioreq_t *p = get_ioreq(curr);
     unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
     p2m_type_t p2mt;
-    mfn_t ram_mfn;
+    struct page_info *ram_page;
     int rc;
 
     /* Check for paged out page */
-    ram_mfn = get_gfn_unshare(curr->domain, ram_gfn, &p2mt);
+    ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
     if ( p2m_is_paging(p2mt) )
     {
-        put_gfn(curr->domain, ram_gfn); 
+        if ( ram_page )
+            put_page(ram_page);
         p2m_mem_paging_populate(curr->domain, ram_gfn);
         return X86EMUL_RETRY;
     }
     if ( p2m_is_shared(p2mt) )
     {
-        put_gfn(curr->domain, ram_gfn); 
+        if ( ram_page )
+            put_page(ram_page);
         return X86EMUL_RETRY;
     }
 
-    /* Maintain a ref on the mfn to ensure liveness. Put the gfn
-     * to avoid potential deadlock wrt event channel lock, later. */
-    if ( mfn_valid(mfn_x(ram_mfn)) )
-        if ( !get_page(mfn_to_page(mfn_x(ram_mfn)),
-             curr->domain) )
-        {
-            put_gfn(curr->domain, ram_gfn);
-            return X86EMUL_RETRY;
-        }
-    put_gfn(curr->domain, ram_gfn);
-
     /*
      * Weird-sized accesses have undefined behaviour: we discard writes
      * and read all-ones.
@@ -98,8 +89,8 @@ static int hvmemul_do_io(
         ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
         if ( dir == IOREQ_READ )
             memset(p_data, ~0, size);
-        if ( mfn_valid(mfn_x(ram_mfn)) )
-            put_page(mfn_to_page(mfn_x(ram_mfn)));
+        if ( ram_page )
+            put_page(ram_page);
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -120,8 +111,8 @@ static int hvmemul_do_io(
             unsigned int bytes = vio->mmio_large_write_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
             {
-                if ( mfn_valid(mfn_x(ram_mfn)) )
-                    put_page(mfn_to_page(mfn_x(ram_mfn)));
+                if ( ram_page )
+                    put_page(ram_page);
                 return X86EMUL_OKAY;
             }
         }
@@ -133,8 +124,8 @@ static int hvmemul_do_io(
             {
                 memcpy(p_data, &vio->mmio_large_read[addr - pa],
                        size);
-                if ( mfn_valid(mfn_x(ram_mfn)) )
-                    put_page(mfn_to_page(mfn_x(ram_mfn)));
+                if ( ram_page )
+                    put_page(ram_page);
                 return X86EMUL_OKAY;
             }
         }
@@ -148,8 +139,8 @@ static int hvmemul_do_io(
         vio->io_state = HVMIO_none;
         if ( p_data == NULL )
         {
-            if ( mfn_valid(mfn_x(ram_mfn)) )
-                put_page(mfn_to_page(mfn_x(ram_mfn)));
+            if ( ram_page )
+                put_page(ram_page);
             return X86EMUL_UNHANDLEABLE;
         }
         goto finish_access;
@@ -159,13 +150,13 @@ static int hvmemul_do_io(
              (addr == (vio->mmio_large_write_pa +
                        vio->mmio_large_write_bytes)) )
         {
-            if ( mfn_valid(mfn_x(ram_mfn)) )
-                put_page(mfn_to_page(mfn_x(ram_mfn)));
+            if ( ram_page )
+                put_page(ram_page);
             return X86EMUL_RETRY;
         }
     default:
-        if ( mfn_valid(mfn_x(ram_mfn)) )
-            put_page(mfn_to_page(mfn_x(ram_mfn)));
+        if ( ram_page )
+            put_page(ram_page);
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -173,8 +164,8 @@ static int hvmemul_do_io(
     {
         gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
                  p->state);
-        if ( mfn_valid(mfn_x(ram_mfn)) )
-            put_page(mfn_to_page(mfn_x(ram_mfn)));
+        if ( ram_page )
+            put_page(ram_page);
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -227,8 +218,8 @@ static int hvmemul_do_io(
 
     if ( rc != X86EMUL_OKAY )
     {
-        if ( mfn_valid(mfn_x(ram_mfn)) )
-            put_page(mfn_to_page(mfn_x(ram_mfn)));
+        if ( ram_page )
+            put_page(ram_page);
         return rc;
     }
 
@@ -267,8 +258,8 @@ static int hvmemul_do_io(
         }
     }
 
-    if ( mfn_valid(mfn_x(ram_mfn)) )
-        put_page(mfn_to_page(mfn_x(ram_mfn)));
+    if ( ram_page )
+        put_page(ram_page);
     return X86EMUL_OKAY;
 }
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 42d545ba400e0ab5fe49ff790a9847b2adcc88aa..efd5587d01fb977e6f13d53821320e2f4623c97b 100644
@@ -395,48 +395,41 @@ int prepare_ring_for_helper(
 {
     struct page_info *page;
     p2m_type_t p2mt;
-    unsigned long mfn;
     void *va;
 
-    mfn = mfn_x(get_gfn_unshare(d, gmfn, &p2mt));
-    if ( !p2m_is_ram(p2mt) )
-    {
-        put_gfn(d, gmfn);
-        return -EINVAL;
-    }
+    page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
     if ( p2m_is_paging(p2mt) )
     {
-        put_gfn(d, gmfn);
+        if ( page )
+            put_page(page);
         p2m_mem_paging_populate(d, gmfn);
         return -ENOENT;
     }
     if ( p2m_is_shared(p2mt) )
     {
-        put_gfn(d, gmfn);
+        if ( page )
+            put_page(page);
         return -ENOENT;
     }
-    ASSERT(mfn_valid(mfn));
+    if ( !page )
+        return -EINVAL;
 
-    page = mfn_to_page(mfn);
-    if ( !get_page_and_type(page, d, PGT_writable_page) )
+    if ( !get_page_type(page, PGT_writable_page) )
     {
-        put_gfn(d, gmfn);
+        put_page(page);
         return -EINVAL;
     }
 
-    va = map_domain_page_global(mfn);
+    va = __map_domain_page_global(page);
     if ( va == NULL )
     {
         put_page_and_type(page);
-        put_gfn(d, gmfn);
         return -ENOMEM;
     }
 
     *_va = va;
     *_page = page;
 
-    put_gfn(d, gmfn);
-
     return 0;
 }
 
@@ -1607,8 +1600,8 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
 int hvm_set_cr0(unsigned long value)
 {
     struct vcpu *v = current;
-    p2m_type_t p2mt;
-    unsigned long gfn, mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+    unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+    struct page_info *page;
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
 
@@ -1647,23 +1640,20 @@ int hvm_set_cr0(unsigned long value)
         {
             /* The guest CR3 must be pointing to the guest physical. */
             gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
-            mfn = mfn_x(get_gfn(v->domain, gfn, &p2mt));
-            if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
-                 !get_page(mfn_to_page(mfn), v->domain))
+            page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
+            if ( !page )
             {
-                put_gfn(v->domain, gfn);
-                gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
-                         v->arch.hvm_vcpu.guest_cr[3], mfn);
+                gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx\n",
+                         v->arch.hvm_vcpu.guest_cr[3]);
                 domain_crash(v->domain);
                 return X86EMUL_UNHANDLEABLE;
             }
 
             /* Now arch.guest_table points to machine physical. */
-            v->arch.guest_table = pagetable_from_pfn(mfn);
+            v->arch.guest_table = pagetable_from_page(page);
 
             HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
-                        v->arch.hvm_vcpu.guest_cr[3], mfn);
-            put_gfn(v->domain, gfn);
+                        v->arch.hvm_vcpu.guest_cr[3], page_to_mfn(page));
         }
     }
     else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
@@ -1738,26 +1728,21 @@ int hvm_set_cr0(unsigned long value)
 
 int hvm_set_cr3(unsigned long value)
 {
-    unsigned long mfn;
-    p2m_type_t p2mt;
     struct vcpu *v = current;
+    struct page_info *page;
 
     if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
          (value != v->arch.hvm_vcpu.guest_cr[3]) )
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-        mfn = mfn_x(get_gfn(v->domain, value >> PAGE_SHIFT, &p2mt));
-        if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
-             !get_page(mfn_to_page(mfn), v->domain) )
-        {
-              put_gfn(v->domain, value >> PAGE_SHIFT);
-              goto bad_cr3;
-        }
+        page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
+                                 NULL, P2M_ALLOC);
+        if ( !page )
+            goto bad_cr3;
 
         put_page(pagetable_get_page(v->arch.guest_table));
-        v->arch.guest_table = pagetable_from_pfn(mfn);
-        put_gfn(v->domain, value >> PAGE_SHIFT);
+        v->arch.guest_table = pagetable_from_page(page);
 
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
     }
@@ -1914,46 +1899,29 @@ int hvm_virtual_to_linear_addr(
 static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
 {
     void *map;
-    unsigned long mfn;
     p2m_type_t p2mt;
-    struct page_info *pg;
+    struct page_info *page;
     struct domain *d = current->domain;
-    int rc;
 
-    mfn = mfn_x(writable
-                ? get_gfn_unshare(d, gfn, &p2mt)
-                : get_gfn(d, gfn, &p2mt));
-    if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
+    page = get_page_from_gfn(d, gfn, &p2mt,
+                             writable ? P2M_UNSHARE : P2M_ALLOC);
+    if ( (p2m_is_shared(p2mt) && writable) || !page )
     {
-        put_gfn(d, gfn);
+        if ( page )
+            put_page(page);
         return NULL;
     }
     if ( p2m_is_paging(p2mt) )
     {
-        put_gfn(d, gfn);
+        put_page(page);
         p2m_mem_paging_populate(d, gfn);
         return NULL;
     }
 
-    ASSERT(mfn_valid(mfn));
-
     if ( writable )
-        paging_mark_dirty(d, mfn);
-
-    /* Get a ref on the page, considering that it could be shared */
-    pg = mfn_to_page(mfn);
-    rc = get_page(pg, d);
-    if ( !rc && !writable )
-        /* Page could be shared */
-        rc = get_page(pg, dom_cow);
-    if ( !rc )
-    {
-        put_gfn(d, gfn);
-        return NULL;
-    }
+        paging_mark_dirty(d, page_to_mfn(page));
 
-    map = map_domain_page(mfn);
-    put_gfn(d, gfn);
+    map = __map_domain_page(page);
     return map;
 }
 
@@ -2358,7 +2326,8 @@ static enum hvm_copy_result __hvm_copy(
     void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
 {
     struct vcpu *curr = current;
-    unsigned long gfn, mfn;
+    unsigned long gfn;
+    struct page_info *page;
     p2m_type_t p2mt;
     char *p;
     int count, todo = size;
@@ -2402,32 +2371,33 @@ static enum hvm_copy_result __hvm_copy(
             gfn = addr >> PAGE_SHIFT;
         }
 
-        mfn = mfn_x(get_gfn_unshare(curr->domain, gfn, &p2mt));
+        page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
 
         if ( p2m_is_paging(p2mt) )
         {
-            put_gfn(curr->domain, gfn);
+            if ( page )
+                put_page(page);
             p2m_mem_paging_populate(curr->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
         {
-            put_gfn(curr->domain, gfn);
+            if ( page )
+                put_page(page);
             return HVMCOPY_gfn_shared;
         }
         if ( p2m_is_grant(p2mt) )
         {
-            put_gfn(curr->domain, gfn);
+            if ( page )
+                put_page(page);
             return HVMCOPY_unhandleable;
         }
-        if ( !p2m_is_ram(p2mt) )
+        if ( !page )
         {
-            put_gfn(curr->domain, gfn);
             return HVMCOPY_bad_gfn_to_mfn;
         }
-        ASSERT(mfn_valid(mfn));
 
-        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
+        p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
 
         if ( flags & HVMCOPY_to_guest )
         {
@@ -2437,12 +2407,12 @@ static enum hvm_copy_result __hvm_copy(
                 if ( xchg(&lastpage, gfn) != gfn )
                     gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
                              " memory page. gfn=%#lx, mfn=%#lx\n",
-                             gfn, mfn);
+                             gfn, page_to_mfn(page));
             }
             else
             {
                 memcpy(p, buf, count);
-                paging_mark_dirty(curr->domain, mfn);
+                paging_mark_dirty(curr->domain, page_to_mfn(page));
             }
         }
         else
@@ -2455,7 +2425,7 @@ static enum hvm_copy_result __hvm_copy(
         addr += count;
         buf  += count;
         todo -= count;
-        put_gfn(curr->domain, gfn);
+        put_page(page);
     }
 
     return HVMCOPY_okay;
@@ -2464,7 +2434,8 @@ static enum hvm_copy_result __hvm_copy(
 static enum hvm_copy_result __hvm_clear(paddr_t addr, int size)
 {
     struct vcpu *curr = current;
-    unsigned long gfn, mfn;
+    unsigned long gfn;
+    struct page_info *page;
     p2m_type_t p2mt;
     char *p;
     int count, todo = size;
@@ -2500,32 +2471,33 @@ static enum hvm_copy_result __hvm_clear(paddr_t addr, int size)
             return HVMCOPY_bad_gva_to_gfn;
         }
 
-        mfn = mfn_x(get_gfn_unshare(curr->domain, gfn, &p2mt));
+        page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
 
         if ( p2m_is_paging(p2mt) )
         {
+            if ( page )
+                put_page(page);
             p2m_mem_paging_populate(curr->domain, gfn);
-            put_gfn(curr->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
         {
-            put_gfn(curr->domain, gfn);
+            if ( page )
+                put_page(page);
             return HVMCOPY_gfn_shared;
         }
         if ( p2m_is_grant(p2mt) )
         {
-            put_gfn(curr->domain, gfn);
+            if ( page )
+                put_page(page);
             return HVMCOPY_unhandleable;
         }
-        if ( !p2m_is_ram(p2mt) )
+        if ( !page )
         {
-            put_gfn(curr->domain, gfn);
             return HVMCOPY_bad_gfn_to_mfn;
         }
-        ASSERT(mfn_valid(mfn));
 
-        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
+        p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
 
         if ( p2mt == p2m_ram_ro )
         {
@@ -2533,19 +2507,19 @@ static enum hvm_copy_result __hvm_clear(paddr_t addr, int size)
             if ( xchg(&lastpage, gfn) != gfn )
                 gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
                         " memory page. gfn=%#lx, mfn=%#lx\n",
-                        gfn, mfn);
+                         gfn, page_to_mfn(page));
         }
         else
         {
             memset(p, 0x00, count);
-            paging_mark_dirty(curr->domain, mfn);
+            paging_mark_dirty(curr->domain, page_to_mfn(page));
         }
 
         unmap_domain_page(p);
 
         addr += count;
         todo -= count;
-        put_gfn(curr->domain, gfn);
+        put_page(page);
     }
 
     return HVMCOPY_okay;
@@ -4000,35 +3974,16 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
 
         for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
         {
-            p2m_type_t t;
-            mfn_t mfn = get_gfn_unshare(d, pfn, &t);
-            if ( p2m_is_paging(t) )
+            struct page_info *page;
+            page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
+            if ( page )
             {
-                put_gfn(d, pfn);
-                p2m_mem_paging_populate(d, pfn);
-                rc = -EINVAL;
-                goto param_fail3;
-            }
-            if( p2m_is_shared(t) )
-            {
-                /* If it insists on not unsharing itself, crash the domain 
-                 * rather than crashing the host down in mark dirty */
-                gdprintk(XENLOG_WARNING,
-                         "shared pfn 0x%lx modified?\n", pfn);
-                domain_crash(d);
-                put_gfn(d, pfn);
-                rc = -EINVAL;
-                goto param_fail3;
-            }
-            
-            if ( mfn_x(mfn) != INVALID_MFN )
-            {
-                paging_mark_dirty(d, mfn_x(mfn));
+                paging_mark_dirty(d, page_to_mfn(page));
                 /* These are most probably not page tables any more */
                 /* don't take a long time and don't die either */
-                sh_remove_shadows(d->vcpu[0], mfn, 1, 0);
+                sh_remove_shadows(d->vcpu[0], _mfn(page_to_mfn(page)), 1, 0);
+                put_page(page);
             }
-            put_gfn(d, pfn);
         }
 
     param_fail3:
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index ddbd9970f3d91a75fd60d10c5c48cf21aaf8030f..0dae29b12e762bc874a04b4cb77bb298b9fd7d3e 100644
@@ -482,7 +482,8 @@ static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
                 if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
+                    struct page_info *dp = get_page_from_gfn(
+                            d, data >> PAGE_SHIFT, &p2mt, P2M_ALLOC);
                     /*
                      * The only case we handle is vga_mem <-> vga_mem.
                      * Anything else disables caching and leaves it to qemu-dm.
@@ -490,11 +491,12 @@ static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
                     {
-                        put_gfn(d, data >> PAGE_SHIFT);
+                        if ( dp )
+                            put_page(dp);
                         return 0;
                     }
+                    ASSERT(!dp);
                     stdvga_mem_write(data, tmp, p->size);
-                    put_gfn(d, data >> PAGE_SHIFT);
                 }
                 data += sign * p->size;
                 addr += sign * p->size;
@@ -508,15 +510,16 @@ static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
                 if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
+                    struct page_info *dp = get_page_from_gfn(
+                        d, data >> PAGE_SHIFT, &p2mt, P2M_ALLOC);
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
                     {
-                        put_gfn(d, data >> PAGE_SHIFT);
+                        if ( dp )
+                            put_page(dp);
                         return 0;
                     }
                     tmp = stdvga_mem_read(data, p->size);
-                    put_gfn(d, data >> PAGE_SHIFT);
                 }
                 stdvga_mem_write(addr, tmp, p->size);
                 data += sign * p->size;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 47fe9846fe388390bdd11247d034711387b11680..8604259042dc269813cede078174789409e6ee75 100644
@@ -232,8 +232,7 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
 
 static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 {
-    unsigned long mfn = 0;
-    p2m_type_t p2mt;
+    struct page_info *page = NULL;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
@@ -250,10 +249,10 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     {
         if ( c->cr0 & X86_CR0_PG )
         {
-            mfn = mfn_x(get_gfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
-            if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
+            page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
+                                     NULL, P2M_ALLOC);
+            if ( !page )
             {
-                put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
                 gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
                          c->cr3);
                 return -EINVAL;
@@ -263,9 +262,8 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
         if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
             put_page(pagetable_get_page(v->arch.guest_table));
 
-        v->arch.guest_table = pagetable_from_pfn(mfn);
-        if ( c->cr0 & X86_CR0_PG )
-            put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
+        v->arch.guest_table =
+            page ? pagetable_from_page(page) : pagetable_null();
     }
 
     v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 326b3bd8a4aeab7cc965628b4dfd8f887caa2c15..00a199071e2a592b5ae4c7dcdf3271ba2fd67fed 100644
@@ -134,18 +134,19 @@ void dump_apic_assist(struct vcpu *v)
 static void enable_hypercall_page(struct domain *d)
 {
     unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
-    unsigned long mfn = get_gfn_untyped(d, gmfn);
+    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
     uint8_t *p;
 
-    if ( !mfn_valid(mfn) ||
-         !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+    if ( !page || !get_page_type(page, PGT_writable_page) )
     {
-        put_gfn(d, gmfn); 
-        gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
+        if ( page )
+            put_page(page);
+        gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
+                 page ? page_to_mfn(page) : INVALID_MFN);
         return;
     }
 
-    p = map_domain_page(mfn);
+    p = __map_domain_page(page);
 
     /*
      * We set the bit 31 in %eax (reserved field in the Viridian hypercall
@@ -162,15 +163,14 @@ static void enable_hypercall_page(struct domain *d)
 
     unmap_domain_page(p);
 
-    put_page_and_type(mfn_to_page(mfn));
-    put_gfn(d, gmfn); 
+    put_page_and_type(page);
 }
 
 void initialize_apic_assist(struct vcpu *v)
 {
     struct domain *d = v->domain;
     unsigned long gmfn = v->arch.hvm_vcpu.viridian.apic_assist.fields.pfn;
-    unsigned long mfn = get_gfn_untyped(d, gmfn);
+    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
     uint8_t *p;
 
     /*
@@ -183,22 +183,22 @@ void initialize_apic_assist(struct vcpu *v)
      * details of how Windows uses the page.
      */
 
-    if ( !mfn_valid(mfn) ||
-         !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+    if ( !page || !get_page_type(page, PGT_writable_page) )
     {
-        put_gfn(d, gmfn); 
-        gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
+        if ( page )
+            put_page(page);
+        gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
+                 page ? page_to_mfn(page) : INVALID_MFN);
         return;
     }
 
-    p = map_domain_page(mfn);
+    p = __map_domain_page(page);
 
     *(u32 *)p = 0;
 
     unmap_domain_page(p);
 
-    put_page_and_type(mfn_to_page(mfn));
-    put_gfn(d, gmfn); 
+    put_page_and_type(page);
 }
 
 int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index a73350b11265e89afbb921c5c55b4a7410e1726d..d5cb2796fb35b1ac5619b5e12720403d0152dfa3 100644
@@ -480,17 +480,16 @@ static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 static int vmx_restore_cr0_cr3(
     struct vcpu *v, unsigned long cr0, unsigned long cr3)
 {
-    unsigned long mfn = 0;
-    p2m_type_t p2mt;
+    struct page_info *page = NULL;
 
     if ( paging_mode_shadow(v->domain) )
     {
         if ( cr0 & X86_CR0_PG )
         {
-            mfn = mfn_x(get_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
-            if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
+            page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT,
+                                     NULL, P2M_ALLOC);
+            if ( !page )
             {
-                put_gfn(v->domain, cr3 >> PAGE_SHIFT);
                 gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
                 return -EINVAL;
             }
@@ -499,9 +498,8 @@ static int vmx_restore_cr0_cr3(
         if ( hvm_paging_enabled(v) )
             put_page(pagetable_get_page(v->arch.guest_table));
 
-        v->arch.guest_table = pagetable_from_pfn(mfn);
-        if ( cr0 & X86_CR0_PG )
-            put_gfn(v->domain, cr3 >> PAGE_SHIFT);
+        v->arch.guest_table =
+            page ? pagetable_from_page(page) : pagetable_null();
     }
 
     v->arch.hvm_vcpu.guest_cr[0] = cr0 | X86_CR0_ET;
@@ -1035,8 +1033,9 @@ static void vmx_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
 
 static void vmx_load_pdptrs(struct vcpu *v)
 {
-    unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3], mfn;
+    unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
     uint64_t *guest_pdptrs;
+    struct page_info *page;
     p2m_type_t p2mt;
     char *p;
 
@@ -1047,24 +1046,19 @@ static void vmx_load_pdptrs(struct vcpu *v)
     if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
         goto crash;
 
-    mfn = mfn_x(get_gfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
-    if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) || 
-         /* If we didn't succeed in unsharing, get_page will fail
-          * (page still belongs to dom_cow) */
-         !get_page(mfn_to_page(mfn), v->domain) )
+    page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
+    if ( !page )
     {
         /* Ideally you don't want to crash but rather go into a wait 
          * queue, but this is the wrong place. We're holding at least
          * the paging lock */
         gdprintk(XENLOG_ERR,
-                 "Bad cr3 on load pdptrs gfn %lx mfn %lx type %d\n",
-                 cr3 >> PAGE_SHIFT, mfn, (int) p2mt);
-        put_gfn(v->domain, cr3 >> PAGE_SHIFT);
+                 "Bad cr3 on load pdptrs gfn %lx type %d\n",
+                 cr3 >> PAGE_SHIFT, (int) p2mt);
         goto crash;
     }
-    put_gfn(v->domain, cr3 >> PAGE_SHIFT);
 
-    p = map_domain_page(mfn);
+    p = __map_domain_page(page);
 
     guest_pdptrs = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
 
@@ -1090,7 +1084,7 @@ static void vmx_load_pdptrs(struct vcpu *v)
     vmx_vmcs_exit(v);
 
     unmap_domain_page(p);
-    put_page(mfn_to_page(mfn));
+    put_page(page);
     return;
 
  crash: