x86/mm: prefer is_..._domain() over is_..._vcpu()
author    Jan Beulich <jbeulich@suse.com>
          Thu, 18 Jun 2015 14:42:26 +0000 (16:42 +0200)
committer Jan Beulich <jbeulich@suse.com>
          Thu, 18 Jun 2015 14:42:26 +0000 (16:42 +0200)
... when the domain pointer is already available.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
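
For context, a minimal sketch of how the two predicate families relate,
using toy stand-in types rather than the real declarations from
xen/include/xen/sched.h (the struct shapes and field names here are
assumptions for illustration only): the _vcpu() forms are thin wrappers
that chase v->domain before testing a per-domain property, so where a
function already holds the domain pointer, the _domain() form states the
intent directly and skips a redundant dereference.

    #include <stdbool.h>

    /* Toy stand-ins -- assumed shapes, not the real Xen declarations. */
    struct domain { bool is_hvm; };
    struct vcpu   { struct domain *domain; };

    #define is_hvm_domain(d) ((d)->is_hvm)
    /* The vcpu form merely re-derives the domain: */
    #define is_hvm_vcpu(v)   (is_hvm_domain((v)->domain))

    static bool check_via_vcpu(struct vcpu *v)
    {
        return is_hvm_vcpu(v);        /* expands to v->domain->is_hvm */
    }

    static bool check_via_domain(struct vcpu *v)
    {
        struct domain *d = v->domain; /* pointer the caller already has */
        return is_hvm_domain(d);      /* no second trip through v */
    }

The same wrapper relationship holds for is_pv_32on64_vcpu() over
is_pv_32on64_domain(), which is why each hunk below can switch forms
without changing behaviour.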
xen/arch/x86/mm/guest_walk.c
xen/arch/x86/mm/shadow/common.c
xen/arch/x86/mm/shadow/multi.c

diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index d8f5a356ca8703ef88d97e8b1793cead1b1c2327..9c6c74f39f8411b1115bee43277d6d0c140862d0 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -159,7 +159,7 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
     mflags = mandatory_flags(v, pfec);
     iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
 
-    if ( is_hvm_vcpu(v) && !(pfec & PFEC_user_mode) )
+    if ( is_hvm_domain(d) && !(pfec & PFEC_user_mode) )
     {
         struct segment_register seg;
         const struct cpu_user_regs *regs = guest_cpu_user_regs();
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 919b15b777dc4fd5624da14b41e1e670949d8034..c7e0e54a98ff5ae55672e74e1b44d1819ef97bab 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3157,12 +3157,10 @@ void shadow_teardown(struct domain *d)
      * destroyed any shadows of it or sh_destroy_shadow will get confused. */
     if ( !pagetable_is_null(d->arch.paging.shadow.unpaged_pagetable) )
     {
+        ASSERT(is_hvm_domain(d));
         for_each_vcpu(d, v)
-        {
-            ASSERT(is_hvm_vcpu(v));
             if ( !hvm_paging_enabled(v) )
                 v->arch.guest_table = pagetable_null();
-        }
         unpaged_pagetable =
             pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable);
         d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 6edac715eed6bf280851eaa14b70a1a1cc96d101..42204d966133a166bbbf6f6225abc65cb0e944f5 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2043,7 +2043,7 @@ void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn)
         sh_unmap_domain_page(l3e);
         shadow_free(d, m3mfn);
 
-        if ( is_pv_32on64_vcpu(v) )
+        if ( is_pv_32on64_domain(d) )
         {
             /* Need to destroy the l3 and l2 monitor pages that map the
              * Xen VAs at 3GB-4GB */
@@ -3963,7 +3963,7 @@ sh_update_cr3(struct vcpu *v, int do_locking)
                    (unsigned long)pagetable_get_pfn(v->arch.guest_table));
 
 #if GUEST_PAGING_LEVELS == 4
-    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_vcpu(v) )
+    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_domain(d) )
         gmfn = pagetable_get_mfn(v->arch.guest_table_user);
     else
 #endif
@@ -4674,7 +4674,7 @@ static void *emulate_map_dest(struct vcpu *v,
 
         /* Cross-page emulated writes are only supported for HVM guests;
          * PV guests ought to know better */
-        if ( !is_hvm_vcpu(v) )
+        if ( !is_hvm_domain(d) )
             return MAPPING_UNHANDLEABLE;
 
         /* This write crosses a page boundary.  Translate the second page */
@@ -5104,7 +5104,7 @@ int sh_audit_l3_table(struct vcpu *v, mfn_t sl3mfn, mfn_t x)
             gmfn = get_shadow_status(d, get_gfn_query_unlocked(
                                         d, gfn_x(gfn), &p2mt),
                                      ((GUEST_PAGING_LEVELS == 3 ||
-                                       is_pv_32on64_vcpu(v))
+                                       is_pv_32on64_domain(d))
                                       && !shadow_mode_external(d)
                                       && (guest_index(gl3e) % 4) == 3)
                                      ? SH_type_l2h_shadow