nestedhap: Change hostcr3 and p2m->cr3 to meaningful words
author    Zhang Xiantao <xiantao.zhang@intel.com>
          Tue, 15 Jan 2013 10:09:33 +0000 (11:09 +0100)
committer Zhang Xiantao <xiantao.zhang@intel.com>
          Tue, 15 Jan 2013 10:09:33 +0000 (11:09 +0100)
VMX doesn't have the concept of a host CR3 for the nested p2m; only SVM
does, so change it to neutral wording.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Jun Nakajima <jun.nakajima@intel.com>
Acked-by: Eddie Dong <eddie.dong@intel.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
xen/arch/x86/hvm/hvm.c
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/hvm/vmx/vvmx.c
xen/arch/x86/mm/hap/nested_hap.c
xen/arch/x86/mm/mm-locks.h
xen/arch/x86/mm/p2m.c
xen/include/asm-x86/hvm/hvm.h
xen/include/asm-x86/hvm/vmx/vvmx.h
xen/include/asm-x86/p2m.h

index b5d795ae4166c8f2a2e9a49c14ebfa4a1fe452d4..292559dae270cb4505baf1b5c25027939e7135a6 100644 (file)
@@ -4538,10 +4538,10 @@ uint64_t nhvm_vcpu_guestcr3(struct vcpu *v)
     return -EOPNOTSUPP;
 }
 
-uint64_t nhvm_vcpu_hostcr3(struct vcpu *v)
+uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
 {
-    if (hvm_funcs.nhvm_vcpu_hostcr3)
-        return hvm_funcs.nhvm_vcpu_hostcr3(v);
+    if ( hvm_funcs.nhvm_vcpu_p2m_base )
+        return hvm_funcs.nhvm_vcpu_p2m_base(v);
     return -EOPNOTSUPP;
 }
 
index 55a5ae5ab5d10c85ea57f88e1eff983f71199484..2c8504a881c1faf6c3686f164ffedae5882b96f3 100644 (file)
@@ -2003,7 +2003,7 @@ static struct hvm_function_table __read_mostly svm_function_table = {
     .nhvm_vcpu_vmexit = nsvm_vcpu_vmexit_inject,
     .nhvm_vcpu_vmexit_trap = nsvm_vcpu_vmexit_trap,
     .nhvm_vcpu_guestcr3 = nsvm_vcpu_guestcr3,
-    .nhvm_vcpu_hostcr3 = nsvm_vcpu_hostcr3,
+    .nhvm_vcpu_p2m_base = nsvm_vcpu_hostcr3,
     .nhvm_vcpu_asid = nsvm_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nsvm_vmcb_guest_intercepts_trap,
     .nhvm_vmcx_hap_enabled = nsvm_vmcb_hap_enabled,
index aee1f9e13831504c76958da600740bb9e478c183..98309da504e095beca0985831d8cf62deb7d8785 100644 (file)
@@ -1504,7 +1504,7 @@ static struct hvm_function_table __read_mostly vmx_function_table = {
     .nhvm_vcpu_destroy    = nvmx_vcpu_destroy,
     .nhvm_vcpu_reset      = nvmx_vcpu_reset,
     .nhvm_vcpu_guestcr3   = nvmx_vcpu_guestcr3,
-    .nhvm_vcpu_hostcr3    = nvmx_vcpu_hostcr3,
+    .nhvm_vcpu_p2m_base   = nvmx_vcpu_eptp_base,
     .nhvm_vcpu_asid       = nvmx_vcpu_asid,
     .nhvm_vmcx_guest_intercepts_trap = nvmx_intercepts_exception,
     .nhvm_vcpu_vmexit_trap = nvmx_vmexit_trap,
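
The three hunks above form one pattern: a vendor-neutral hook, nhvm_vcpu_p2m_base, that SVM backs with its nested host-CR3 reader and VMX with an EPTP-base reader, plus a generic wrapper that dispatches through hvm_funcs. A minimal, self-contained sketch of that pattern (illustrative only, not part of the patch; the vcpu fields and function bodies are invented stand-ins):

#include <stdint.h>
#include <stdio.h>

struct vcpu { uint64_t nested_npt_cr3; uint64_t nested_eptp; };

struct hvm_function_table {
    /* Neutral name: the base of the L1-provided nested-p2m structure. */
    uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
};

/* SVM flavour: the base is the nested (host) CR3 taken from the L1 VMCB. */
static uint64_t nsvm_vcpu_hostcr3(struct vcpu *v) { return v->nested_npt_cr3; }

/* VMX flavour: the base is the EPT pointer taken from the L1 VMCS. */
static uint64_t nvmx_vcpu_eptp_base(struct vcpu *v) { return v->nested_eptp; }

static struct hvm_function_table hvm_funcs;

/* Generic wrapper, shaped like the one in xen/arch/x86/hvm/hvm.c above. */
static uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
{
    if ( hvm_funcs.nhvm_vcpu_p2m_base )
        return hvm_funcs.nhvm_vcpu_p2m_base(v);
    return ~0ULL;   /* Xen returns -EOPNOTSUPP here. */
}

int main(void)
{
    struct vcpu v = { .nested_npt_cr3 = 0x12345000, .nested_eptp = 0xabcde000 };

    hvm_funcs.nhvm_vcpu_p2m_base = nsvm_vcpu_hostcr3;    /* SVM wiring */
    printf("SVM p2m base: %#llx\n", (unsigned long long)nhvm_vcpu_p2m_base(&v));

    hvm_funcs.nhvm_vcpu_p2m_base = nvmx_vcpu_eptp_base;  /* VMX wiring */
    printf("VMX p2m base: %#llx\n", (unsigned long long)nhvm_vcpu_p2m_base(&v));
    return 0;
}
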
index 8a2c95a6a2d09014940f499e2c216c7b0e199ad1..f7e00de1bd289e49ddabbadd73f7e821883281dc 100644 (file)
@@ -94,7 +94,7 @@ uint64_t nvmx_vcpu_guestcr3(struct vcpu *v)
     return 0;
 }
 
-uint64_t nvmx_vcpu_hostcr3(struct vcpu *v)
+uint64_t nvmx_vcpu_eptp_base(struct vcpu *v)
 {
     /* TODO */
     ASSERT(0);
index 317875d682b9a12d207c1597ae948b379c2590f4..9a97d366fd9a63cd55d700e74b322c86ca08487d 100644 (file)
  *    1. If #NPF is from L1 guest, then we crash the guest VM (same as old 
  *       code)
  *    2. If #NPF is from L2 guest, then we continue from (3)
- *    3. Get h_cr3 from L1 guest. Map h_cr3 into L0 hypervisor address space.
- *    4. Walk the h_cr3 page table
- *    5.    - if not present, then we inject #NPF back to L1 guest and 
+ *    3. Get np2m base from L1 guest. Map np2m base into L0 hypervisor address
+ *       space.
+ *    4. Walk the np2m's  page table
+ *    5.    - if not present or permission check failure, then we inject #NPF
+ *            back to L1 guest and
  *            re-launch L1 guest (L1 guest will either treat this #NPF as MMIO,
  *            or fix its p2m table for L2 guest)
  *    6.    - if present, then we will get a new translated value L1-GPA
@@ -89,7 +91,7 @@ nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
 
     if (old_flags & _PAGE_PRESENT)
         flush_tlb_mask(p2m->dirty_cpumask);
-    
+
     paging_unlock(d);
 }
 
@@ -110,7 +112,7 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
     /* If this p2m table has been flushed or recycled under our feet, 
      * leave it alone.  We'll pick up the right one as we try to 
      * vmenter the guest. */
-    if ( p2m->cr3 == nhvm_vcpu_hostcr3(v) )
+    if ( p2m->np2m_base == nhvm_vcpu_p2m_base(v) )
     {
         unsigned long gfn, mask;
         mfn_t mfn;
@@ -186,7 +188,7 @@ nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
     uint32_t pfec;
     unsigned long nested_cr3, gfn;
     
-    nested_cr3 = nhvm_vcpu_hostcr3(v);
+    nested_cr3 = nhvm_vcpu_p2m_base(v);
 
     pfec = PFEC_user_mode | PFEC_page_present;
     if (access_w)
@@ -221,7 +223,7 @@ nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     p2m_type_t p2mt_10;
 
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
-    nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
+    nested_p2m = p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
 
     /* walk the L1 P2M table */
     rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21,
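
The comment block and hunks above describe how a nested #NPF is resolved: fetch the np2m base from L1, walk the L1-provided table to turn the faulting L2 GPA into an L1 GPA, and either inject #NPF back into L1 (not present) or go on fixing the shadow p2m. A toy model of just the walk step, with a flat array standing in for the np2m (illustrative only; all names here are invented):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define NP2M_ENTRIES 16
#define NOT_PRESENT  (~0ULL)

/* Toy np2m: indexed by L2 GFN, yields an L1 GFN (or NOT_PRESENT). */
static uint64_t toy_np2m[NP2M_ENTRIES];

/* Rough analogue of nestedhap_walk_L1_p2m(): L2 GPA -> L1 GPA. */
static int toy_walk_L1_p2m(uint64_t l2_gpa, uint64_t *l1_gpa)
{
    uint64_t gfn = l2_gpa >> PAGE_SHIFT;

    if ( gfn >= NP2M_ENTRIES || toy_np2m[gfn] == NOT_PRESENT )
        return -1;   /* step 5: caller injects #NPF back into L1 */

    *l1_gpa = (toy_np2m[gfn] << PAGE_SHIFT) |
              (l2_gpa & ((1ULL << PAGE_SHIFT) - 1));
    return 0;        /* step 6: a translated L1-GPA was obtained */
}

int main(void)
{
    uint64_t l1_gpa;
    unsigned int i;

    for ( i = 0; i < NP2M_ENTRIES; i++ )
        toy_np2m[i] = NOT_PRESENT;
    toy_np2m[3] = 42;   /* L2 GFN 3 maps to L1 GFN 42 */

    if ( toy_walk_L1_p2m(0x3123, &l1_gpa) == 0 )
        printf("L2 GPA 0x3123 -> L1 GPA %#llx\n", (unsigned long long)l1_gpa);
    if ( toy_walk_L1_p2m(0x7000, &l1_gpa) != 0 )
        printf("L2 GPA 0x7000 not present; would inject #NPF into L1\n");
    return 0;
}
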
index 3700e32cc83f1e4be4f7fc32ee0898947356e8a1..3ce3489f209d58172310eda1844c29fc66598007 100644 (file)
@@ -249,7 +249,7 @@ declare_mm_order_constraint(per_page_sharing)
  * A per-domain lock that protects the mapping from nested-CR3 to 
  * nested-p2m.  In particular it covers:
  * - the array of nested-p2m tables, and all LRU activity therein; and
- * - setting the "cr3" field of any p2m table to a non-CR3_EADDR value. 
+ * - setting the "cr3" field of any p2m table to a non-P2M_BASE_EADDR value.
  *   (i.e. assigning a p2m table to be the shadow of that cr3 */
 
 /* PoD lock (per-p2m-table)
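
The comment above states a locking rule for the renamed field: clearing np2m_base back to P2M_BASE_EADDR needs only the per-p2m lock, while binding it to a real base also needs the per-domain nestedp2m lock. A sketch of that rule with pthread mutexes standing in for Xen's mm locks (illustrative only; types and helpers are invented):

#include <stdint.h>
#include <pthread.h>

#define P2M_BASE_EADDR (~0ULL)

struct toy_p2m {
    pthread_mutex_t lock;        /* per-p2m lock */
    uint64_t        np2m_base;
};

/* Per-domain lock guarding the np2m-base -> nested-p2m mapping. */
static pthread_mutex_t nestedp2m_lock = PTHREAD_MUTEX_INITIALIZER;

/* Clearing the binding needs only the per-p2m lock (cf. p2m_flush_table()). */
static void toy_clear_base(struct toy_p2m *p2m)
{
    pthread_mutex_lock(&p2m->lock);
    p2m->np2m_base = P2M_BASE_EADDR;
    pthread_mutex_unlock(&p2m->lock);
}

/* Setting a real base needs the per-domain lock as well. */
static void toy_bind_base(struct toy_p2m *p2m, uint64_t np2m_base)
{
    pthread_mutex_lock(&nestedp2m_lock);
    pthread_mutex_lock(&p2m->lock);
    p2m->np2m_base = np2m_base;
    pthread_mutex_unlock(&p2m->lock);
    pthread_mutex_unlock(&nestedp2m_lock);
}

int main(void)
{
    struct toy_p2m p2m = { .lock = PTHREAD_MUTEX_INITIALIZER,
                           .np2m_base = P2M_BASE_EADDR };

    toy_bind_base(&p2m, 0x123000);   /* shadow an L1-provided base */
    toy_clear_base(&p2m);            /* recycle: back to "unused" */
    return 0;
}
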
index 258f46e21a8a751b30f0c0cd71011f1aa1a88376..41a461b55b33d5caab9d2c43d52b3881d5b42720 100644 (file)
@@ -69,7 +69,7 @@ static void p2m_initialise(struct domain *d, struct p2m_domain *p2m)
     p2m->domain = d;
     p2m->default_access = p2m_access_rwx;
 
-    p2m->cr3 = CR3_EADDR;
+    p2m->np2m_base = P2M_BASE_EADDR;
 
     if ( hap_enabled(d) && cpu_has_vmx )
         ept_p2m_init(p2m);
@@ -1433,7 +1433,7 @@ p2m_flush_table(struct p2m_domain *p2m)
     ASSERT(page_list_empty(&p2m->pod.single));
 
     /* This is no longer a valid nested p2m for any address space */
-    p2m->cr3 = CR3_EADDR;
+    p2m->np2m_base = P2M_BASE_EADDR;
     
     /* Zap the top level of the trie */
     top = mfn_to_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
@@ -1471,7 +1471,7 @@ p2m_flush_nestedp2m(struct domain *d)
 }
 
 struct p2m_domain *
-p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
+p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base)
 {
     /* Use volatile to prevent gcc from caching nv->nv_p2m in a cpu register as
      * this may change within the loop by another (v)cpu.
@@ -1480,8 +1480,8 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
     struct domain *d;
     struct p2m_domain *p2m;
 
-    /* Mask out low bits; this avoids collisions with CR3_EADDR */
-    cr3 &= ~(0xfffull);
+    /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */
+    np2m_base &= ~(0xfffull);
 
     if (nv->nv_flushp2m && nv->nv_p2m) {
         nv->nv_p2m = NULL;
@@ -1493,14 +1493,14 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
     if ( p2m ) 
     {
         p2m_lock(p2m);
-        if ( p2m->cr3 == cr3 || p2m->cr3 == CR3_EADDR )
+        if ( p2m->np2m_base == np2m_base || p2m->np2m_base == P2M_BASE_EADDR )
         {
             nv->nv_flushp2m = 0;
             p2m_getlru_nestedp2m(d, p2m);
             nv->nv_p2m = p2m;
-            if (p2m->cr3 == CR3_EADDR)
+            if ( p2m->np2m_base == P2M_BASE_EADDR )
                 hvm_asid_flush_vcpu(v);
-            p2m->cr3 = cr3;
+            p2m->np2m_base = np2m_base;
             cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
             p2m_unlock(p2m);
             nestedp2m_unlock(d);
@@ -1515,7 +1515,7 @@ p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3)
     p2m_flush_table(p2m);
     p2m_lock(p2m);
     nv->nv_p2m = p2m;
-    p2m->cr3 = cr3;
+    p2m->np2m_base = np2m_base;
     nv->nv_flushp2m = 0;
     hvm_asid_flush_vcpu(v);
     cpumask_set_cpu(v->processor, p2m->dirty_cpumask);
@@ -1531,7 +1531,7 @@ p2m_get_p2m(struct vcpu *v)
     if (!nestedhvm_is_n2(v))
         return p2m_get_hostp2m(v->domain);
 
-    return p2m_get_nestedp2m(v, nhvm_vcpu_hostcr3(v));
+    return p2m_get_nestedp2m(v, nhvm_vcpu_p2m_base(v));
 }
 
 unsigned long paging_gva_to_gfn(struct vcpu *v,
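
p2m_get_nestedp2m() above decides whether a cached nested p2m can be kept: it is reused if it already shadows the requested np2m base, claimed if it is free (np2m_base == P2M_BASE_EADDR), and flushed and rebound otherwise. A reduced sketch of that decision, with locking, LRU handling and ASID flushes omitted (illustrative only; toy_p2m and friends are invented):

#include <stdint.h>
#include <stdio.h>

#define P2M_BASE_EADDR (~0ULL)

struct toy_p2m { uint64_t np2m_base; };

static void toy_flush(struct toy_p2m *p2m)
{
    p2m->np2m_base = P2M_BASE_EADDR;   /* as p2m_flush_table() does */
}

static struct toy_p2m *toy_get_nestedp2m(struct toy_p2m *cached,
                                         uint64_t np2m_base)
{
    /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR. */
    np2m_base &= ~0xfffULL;

    if ( cached->np2m_base == np2m_base ||
         cached->np2m_base == P2M_BASE_EADDR )
    {
        cached->np2m_base = np2m_base;   /* keep it, or claim the free slot */
        return cached;
    }

    toy_flush(cached);                   /* stale shadow: recycle it */
    cached->np2m_base = np2m_base;
    return cached;
}

int main(void)
{
    struct toy_p2m p2m = { .np2m_base = P2M_BASE_EADDR };

    toy_get_nestedp2m(&p2m, 0x111000);   /* claims the free slot */
    toy_get_nestedp2m(&p2m, 0x111fff);   /* same base after masking: reused */
    toy_get_nestedp2m(&p2m, 0x222000);   /* different base: flush and rebind */
    printf("final np2m_base: %#llx\n", (unsigned long long)p2m.np2m_base);
    return 0;
}
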
@@ -1549,15 +1549,15 @@ unsigned long paging_gva_to_gfn(struct vcpu *v,
         struct p2m_domain *p2m;
         const struct paging_mode *mode;
         uint32_t pfec_21 = *pfec;
-        uint64_t ncr3 = nhvm_vcpu_hostcr3(v);
+        uint64_t np2m_base = nhvm_vcpu_p2m_base(v);
 
         /* translate l2 guest va into l2 guest gfn */
-        p2m = p2m_get_nestedp2m(v, ncr3);
+        p2m = p2m_get_nestedp2m(v, np2m_base);
         mode = paging_get_nestedmode(v);
         gfn = mode->gva_to_gfn(v, p2m, va, pfec);
 
         /* translate l2 guest gfn into l1 guest gfn */
-        return hostmode->p2m_ga_to_gfn(v, hostp2m, ncr3,
+        return hostmode->p2m_ga_to_gfn(v, hostp2m, np2m_base,
                                        gfn << PAGE_SHIFT, &pfec_21, NULL);
     }
 
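For an L2 vcpu, paging_gva_to_gfn() above translates in two stages: the L2 virtual address is first turned into an L2 GFN via the nested paging mode, and that GFN (shifted back to an address) is then pushed through the L1 p2m rooted at nhvm_vcpu_p2m_base(v) to yield an L1 GFN. A sketch of the two stages with both walks stubbed out (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Stage 1 stand-in: L2 virtual address -> L2 GFN (L2 guest page tables). */
static uint64_t toy_l2_va_to_gfn(uint64_t va)
{
    return (va >> PAGE_SHIFT) + 0x100;
}

/* Stage 2 stand-in: L2 guest address -> L1 GFN via the np2m at np2m_base. */
static uint64_t toy_l2_ga_to_l1_gfn(uint64_t np2m_base, uint64_t ga)
{
    (void)np2m_base;   /* would select which np2m to walk */
    return (ga >> PAGE_SHIFT) + 0x200;
}

int main(void)
{
    uint64_t np2m_base = 0xabc000;   /* what nhvm_vcpu_p2m_base() would return */
    uint64_t va = 0x7f001234;

    uint64_t l2_gfn = toy_l2_va_to_gfn(va);
    uint64_t l1_gfn = toy_l2_ga_to_l1_gfn(np2m_base, l2_gfn << PAGE_SHIFT);

    printf("L2 VA %#llx -> L2 GFN %#llx -> L1 GFN %#llx\n",
           (unsigned long long)va, (unsigned long long)l2_gfn,
           (unsigned long long)l1_gfn);
    return 0;
}
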
index fdb0f58fd4c92223415e94a8a771d33f39759437..d3535b6f506296b5567a865fb691e28b534ca853 100644 (file)
@@ -170,7 +170,7 @@ struct hvm_function_table {
                                 uint64_t exitcode);
     int (*nhvm_vcpu_vmexit_trap)(struct vcpu *v, struct hvm_trap *trap);
     uint64_t (*nhvm_vcpu_guestcr3)(struct vcpu *v);
-    uint64_t (*nhvm_vcpu_hostcr3)(struct vcpu *v);
+    uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
     uint32_t (*nhvm_vcpu_asid)(struct vcpu *v);
     int (*nhvm_vmcx_guest_intercepts_trap)(struct vcpu *v, 
                                unsigned int trapnr, int errcode);
@@ -475,7 +475,7 @@ uint64_t nhvm_vcpu_guestcr3(struct vcpu *v);
 /* returns l1 guest's cr3 that points to the page table used to
  * translate l2 guest physical address to l1 guest physical address.
  */
-uint64_t nhvm_vcpu_hostcr3(struct vcpu *v);
+uint64_t nhvm_vcpu_p2m_base(struct vcpu *v);
 /* returns the asid number l1 guest wants to use to run the l2 guest */
 uint32_t nhvm_vcpu_asid(struct vcpu *v);
 
index dce2cd82f00da07ecaab6a315c22c82305b99d60..d97011dfe599a64d49ce76cf4bd2d501f2033350 100644 (file)
@@ -99,7 +99,7 @@ int nvmx_vcpu_initialise(struct vcpu *v);
 void nvmx_vcpu_destroy(struct vcpu *v);
 int nvmx_vcpu_reset(struct vcpu *v);
 uint64_t nvmx_vcpu_guestcr3(struct vcpu *v);
-uint64_t nvmx_vcpu_hostcr3(struct vcpu *v);
+uint64_t nvmx_vcpu_eptp_base(struct vcpu *v);
 uint32_t nvmx_vcpu_asid(struct vcpu *v);
 enum hvm_intblk nvmx_intr_blocked(struct vcpu *v);
 int nvmx_intercepts_exception(struct vcpu *v, 
index 2bd2048de8776227663ba66d465073f0ef470eab..4205a137641ed2435264b5906aec8b59bbc9a2bd 100644 (file)
@@ -197,17 +197,17 @@ struct p2m_domain {
 
     struct domain     *domain;   /* back pointer to domain */
 
-    /* Nested p2ms only: nested-CR3 value that this p2m shadows. 
-     * This can be cleared to CR3_EADDR under the per-p2m lock but
+    /* Nested p2ms only: nested p2m base value that this p2m shadows.
+     * This can be cleared to P2M_BASE_EADDR under the per-p2m lock but
      * needs both the per-p2m lock and the per-domain nestedp2m lock
      * to set it to any other value. */
-#define CR3_EADDR     (~0ULL)
-    uint64_t           cr3;
+#define P2M_BASE_EADDR     (~0ULL)
+    uint64_t           np2m_base;
 
     /* Nested p2ms: linked list of n2pms allocated to this domain. 
  * The host p2m holds the head of the list and the np2ms are 
      * threaded on in LRU order. */
-    struct list_head np2m_list; 
+    struct list_head   np2m_list;
 
 
     /* Host p2m: when this flag is set, don't flush all the nested-p2m 
@@ -282,11 +282,11 @@ struct p2m_domain {
 /* get host p2m table */
 #define p2m_get_hostp2m(d)      ((d)->arch.p2m)
 
-/* Get p2m table (re)usable for specified cr3.
+/* Get p2m table (re)usable for specified np2m base.
  * Automatically destroys and re-initializes a p2m if none found.
- * If cr3 == 0 then v->arch.hvm_vcpu.guest_cr[3] is used.
+ * If np2m_base == 0 then v->arch.hvm_vcpu.guest_cr[3] is used.
  */
-struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v, uint64_t cr3);
+struct p2m_domain *p2m_get_nestedp2m(struct vcpu *v, uint64_t np2m_base);
 
 /* If vcpu is in host mode then behaviour matches p2m_get_hostp2m().
  * If vcpu is in guest mode then behaviour matches p2m_get_nestedp2m().
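
The p2m.h hunks above add the bookkeeping this series relies on: each nested p2m records the np2m base it shadows (np2m_base, P2M_BASE_EADDR when unused) and the nested p2ms are threaded on an LRU list (np2m_list) so stale ones can be recycled. A sketch of that bookkeeping with a plain pointer array in place of struct list_head (illustrative only; all names are invented):

#include <stdint.h>
#include <stdio.h>

#define P2M_BASE_EADDR (~0ULL)
#define MAX_NESTEDP2M  3

struct toy_p2m { uint64_t np2m_base; };

static struct toy_p2m np2m[MAX_NESTEDP2M];

/* Host-p2m-side list of nested p2ms, most recently used first. */
static struct toy_p2m *np2m_lru[MAX_NESTEDP2M] = { &np2m[0], &np2m[1], &np2m[2] };

/* Move a nested p2m to the front of the LRU (cf. p2m_getlru_nestedp2m()). */
static void toy_touch(struct toy_p2m *p)
{
    int i;

    for ( i = 0; i < MAX_NESTEDP2M; i++ )
        if ( np2m_lru[i] == p )
        {
            for ( ; i > 0; i-- )
                np2m_lru[i] = np2m_lru[i - 1];
            np2m_lru[0] = p;
            return;
        }
}

int main(void)
{
    int i;

    for ( i = 0; i < MAX_NESTEDP2M; i++ )
        np2m[i].np2m_base = P2M_BASE_EADDR;   /* all slots start unused */

    np2m[1].np2m_base = 0x111000; toy_touch(&np2m[1]);
    np2m[2].np2m_base = 0x222000; toy_touch(&np2m[2]);

    /* The tail of the LRU is the first candidate for recycling. */
    printf("recycle candidate np2m_base: %#llx\n",
           (unsigned long long)np2m_lru[MAX_NESTEDP2M - 1]->np2m_base);
    return 0;
}
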