uint32_t rwx_rights = (access_x << 2) | (access_w << 1) | access_r;
struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
+ vmx_vmcs_enter(v);
+
__vmread(EXIT_QUALIFICATION, &exit_qual);
rc = nept_translate_l2ga(v, L2_gpa, page_order, rwx_rights, &gfn, p2m_acc,
&exit_qual, &exit_reason);
break;
}
+ vmx_vmcs_exit(v);
+
return rc;
}
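The vmx_vmcs_enter()/vmx_vmcs_exit() pair added above is what keeps the __vmread() of EXIT_QUALIFICATION safe now that this walk can be reached from generic paging code: VMREAD only operates on the VMCS currently loaded on the physical CPU, so the target vcpu's VMCS has to be made current around the access. A minimal sketch of the pattern, assuming Xen's vmx_vmcs_enter()/vmx_vmcs_exit()/__vmread() interfaces; read_exit_qualification() is an illustrative name, not a function in the tree:

/* Assumes Xen's asm/hvm/vmx/{vmx,vmcs}.h definitions. */
static unsigned long read_exit_qualification(struct vcpu *v)
{
    unsigned long exit_qual;

    vmx_vmcs_enter(v);        /* make v's VMCS the one this pCPU operates on */
    __vmread(EXIT_QUALIFICATION, &exit_qual);
    vmx_vmcs_exit(v);         /* restore the previously loaded VMCS, if any */

    return exit_qual;
}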
* walk is successful, the translated value is returned in
* L1_gpa. The result value tells what to do next.
*/
-static int
+int
nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
unsigned int *page_order, uint8_t *p2m_acc,
bool_t access_r, bool_t access_w, bool_t access_x)
&& paging_mode_hap(v->domain)
&& nestedhvm_is_n2(v) )
{
- unsigned long gfn;
+ unsigned long l2_gfn, l1_gfn;
struct p2m_domain *p2m;
const struct paging_mode *mode;
- uint32_t pfec_21 = *pfec;
uint64_t np2m_base = nhvm_vcpu_p2m_base(v);
+ uint8_t l1_p2ma;
+ unsigned int l1_page_order;
+ int rv;
/* translate l2 guest va into l2 guest gfn */
p2m = p2m_get_nestedp2m(v, np2m_base);
mode = paging_get_nestedmode(v);
- gfn = mode->gva_to_gfn(v, p2m, va, pfec);
+ l2_gfn = mode->gva_to_gfn(v, p2m, va, pfec);
+
+ if ( l2_gfn == INVALID_GFN )
+ return INVALID_GFN;
/* translate l2 guest gfn into l1 guest gfn */
- return hostmode->p2m_ga_to_gfn(v, hostp2m, np2m_base,
- gfn << PAGE_SHIFT, &pfec_21, NULL);
+ rv = nestedhap_walk_L1_p2m(v, l2_gfn, &l1_gfn, &l1_page_order, &l1_p2ma,
+ 1,
+ !!(*pfec & PFEC_write_access),
+ !!(*pfec & PFEC_insn_fetch));
+
+ if ( rv != NESTEDHVM_PAGEFAULT_DONE )
+ return INVALID_GFN;
+
+ /*
+ * Sanity check that l1_gfn can be used properly as a 4K mapping, even
+ * if it is mapped by a nested superpage.
+ */
+ ASSERT((l2_gfn & ((1ul << l1_page_order) - 1)) ==
+ (l1_gfn & ((1ul << l1_page_order) - 1)));
+
+ return l1_gfn;
}
return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
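The ASSERT above captures the invariant that lets l1_gfn be handed back as a plain 4K translation even when the L1 p2m maps the range with a superpage: the offset of the 4K frame within the superpage must be identical in the L2 and L1 frame numbers. A standalone sketch of that check; offsets_agree() is an illustrative helper, not part of the patch:

/* Assumes Xen's bool_t; for a 2M superpage, page_order is 9. */
static bool_t offsets_agree(unsigned long l2_gfn, unsigned long l1_gfn,
                            unsigned int page_order)
{
    unsigned long mask = (1ul << page_order) - 1;

    /* Low bits select the 4K frame inside the superpage; they must match. */
    return (l2_gfn & mask) == (l1_gfn & mask);
}

If the low bits disagreed, returning l1_gfn as the translation of l2_gfn would point the caller at the wrong 4K frame inside the superpage.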
int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
bool_t access_r, bool_t access_w, bool_t access_x);
+int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
+ unsigned int *page_order, uint8_t *p2m_acc,
+ bool_t access_r, bool_t access_w, bool_t access_x);
+
/* IO permission map */
unsigned long *nestedhvm_vcpu_iomap_get(bool_t ioport_80, bool_t ioport_ed);
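With the prototype exported here, callers outside nested_hap.c can perform the L1 p2m walk directly, as the paging hunk above does. A minimal usage sketch, relying only on the NESTEDHVM_PAGEFAULT_DONE return value already used in this patch; l2_gpa_to_l1_gpa() is a hypothetical wrapper, not something this patch adds:

static bool_t l2_gpa_to_l1_gpa(struct vcpu *v, paddr_t l2_gpa, paddr_t *l1_gpa)
{
    unsigned int page_order;
    uint8_t p2ma;

    /* Walk the L1 p2m for a read access; only a completed walk is useful. */
    return nestedhap_walk_L1_p2m(v, l2_gpa, l1_gpa, &page_order, &p2ma,
                                 1 /* access_r */, 0 /* access_w */,
                                 0 /* access_x */) == NESTEDHVM_PAGEFAULT_DONE;
}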