if ( p2m_is_paging(p2mt) )
{
ASSERT(p2m_is_hostp2m(p2m));
- pfec[0] = PFEC_page_paged;
+ *pfec = PFEC_page_paged;
if ( top_page )
put_page(top_page);
p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
}
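/* A shared top-level frame cannot be walked: fail the translation. */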
if ( p2m_is_shared(p2mt) )
{
- pfec[0] = PFEC_page_shared;
+ *pfec = PFEC_page_shared;
if ( top_page )
put_page(top_page);
return gfn_x(INVALID_GFN);
}
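/* Nothing usable mapped at CR3: report a not-present fault. */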
if ( !top_page )
{
- pfec[0] &= ~PFEC_page_present;
+ *pfec &= ~PFEC_page_present;
goto out_tweak_pfec;
}
top_mfn = _mfn(page_to_mfn(top_page));
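/* PAE: CR3 is only 32-byte aligned, so keep its within-page offset to locate the PDPT. */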
#if GUEST_PAGING_LEVELS == 3
top_map += (cr3 & ~(PAGE_MASK | 31));
#endif
- missing = guest_walk_tables(v, p2m, ga, &gw, pfec[0], top_mfn, top_map);
+ missing = guest_walk_tables(v, p2m, ga, &gw, *pfec, top_mfn, top_map);
unmap_domain_page(top_map);
put_page(top_page);
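/* Even a successful walk needs the resulting frame's p2m type (re)checked. */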
if ( p2m_is_paging(p2mt) )
{
ASSERT(p2m_is_hostp2m(p2m));
- pfec[0] = PFEC_page_paged;
+ *pfec = PFEC_page_paged;
p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
return gfn_x(INVALID_GFN);
}
if ( p2m_is_shared(p2mt) )
{
- pfec[0] = PFEC_page_shared;
+ *pfec = PFEC_page_shared;
return gfn_x(INVALID_GFN);
}
}
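/* Fold the walker's "missing" flags into the fault code handed back. */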
if ( missing & _PAGE_PRESENT )
- pfec[0] &= ~PFEC_page_present;
+ *pfec &= ~PFEC_page_present;
if ( missing & _PAGE_INVALID_BITS )
- pfec[0] |= PFEC_reserved_bit;
+ *pfec |= PFEC_reserved_bit;
if ( missing & _PAGE_PKEY_BITS )
- pfec[0] |= PFEC_prot_key;
+ *pfec |= PFEC_prot_key;
if ( missing & _PAGE_PAGED )
- pfec[0] = PFEC_page_paged;
+ *pfec = PFEC_page_paged;
if ( missing & _PAGE_SHARED )
- pfec[0] = PFEC_page_shared;
+ *pfec = PFEC_page_shared;
out_tweak_pfec:
/*
* SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
* The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
*/
if ( !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
- pfec[0] &= ~PFEC_insn_fetch;
+ *pfec &= ~PFEC_insn_fetch;
return gfn_x(INVALID_GFN);
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/* Check the vTLB cache first */
- unsigned long vtlb_gfn = vtlb_lookup(v, va, pfec[0]);
+ unsigned long vtlb_gfn = vtlb_lookup(v, va, *pfec);
if ( VALID_GFN(vtlb_gfn) )
return vtlb_gfn;
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
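/* Walk the guest tables; "missing" collects the reasons a walk failed. */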
- if ( (missing = sh_walk_guest_tables(v, va, &gw, pfec[0])) != 0 )
+ if ( (missing = sh_walk_guest_tables(v, va, &gw, *pfec)) != 0 )
{
if ( (missing & _PAGE_PRESENT) )
- pfec[0] &= ~PFEC_page_present;
+ *pfec &= ~PFEC_page_present;
if ( missing & _PAGE_INVALID_BITS )
- pfec[0] |= PFEC_reserved_bit;
+ *pfec |= PFEC_reserved_bit;
/*
* SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
* The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
*/
if ( is_hvm_vcpu(v) && !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
- pfec[0] &= ~PFEC_insn_fetch;
+ *pfec &= ~PFEC_insn_fetch;
return gfn_x(INVALID_GFN);
}
gfn = guest_walk_to_gfn(&gw);
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/* Remember this successful VA->GFN translation for later. */
- vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), pfec[0]);
+ vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), *pfec);
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
return gfn_x(gfn);
/* Handle invlpg requests on vcpus. */
void paging_invlpg(struct vcpu *v, unsigned long va);
-/* Translate a guest virtual address to the frame number that the
+/*
+ * Translate a guest virtual address to the frame number that the
* *guest* pagetables would map it to. Returns INVALID_GFN if the guest
* tables don't map this address for this kind of access.
- * pfec[0] is used to determine which kind of access this is when
+ * *pfec is used to determine which kind of access this is when
* walking the tables. The caller should set the PFEC_page_present bit
- * in pfec[0]; in the failure case, that bit will be cleared if appropriate.
+ * in *pfec; in the failure case, that bit will be cleared if appropriate.
*
* SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
* The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
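*
* A minimal usage sketch (not part of this patch; assumes an HVM caller
* that wants to inject the resulting fault itself):
*
*     uint32_t pfec = PFEC_page_present;
*     unsigned long gfn = paging_gva_to_gfn(v, va, &pfec);
*
*     if ( gfn == gfn_x(INVALID_GFN) )
*         hvm_inject_page_fault(pfec, va); /* pfec now describes the failure */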