#include <asm/x86_64/page.h>
/* Read a pte atomically from memory. */
-#define l1e_read_atomic(l1ep) \
- l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep))))
-#define l2e_read_atomic(l2ep) \
- l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep))))
-#define l3e_read_atomic(l3ep) \
- l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep))))
-#define l4e_read_atomic(l4ep) \
- l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))
+#define l1e_read(l1ep) \
+ l1e_from_intpte(read_atomic(&l1e_get_intpte(*(l1ep))))
+#define l2e_read(l2ep) \
+ l2e_from_intpte(read_atomic(&l2e_get_intpte(*(l2ep))))
+#define l3e_read(l3ep) \
+ l3e_from_intpte(read_atomic(&l3e_get_intpte(*(l3ep))))
+#define l4e_read(l4ep) \
+ l4e_from_intpte(read_atomic(&l4e_get_intpte(*(l4ep))))
/* Write a pte atomically to memory. */
#define l1e_write(l1ep, l1e) \
#endif /* !__ASSEMBLY__ */
-#define pte_read_atomic(ptep) read_atomic(ptep)
-
/* Given a virtual address, get an entry offset into a linear page table. */
#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)
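
For context, the renamed l1e_read() and friends still funnel through read_atomic(), i.e. each page-table entry is fetched with a single untorn load rather than a plain dereference the compiler could split or repeat. A minimal, self-contained sketch of that idea (pte_read_once(), l1e_read_sketch() and the struct layout here are illustrative stand-ins, not Xen's actual read_atomic()/intpte_t definitions):

    #include <stdint.h>

    typedef struct { uint64_t l1; } l1_pgentry_t;

    /* One volatile 64-bit load: the compiler must not tear or re-read it.
     * (Illustrative stand-in for Xen's read_atomic().) */
    static inline uint64_t pte_read_once(const uint64_t *p)
    {
        return *(const volatile uint64_t *)p;
    }

    /* Mirrors the shape of the new l1e_read(): read the raw value once,
     * then wrap it back up as an entry. */
    static inline l1_pgentry_t l1e_read_sketch(const l1_pgentry_t *l1ep)
    {
        return (l1_pgentry_t){ .l1 = pte_read_once(&l1ep->l1) };
    }
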
struct vcpu *pt_vcpu, struct domain *pg_dom)
{
bool preserve_ad = (cmd == MMU_PT_UPDATE_PRESERVE_AD);
- l1_pgentry_t ol1e = l1e_read_atomic(pl1e);
+ l1_pgentry_t ol1e = l1e_read(pl1e);
struct domain *pt_dom = pt_vcpu->domain;
int rc = 0;
return -EPERM;
}
- ol2e = l2e_read_atomic(pl2e);
+ ol2e = l2e_read(pl2e);
if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
{
if ( pgentry_ptr_to_slot(pl3e) >= 3 && is_pv_32bit_domain(d) )
return -EINVAL;
- ol3e = l3e_read_atomic(pl3e);
+ ol3e = l3e_read(pl3e);
if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
{
return -EINVAL;
}
- ol4e = l4e_read_atomic(pl4e);
+ ol4e = l4e_read(pl4e);
if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
{
while ( v < e )
{
l2_pgentry_t *pl2e = &l2_xenmap[l2_table_offset(v)];
- l2_pgentry_t l2e = l2e_read_atomic(pl2e);
+ l2_pgentry_t l2e = l2e_read(pl2e);
unsigned int l2f = l2e_get_flags(l2e);
ASSERT(l2f & _PAGE_PRESENT);
while ( v < e )
{
l1_pgentry_t *pl1e = &pl1t[l1_table_offset(v)];
- l1_pgentry_t l1e = l1e_read_atomic(pl1e);
+ l1_pgentry_t l1e = l1e_read(pl1e);
unsigned int l1f = l1e_get_flags(l1e);
ASSERT(l1f & _PAGE_PRESENT);
mfn = cr3 >> PAGE_SHIFT;
l4t = map_domain_page(_mfn(mfn));
- l4e = l4e_read_atomic(&l4t[l4_table_offset(addr)]);
+ l4e = l4e_read(&l4t[l4_table_offset(addr)]);
mfn = l4e_get_pfn(l4e);
unmap_domain_page(l4t);
if ( ((l4e_get_flags(l4e) & required_flags) != required_flags) ||
page_user &= l4e_get_flags(l4e);
l3t = map_domain_page(_mfn(mfn));
- l3e = l3e_read_atomic(&l3t[l3_table_offset(addr)]);
+ l3e = l3e_read(&l3t[l3_table_offset(addr)]);
mfn = l3e_get_pfn(l3e);
unmap_domain_page(l3t);
if ( ((l3e_get_flags(l3e) & required_flags) != required_flags) ||
goto leaf;
l2t = map_domain_page(_mfn(mfn));
- l2e = l2e_read_atomic(&l2t[l2_table_offset(addr)]);
+ l2e = l2e_read(&l2t[l2_table_offset(addr)]);
mfn = l2e_get_pfn(l2e);
unmap_domain_page(l2t);
if ( ((l2e_get_flags(l2e) & required_flags) != required_flags) ||
goto leaf;
l1t = map_domain_page(_mfn(mfn));
- l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
+ l1e = l1e_read(&l1t[l1_table_offset(addr)]);
mfn = l1e_get_pfn(l1e);
unmap_domain_page(l1t);
if ( ((l1e_get_flags(l1e) & required_flags) != required_flags) ||
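
The four hunks above all apply the same pattern: map the table page, read one entry with the (now shorter-named) atomic accessor, check the required flags, pull out the next-level frame number, and unmap. A self-contained model of that loop, with the per-level detail flattened out (walk_present(), pte_read_once() and the table/index layout are illustrative, not the patched Xen code):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* One untorn load per entry, as the l*e_read() accessors guarantee. */
    static inline uint64_t pte_read_once(const uint64_t *p)
    {
        return *(const volatile uint64_t *)p;
    }

    /* tables[0..3] point at the L4..L1 tables; idx[0..3] are the entry
     * offsets already derived from the virtual address. */
    static bool walk_present(const uint64_t *tables[4],
                             const unsigned int idx[4],
                             uint64_t required_flags, uint64_t *mfn_out)
    {
        uint64_t mfn = 0;

        for ( unsigned int lvl = 0; lvl < 4; lvl++ )
        {
            uint64_t e = pte_read_once(&tables[lvl][idx[lvl]]);

            if ( (e & required_flags) != required_flags )
                return false;              /* missing flags: stop the walk */
            mfn = e >> PAGE_SHIFT;         /* frame of the next level / leaf */
        }

        *mfn_out = mfn;
        return true;
    }
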