As XTF uses a flat segmentation model, virtual address == linear address.
However, the terminology should still be used correctly.
As a rule of thumb, a C pointer is a virtual address, while an
`unsigned long va` is actually a linear address. All pagetable handling
is done in terms of linear addresses, as is the (misnamed)
update_va_mapping() hypercall.
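For illustration only (not part of the diff below), a minimal sketch of the
convention in an XTF test context, using only names that appear in this
patch (_u(), _p(), l1_table_offset(), pte_from_virt(), and the updated
hypercall_update_va_mapping() signature):

    void *ptr = user_stack;              /* C pointer        => virtual address */
    unsigned long linear = _u(ptr);      /* unsigned long    => linear address  */

    /* Pagetable walks are indexed by linear address... */
    unsigned int slot = l1_table_offset(linear);

    /* ...and so is the (misnamed) update_va_mapping() hypercall. */
    intpte_t nl1e = pte_from_virt(ptr, PF_SYM(AD, U, RW, P));
    long rc = hypercall_update_va_mapping(linear, nl1e, UVMF_INVLPG);

    /* Convert back to a pointer when a virtual address is wanted. */
    void *same = _p(linear);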
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
*
* XTF memory layout.
*
- * Wherever possible, identity layout for simplicity.
+ * Wherever possible, identity layout for simplicity. Flat segments are also
+ * used, so virtual address == linear address.
*
* PV guests: VIRT_OFFSET is 0 which causes all linked virtual addresses to be
* contiguous in the pagetables created by the domain builder. Therefore,
typedef uint64_t pae_intpte_t;
#define PAE_PRIpte "016"PRIx64
-static inline unsigned int pae_l1_table_offset(unsigned long va)
+static inline unsigned int pae_l1_table_offset(unsigned long linear)
{
- return (va >> PAE_L1_PT_SHIFT) & (PAE_L1_PT_ENTRIES - 1);
+ return (linear >> PAE_L1_PT_SHIFT) & (PAE_L1_PT_ENTRIES - 1);
}
-static inline unsigned int pae_l2_table_offset(unsigned long va)
+static inline unsigned int pae_l2_table_offset(unsigned long linear)
{
- return (va >> PAE_L2_PT_SHIFT) & (PAE_L2_PT_ENTRIES - 1);
+ return (linear >> PAE_L2_PT_SHIFT) & (PAE_L2_PT_ENTRIES - 1);
}
-static inline unsigned int pae_l3_table_offset(unsigned long va)
+static inline unsigned int pae_l3_table_offset(unsigned long linear)
{
- return (va >> PAE_L3_PT_SHIFT) & (PAE_L3_PT_ENTRIES - 1);
+ return (linear >> PAE_L3_PT_SHIFT) & (PAE_L3_PT_ENTRIES - 1);
}
#ifdef __x86_64__
-static inline unsigned int pae_l4_table_offset(unsigned long va)
+static inline unsigned int pae_l4_table_offset(unsigned long linear)
{
- return (va >> PAE_L4_PT_SHIFT) & (PAE_L4_PT_ENTRIES - 1);
+ return (linear >> PAE_L4_PT_SHIFT) & (PAE_L4_PT_ENTRIES - 1);
}
#endif /* __x86_64__ */
typedef uint32_t pse_intpte_t;
#define PSE_PRIpte "08"PRIx32
-static inline unsigned int pse_l1_table_offset(unsigned long va)
+static inline unsigned int pse_l1_table_offset(unsigned long linear)
{
- return (va >> PSE_L1_PT_SHIFT) & (PSE_L1_PT_ENTRIES - 1);
+ return (linear >> PSE_L1_PT_SHIFT) & (PSE_L1_PT_ENTRIES - 1);
}
-static inline unsigned int pse_l2_table_offset(unsigned long va)
+static inline unsigned int pse_l2_table_offset(unsigned long linear)
{
- return (va >> PSE_L2_PT_SHIFT) & (PSE_L2_PT_ENTRIES - 1);
+ return (linear >> PSE_L2_PT_SHIFT) & (PSE_L2_PT_ENTRIES - 1);
}
static inline uint32_t fold_pse36(uint64_t val)
typedef pse_intpte_t intpte_t;
#define PRIpte PSE_PRIpte
-static inline unsigned int l1_table_offset(unsigned long va)
+static inline unsigned int l1_table_offset(unsigned long linear)
{
- return pse_l1_table_offset(va);
+ return pse_l1_table_offset(linear);
}
-static inline unsigned int l2_table_offset(unsigned long va)
+static inline unsigned int l2_table_offset(unsigned long linear)
{
- return pse_l2_table_offset(va);
+ return pse_l2_table_offset(linear);
}
#else /* CONFIG_PAGING_LEVELS == 2 */ /* PAE Paging */
typedef pae_intpte_t intpte_t;
#define PRIpte PAE_PRIpte
-static inline unsigned int l1_table_offset(unsigned long va)
+static inline unsigned int l1_table_offset(unsigned long linear)
{
- return pae_l1_table_offset(va);
+ return pae_l1_table_offset(linear);
}
-static inline unsigned int l2_table_offset(unsigned long va)
+static inline unsigned int l2_table_offset(unsigned long linear)
{
- return pae_l2_table_offset(va);
+ return pae_l2_table_offset(linear);
}
#endif /* !CONFIG_PAGING_LEVELS == 2 */
#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
-static inline unsigned int l3_table_offset(unsigned long va)
+static inline unsigned int l3_table_offset(unsigned long linear)
{
- return pae_l3_table_offset(va);
+ return pae_l3_table_offset(linear);
}
#endif /* CONFIG_PAGING_LEVELS >= 3 */
#if CONFIG_PAGING_LEVELS >= 4 /* PAE Paging */
-static inline unsigned int l4_table_offset(unsigned long va)
+static inline unsigned int l4_table_offset(unsigned long linear)
{
- return pae_l4_table_offset(va);
+ return pae_l4_table_offset(linear);
}
#endif /* CONFIG_PAGING_LEVELS >= 4 */
bool leaked = false;
/* Remap the page at 0 with _PAGE_USER. */
- rc = hypercall_update_va_mapping(NULL, nl1e, UVMF_INVLPG);
+ rc = hypercall_update_va_mapping(0, nl1e, UVMF_INVLPG);
if ( rc )
panic("Failed to remap page at NULL with _PAGE_USER: %d\n", rc);
* context. Proceed with remapping all mappings as _PAGE_USER.
*/
uint64_t *l3 = _p(start_info->pt_base);
- unsigned long va = 0;
+ unsigned long linear = 0;
- while ( va < __HYPERVISOR_VIRT_START_PAE )
+ while ( linear < __HYPERVISOR_VIRT_START_PAE )
{
- unsigned int i3 = l3_table_offset(va);
+ unsigned int i3 = l3_table_offset(linear);
if ( !(l3[i3] & _PAGE_PRESENT) )
{
- va += 1UL << L3_PT_SHIFT;
+ linear += 1UL << L3_PT_SHIFT;
continue;
}
uint64_t *l2 = maddr_to_virt(pte_to_paddr(l3[i3]));
- unsigned int i2 = l2_table_offset(va);
+ unsigned int i2 = l2_table_offset(linear);
if ( !(l2[i2] & _PAGE_PRESENT) )
{
- va += 1UL << L2_PT_SHIFT;
+ linear += 1UL << L2_PT_SHIFT;
continue;
}
uint64_t *l1 = maddr_to_virt(pte_to_paddr(l2[i2]));
- unsigned int i1 = l1_table_offset(va);
+ unsigned int i1 = l1_table_offset(linear);
if ( !(l1[i1] & _PAGE_PRESENT) )
{
- va += 1UL << L1_PT_SHIFT;
+ linear += 1UL << L1_PT_SHIFT;
continue;
}
if ( !(l1[i1] & _PAGE_USER) )
{
- rc = hypercall_update_va_mapping(_p(va), l1[i1] | _PAGE_USER,
- UVMF_INVLPG);
+ rc = hypercall_update_va_mapping(
+ linear, l1[i1] | _PAGE_USER, UVMF_INVLPG);
if ( rc )
panic("update_va_mapping(%p, 0x%016"PRIx64") failed: %d\n",
- _p(va), l1[i1] | _PAGE_USER, rc);
+ _p(linear), l1[i1] | _PAGE_USER, rc);
}
- va += 1UL << L1_PT_SHIFT;
+ linear += 1UL << L1_PT_SHIFT;
}
}
else
*/
intpte_t nl1e = pte_from_virt(user_stack, PF_SYM(AD, U, RW, P));
- if ( hypercall_update_va_mapping(user_stack, nl1e, UVMF_INVLPG) )
+ if ( hypercall_update_va_mapping(_u(user_stack), nl1e, UVMF_INVLPG) )
panic("Unable to remap user_stack with _PAGE_USER\n");
extern const char __start_user_text[], __end_user_text[];
- unsigned long va = _u(__start_user_text);
+ unsigned long linear = _u(__start_user_text);
- while ( va < _u(__end_user_text) )
+ while ( linear < _u(__end_user_text) )
{
- nl1e = pte_from_virt(_p(va), PF_SYM(AD, U, RW, P));
+ nl1e = pte_from_virt(_p(linear), PF_SYM(AD, U, RW, P));
- if ( hypercall_update_va_mapping(_p(va), nl1e, UVMF_INVLPG) )
+ if ( hypercall_update_va_mapping(linear, nl1e, UVMF_INVLPG) )
panic("Unable to remap user_text with _PAGE_USER\n");
- va += PAGE_SIZE;
+ linear += PAGE_SIZE;
}
}
#endif
/* Unmap page at 0 to catch errors with NULL pointers. */
- rc = hypercall_update_va_mapping(NULL, 0, UVMF_INVLPG);
+ rc = hypercall_update_va_mapping(0, 0, UVMF_INVLPG);
if ( rc )
panic("Failed to unmap page at NULL: %d\n", rc);
}
}
else /* PV */
rc = hypercall_update_va_mapping(
- &shared_info, start_info->shared_info | PF_SYM(RW, P),
+ _u(&shared_info), start_info->shared_info | PF_SYM(RW, P),
UVMF_INVLPG);
if ( rc )
return HYPERCALL2(long, __HYPERVISOR_xen_version, cmd, arg);
}
-static inline long hypercall_update_va_mapping(void *va, uint64_t npte,
- enum XEN_UVMF flags)
+/*
+ * This hypercall is misnamed in the Xen ABI, and actually operates on a
+ * linear address, not a virtual address.
+ */
+static inline long hypercall_update_va_mapping(
+ unsigned long linear, uint64_t npte, enum XEN_UVMF flags)
{
#ifdef __x86_64__
- return HYPERCALL3(long, __HYPERVISOR_update_va_mapping, va, npte, flags);
+ return HYPERCALL3(long, __HYPERVISOR_update_va_mapping, linear, npte, flags);
#else
return HYPERCALL4(long, __HYPERVISOR_update_va_mapping,
- va, npte, npte >> 32, flags);
+ linear, npte, npte >> 32, flags);
#endif
}
intpte_t *l4 = _p(KB(4));
/* Map the L4 at 4K virtual. */
- if ( hypercall_update_va_mapping(l4, nl1e, UVMF_INVLPG) )
+ if ( hypercall_update_va_mapping(_u(l4), nl1e, UVMF_INVLPG) )
return xtf_error("Error: Update va failed\n");
unsigned long map_slot = 0, test_slot = 0;
unsigned long mem_adjust = (8 + ((-idx - 1) >> 3)) & ~7;
/*
- * va is the memory target which the `bt` instruction will actually hit.
+ * linear is the memory target which the `bt` instruction will actually hit.
* A vulnerable Xen mis-calculates the memory adjustment, meaning that it
* will attempt to read from some other address.
*/
- unsigned long va = _u(mem) - mem_adjust;
+ unsigned long linear = _u(mem) - mem_adjust;
/*
* Make all of the virtual address space readable, so Xen's data fetch
for ( i = 1; i < L4_PT_ENTRIES; ++i )
pae_l4_identmap[i] = pae_l4_identmap[0] & ~PF_SYM(RW);
- /* Map va to pointing specifically to gfn 0. */
- nl1t[l1_table_offset(va)] = pte_from_gfn(0, PF_SYM(U, P));
- nl2t[l2_table_offset(va)] = pte_from_virt(nl1t, PF_SYM(U, P));
- nl3t[l3_table_offset(va)] = pte_from_virt(nl2t, PF_SYM(U, P));
- pae_l4_identmap[l4_table_offset(va)] = pte_from_virt(nl3t, PF_SYM(U, P));
+ /* Map linear to point specifically at gfn 0. */
+ nl1t[l1_table_offset(linear)] = pte_from_gfn(0, PF_SYM(U, P));
+ nl2t[l2_table_offset(linear)] = pte_from_virt(nl1t, PF_SYM(U, P));
+ nl3t[l3_table_offset(linear)] = pte_from_virt(nl2t, PF_SYM(U, P));
+ pae_l4_identmap[l4_table_offset(linear)] = pte_from_virt(nl3t, PF_SYM(U, P));
/* Remove gfn 0 from the p2m, to cause `bt` to trap for emulation. */
static unsigned long extent = 0;