This allows us to set aside some address space for executable mapping.
This fixed map range starts from XEN_VIRT_END so that it is within reach
of the .text section.
Shift the percpu stub range and shrink the livepatch range accordingly.
Signed-off-by: Wei Liu <liuwe@microsoft.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
.size l2_directmap, . - l2_directmap
/*
- * L2 mapping the Xen text/data/bss region, constructed dynamically. Uses 1x
- * 4k page.
+ * L2 mapping the Xen text/data/bss region, constructed dynamically.
+ * The executable fixmap is hooked up statically.
+ * Uses 1x 4k page.
*/
GLOBAL(l2_xenmap)
- .fill L2_PAGETABLE_ENTRIES, 8, 0
+ idx = 0
+ .rept L2_PAGETABLE_ENTRIES
+ .if idx == l2_table_offset(FIXADDR_X_TOP - 1)
+ .quad sym_offs(l1_fixmap_x) + __PAGE_HYPERVISOR
+ .else
+ .quad 0
+ .endif
+ idx = idx + 1
+ .endr
.size l2_xenmap, . - l2_xenmap
/* L2 mapping the fixmap. Uses 1x 4k page. */
#include <xen/livepatch.h>
#include <xen/sched.h>
+#include <asm/fixmap.h>
#include <asm/nmi.h>
#include <asm/livepatch.h>
void *start, *end;
start = (void *)xen_virt_end;
- end = (void *)(XEN_VIRT_END - NR_CPUS * PAGE_SIZE);
+ end = (void *)(XEN_VIRT_END - FIXADDR_X_SIZE - NR_CPUS * PAGE_SIZE);
BUG_ON(end <= start);
/* Mapping of the fixmap space needed early. */
l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
l1_fixmap[L1_PAGETABLE_ENTRIES];
+l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
+ l1_fixmap_x[L1_PAGETABLE_ENTRIES];
paddr_t __read_mostly mem_hotplug;
}
}
#endif
+
+    /* Generate a symbol to be used in the linker script */
+ ASM_CONSTANT(FIXADDR_X_SIZE, FIXADDR_X_SIZE);
}
int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
void __set_fixmap(
enum fixed_addresses idx, unsigned long mfn, unsigned long flags)
{
- BUG_ON(idx >= __end_of_fixed_addresses);
+ BUG_ON(idx >= __end_of_fixed_addresses || idx <= FIX_RESERVED);
map_pages_to_xen(__fix_to_virt(idx), _mfn(mfn), 1, flags);
}
+void __set_fixmap_x(
+ enum fixed_addresses_x idx, unsigned long mfn, unsigned long flags)
+{
+ BUG_ON(idx >= __end_of_fixed_addresses_x || idx <= FIX_X_RESERVED);
+ map_pages_to_xen(__fix_x_to_virt(idx), _mfn(mfn), 1, flags);
+}
+
void *__init arch_vmap_virt_end(void)
{
return fix_to_virt(__end_of_fixed_addresses);
unmap_domain_page(memset(__map_domain_page(pg), 0xcc, PAGE_SIZE));
}
- stub_va = XEN_VIRT_END - (cpu + 1) * PAGE_SIZE;
+ stub_va = XEN_VIRT_END - FIXADDR_X_SIZE - (cpu + 1) * PAGE_SIZE;
if ( map_pages_to_xen(stub_va, page_to_mfn(pg), 1,
PAGE_HYPERVISOR_RX | MAP_SMALL_PAGES) )
{
}
ASSERT(__2M_rwdata_end <= XEN_VIRT_END - XEN_VIRT_START + __XEN_VIRT_START -
+ FIXADDR_X_SIZE -
NR_CPUS * PAGE_SIZE,
"Xen image overlaps stubs area")
.size label, . - label; \
.type label, @object
+#define ASM_CONSTANT(name, value) \
+ asm ( ".equ " #name ", %P0; .global " #name \
+ :: "i" ((value)) );
#endif /* __X86_ASM_DEFNS_H__ */
/* Slot 261: high read-only compat machine-to-phys conversion table (1GB). */
#define HIRO_COMPAT_MPT_VIRT_START RDWR_COMPAT_MPT_VIRT_END
#define HIRO_COMPAT_MPT_VIRT_END (HIRO_COMPAT_MPT_VIRT_START + GB(1))
-/* Slot 261: xen text, static data and bss (1GB). */
+/*
+ * Slot 261: xen text, static data, bss, per-cpu stubs and
+ * executable fixmap (1GB).
+ */
#define XEN_VIRT_START (HIRO_COMPAT_MPT_VIRT_END)
#define XEN_VIRT_END (XEN_VIRT_START + GB(1))
#include <asm/page.h>
#define FIXADDR_TOP (VMAP_VIRT_END - PAGE_SIZE)
+#define FIXADDR_X_TOP (XEN_VIRT_END - PAGE_SIZE)
#ifndef __ASSEMBLY__
return __virt_to_fix(vaddr);
}
+enum fixed_addresses_x {
+ /* Index 0 is reserved since fix_x_to_virt(0) == FIXADDR_X_TOP. */
+ FIX_X_RESERVED,
+#ifdef CONFIG_HYPERV_GUEST
+ FIX_X_HYPERV_HCALL,
+#endif
+ __end_of_fixed_addresses_x
+};
+
+#define FIXADDR_X_SIZE (__end_of_fixed_addresses_x << PAGE_SHIFT)
+#define FIXADDR_X_START (FIXADDR_X_TOP - FIXADDR_X_SIZE)
+
+extern void __set_fixmap_x(
+ enum fixed_addresses_x idx, unsigned long mfn, unsigned long flags);
+
+#define set_fixmap_x(idx, phys) \
+ __set_fixmap_x(idx, (phys)>>PAGE_SHIFT, PAGE_HYPERVISOR_RX | MAP_SMALL_PAGES)
+
+#define clear_fixmap_x(idx) __set_fixmap_x(idx, 0, 0)
+
+#define __fix_x_to_virt(x) (FIXADDR_X_TOP - ((x) << PAGE_SHIFT))
+#define fix_x_to_virt(x) ((void *)__fix_x_to_virt(x))
+
#endif /* __ASSEMBLY__ */
#endif