Introduce HYPERVISOR_SHSTK pagetable constants, which are Read-Only + Dirty.
Use these in place of _PAGE_RW for memguard_guard_stack(), to create real
shadow stacks on capable hardware.
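For reference, CET-SS hardware treats a present leaf pagetable entry with Write clear and Dirty set as shadow stack memory, which is exactly what the new constant encodes. A minimal sketch of that test using Xen's existing _PAGE_* flags (illustrative only; this predicate is not part of the patch):

    /*
     * Illustrative only: a present, read-only (RW clear), dirty mapping is
     * what the hardware recognises as a shadow stack page.
     */
    static bool pte_is_shstk(intpte_t pte)
    {
        return (pte & (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY)) ==
               (_PAGE_PRESENT | _PAGE_DIRTY);
    }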
Supervisor shadow stacks need a token written at the top, which is most easily
done before making the frame read only.
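Concretely, the ordering for each frame is: write the token while the mapping is still writeable, then remap it Read-Only + Dirty. A two-line sketch using the names introduced by the hunks below, where p points at the primary shadow stack page (this is essentially what memguard_guard_stack() ends up doing for that page):

    write_sss_token(p + PAGE_SIZE - 8);              /* still mapped RW       */
    map_pages_to_xen((unsigned long)p, virt_to_mfn(p), 1,
                     PAGE_HYPERVISOR_SHSTK);         /* now Read-Only + Dirty */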
Allocate the shadow IST stack block in struct tss_page. It doesn't strictly
need to live here, but it is a convenient location (and XPTI-safe, for testing
purposes), and placing it ahead of the TSS doesn't risk colliding with a bad
IO Bitmap offset and turning into some IO port permissions.
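The resulting per-cpu page is laid out as below (a sketch of the struct as modified in the hunk further down; the offset comments are added here for illustration). The IO Bitmap base in the TSS is an unsigned offset from the TSS base, so a bogus value can only reference memory at or beyond the TSS, never the ist_ssp[] block in front of it:

    struct tss_page {
        uint64_t __aligned(PAGE_SIZE) ist_ssp[8]; /* +0x00: shadow IST table, 64 bytes */
        struct tss64 tss;                         /* +0x40: hardware TSS               */
    };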
Have load_system_tables() set up the shadow IST stack table when setting up
the regular IST in the TSS.
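For background (a rough paraphrase of the CET-SS architecture, not code from this patch), this is how the hardware consumes the table, and why live entries must point at a valid token while unused slots are poisoned with a non-canonical value:

    /*
     * On delivering an event through IST entry n with supervisor shadow
     * stacks active:
     *
     *   - the new SSP is read from 8-byte entry n of the table pointed at
     *     by MSR_INTERRUPT_SSP_TABLE;
     *   - the supervisor token at that address is checked (its value must
     *     be its own linear address with the busy bit clear) and marked
     *     busy;
     *   - the old CS/LIP/SSP are pushed and execution continues with SSP
     *     just below the token.
     *
     * A poisoned (non-canonical) entry therefore faults as soon as it is
     * consumed, rather than silently switching to a bogus shadow stack.
     */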
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
tss->rsp1 = 0x8600111111111111ul;
tss->rsp2 = 0x8600111111111111ul;
+ /*
+ * Set up the shadow stack IST. Used entries must point at the
+ * supervisor shadow stack token. Unused entries are poisoned.
+ *
+ * This IST Table may be live, and the NMI/#MC entries must
+ * remain valid on every instruction boundary, hence the
+ * volatile qualifier.
+ */
+ if ( cpu_has_xen_shstk )
+ {
+ volatile uint64_t *ist_ssp = this_cpu(tss_page).ist_ssp;
+
+ ist_ssp[0] = 0x8600111111111111ul;
+ ist_ssp[IST_MCE] = stack_top + (IST_MCE * IST_SHSTK_SIZE) - 8;
+ ist_ssp[IST_NMI] = stack_top + (IST_NMI * IST_SHSTK_SIZE) - 8;
+ ist_ssp[IST_DB] = stack_top + (IST_DB * IST_SHSTK_SIZE) - 8;
+ ist_ssp[IST_DF] = stack_top + (IST_DF * IST_SHSTK_SIZE) - 8;
+ for ( i = IST_DF + 1;
+ i < ARRAY_SIZE(this_cpu(tss_page).ist_ssp); ++i )
+ ist_ssp[i] = 0x8600111111111111ul;
+
+ wrmsrl(MSR_INTERRUPT_SSP_TABLE, (unsigned long)ist_ssp);
+ }
+
BUILD_BUG_ON(sizeof(*tss) <= 0x67); /* Mandated by the architecture. */
_set_tssldt_desc(gdt + TSS_ENTRY, (unsigned long)tss,
#endif
+static void write_sss_token(unsigned long *ptr)
+{
+ /*
+ * A supervisor shadow stack token is its own linear address, with the
+ * busy bit (0) clear.
+ */
+ *ptr = (unsigned long)ptr;
+}
+
void memguard_guard_stack(void *p)
{
- map_pages_to_xen((unsigned long)p, virt_to_mfn(p), 1, _PAGE_NONE);
+ /* IST Shadow stacks. 4x 1k in stack page 0. */
+ if ( IS_ENABLED(CONFIG_XEN_SHSTK) )
+ {
+ write_sss_token(p + (IST_MCE * IST_SHSTK_SIZE) - 8);
+ write_sss_token(p + (IST_NMI * IST_SHSTK_SIZE) - 8);
+ write_sss_token(p + (IST_DB * IST_SHSTK_SIZE) - 8);
+ write_sss_token(p + (IST_DF * IST_SHSTK_SIZE) - 8);
+ }
+ map_pages_to_xen((unsigned long)p, virt_to_mfn(p), 1, PAGE_HYPERVISOR_SHSTK);
+ /* Primary Shadow Stack. 1x 4k in stack page 5. */
p += PRIMARY_SHSTK_SLOT * PAGE_SIZE;
- map_pages_to_xen((unsigned long)p, virt_to_mfn(p), 1, _PAGE_NONE);
+ if ( IS_ENABLED(CONFIG_XEN_SHSTK) )
+ write_sss_token(p + PAGE_SIZE - 8);
+
+ map_pages_to_xen((unsigned long)p, virt_to_mfn(p), 1, PAGE_HYPERVISOR_SHSTK);
}
void memguard_unguard_stack(void *p)
#define STACK_ORDER 3
#define STACK_SIZE (PAGE_SIZE << STACK_ORDER)
+#define IST_SHSTK_SIZE 1024
+
#define TRAMPOLINE_STACK_SPACE PAGE_SIZE
#define TRAMPOLINE_SPACE (KB(64) - TRAMPOLINE_STACK_SPACE)
#define WAKEUP_STACK_MIN 3072
_PAGE_DIRTY | _PAGE_RW)
#define __PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR | _PAGE_PCD)
#define __PAGE_HYPERVISOR_UC (__PAGE_HYPERVISOR | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_HYPERVISOR_SHSTK (__PAGE_HYPERVISOR_RO | _PAGE_DIRTY)
#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpages mappings */
uint16_t :16, bitmap;
};
struct tss_page {
- struct tss64 __aligned(PAGE_SIZE) tss;
+ uint64_t __aligned(PAGE_SIZE) ist_ssp[8];
+ struct tss64 tss;
};
DECLARE_PER_CPU(struct tss_page, tss_page);
#define PAGE_HYPERVISOR_RW (__PAGE_HYPERVISOR_RW | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_RX (__PAGE_HYPERVISOR_RX | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_RWX (__PAGE_HYPERVISOR | _PAGE_GLOBAL)
+#define PAGE_HYPERVISOR_SHSTK (__PAGE_HYPERVISOR_SHSTK | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR PAGE_HYPERVISOR_RW
#define PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR_UCMINUS | \