* - Loads GDT, IDT, TR then null LDT
* - Sets up IST references in the IDT
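+ * - Uses df_stacktop, if non-NULL, in place of the local stack base for the #DF IST entry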
*/
-void load_system_tables(void)
+void load_system_tables(void *df_stacktop)
{
unsigned int i, cpu = smp_processor_id();
unsigned long stack_bottom = get_stack_bottom(),
    stack_top = stack_bottom & ~(STACK_SIZE - 1);
tss->ist[IST_MCE - 1] = stack_top + (1 + IST_MCE) * PAGE_SIZE;
tss->ist[IST_NMI - 1] = stack_top + (1 + IST_NMI) * PAGE_SIZE;
tss->ist[IST_DB - 1] = stack_top + (1 + IST_DB) * PAGE_SIZE;
- tss->ist[IST_DF - 1] = stack_top + (1 + IST_DF) * PAGE_SIZE;
+ tss->ist[IST_DF - 1] = ((unsigned long)df_stacktop ?: stack_top) + (1 + IST_DF) * PAGE_SIZE;
tss->bitmap = IOBMP_INVALID_OFFSET;
/* All other stack pointers poisoned. */
return true;
}
+extern char cpu0_stack[STACK_SIZE];
+
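+/*
+ * Back the per-domain stack area at PERDOMAIN_STACK_BOTTOM with the frames
+ * of cpu0_stack, giving the hypervisor stack a per-domain virtual alias.
+ */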
+void populate_perdom_stack(struct domain *d)
+{
+ int idx;
+
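+ /* Walk d's per-domain page tables down to the L1 table covering the stack area. */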
+ l3_pgentry_t *l3t = __map_domain_page(d->arch.perdomain_l3_pg);
+ l2_pgentry_t *l2t = map_l2t_from_l3e(l3t[l3_table_offset(PERDOMAIN_STACK_BOTTOM)]);
+ l1_pgentry_t *l1t = map_l1t_from_l2e(l2t[l2_table_offset(PERDOMAIN_STACK_BOTTOM)]);
+ /* Point the mappings to cpu0_stack. */
+ printk("The perdom stack bottom is %#lx\n", virt_to_maddr(cpu0_stack));
+ for ( idx = 0; idx < (1 << STACK_ORDER); idx++ )
+ l1e_write(&l1t[idx],
+ l1e_from_pfn(virt_to_mfn(cpu0_stack) + idx, PAGE_HYPERVISOR));
+
+ unmap_domain_page(l1t);
+ unmap_domain_page(l2t);
+ unmap_domain_page(l3t);
+}
+
int arch_domain_create(struct domain *d,
struct xen_domctl_createdomain *config)
{
spin_lock_init(&d->arch.e820_lock);
+ /* Allocate the page tables for the stack range; no leaf mappings are installed yet. */
+ rc = create_perdomain_mapping(d, PERDOMAIN_STACK_BOTTOM, 1U << STACK_ORDER,
+ NIL(l1_pgentry_t *), NULL);
+ if ( rc )
+ return rc;
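+
+ /* Now point the leaf entries at the frames of cpu0_stack. */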
+ populate_perdom_stack(d);
+
/* Minimal initialisation for the idle domain. */
if ( unlikely(is_idle_domain(d)) )
{
unsigned long *stack = (void*)(get_stack_bottom() & ~(STACK_SIZE - 1));
/* Update TSS and ISTs */
- load_system_tables();
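+ /* Use the directmap alias of cpu0_stack as the base for the #DF IST stack. */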
+ load_system_tables(__va(__pa(cpu0_stack)));
/* Update SYSCALL trampolines */
percpu_traps_init();
stack_base[0] = stack;
- memguard_guard_stack(stack);
+ /* memguard_guard_stack(stack); */
if ( IS_ENABLED(CONFIG_XEN_SHSTK) && cpu_has_xen_shstk )
{
percpu_init_areas();
init_idt_traps();
- load_system_tables();
+ load_system_tables(NULL);
smp_prepare_boot_cpu();
sort_exception_tables();
wrmsrl(MSR_SPEC_CTRL, default_xen_spec_ctrl);
}
+ printk("The stack bottom is %#lx\n", __pa(get_stack_bottom()));
+
- /* Jump to the 1:1 virtual mappings of cpu0_stack. */
+ /*
+  * Jump to the per-domain alias of cpu0_stack, keeping the current offset
+  * of the stack bottom within the stack.
+  */
asm volatile ("mov %[stk], %%rsp; jmp %c[fn]" ::
- [stk] "g" (__va(__pa(get_stack_bottom()))),
+ [stk] "g" (PERDOMAIN_STACK_BOTTOM + (get_stack_bottom() & (STACK_SIZE-1))),
[fn] "i" (reinit_bsp_stack) : "memory");
unreachable();
}
get_cpu_info()->xen_cr3 = 0;
get_cpu_info()->pv_cr3 = 0;
- load_system_tables();
+ load_system_tables(NULL);
/* Full exception support from here on in. */
_show_registers(regs, crs, CTXT_hypervisor, NULL);
show_code(regs);
show_stack_overflow(cpu, regs);
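+ /* Dump the walk for %cr2 in case the #DF was raised while delivering a page fault. */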
+ show_page_walk(read_cr2());
panic("DOUBLE FAULT -- system shutdown\n");
}
void trap_init(void);
void init_idt_traps(void);
-void load_system_tables(void);
+void load_system_tables(void *df_stacktop);
void percpu_traps_init(void);
void subarch_percpu_traps_init(void);