--- /dev/null
+#include <xtf/asm_macros.h>
+
+#include <arch/x86/page.h>
+
+ .data
+ .p2align PAGE_SHIFT
+
+/* Identity map of the first 4G of memory, in 2M superpages: 4 l2 tables (one 4k page each), each 64bit PTE written as a pair of .longs. */
+GLOBAL(l2_identmap)
+ .rept (4 * L2_PT_ENTRIES)
+ .long (((. - l2_identmap) / 8) << (PAGE_ORDER_2M + PAGE_SHIFT)) + \
+ _PAGE_PSE + _PAGE_USER + _PAGE_RW + _PAGE_PRESENT
+ .long 0
+ .endr
+SIZE(l2_identmap)
+
+/* l3 pagetable: first 4 entries reference the 4 l2 tables above; remaining entries are zero-filled (not present). */
+GLOBAL(l3_identmap)
+ .rept 4
+ .long l2_identmap + (((. - l3_identmap) / 8 ) << PAGE_SHIFT) + \
+ _PAGE_USER + _PAGE_RW + _PAGE_PRESENT
+ .long 0
+ .endr
+ .fill L3_PT_ENTRIES - 4, 8, 0
+SIZE(l3_identmap)
+
+/* l4 pagetable: entry 0 references the l3 table; remaining entries are zero-filled (not present). */
+GLOBAL(l4_identmap)
+ .long l3_identmap + _PAGE_USER + _PAGE_RW + _PAGE_PRESENT
+ .long 0
+ .fill L4_PT_ENTRIES - 1, 8, 0
+SIZE(l4_identmap)
+
+/* l3 quad for 32bit PAE: 4 PDPTEs referencing the same l2 tables. PAE PDPTEs only permit the Present bit; RW/USER are reserved, hence _PAGE_PRESENT alone. */
+GLOBAL(l3_paemap)
+ .rept 4
+ .long l2_identmap + (((. - l3_paemap) / 8 ) << PAGE_SHIFT) + _PAGE_PRESENT
+ .long 0
+ .endr
+SIZE(l3_paemap)
/*
* PV guests should have hypercalls set up by the domain builder, due to the
- * HYPERCALL_PAGE ELFNOTE being filled.
+ * HYPERCALL_PAGE ELFNOTE being filled. HVM guests have to locate the
+ * hypervisor cpuid leaves to find the correct MSR to request that Xen
+ * writes a hypercall page.
*/
static void init_hypercalls(void)
{
+#ifdef CONFIG_ENV_hvm
+ uint32_t eax, ebx, ecx, edx, base;
+ bool found = false;
+
+ for ( base = XEN_CPUID_FIRST_LEAF;
+ base < XEN_CPUID_FIRST_LEAF + 0x10000; base += 0x100 )
+ {
+ cpuid(base, &eax, &ebx, &ecx, &edx);
+
+ if ( (ebx == XEN_CPUID_SIGNATURE_EBX) &&
+ (ecx == XEN_CPUID_SIGNATURE_ECX) &&
+ (edx == XEN_CPUID_SIGNATURE_EDX) &&
+ ((eax - base) >= 2) )
+ {
+ found = true;
+ break;
+ }
+ }
+
+ if ( !found )
+ panic("Unable to locate Xen CPUID leaves\n");
+
+ cpuid(base + 2, &eax, &ebx, &ecx, &edx);
+ wrmsr(ebx, (unsigned long)&hypercall_page);
+ barrier();
+#endif
+
/*
* Confirm that the `ret` poision has been overwritten with a real
* hypercall page. At the time of writing, a legitimate hypercall page
#if defined(CONFIG_ENV_pv)
cons_ring = mfn_to_virt(start_info->console.domU.mfn);
cons_evtchn = start_info->console.domU.evtchn;
+#elif defined(CONFIG_ENV_hvm)
+ {
+ uint64_t raw_pfn, raw_evtchn;
+
+ if ( hvm_get_param(HVM_PARAM_CONSOLE_PFN, &raw_pfn) != 0 ||
+ hvm_get_param(HVM_PARAM_CONSOLE_EVTCHN, &raw_evtchn) != 0 )
+ return;
+
+ cons_ring = pfn_to_virt(raw_pfn);
+ cons_evtchn = raw_evtchn;
+ }
#endif
init_pv_console(cons_ring, cons_evtchn);
}
+#if defined(CONFIG_ENV_hvm)
+static void qemu_console_write(const char *buf, size_t len) /* stream buf byte-by-byte to the debug I/O port */
+{
+ asm volatile("rep; outsb"
+ : "+S" (buf), "+c" (len)
+ : "d" (0x12)); /* NOTE(review): 0x12 presumed to be the qemu/Xen debug console port - confirm */
+}
+#endif
+
static void xen_console_write(const char *buf, size_t len)
{
hypercall_console_write(buf, len);
void arch_setup(void)
{
+#if defined(CONFIG_ENV_hvm)
+ register_console_callback(qemu_console_write);
+#endif
+
register_console_callback(xen_console_write);
init_hypercalls();
#include <xtf/traps.h>
+#include <arch/x86/config.h>
/*
* Getting called means that a shutdown(crash) hypercall has not succeeded.
asm volatile("movabs %0, %%rsp; pushf"
:: "i" (0x800000000badc0deUL) : "memory");
+#elif defined(CONFIG_ENV_hvm)
+ /*
+ * HVM - clear interrupts and halt. Xen should catch this condition and
+ * shut the VM down.
+ */
+ asm volatile("cli; hlt");
+
#endif
/*
* Possibilities:
* - Xen hypervisor console
* - PV console
+ * - Qemu debug console
*/
-static cons_output_cb output_fns[2];
+static cons_output_cb output_fns[3];
static unsigned int nr_cons_cb;
/* Guest PV console details. */
# Always link hypercall_page.S last as it is a page of data replaced by the hyperisor
obj-perenv += $(ROOT)/arch/x86/hypercall_page.o
+
+# HVM specific objects
+obj-hvm += $(ROOT)/arch/x86/hvm_pagetables.o
+
+obj-hvm32 += $(obj-hvm)
+obj-hvm64 += $(obj-hvm)
* PV guests: VIRT_OFFSET is 0 which causes all linked virtual addresses to be
* contiguous in the pagetables created by the domain builder. Therefore,
* virt == pfn << PAGE_SHIFT for any pfn constructed by the domain builder.
+ *
+ * HVM guests: All memory from 0 to 4GB is identity mapped.
*/
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+ return (void *)(pfn << PAGE_SHIFT); /* linear mapping: virt == pfn << PAGE_SHIFT (see comment above) */
+}
+
#if defined(CONFIG_ENV_pv)
#define m2p ((unsigned long *)MACH2PHYS_VIRT_START)
static inline void *mfn_to_virt(unsigned long mfn)
{
- return (void *)(m2p[mfn] << PAGE_SHIFT);
+ return pfn_to_virt(m2p[mfn]); /* M2P table: machine frame -> pfn, then linear virt */
}
#undef m2p
#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_ORDER_4K 0 /* page size orders, relative to 4k */
+#define PAGE_ORDER_2M 9 /* 4k << 9 == 2M */
+#define PAGE_ORDER_1G 18 /* 4k << 18 == 1G */
+
+#define _PAGE_PRESENT 0x001 /* pagetable entry flag bits */
+#define _PAGE_RW 0x002
+#define _PAGE_USER 0x004
+#define _PAGE_PSE 0x080 /* superpage (only valid at l2/l3) - TODO confirm intended levels */
+
+#define L1_PT_ENTRIES 512 /* entries per pagetable page (64bit/PAE layout) */
+#define L2_PT_ENTRIES 512
+#define L3_PT_ENTRIES 512
+#define L4_PT_ENTRIES 512
+
+
#endif /* XTF_X86_PAGE_H */
/*