The new `hvm32pse` environment uses 32bit paging along with the PSE extension.
Regular 32bit paging and PSE paging differ only in whether the PSE bit may be
set in an L2 entry to create a 4M superpage. Since PSE is available on all
hardware that Xen will now run on, forgo introducing an `hvm32pg` environment
to avoid the overhead of requiring small pages for all mappings.
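
For reference (not part of the change itself): under 2-level paging, a 32bit
virtual address decomposes as

    bits 31:22  l2 index  (1024 entries, each covering 4M)
    bits 21:12  l1 index  (1024 entries, each covering 4k)
    bits 11:0   offset within the 4k page

and a 4M superpage is simply an l2 entry with _PAGE_PSE (bit 7) set, in which
case bits 21:0 of the virtual address index directly into the 4M frame.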
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
mov $X86_CR4_PAE, %eax
mov $pae32_l3_identmap, %ebx
+#elif CONFIG_PAGING_LEVELS == 2 /* 32bit PSE, 2 levels */
+
+ mov $X86_CR4_PSE, %eax
+ mov $pse_l2_identmap, %ebx
+
#else
# error Bad paging mode
#endif
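
(A sketch, in C, of what the unmodified tail of head.S is assumed to do with
these two registers; the helper names are illustrative, not the real code.)

    /* %eax holds the CR4 feature bit(s), %ebx the top-level pagetable. */
    write_cr4(read_cr4() | X86_CR4_PSE);       /* or X86_CR4_PAE for PAE */
    write_cr3((unsigned long)pse_l2_identmap); /* point CR3 at the root */
    write_cr0(read_cr0() | X86_CR0_PG);        /* then enable paging */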
#define PAGE_COMMON _PAGE_DIRTY + _PAGE_ACCESSED + _PAGE_USER + _PAGE_RW + _PAGE_PRESENT
#define PAE_IDX(sym) ((. - (sym)) / PAE_PTE_SZ)
+#define PSE_IDX(sym) ((. - (sym)) / PSE_PTE_SZ)
.data
.p2align PAGE_SHIFT
.fill PAE_L4_PT_ENTRIES - 1, 8, 0
SIZE(pae_l4_identmap)
+/* PSE mappings of the first 4M of memory in 4k pages. Uses 1x 4k page. */
+GLOBAL(pse_l1_identmap)
+ .long 0 /* Unmap page at 0 to catch errors with NULL pointers. */
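+    /* Identity-map the rest; PSE_IDX() yields each entry's own index. */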
+ .rept PSE_L1_PT_ENTRIES - 1
+ .long (PSE_IDX(pse_l1_identmap) << PSE_L1_PT_SHIFT) + PAGE_COMMON
+ .endr
+SIZE(pse_l1_identmap)
+
+/* PSE mappings up to 4G, mostly in 4M superpages. Uses 1x 4k page. */
+GLOBAL(pse_l2_identmap)
+ .long pse_l1_identmap + PAGE_COMMON
+ .rept PSE_L2_PT_ENTRIES - 1
+ .long (PSE_IDX(pse_l2_identmap) << PSE_L2_PT_SHIFT) + _PAGE_PSE + PAGE_COMMON
+ .endr
+SIZE(pse_l2_identmap)
+
/* PAE l3 32bit quad. Contains 4 64bit entries. */
GLOBAL(pae32_l3_identmap)
.rept PAE32_L3_ENTRIES
ALL_CATEGORIES := special functional xsa utility
-ALL_ENVIRONMENTS := pv64 pv32pae hvm64 hvm32pae hvm32
+ALL_ENVIRONMENTS := pv64 pv32pae hvm64 hvm32pae hvm32pse hvm32
PV_ENVIRONMENTS := $(filter pv%,$(ALL_ENVIRONMENTS))
HVM_ENVIRONMENTS := $(filter hvm%,$(ALL_ENVIRONMENTS))
pv32pae_arch := x86_32
hvm64_arch := x86_64
hvm32pae_arch := x86_32
+hvm32pse_arch := x86_32
hvm32_arch := x86_32
COMMON_FLAGS := -pipe -I$(ROOT)/include -MMD -MP
head-pv32pae := $(ROOT)/arch/x86/boot/head_pv32pae.o
head-hvm64 := $(ROOT)/arch/x86/boot/head_hvm64.o
head-hvm32pae := $(ROOT)/arch/x86/boot/head_hvm32pae.o
+head-hvm32pse := $(ROOT)/arch/x86/boot/head_hvm32pse.o
head-hvm32 := $(ROOT)/arch/x86/boot/head_hvm32.o
defcfg-pv := $(ROOT)/config/default-pv.cfg.in
defcfg-pv32pae := $(defcfg-pv)
defcfg-hvm64 := $(defcfg-hvm)
defcfg-hvm32pae := $(defcfg-hvm)
+defcfg-hvm32pse := $(defcfg-hvm)
defcfg-hvm32 := $(defcfg-hvm)
obj-perarch :=
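
An individual test then opts in to the new environment via its own Makefile,
e.g. (a sketch following the presumed per-test convention, not part of this
change):

    TEST-ENVS := hvm32pse hvm32pae hvm32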
`pv32pae` | PV | 32bit | PAE
`pv64` | PV | 64bit | Long mode
`hvm32` | HVM | 32bit | None
+`hvm32pse` | HVM | 32bit | PSE
`hvm32pae` | HVM | 32bit | PAE
`hvm64` | HVM | 64bit | Long mode
-@todo Possibly implement `hvm32pg`, `hvm32pse` and `hvm16`.
-
@section getting-started Getting Started
#undef CONFIG_ENV_hvm32pae
+#elif defined(CONFIG_ENV_hvm32pse)
+
+#define CONFIG_HVM
+#define CONFIG_PAGING_LEVELS 2
+#define ENVIRONMENT_DESCRIPTION "HVM 32bit (PSE 2 levels)"
+
+#undef CONFIG_ENV_hvm32pse
+
#elif defined(CONFIG_ENV_hvm32)
#define CONFIG_HVM
--- /dev/null
+/**
+ * @file include/arch/x86/page-pse.h
+ *
+ * Definitions and helpers for PSE pagetable handling.
+ */
+#ifndef XTF_X86_PAGE_PSE_H
+#define XTF_X86_PAGE_PSE_H
+
+/** PSE pagetable entries are 32 bits wide. */
+#define PSE_PTE_SZ 4
+
+/** @{ */
+/** All PSE pagetables contain 1024 entries. */
+#define PSE_L1_PT_ENTRIES (PAGE_SIZE / PSE_PTE_SZ)
+#define PSE_L2_PT_ENTRIES (PAGE_SIZE / PSE_PTE_SZ)
+/** @} */
+
+#define PSE_L1_PT_SHIFT 12
+#define PSE_L2_PT_SHIFT 22
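+
+/*
+ * With 4k pages and 4-byte entries, each table holds 1024 entries, so an
+ * l1 table spans 4k * 1024 = 4M and an l2 table spans 4M * 1024 = 4G.
+ */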
+
+#ifndef __ASSEMBLY__
+
+/** Integer representation of a PTE. */
+typedef uint32_t pse_intpte_t;
+
+static inline unsigned int pse_l1_table_offset(unsigned long va)
+{ return (va >> PSE_L1_PT_SHIFT) & (PSE_L1_PT_ENTRIES - 1); }
+static inline unsigned int pse_l2_table_offset(unsigned long va)
+{ return (va >> PSE_L2_PT_SHIFT) & (PSE_L2_PT_ENTRIES - 1); }
+
+#endif /* __ASSEMBLY__ */
+#endif /* XTF_X86_PAGE_PSE_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
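
A quick worked example with the new helpers (a sketch, not part of the
change; assumes the header is reachable via the usual <arch/page.h>):

    /* 0x00401234 sits in the second 4M region, second 4k page thereof. */
    unsigned long va = 0x00401234;
    unsigned int l2 = pse_l2_table_offset(va); /* (va >> 22) & 1023 == 1 */
    unsigned int l1 = pse_l1_table_offset(va); /* (va >> 12) & 1023 == 1 */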
#define PAGE_MASK (~(PAGE_SIZE - 1))
#include "page-pae.h"
+#include "page-pse.h"
#define PAGE_ORDER_4K 0
#define PAGE_ORDER_2M 9
#define _PAGE_DIRTY 0x040
#define _PAGE_PSE 0x080
-#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
+#if CONFIG_PAGING_LEVELS == 2 /* PSE Paging */
+
+#define L1_PT_SHIFT PSE_L1_PT_SHIFT
+#define L2_PT_SHIFT PSE_L2_PT_SHIFT
+
+#else /* CONFIG_PAGING_LEVELS != 2: PAE Paging */
#define L1_PT_SHIFT PAE_L1_PT_SHIFT
#define L2_PT_SHIFT PAE_L2_PT_SHIFT
+
+#endif /* CONFIG_PAGING_LEVELS == 2 */
+
+#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
+
#define L3_PT_SHIFT PAE_L3_PT_SHIFT
#endif /* CONFIG_PAGING_LEVELS >= 3 */
#ifndef __ASSEMBLY__
-#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
+#if CONFIG_PAGING_LEVELS == 2 /* PSE Paging */
+
+typedef pse_intpte_t intpte_t;
+
+static inline unsigned int l1_table_offset(unsigned long va)
+{ return pse_l1_table_offset(va); }
+static inline unsigned int l2_table_offset(unsigned long va)
+{ return pse_l2_table_offset(va); }
+
+#else /* CONFIG_PAGING_LEVELS != 2: PAE Paging */
typedef pae_intpte_t intpte_t;
{ return pae_l1_table_offset(va); }
static inline unsigned int l2_table_offset(unsigned long va)
{ return pae_l2_table_offset(va); }
+
+#endif /* CONFIG_PAGING_LEVELS == 2 */
+
+#if CONFIG_PAGING_LEVELS >= 3 /* PAE Paging */
+
static inline unsigned int l3_table_offset(unsigned long va)
{ return pae_l3_table_offset(va); }
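
Under a CONFIG_PAGING_LEVELS == 2 build, intpte_t is therefore 32 bits wide
and only the l1/l2 helpers exist. Composing a 4M identity superpage entry in
C might look like this (a sketch using only the flags defined above):

    /* PTE for an identity-mapped 4M superpage at 8M, i.e. l2 slot 2. */
    intpte_t pte = 0x800000 | _PAGE_PSE | _PAGE_DIRTY | _PAGE_ACCESSED |
                   _PAGE_RW | _PAGE_PRESENT;
    unsigned int slot = l2_table_offset(0x800000); /* 0x800000 >> 22 == 2 */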