#include <asm/hvm/grant_table.h>
#include <asm/pv/grant_table.h>
+#include <asm/pv/mm.h>
#include "pv/mm.h"
memset(end_pg, -1, (unsigned long)top_pg - (unsigned long)end_pg);
}
-#ifndef NDEBUG
-static unsigned int __read_mostly root_pgt_pv_xen_slots
- = ROOT_PAGETABLE_PV_XEN_SLOTS;
-static l4_pgentry_t __read_mostly split_l4e;
-#else
-#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
-#endif
-
-static void pv_arch_init_memory(void)
-{
-#ifndef NDEBUG
- unsigned int i;
-
- if ( highmem_start )
- {
- unsigned long split_va = (unsigned long)__va(highmem_start);
-
- if ( split_va < HYPERVISOR_VIRT_END &&
- split_va - 1 == (unsigned long)__va(highmem_start - 1) )
- {
- root_pgt_pv_xen_slots = l4_table_offset(split_va) -
- ROOT_PAGETABLE_FIRST_XEN_SLOT;
- ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
- if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
- {
- l3_pgentry_t *l3tab = alloc_xen_pagetable();
-
- if ( l3tab )
- {
- const l3_pgentry_t *l3idle =
- l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
-
- for ( i = 0; i < l3_table_offset(split_va); ++i )
- l3tab[i] = l3idle[i];
- for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
- l3tab[i] = l3e_empty();
- split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
- __PAGE_HYPERVISOR_RW);
- }
- else
- ++root_pgt_pv_xen_slots;
- }
- }
- }
-#endif
-}
-
void __init arch_init_memory(void)
{
unsigned long i, pfn, rstart_pfn, rend_pfn, iostart_pfn, ioend_pfn;
return rc > 0 ? 0 : rc;
}
-/*
- * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
- * values a guest may have left there from alloc_l4_table().
- */
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
- bool zap_ro_mpt)
-{
- /* Xen private mappings. */
- memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
- root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
-#ifndef NDEBUG
- if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
- {
- l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
- root_pgt_pv_xen_slots];
-
- if ( l4e_get_intpte(split_l4e) )
- *next++ = split_l4e;
-
- memset(next, 0,
- _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
- }
-#else
- BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
-#endif
- l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
- l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
- if ( zap_ro_mpt || is_pv_32bit_domain(d) )
- l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
-}
-
bool fill_ro_mpt(mfn_t mfn)
{
l4_pgentry_t *l4tab = map_domain_page(mfn);
#include <asm/page.h>
#include <asm/setup.h>
+#include "mm.h"
+
/* Allow ring-3 access in long mode as guest cannot use ring 1 ... */
#define BASE_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
#define L1_PROT (BASE_PROT|_PAGE_GUEST_KERNEL)
#include <xen/lib.h>
#include <xen/sched.h>
+#include <asm/p2m.h>
+#include <asm/paging.h>
+#include <asm/setup.h>
#include <asm/pv/domain.h>
+#include "mm.h"
+
/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
#include <asm/current.h>
#include <asm/p2m.h>
+#include <asm/setup.h>
#include "mm.h"
return true;
}
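+/*
+ * Number of Xen L4 slots copied into PV guest root page tables.  In
+ * debug builds pv_arch_init_memory() may lower this when highmem_start
+ * splits the Xen area partway through an L4 slot; split_l4e then holds
+ * the entry covering the partial boundary slot.
+ */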
+#ifndef NDEBUG
+static unsigned int __read_mostly root_pgt_pv_xen_slots
+ = ROOT_PAGETABLE_PV_XEN_SLOTS;
+static l4_pgentry_t __read_mostly split_l4e;
+#else
+#define root_pgt_pv_xen_slots ROOT_PAGETABLE_PV_XEN_SLOTS
+#endif
+
+/*
+ * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS slots, to
+ * clobber any values a guest may have left there from alloc_l4_table().
+ */
+void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
+ bool zap_ro_mpt)
+{
+ /* Xen private mappings. */
+ memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+ &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
+ root_pgt_pv_xen_slots * sizeof(l4_pgentry_t));
+#ifndef NDEBUG
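+    /*
+     * In debug builds the copy above may cover fewer than all slots;
+     * install the split boundary entry (if one was built) and zero the
+     * remainder so that every Xen slot gets written.
+     */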
+ if ( unlikely(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS) )
+ {
+ l4_pgentry_t *next = &l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT +
+ root_pgt_pv_xen_slots];
+
+ if ( l4e_get_intpte(split_l4e) )
+ *next++ = split_l4e;
+
+ memset(next, 0,
+ _p(&l4tab[ROOT_PAGETABLE_LAST_XEN_SLOT + 1]) - _p(next));
+ }
+#else
+ BUILD_BUG_ON(root_pgt_pv_xen_slots != ROOT_PAGETABLE_PV_XEN_SLOTS);
+#endif
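+    /* Install the linear page-table and per-domain mappings. */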
+ l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
+ l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR_RW);
+ l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
+ l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
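+    /* Drop the read-only M2P slot for 32-bit guests, or when asked to. */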
+ if ( zap_ro_mpt || is_pv_32bit_domain(d) )
+ l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
+}
+
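+/*
+ * Debug-build only adjustment: when highmem_start is set, trim the number
+ * of Xen L4 slots exposed to PV guests so that nothing above
+ * __va(highmem_start) is propagated, building a partial L3 table for a
+ * boundary slot that is only partly covered.  A no-op in release builds.
+ */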
+void pv_arch_init_memory(void)
+{
+#ifndef NDEBUG
+ unsigned int i;
+
+ if ( highmem_start )
+ {
+ unsigned long split_va = (unsigned long)__va(highmem_start);
+
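+        /*
+         * Only act if the split point lies inside the Xen virtual area
+         * and maps linearly back to highmem_start.
+         */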
+ if ( split_va < HYPERVISOR_VIRT_END &&
+ split_va - 1 == (unsigned long)__va(highmem_start - 1) )
+ {
+ root_pgt_pv_xen_slots = l4_table_offset(split_va) -
+ ROOT_PAGETABLE_FIRST_XEN_SLOT;
+ ASSERT(root_pgt_pv_xen_slots < ROOT_PAGETABLE_PV_XEN_SLOTS);
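+            /*
+             * If the split point is not L4-aligned, the boundary slot is
+             * only partially Xen's: clone the idle L3 entries below the
+             * split and clear those above it.
+             */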
+ if ( l4_table_offset(split_va) == l4_table_offset(split_va - 1) )
+ {
+ l3_pgentry_t *l3tab = alloc_xen_pagetable();
+
+ if ( l3tab )
+ {
+ const l3_pgentry_t *l3idle =
+ l4e_to_l3e(idle_pg_table[l4_table_offset(split_va)]);
+
+ for ( i = 0; i < l3_table_offset(split_va); ++i )
+ l3tab[i] = l3idle[i];
+ for ( ; i < L3_PAGETABLE_ENTRIES; ++i )
+ l3tab[i] = l3e_empty();
+ split_l4e = l4e_from_pfn(virt_to_mfn(l3tab),
+ __PAGE_HYPERVISOR_RW);
+ }
+ else
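+                    /* Allocation failed: expose the whole boundary slot. */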
+ ++root_pgt_pv_xen_slots;
+ }
+ }
+ }
+#endif
+}
+
/*
* Local variables:
* mode: C
l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn);
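+/* Fill in the Xen slots of a PV guest's root (L4) page table. */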
+void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
+ bool zap_ro_mpt);
+
/* Read a PV guest's l1e that maps this linear address. */
static inline l1_pgentry_t guest_get_eff_l1e(unsigned long linear)
{
int free_page_type(struct page_info *page, unsigned long type,
int preemptible);
-void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
- bool_t zap_ro_mpt);
bool fill_ro_mpt(mfn_t mfn);
void zap_ro_mpt(mfn_t mfn);
bool pv_map_ldt_shadow_page(unsigned int off);
+void pv_arch_init_memory(void);
+
#else
#include <xen/errno.h>
static inline bool pv_map_ldt_shadow_page(unsigned int off) { return false; }
+static inline void pv_arch_init_memory(void) {}
+
#endif
#endif /* __X86_PV_MM_H__ */