/* NOTE(review): this is a diff hunk ('-' = removed, '+' = added), not
 * contiguous compilable C; the enclosing function is not visible here.
 * All original lines are kept verbatim below. */
/* Allocate and zero the domain's shared_info page (Xen-heap memory). */
d->shared_info = (void *)alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
/* Publish the first machine frame of the machine-to-phys (M2P) table to
 * the guest.  The removed code derived it via virt_to_phys() on
 * machine_to_phys_mapping, but that table is mapped at
 * RDWR_MPT_VIRT_START and is NOT direct-mapped, so virt_to_phys() on it
 * is invalid; the cached m2p_start_mfn is used instead. */
- d->shared_info->arch.mfn_to_pfn_start =
- virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
+ d->shared_info->arch.mfn_to_pfn_start = m2p_start_mfn;
/* Let the domain map its shared_info page, and mark that frame as having
 * no pseudo-physical (P2M) translation. */
SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
PAGE_SHIFT] = INVALID_P2M_ENTRY;
/*
 * arch_init_memory(): mark the frames backing the machine-to-phys (M2P)
 * table as allocated, owned by dom_xen, and typed PGT_gdt_page — the
 * "non-RW" type — so non-privileged domains can only map them read-only.
 * NOTE(review): diff hunk with elided context (e.g. between the truncated
 * 'struct pfn_info' comment and the dom_io line) — the lines below are
 * not contiguous source.
 */
void arch_init_memory(void)
{
/* The removed local 'mfn' was recomputed from idle_pg_table each time;
 * the frame number is now cached in the global m2p_start_mfn at the
 * point where the M2P table is first mapped. */
- unsigned long mfn, i;
+ unsigned long i;
/*
* We are rather picky about the layout of 'struct pfn_info'. The
dom_io->id = DOMID_IO;
/* M2P table is mappable read-only by privileged domains. */
- mfn = l2_pgentry_to_pagenr(
- idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]);
/* 1024 iterations: presumably one per 4kB frame of a single 4MB
 * superpage (the M2P table is mapped with _PAGE_PSE elsewhere) —
 * confirm against the x86_32 mm init code. */
for ( i = 0; i < 1024; i++ )
{
- frame_table[mfn+i].count_info = PGC_allocated | 1;
- frame_table[mfn+i].u.inuse.type_info = PGT_gdt_page | 1; /* non-RW */
- frame_table[mfn+i].u.inuse.domain = dom_xen;
+ frame_table[m2p_start_mfn+i].count_info = PGC_allocated | 1;
+ /* gdt to make sure it's only mapped read-only by non-privileged
+ domains. */
+ frame_table[m2p_start_mfn+i].u.inuse.type_info = PGT_gdt_page | 1;
+ frame_table[m2p_start_mfn+i].u.inuse.domain = dom_xen;
}
}
#include <asm/fixmap.h>
#include <asm/domain_page.h>
/* First machine frame number of the machine-to-phys (M2P) table.  Cached
 * here because the table lives at RDWR_MPT_VIRT_START, which is not
 * direct-mapped, so virt_to_phys() cannot recover its frame number. */
+unsigned long m2p_start_mfn;
+
static inline void set_pte_phys(unsigned long vaddr,
l1_pgentry_t entry)
{
/* NOTE(review): elided diff context — the lines below belong to a
 * different function in this file (the one that maps the M2P table),
 * not to set_pte_phys. */
/* Allocate and map the machine-to-phys table. */
if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
panic("Not enough memory to bootstrap Xen.\n");
/* Record the first frame of the allocation — presumably order-10, i.e.
 * 1024 frames (4MB), matching the _PAGE_PSE superpage mapping below;
 * confirm alloc_domheap_pages' second argument is an order. */
+ m2p_start_mfn = page_to_pfn(pg);
idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);
/* On x86_64 the M2P table is reachable through a direct-mapped pointer;
 * on x86_32 it is a fixed virtual mapping at RDWR_MPT_VIRT_START. */
#ifdef __x86_64__
extern unsigned long *machine_to_phys_mapping;
#else
+/* Don't call virt_to_phys on this: it isn't direct mapped. Using
+ m2p_start_mfn instead. */
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
/* Defined in the x86_32 mm code at the point the M2P table is mapped. */
+extern unsigned long m2p_start_mfn;
#endif
#define DEFAULT_GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY+1)