#include <public/xen.h>
#include <asm/asm_defns.h>
#include <asm/desc.h>
+#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/msr.h>
add $8,%edx
add $(1<<L2_PAGETABLE_SHIFT),%eax
loop 1b
+ /* Initialise L2 fixmap page directory entry. */
+ mov $(sym_phys(l1_fixmap)+7),%eax
+ mov %eax,sym_phys(l2_fixmap) + l2_table_offset(FIXADDR_TOP-1)*8
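The new entry is assembled by hand: adding 7 to the physical address of l1_fixmap sets the low attribute bits (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER), and the slot index from l2_table_offset(FIXADDR_TOP-1) is scaled by 8 because each L2 entry is 8 bytes wide. As an illustrative C sketch of the same store (the EFI hunk further down does the equivalent via l2e_from_paddr() and __PAGE_HYPERVISOR; __pa() here is just shorthand for the physical address):

    /* Illustrative only: the store performed by the two instructions above. */
    l2_fixmap[l2_table_offset(FIXADDR_TOP - 1)] =
        l2e_from_paddr(__pa(l1_fixmap),
                       _PAGE_PRESENT | _PAGE_RW | _PAGE_USER); /* == 7 */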
/* Initialise L3 identity-map page directory entries. */
mov $sym_phys(l3_identmap),%edi
mov $(sym_phys(l2_identmap)+7),%eax
add $8,%edi
add $PAGE_SIZE,%eax
loop 1b
- /* Initialise L3 xen-map page directory entry. */
+ /* Initialise L3 xen-map and fixmap page directory entries. */
mov $(sym_phys(l2_xenmap)+7),%eax
mov %eax,sym_phys(l3_xenmap) + l3_table_offset(XEN_VIRT_START)*8
+ mov $(sym_phys(l2_fixmap)+7),%eax
+ mov %eax,sym_phys(l3_xenmap) + l3_table_offset(FIXADDR_TOP-1)*8
/* Initialise L3 boot-map page directory entry. */
mov $(sym_phys(l2_bootmap)+7),%eax
mov %eax,sym_phys(l3_bootmap) + 0*8
add $(1<<L2_PAGETABLE_SHIFT),%eax
cmp $(16<<20)+0xe3,%eax
jne 1b
+ /* Initialise L2 fixmap page directory entry. */
+ mov $(sym_phys(l1_fixmap)+7),%eax
+ mov %eax,sym_phys(idle_pg_table_l2) + l2_table_offset(FIXADDR_TOP-1)*8
#endif
/* Initialize 4kB mappings of first 2MB or 4MB of memory. */
#include <xen/vga.h>
#include <asm/e820.h>
#include <asm/edd.h>
+#define __ASSEMBLY__ /* avoid pulling in ACPI stuff (conflicts with EFI) */
+#include <asm/fixmap.h>
+#undef __ASSEMBLY__
#include <asm/mm.h>
#include <asm/msr.h>
#include <asm/processor.h>
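The __ASSEMBLY__ define/undef bracket around <asm/fixmap.h> above works because of the header split made later in this patch: only the FIXADDR_TOP definition (and the <asm/page.h> include) sit outside the new #ifndef __ASSEMBLY__ guard. With the symbol defined, the include therefore reduces to roughly the following sketch, which is all this file needs, while the enum fixed_addresses block and its ACPI/APEI includes (the ones that clash with the EFI headers) are never pulled in:

    /* Effective content of <asm/fixmap.h> when __ASSEMBLY__ is defined (sketch). */
    #include <asm/page.h>
    #define FIXADDR_TOP (IOREMAP_VIRT_END - PAGE_SIZE)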
slot &= L2_PAGETABLE_ENTRIES - 1;
l2_bootmap[slot] = l2e_from_paddr(addr, __PAGE_HYPERVISOR|_PAGE_PSE);
}
+ /* Initialise L2 fixmap page directory entry. */
+ l2_fixmap[l2_table_offset(FIXADDR_TOP - 1)] =
+ l2e_from_paddr((UINTN)l1_fixmap, __PAGE_HYPERVISOR);
/* Initialise L3 identity-map page directory entries. */
for ( i = 0; i < ARRAY_SIZE(l2_identmap) / L2_PAGETABLE_ENTRIES; ++i )
l3_identmap[i] = l3e_from_paddr((UINTN)(l2_identmap +
i * L2_PAGETABLE_ENTRIES),
__PAGE_HYPERVISOR);
- /* Initialise L3 xen-map page directory entry. */
+ /* Initialise L3 xen-map and fixmap page directory entries. */
l3_xenmap[l3_table_offset(XEN_VIRT_START)] =
l3e_from_paddr((UINTN)l2_xenmap, __PAGE_HYPERVISOR);
+ l3_xenmap[l3_table_offset(FIXADDR_TOP - 1)] =
+ l3e_from_paddr((UINTN)l2_fixmap, __PAGE_HYPERVISOR);
/* Initialise L3 boot-map page directory entries. */
l3_bootmap[l3_table_offset(xen_phys_start)] =
l3e_from_paddr((UINTN)l2_bootmap, __PAGE_HYPERVISOR);
l1_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
l1_identmap[L1_PAGETABLE_ENTRIES];
+/* Mapping of the fixmap space needed early. */
+l1_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
+ l1_fixmap[L1_PAGETABLE_ENTRIES];
+
#define MEM_LOG(_f, _a...) gdprintk(XENLOG_WARNING , _f "\n" , ## _a)
/*
l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
l2_xenmap[L2_PAGETABLE_ENTRIES];
+/* Enough page directories to map the early fixmap space. */
+l2_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
+ l2_fixmap[L2_PAGETABLE_ENTRIES];
+
/* Enough page directories to map into the bottom 1GB. */
l3_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
l3_bootmap[L3_PAGETABLE_ENTRIES];
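Taken together with the earlier hunks, the tables declared above chain up as follows for the early fixmap (a sketch in comment form, not part of the patch):

    /*
     * l3_xenmap[l3_table_offset(FIXADDR_TOP - 1)] --> l2_fixmap
     * l2_fixmap[l2_table_offset(FIXADDR_TOP - 1)] --> l1_fixmap
     * l1_fixmap[...]                              --> individual fixmap pages,
     *                                                 filled in once fixmap
     *                                                 entries are actually set.
     *
     * On x86-32 the middle step goes through idle_pg_table_l2 instead, as in
     * the assembly hunk above.  A single l1_fixmap page table spans one L2
     * entry's worth of address space (the region containing FIXADDR_TOP - 1),
     * which is the portion of the fixmap that has static backing this early.
     */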
#define MACHPHYS_MBYTES 16 /* 1 MB needed per 1 GB memory */
#define FRAMETABLE_MBYTES (MACHPHYS_MBYTES * 6)
-#define IOREMAP_VIRT_END 0UL
+#define IOREMAP_VIRT_END _AC(0,UL)
#define IOREMAP_VIRT_START (IOREMAP_VIRT_END - (IOREMAP_MBYTES<<20))
#define DIRECTMAP_VIRT_END IOREMAP_VIRT_START
#define DIRECTMAP_VIRT_START (DIRECTMAP_VIRT_END - (DIRECTMAP_MBYTES<<20))
#define _ASM_FIXMAP_H
#include <xen/config.h>
+#include <asm/page.h>
+
+#define FIXADDR_TOP (IOREMAP_VIRT_END - PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
+
#include <xen/pfn.h>
#include <xen/kexec.h>
#include <xen/iommu.h>
#include <asm/apicdef.h>
#include <asm/acpi.h>
-#include <asm/page.h>
#include <asm/amd-iommu.h>
#include <asm/msi.h>
#include <acpi/apei.h>
__end_of_fixed_addresses
};
-#define FIXADDR_TOP (IOREMAP_VIRT_END - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
return __virt_to_fix(vaddr);
}
+#endif /* __ASSEMBLY__ */
+
#endif
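Putting the fixmap.h definitions together, a worked expansion (illustrative) of the addresses the page-table hunks rely on:

    /*
     * FIXADDR_TOP   == IOREMAP_VIRT_END - PAGE_SIZE
     * FIXADDR_SIZE  == __end_of_fixed_addresses << PAGE_SHIFT
     * FIXADDR_START == FIXADDR_TOP - FIXADDR_SIZE
     *
     * Fixmap slots are handed out downwards from FIXADDR_TOP, so
     * FIXADDR_TOP - 1 is an address near the very top of the fixmap range;
     * hence the l2_table_offset(FIXADDR_TOP - 1) and
     * l3_table_offset(FIXADDR_TOP - 1) indexing in the earlier hunks.
     */

Moving FIXADDR_TOP outside the new #ifndef __ASSEMBLY__ guard is what lets the boot assembly and the EFI code evaluate these expressions without seeing the rest of the header.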
#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__
+#include <xen/const.h>
+
/*
* It is important that the masks are signed quantities. This ensures that
* the compiler sign-extends a 32-bit mask to 64 bits if that is required.
extern l2_pgentry_t *compat_idle_pg_table_l2;
extern unsigned int m2p_compat_vstart;
extern l2_pgentry_t l2_xenmap[L2_PAGETABLE_ENTRIES],
+ l2_fixmap[L2_PAGETABLE_ENTRIES],
l2_bootmap[L2_PAGETABLE_ENTRIES];
extern l3_pgentry_t l3_xenmap[L3_PAGETABLE_ENTRIES],
l3_identmap[L3_PAGETABLE_ENTRIES],
l3_bootmap[L3_PAGETABLE_ENTRIES];
#endif
extern l2_pgentry_t l2_identmap[4*L2_PAGETABLE_ENTRIES];
-extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES];
+extern l1_pgentry_t l1_identmap[L1_PAGETABLE_ENTRIES],
+ l1_fixmap[L1_PAGETABLE_ENTRIES];
void paging_init(void);
void setup_idle_pagetable(void);
#endif /* !defined(__ASSEMBLY__) */
--- /dev/null
+/* const.h: Macros for dealing with constants. */
+
+#ifndef __XEN_CONST_H__
+#define __XEN_CONST_H__
+
+/* Some constant macros are used in both assembler and
+ * C code, so they cannot be annotated unconditionally with
+ * 'UL' and other type suffixes.  _AC() appends the suffix
+ * in C but drops it in assembly.
+ *
+ * Similarly, _AT() casts an expression to a type in C, but
+ * leaves it unchanged in asm.
+ */
+
+#ifdef __ASSEMBLY__
+#define _AC(X,Y) X
+#define _AT(T,X) X
+#else
+#define __AC(X,Y) (X##Y)
+#define _AC(X,Y) __AC(X,Y)
+#define _AT(T,X) ((T)(X))
+#endif
+
+#endif /* __XEN_CONST_H__ */
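To see the new macros at work (illustrative), take the IOREMAP_VIRT_END change earlier in the patch:

    #define IOREMAP_VIRT_END _AC(0,UL)

    /* Compiled as C (__ASSEMBLY__ not defined): expands to (0UL), so the
     * arithmetic in IOREMAP_VIRT_START, FIXADDR_TOP, etc. is done in
     * unsigned long.
     * Pulled into an assembly file (__ASSEMBLY__ defined): expands to a
     * plain 0, which the assembler can evaluate; a literal 0UL would not
     * assemble. */

This is what allows FIXADDR_TOP, which is derived from IOREMAP_VIRT_END, to be used directly in the boot-time page-table setup at the top of the patch.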