ia64/linux-2.6.18-xen.hg

view include/asm-arm/pgtable-nommu.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
children
line source
1 /*
2 * linux/include/asm-arm/pgtable-nommu.h
3 *
4 * Copyright (C) 1995-2002 Russell King
5 * Copyright (C) 2004 Hyok S. Choi
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #ifndef _ASMARM_PGTABLE_NOMMU_H
12 #define _ASMARM_PGTABLE_NOMMU_H
14 #ifndef __ASSEMBLY__
16 #include <linux/config.h>
17 #include <linux/slab.h>
18 #include <asm/processor.h>
19 #include <asm/page.h>
20 #include <asm/io.h>
22 /*
23 * Trivial page table functions.
24 */
25 #define pgd_present(pgd) (1)
26 #define pgd_none(pgd) (0)
27 #define pgd_bad(pgd) (0)
28 #define pgd_clear(pgdp)
29 #define kern_addr_valid(addr) (1)
30 #define pmd_offset(a, b) ((void *)0)
31 /* FIXME */
32 /*
33 * PMD_SHIFT determines the size of the area a second-level page table can map
34 * PGDIR_SHIFT determines what a third-level page table entry can map
35 */
36 #define PGDIR_SHIFT 21
38 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
39 #define PGDIR_MASK (~(PGDIR_SIZE-1))
40 /* FIXME */
42 #define PAGE_NONE __pgprot(0)
43 #define PAGE_SHARED __pgprot(0)
44 #define PAGE_COPY __pgprot(0)
45 #define PAGE_READONLY __pgprot(0)
46 #define PAGE_KERNEL __pgprot(0)
48 //extern void paging_init(struct meminfo *, struct machine_desc *);
49 #define swapper_pg_dir ((pgd_t *) 0)
51 #define __swp_type(x) (0)
52 #define __swp_offset(x) (0)
53 #define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
54 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
55 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
58 typedef pte_t *pte_addr_t;
60 static inline int pte_file(pte_t pte) { return 0; }
62 /*
63 * ZERO_PAGE is a global shared page that is always zero: used
64 * for zero-mapped memory areas etc..
65 */
66 #define ZERO_PAGE(vaddr) (virt_to_page(0))
68 /*
69 * Mark the prot value as uncacheable and unbufferable.
70 */
71 #define pgprot_noncached(prot) __pgprot(0)
72 #define pgprot_writecombine(prot) __pgprot(0)
75 /*
76 * These would be in other places but having them here reduces the diffs.
77 */
78 extern unsigned int kobjsize(const void *objp);
79 extern int is_in_rom(unsigned long);
81 /*
82 * No page table caches to initialise.
83 */
84 #define pgtable_cache_init() do { } while (0)
85 #define io_remap_page_range remap_page_range
86 #define io_remap_pfn_range remap_pfn_range
88 #define MK_IOSPACE_PFN(space, pfn) (pfn)
89 #define GET_IOSPACE(pfn) 0
90 #define GET_PFN(pfn) (pfn)
93 /*
94 * All 32bit addresses are effectively valid for vmalloc...
95 * Sort of meaningless for non-VM targets.
96 */
97 #define VMALLOC_START 0
98 #define VMALLOC_END 0xffffffff
100 #define FIRST_USER_ADDRESS (0)
102 #else
104 /*
105 * dummy tlb and user structures.
106 */
107 #define v3_tlb_fns (0)
108 #define v4_tlb_fns (0)
109 #define v4wb_tlb_fns (0)
110 #define v4wbi_tlb_fns (0)
111 #define v6_tlb_fns (0)
113 #define v3_user_fns (0)
114 #define v4_user_fns (0)
115 #define v4_mc_user_fns (0)
116 #define v4wb_user_fns (0)
117 #define v4wt_user_fns (0)
118 #define v6_user_fns (0)
119 #define xscale_mc_user_fns (0)
121 #endif /*__ASSEMBLY__*/
123 #endif /* _ASMARM_PGTABLE_H */