ia64/xen-unstable
changeset 9033:dbec76a720f8
Add include/asm-i386/{fixmap,page}.h to sparse tree.
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
author    cl349@firebug.cl.cam.ac.uk
date      Mon Feb 27 10:26:01 2006 +0000 (2006-02-27)
parents   ab982f583b73
children  55f597e929f3
files     linux-2.6-xen-sparse/include/asm-i386/fixmap.h
          linux-2.6-xen-sparse/include/asm-i386/page.h
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/include/asm-i386/fixmap.h	Mon Feb 27 10:26:01 2006 +0000
@@ -0,0 +1,158 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <linux/config.h>
+
+/* used by vmalloc.c, vsyscall.lds.S.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+#define __FIXADDR_TOP	0xfffff000
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/acpi.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * Also this lets us do fail-safe vmalloc(), we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * these 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages. (or larger if used with an increment
+ * highger than 1) use fixmap_set(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
+	FIX_HOLE,
+	FIX_VSYSCALL,
+#ifdef CONFIG_X86_LOCAL_APIC
+	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+	FIX_IO_APIC_BASE_0,
+	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
+#endif
+#ifdef CONFIG_X86_VISWS_APIC
+	FIX_CO_CPU,	/* Cobalt timer */
+	FIX_CO_APIC,	/* Cobalt APIC Redirection Table */
+	FIX_LI_PCIA,	/* Lithium PCI Bridge A */
+	FIX_LI_PCIB,	/* Lithium PCI Bridge B */
+#endif
+#ifdef CONFIG_X86_F00F_BUG
+	FIX_F00F_IDT,	/* Virtual mapping for IDT */
+#endif
+#ifdef CONFIG_X86_CYCLONE_TIMER
+	FIX_CYCLONE_TIMER, /*cyclone timer register*/
+#endif
+#ifdef CONFIG_HIGHMEM
+	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
+	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#endif
+#ifdef CONFIG_ACPI
+	FIX_ACPI_BEGIN,
+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
+#ifdef CONFIG_PCI_MMCONFIG
+	FIX_PCIE_MCFG,
+#endif
+	__end_of_permanent_fixed_addresses,
+	/* temporary boot-time mappings, used before ioremap() is functional */
+#define NR_FIX_BTMAPS	16
+	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
+	FIX_WP_TEST,
+	__end_of_fixed_addresses
+};
+
+extern void __set_fixmap (enum fixed_addresses idx,
+			  unsigned long phys, pgprot_t flags);
+
+#define set_fixmap(idx, phys) \
+		__set_fixmap(idx, phys, PAGE_KERNEL)
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+		__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+
+#define clear_fixmap(idx) \
+		__set_fixmap(idx, 0, __pgprot(0))
+
+#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
+
+#define __FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define __FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START	(FIXADDR_TOP - __FIXADDR_SIZE)
+#define FIXADDR_BOOT_START	(FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * This is the range that is readable by user mode, and things
+ * acting like user mode such as get_user_pages.
+ */
+#define FIXADDR_USER_START	(__fix_to_virt(FIX_VSYSCALL))
+#define FIXADDR_USER_END	(FIXADDR_USER_START + PAGE_SIZE)
+
+
+extern void __this_fixmap_does_not_exist(void);
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without tranlation, we catch the bug with a NULL-deference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	/*
+	 * this branch gets completely eliminated after inlining,
+	 * except when someone tries to use fixaddr indices in an
	 * illegal way. (such as mixing up address types or using
+	 * out-of-range indices).
+	 *
+	 * If it doesn't get removed, the linker will complain
+	 * loudly with a reasonably clear error message..
+	 */
+	if (idx >= __end_of_fixed_addresses)
+		__this_fixmap_does_not_exist();
+
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif
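
[Editorial note, not part of the changeset.] The fixmap translation above is pure arithmetic: slot 0 sits at FIXADDR_TOP and each higher index lies one page lower. A minimal userspace sketch of that arithmetic, assuming FIXADDR_TOP = 0xfffff000 and PAGE_SHIFT = 12 exactly as defined in the header:

/* Illustrative only -- mirrors __fix_to_virt()/__virt_to_fix() from fixmap.h,
 * assuming FIXADDR_TOP = 0xfffff000 and PAGE_SHIFT = 12 as defined above. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT   12
#define DEMO_PAGE_SIZE    (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK    (~(DEMO_PAGE_SIZE - 1))
#define DEMO_FIXADDR_TOP  0xfffff000UL

/* index -> virtual address: fixmap slots grow downwards from FIXADDR_TOP */
static unsigned long demo_fix_to_virt(unsigned int idx)
{
	return DEMO_FIXADDR_TOP - ((unsigned long)idx << DEMO_PAGE_SHIFT);
}

/* virtual address -> index: inverse of the above, on page-aligned addresses */
static unsigned long demo_virt_to_fix(unsigned long vaddr)
{
	return (DEMO_FIXADDR_TOP - (vaddr & DEMO_PAGE_MASK)) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	printf("idx 0 -> %#lx\n", demo_fix_to_virt(0));              /* 0xfffff000 */
	printf("idx 1 -> %#lx\n", demo_fix_to_virt(1));              /* 0xffffe000 */
	printf("idx 5 -> %#lx\n", demo_fix_to_virt(5));              /* 0xffffa000 */
	printf("back  -> %lu\n",  demo_virt_to_fix(0xffffa123UL));   /* 5 */
	return 0;
}

With the enum above, FIX_VSYSCALL (index 1) therefore resolves to 0xffffe000, the page exported to user space through FIXADDR_USER_START/FIXADDR_USER_END.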
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/include/asm-i386/page.h	Mon Feb 27 10:26:01 2006 +0000
@@ -0,0 +1,146 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	12
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+
+#ifdef CONFIG_X86_USE_3DNOW
+
+#include <asm/mmx.h>
+
+#define clear_page(page)	mmx_clear_page((void *)(page))
+#define copy_page(to,from)	mmx_copy_page(to,from)
+
+#else
+
+/*
+ * On older X86 processors it's not a win to use MMX here it seems.
+ * Maybe the K6-III ?
+ */
+
+#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
+#define copy_page(to,from)	memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+#endif
+
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
+
+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+/*
+ * These are used to make use of C type-checking..
+ */
+extern int nx_enabled;
+#ifdef CONFIG_X86_PAE
+extern unsigned long long __supported_pte_mask;
+typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long long pgd; } pgd_t;
+typedef struct { unsigned long long pgprot; } pgprot_t;
+#define pmd_val(x)	((x).pmd)
+#define pte_val(x)	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+#define __pmd(x) ((pmd_t) { (x) } )
+#define HPAGE_SHIFT	21
+#else
+typedef struct { unsigned long pte_low; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define boot_pte_t pte_t /* or would you rather have a typedef */
+#define pte_val(x)	((x).pte_low)
+#define HPAGE_SHIFT	22
+#endif
+#define PTE_MASK	PAGE_MASK
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#endif /* !__ASSEMBLY__ */
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+extern unsigned int __VMALLOC_RESERVE;
+
+extern int sysctl_legacy_va_layout;
+
+extern int page_is_ram(unsigned long pagenr);
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+#define __PAGE_OFFSET		CONFIG_PAGE_OFFSET
+#define __PHYSICAL_START	CONFIG_PHYSICAL_START
+#else
+#define __PAGE_OFFSET		((unsigned long)CONFIG_PAGE_OFFSET)
+#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
+#endif
+#define __KERNEL_START		(__PAGE_OFFSET + __PHYSICAL_START)
+
+
+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
+#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
+#define MAXMEM			(-__PAGE_OFFSET-__VMALLOC_RESERVE)
+#define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
+#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
+#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+#ifdef CONFIG_FLATMEM
+#define pfn_to_page(pfn)	(mem_map + (pfn))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
+#define pfn_valid(pfn)		((pfn) < max_mapnr)
+#endif /* CONFIG_FLATMEM */
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define VM_DATA_DEFAULT_FLAGS \
+	(VM_READ | VM_WRITE | \
+	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/page.h>
+
+#endif /* _I386_PAGE_H */
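
[Editorial note, not part of the changeset.] In page.h, __pa()/__va() are plain offset arithmetic against the kernel's direct mapping at PAGE_OFFSET, and virt_to_page() is the same translation followed by a shift down to a page-frame number. A minimal userspace sketch of that arithmetic, assuming the conventional i386 value CONFIG_PAGE_OFFSET = 0xC0000000 and PAGE_SHIFT = 12:

/* Illustrative only -- mirrors the __pa()/__va() and pfn arithmetic from
 * page.h, assuming PAGE_OFFSET = 0xC0000000 and PAGE_SHIFT = 12. */
#include <stdio.h>

#define DEMO_PAGE_SHIFT   12
#define DEMO_PAGE_OFFSET  0xC0000000UL

/* kernel virtual address -> physical address: subtract the direct-map offset */
static unsigned long demo_pa(unsigned long vaddr)
{
	return vaddr - DEMO_PAGE_OFFSET;
}

/* physical address -> kernel virtual address: add the direct-map offset */
static unsigned long demo_va(unsigned long paddr)
{
	return paddr + DEMO_PAGE_OFFSET;
}

int main(void)
{
	unsigned long kva = 0xC0100000UL;    /* a typical lowmem kernel address */
	unsigned long pa  = demo_pa(kva);

	printf("__pa(%#lx) = %#lx\n", kva, pa);                 /* 0x00100000 */
	printf("__va(%#lx) = %#lx\n", pa, demo_va(pa));         /* 0xc0100000 */
	/* virt_to_page() boils down to a pfn: physical address >> PAGE_SHIFT */
	printf("pfn = %lu\n", pa >> DEMO_PAGE_SHIFT);           /* 256 */
	return 0;
}

The same shift appears in pfn_to_kaddr(), pfn_to_page() and virt_to_page() above; under CONFIG_FLATMEM the pfn is simply an index into the global mem_map array.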