ia64/xen-unstable

view linux-2.4.29-xen-sparse/arch/xen/mm/ioremap.c @ 3516:1a4f61d36171

bitkeeper revision 1.1159.223.31 (41f599bcklevTYwPtWQUZ7QK-azDbg)

Fix recent patch to change the way the version string is generated.
Signed-off-by: ian.pratt@cl.cam.ac.uk
author iap10@freefall.cl.cam.ac.uk
date Tue Jan 25 00:58:36 2005 +0000 (2005-01-25)
parents ed0d4ce83995
children d126cac32f08
/*
 * arch/xen/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Modifications for Xenolinux (c) 2003-2004 Keir Fraser
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/mmu.h>
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)

/*
 * These hacky macros avoid phys->machine translations: the caller already
 * holds a machine address, so the PTE is built by shifting it in directly
 * rather than going through mk_pte_phys().
 */
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define direct_mk_pte_phys(physpage, pgprot) \
    __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
static inline void direct_remap_area_pte(pte_t *pte,
                                         unsigned long address,
                                         unsigned long size,
                                         mmu_update_t **v)
{
    unsigned long end;

    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    if (address >= end)
        BUG();

    /* Record the machine address of each PTE slot to be updated; the new
       PTE values (->val) were already filled in by the caller. */
    do {
        (*v)->ptr = virt_to_machine(pte);
        (*v)++;
        address += PAGE_SIZE;
        pte++;
    } while (address && (address < end));
}
static inline int direct_remap_area_pmd(struct mm_struct *mm,
                                        pmd_t *pmd,
                                        unsigned long address,
                                        unsigned long size,
                                        mmu_update_t **v)
{
    unsigned long end;

    address &= ~PGDIR_MASK;
    end = address + size;
    if (end > PGDIR_SIZE)
        end = PGDIR_SIZE;
    if (address >= end)
        BUG();

    do {
        pte_t *pte = pte_alloc(mm, pmd, address);
        if (!pte)
            return -ENOMEM;
        direct_remap_area_pte(pte, address, end - address, v);
        address = (address + PMD_SIZE) & PMD_MASK;
        pmd++;
    } while (address && (address < end));

    return 0;
}
int __direct_remap_area_pages(struct mm_struct *mm,
                              unsigned long address,
                              unsigned long size,
                              mmu_update_t *v)
{
    pgd_t *dir;
    unsigned long end = address + size;
    int error = 0;

    dir = pgd_offset(mm, address);
    flush_cache_all();
    if (address >= end)
        BUG();

    spin_lock(&mm->page_table_lock);
    do {
        pmd_t *pmd = pmd_alloc(mm, dir, address);
        if (!pmd) {
            /* Break out rather than return, so the lock is not leaked. */
            error = -ENOMEM;
            break;
        }
        error = direct_remap_area_pmd(mm, pmd, address, end - address, &v);
        if (error)
            break;
        address = (address + PGDIR_SIZE) & PGDIR_MASK;
        dir++;
    } while (address && (address < end));
    spin_unlock(&mm->page_table_lock);

    flush_tlb_all();
    return error;
}
int direct_remap_area_pages(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long machine_addr,
                            unsigned long size,
                            pgprot_t prot,
                            domid_t domid)
{
    int i;
    unsigned long start_address;
#define MAX_DIRECTMAP_MMU_QUEUE 130
    mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;

    u[0].ptr  = MMU_EXTENDED_COMMAND;
    u[0].val  = MMUEXT_SET_FOREIGNDOM;
    u[0].val |= (unsigned long)domid << 16;
    v = w = &u[1];

    start_address = address;

    for ( i = 0; i < size; i += PAGE_SIZE )
    {
        if ( (v - u) == MAX_DIRECTMAP_MMU_QUEUE )
        {
            /* Fill in the PTE pointers. */
            __direct_remap_area_pages(mm,
                                      start_address,
                                      address - start_address,
                                      w);

            if ( HYPERVISOR_mmu_update(u, v - u, NULL) < 0 )
                return -EFAULT;
            v = w;
            start_address = address;
        }

        /*
         * Fill in the machine address: the PTE ptr is filled in later by
         * __direct_remap_area_pages().
         */
        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);

        machine_addr += PAGE_SIZE;
        address += PAGE_SIZE;
        v++;
    }

    if ( v != w )
    {
        /* Fill in the remaining PTE pointers. */
        __direct_remap_area_pages(mm,
                                  start_address,
                                  address - start_address,
                                  w);
        if ( unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0) )
            return -EFAULT;
    }

    return 0;
}
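
/*
 * Commentary (added, not in the original file): direct_remap_area_pages()
 * batches updates in two passes. Its main loop fills in only each entry's
 * new PTE value (machine address | prot); __direct_remap_area_pages() then
 * walks the page tables and fills in each entry's ->ptr with the machine
 * address of the PTE slot to modify. A single HYPERVISOR_mmu_update() call,
 * prefixed by the MMUEXT_SET_FOREIGNDOM command in u[0], then installs up
 * to MAX_DIRECTMAP_MMU_QUEUE - 1 mappings of 'domid's frames at once.
 */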
#endif /* CONFIG_XEN_PRIVILEGED_GUEST */


/*
 * Remap an arbitrary machine address space into the kernel virtual
 * address space. Needed when a privileged instance of Xenolinux wants
 * to access space outside its world directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long machine_addr,
                 unsigned long size,
                 unsigned long flags)
{
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    void * addr;
    struct vm_struct * area;
    unsigned long offset, last_addr;
    pgprot_t prot;

    /* Don't allow wraparound or zero size. */
    last_addr = machine_addr + size - 1;
    if (!size || last_addr < machine_addr)
        return NULL;

    /* Mappings have to be page-aligned. */
    offset = machine_addr & ~PAGE_MASK;
    machine_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr + 1) - machine_addr;

    /* Ok, go for it. */
    area = get_vm_area(size, VM_IOREMAP);
    if (!area)
        return NULL;
    addr = area->addr;
    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
                    _PAGE_ACCESSED | flags);
    if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
                                machine_addr, size, prot, 0)) {
        vfree(addr);
        return NULL;
    }
    return (void *) (offset + (char *)addr);
#else
    return NULL;
#endif
}
void iounmap(void *addr)
{
    vfree((void *)((unsigned long)addr & PAGE_MASK));
}
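
/*
 * Illustrative usage (added, not in the original file): a privileged guest
 * might map a machine-address range with __ioremap() and release it with
 * iounmap(). The function name, the legacy VGA window address and the
 * length below are examples only.
 */
#if 0
static void example_vga_poke(void)
{
    /* Map 128KB of machine address space starting at the VGA window. */
    char *vga = (char *)__ioremap(0x000A0000, 0x20000, 0);
    if (vga == NULL)
        return;
    writeb(0xff, vga);      /* touch the first byte of the mapped window */
    iounmap(vga);
}
#endif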
/*
 * Implementation of boot-time ioremap, used to give privileged domains
 * access to the VGA console. Unlike boot-time ioremap on other
 * architectures, ours is permanent and not reclaimed when the vmalloc
 * infrastructure is started.
 */
void __init *bt_ioremap(unsigned long machine_addr, unsigned long size)
{
    unsigned long offset, last_addr;
    unsigned int nrpages;
    enum fixed_addresses idx;

    /* Don't allow wraparound or zero size. */
    last_addr = machine_addr + size - 1;
    if (!size || last_addr < machine_addr)
        return NULL;

    /*
     * Mappings have to be page-aligned.
     */
    offset = machine_addr & ~PAGE_MASK;
    machine_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr + 1) - machine_addr;

    /*
     * Mappings have to fit in the FIX_BTMAP area.
     */
    nrpages = size >> PAGE_SHIFT;
    if (nrpages > NR_FIX_BTMAPS)
        return NULL;

    /*
     * Ok, go for it..
     */
    idx = FIX_BTMAP_BEGIN;
    while (nrpages > 0) {
        __set_fixmap(idx, machine_addr, PAGE_KERNEL);
        machine_addr += PAGE_SIZE;
        --idx;
        --nrpages;
    }

    flush_tlb_all();

    return (void *) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
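
/*
 * Illustrative usage (added, not in the original file): domain 0 might use
 * bt_ioremap() very early in boot, before the vmalloc infrastructure (and
 * hence __ioremap()) is available, to reach the VGA text console. The
 * function name, address and size are examples only.
 */
#if 0
static void __init *example_map_vga_console(void)
{
    /* 32KB colour text-mode buffer at machine address 0xB8000. */
    return bt_ioremap(0x000B8000, 0x8000);
}
#endif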
#if 0 /* We don't support these functions. They shouldn't be required. */
void __init bt_iounmap(void *addr, unsigned long size) {}
#endif