ia64/xen-unstable

linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c @ 4091:0b05ec3c5331

bitkeeper revision 1.1159.258.45 (4230e6ab4xRdK4pchNbpgZE-preulA)

Fix one more pud merge incident.
Signed-off-by: Christian Limpach <chris@xensource.com>
author cl349@firebug.cl.cam.ac.uk
date Fri Mar 11 00:30:35 2005 +0000 (2005-03-11)
parents cff0d3baf599
children db5a30a327e6

/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

#ifndef CONFIG_XEN_PHYSDEV_ACCESS

void * __ioremap(unsigned long phys_addr, unsigned long size,
                 unsigned long flags)
{
        return NULL;
}

void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
        return NULL;
}

void iounmap(volatile void __iomem *addr)
{
}

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
        return NULL;
}

void __init bt_iounmap(void *addr, unsigned long size)
{
}

#else

/*
 * Does @address reside within a non-highmem page that is local to this virtual
 * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
 * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
 * why this works.
 */
static inline int is_local_lowmem(unsigned long address)
{
        extern unsigned long max_low_pfn;
        unsigned long mfn = address >> PAGE_SHIFT;
        unsigned long pfn = mfn_to_pfn(mfn);
        return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn));
}
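
/*
 * Editor's illustrative sketch (not part of the original file): a frame the
 * guest owns round-trips through mfn_to_pfn()/pfn_to_mfn(), a foreign or
 * I/O frame does not. A caller could use that to pick the translation
 * domain, mirroring the logic in __ioremap() below. example_pick_domid()
 * is hypothetical.
 */
static inline domid_t example_pick_domid(unsigned long machine_addr)
{
        /* DOMID_LOCAL: treat as local pseudo-physical; DOMID_IO: raw I/O space */
        return is_local_lowmem(machine_addr) ? DOMID_LOCAL : DOMID_IO;
}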

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void __iomem * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;
        domid_t domid = DOMID_IO;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0x0 && last_addr < 0x100000)
                return isa_bus_to_virt(phys_addr);
#endif

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (is_local_lowmem(phys_addr)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = bus_to_virt(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;

                domid = DOMID_LOCAL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
        if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
                                    size, __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                                   _PAGE_DIRTY | _PAGE_ACCESSED
                                                   | flags), domid)) {
                vunmap((void __force *) addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char __iomem *)addr);
}
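
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * caller of __ioremap(). The base address 0xfebf0000, the register offset
 * 0x04, and example_map_device() itself are hypothetical; flags == 0 gives
 * a cacheable mapping, as plain ioremap() would.
 */
static int example_map_device(void)
{
        void __iomem *regs;

        regs = __ioremap(0xfebf0000UL, 0x1000, 0);      /* hypothetical BAR */
        if (!regs)
                return -ENOMEM;
        printk(KERN_DEBUG "status=%08x\n", readl(regs + 0x04));
        iounmap(regs);
        return 0;
}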

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
        unsigned long last_addr;
        void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
        if (!p)
                return p;

        /* Guaranteed to be > phys_addr, as per __ioremap() */
        last_addr = phys_addr + size - 1;

        if (is_local_lowmem(last_addr)) {
                struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
                unsigned long npages;

                phys_addr &= PAGE_MASK;

                /* This might overflow and become zero.. */
                last_addr = PAGE_ALIGN(last_addr);

                /* .. but that's ok, because modulo-2**n arithmetic will make
                 * the page-aligned "last - first" come out right.
                 */
                npages = (last_addr - phys_addr) >> PAGE_SHIFT;

                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
                        iounmap(p);
                        p = NULL;
                }
                global_flush_tlb();
        }

        return p;
}

void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p;
        if ((void __force *) addr <= high_memory)
                return;
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
                return;
#endif
        p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
        if (!p) {
                printk("__iounmap: bad address %p\n", addr);
                return;
        }

        if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
                /* p->size includes the guard page, but cpa doesn't like that */
                change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
                                 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        }
        kfree(p);
}
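
/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * ioremap_nocache()/iounmap() pairing in a driver. The physical address
 * and example_probe() are hypothetical.
 */
static int example_probe(void)
{
        void __iomem *ctrl;

        ctrl = ioremap_nocache(0xfed00000UL, PAGE_SIZE);
        if (!ctrl)
                return -ENOMEM;
        writel(0x1, ctrl);              /* hypothetical enable register */
        iounmap(ctrl);
        return 0;
}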

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0x0 && last_addr < 0x100000)
                return isa_bus_to_virt(phys_addr);
#endif

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS)
                return NULL;

        /*
         * Ok, go for it..
         */
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
                set_fixmap_ma(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

void __init bt_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
                return;
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
                return;
#endif
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
                clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
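
/*
 * Editor's illustrative sketch (not part of the original file): bt_ioremap()
 * and bt_iounmap() give early boot code a temporary mapping through the
 * FIX_BTMAP fixmap slots, before vmalloc()/ioremap() are usable. The table
 * address and example_early_peek() are hypothetical.
 */
static void __init example_early_peek(void)
{
        char *tbl = bt_ioremap(0x000f0000UL, 0x100);    /* hypothetical table */

        if (tbl) {
                /* ... inspect the first bytes of the table here ... */
                bt_iounmap(tbl, 0x100);
        }
}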

#endif /* CONFIG_XEN_PHYSDEV_ACCESS */

/* These hacky macros avoid phys->machine translations. */
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
        __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define direct_mk_pte_phys(physpage, pgprot) \
        __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
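
/*
 * Editor's note (not part of the original file): mk_pte_phys() would send
 * the frame number through the phys-to-machine table, whereas these macros
 * splice the given frame number into the PTE verbatim, so a machine (or
 * foreign) frame number survives untranslated. Worked expansion:
 *
 *      direct_mk_pte_phys(maddr, prot)
 *        -> __direct_mk_pte(maddr >> PAGE_SHIFT, prot)
 *        -> __direct_pte(((maddr >> PAGE_SHIFT) << PAGE_SHIFT) | pgprot_val(prot))
 *        == (pte_t) { (maddr & PAGE_MASK) | pgprot_val(prot) }
 */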

static inline void direct_remap_area_pte(pte_t *pte,
                                         unsigned long address,
                                         unsigned long size,
                                         mmu_update_t **v)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();

        do {
                (*v)->ptr = virt_to_machine(pte);
                (*v)++;
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}

static inline int direct_remap_area_pmd(struct mm_struct *mm,
                                        pmd_t *pmd,
                                        unsigned long address,
                                        unsigned long size,
                                        mmu_update_t **v)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        if (address >= end)
                BUG();
        do {
                pte_t *pte = (mm == &init_mm) ?
                        pte_alloc_kernel(mm, pmd, address) :
                        pte_alloc_map(mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                direct_remap_area_pte(pte, address, end - address, v);
                pte_unmap(pte);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

int __direct_remap_area_pages(struct mm_struct *mm,
                              unsigned long address,
                              unsigned long size,
                              mmu_update_t *v)
{
        pgd_t * dir;
        unsigned long end = address + size;
        int error;

        dir = pgd_offset(mm, address);
        if (address >= end)
                BUG();
        spin_lock(&mm->page_table_lock);
        do {
                pud_t *pud;
                pmd_t *pmd;

                error = -ENOMEM;
                pud = pud_alloc(mm, dir, address);
                if (!pud)
                        break;
                pmd = pmd_alloc(mm, pud, address);
                if (!pmd)
                        break;
                error = 0;
                direct_remap_area_pmd(mm, pmd, address, end - address, &v);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;

        } while (address && (address < end));
        spin_unlock(&mm->page_table_lock);
        return error;
}
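
/*
 * Editor's note (not part of the original file): the batching protocol.
 * direct_remap_area_pages() below queues one mmu_update_t per page with
 * only .val filled in (machine address | prot); __direct_remap_area_pages()
 * then walks the pgd/pud/pmd levels, allocating page tables as needed,
 * while direct_remap_area_pte() back-fills each request's .ptr with the
 * machine address of the PTE slot. The completed batch is submitted in a
 * single HYPERVISOR_mmu_update() hypercall.
 */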

int direct_remap_area_pages(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long machine_addr,
                            unsigned long size,
                            pgprot_t prot,
                            domid_t domid)
{
        int i;
        unsigned long start_address;
#define MAX_DIRECTMAP_MMU_QUEUE 130
        mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;

        v = w = &u[0];
        if (domid != DOMID_LOCAL) {
                u[0].ptr = MMU_EXTENDED_COMMAND;
                u[0].val = MMUEXT_SET_FOREIGNDOM;
                u[0].val |= (unsigned long)domid << 16;
                v = w = &u[1];
        }

        start_address = address;

        flush_cache_all();

        for (i = 0; i < size; i += PAGE_SIZE) {
                if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
                        /* Fill in the PTE pointers. */
                        __direct_remap_area_pages(mm,
                                                  start_address,
                                                  address-start_address,
                                                  w);

                        if (HYPERVISOR_mmu_update(u, v - u, NULL) < 0)
                                return -EFAULT;
                        v = w;
                        start_address = address;
                }

                /*
                 * Fill in the machine address: PTE ptr is done later by
                 * __direct_remap_area_pages().
                 */
                v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);

                machine_addr += PAGE_SIZE;
                address += PAGE_SIZE;
                v++;
        }

        if (v != w) {
                /* get the ptep's filled in */
                __direct_remap_area_pages(mm,
                                          start_address,
                                          address-start_address,
                                          w);
                if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0))
                        return -EFAULT;
        }

        flush_tlb_all();

        return 0;
}

EXPORT_SYMBOL(direct_remap_area_pages);
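
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * privileged guest might map another domain's machine pages, in the style
 * of __ioremap() above. example_map_foreign() and its arguments are
 * hypothetical; error handling is minimal.
 */
static void __iomem *example_map_foreign(unsigned long machine_addr,
                                         unsigned long size, domid_t dom)
{
        struct vm_struct *area = get_vm_area(size, VM_IOREMAP);

        if (!area)
                return NULL;
        if (direct_remap_area_pages(&init_mm, (unsigned long)area->addr,
                                    machine_addr, size,
                                    __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                             _PAGE_DIRTY | _PAGE_ACCESSED),
                                    dom)) {
                vunmap(area->addr);
                return NULL;
        }
        return (void __iomem *)area->addr;
}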