ia64/xen-unstable

linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c @ 6697:a9a78ca76cd2

Replace direct_remap_area_pages with direct_remap_pfn_range to help fix PAE domain building.
Signed-off-by: ian@xensource.com
author iap10@freefall.cl.cam.ac.uk
date Wed Sep 07 23:10:49 2005 +0000 (2005-09-07)
parents 1f460d0fd6c6
children 4cdf880c9463
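For context, a minimal sketch of how the new interface is called (the helper name and the MFN/size values below are made up for illustration; the real in-tree caller is __ioremap() further down in this file):

/*
 * Hypothetical caller sketch, not part of this file: map 16 machine
 * frames starting at MFN 0x12345 into a previously reserved kernel
 * virtual range, much as __ioremap() does for DOMID_IO memory.
 */
static int example_map_io_frames(unsigned long vaddr)
{
	return direct_remap_pfn_range(&init_mm, vaddr,
				      0x12345,			/* first MFN */
				      16 * PAGE_SIZE,		/* size in bytes */
				      PAGE_KERNEL_NOCACHE,	/* pgprot */
				      DOMID_IO);		/* frame owner */
}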
/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#define ISA_START_ADDRESS	0x0
#define ISA_END_ADDRESS		0x100000

#if 0 /* not PAE safe */
/* These hacky macros avoid phys->machine translations. */
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
	__direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define direct_mk_pte_phys(physpage, pgprot) \
	__direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
#endif
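
/*
 * Callback for generic_page_range(): record the machine address of
 * each PTE slot in the next pending mmu_update entry.  The new PTE
 * value itself is filled in beforehand by direct_remap_pfn_range().
 */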
static int direct_remap_area_pte_fn(pte_t *pte,
				    struct page *pte_page,
				    unsigned long address,
				    void *data)
{
	mmu_update_t **v = (mmu_update_t **)data;

	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
	(*v)++;

	return 0;
}
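
/*
 * Map 'size' bytes of machine memory starting at frame 'mfn' at
 * virtual 'address' in 'mm', batching the updates and applying them
 * with HYPERVISOR_mmu_update() on behalf of 'domid'.
 */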
int direct_remap_pfn_range(struct mm_struct *mm,
			   unsigned long address,
			   unsigned long mfn,
			   unsigned long size,
			   pgprot_t prot,
			   domid_t domid)
{
	int i;
	unsigned long start_address;
#define MAX_DIRECTMAP_MMU_QUEUE 130
	mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u, *w = u;

	start_address = address;

	flush_cache_all();

	for (i = 0; i < size; i += PAGE_SIZE) {
		if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
			/* Fill in the PTE pointers. */
			generic_page_range(mm, start_address,
					   address - start_address,
					   direct_remap_area_pte_fn, &w);
			w = u;
			if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
				return -EFAULT;
			v = u;
			start_address = address;
		}

		/*
		 * Fill in the machine address: the PTE pointer is filled
		 * in later by generic_page_range(), via
		 * direct_remap_area_pte_fn() above.
		 */
		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));

		mfn++;
		address += PAGE_SIZE;
		v++;
	}

	if (v != u) {
		/* Get the PTE pointers filled in. */
		generic_page_range(mm, start_address, address - start_address,
				   direct_remap_area_pte_fn, &w);
		if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
			return -EFAULT;
	}

	flush_tlb_all();

	return 0;
}

EXPORT_SYMBOL(direct_remap_pfn_range);
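
/*
 * create_lookup_pte_addr(): ensure page tables exist for 'address' and
 * return the machine address of its PTE slot in *ptep.  An unsigned
 * long cannot hold a PAE machine address, hence the FIXME below.
 */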
/* FIXME: This is horribly broken on PAE */
static int lookup_pte_fn(
	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
	unsigned long *ptep = (unsigned long *)data;
	if (ptep)
		*ptep = (pfn_to_mfn(page_to_pfn(pte_page)) <<
			 PAGE_SHIFT) |
			((unsigned long)pte & ~PAGE_MASK);
	return 0;
}

int create_lookup_pte_addr(struct mm_struct *mm,
			   unsigned long address,
			   unsigned long *ptep)
{
	return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
}

EXPORT_SYMBOL(create_lookup_pte_addr);
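
/*
 * touch_pte_range(): the no-op callback means generic_page_range() is
 * called purely for its side effect of populating any missing
 * page-table pages covering [address, address + size).
 */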
static int noop_fn(
	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
	return 0;
}

int touch_pte_range(struct mm_struct *mm,
		    unsigned long address,
		    unsigned long size)
{
	return generic_page_range(mm, address, size, noop_fn, NULL);
}

EXPORT_SYMBOL(touch_pte_range);
#ifdef CONFIG_XEN_PHYSDEV_ACCESS

/*
 * Does @address reside within a non-highmem page that is local to this virtual
 * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
 * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand
 * why this works.
 */
static inline int is_local_lowmem(unsigned long address)
{
	extern unsigned long max_low_pfn;
	unsigned long mfn = address >> PAGE_SHIFT;
	unsigned long pfn = mfn_to_pfn(mfn);
	return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn));
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;
	domid_t domid = DOMID_IO;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (xen_start_info->flags & SIF_PRIVILEGED &&
	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) isa_bus_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
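	/*
	 * Local RAM may be remapped only if every page in the range is
	 * marked reserved; in that case the mapping is made on our own
	 * behalf (DOMID_SELF) rather than as I/O memory (DOMID_IO).
	 */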
	if (is_local_lowmem(phys_addr)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = bus_to_virt(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;

		domid = DOMID_SELF;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
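	/*
	 * The caching flags are stashed in the upper bits of the VM area
	 * flags (flags << 20) so that iounmap() can later tell whether
	 * the direct mapping needs its attributes restored.
	 */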
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
#ifdef __x86_64__
	flags |= _PAGE_USER;
#endif
	if (direct_remap_pfn_range(&init_mm, (unsigned long) addr, phys_addr>>PAGE_SHIFT,
				   size, __pgprot(flags), domid)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;
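
	/*
	 * If the range aliases local lowmem, switch the kernel direct
	 * mapping of those frames to uncached as well, so both virtual
	 * aliases agree on cacheability.
	 */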
	if (is_local_lowmem(last_addr)) {
		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
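
/*
 * Tear down a mapping created by __ioremap().  If the region aliased
 * local lowmem with non-default caching attributes, restore the direct
 * mapping to PAGE_KERNEL.
 */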
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;
	if ((void __force *) addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
		return;

	write_lock(&vmlist_lock);
	p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		goto out_unlock;
	}

	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
		/* p->size includes the guard page, but cpa doesn't like that */
		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
out_unlock:
	write_unlock(&vmlist_lock);
	kfree(p);
}
#ifdef __i386__
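
/*
 * Boot-time ioremap: map up to NR_FIX_BTMAPS pages through the
 * FIX_BTMAP fixmap slots, for use before the vmalloc-based __ioremap()
 * is available.
 */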
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (xen_start_info->flags & SIF_PRIVILEGED &&
	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return isa_bus_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

#endif /* __i386__ */
#else /* CONFIG_XEN_PHYSDEV_ACCESS */
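
/*
 * Without CONFIG_XEN_PHYSDEV_ACCESS the guest cannot map physical
 * device memory, so the ioremap family reduces to stubs.
 */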
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size,
			 unsigned long flags)
{
	return NULL;
}

void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	return NULL;
}

void iounmap(volatile void __iomem *addr)
{
}

#ifdef __i386__

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	return NULL;
}

void __init bt_iounmap(void *addr, unsigned long size)
{
}

#endif /* __i386__ */

#endif /* CONFIG_XEN_PHYSDEV_ACCESS */
/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */