ia64/xen-unstable

view linux-2.6-xen-sparse/arch/i386/mm/ioremap-xen.c @ 8772:55268b90a519

Remove the PHYSDEV_ACCESS configure option from Linux.
It saves very little space, no distro will disable it
in their unified build, and it's rather a pain to
implement it properly.

I've left the PRIVILEGED_GUEST option for now, as it
provides some indication of where our platform-bringup
hacks are in our Linux tree. We may wish to exclude
those from upstream merge in the first instance.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Feb 07 01:02:49 2006 +0100 (2006-02-07)
parents c84a051d8967
children c9edeb3bd652
line source
/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

#define ISA_START_ADDRESS	0x0
#define ISA_END_ADDRESS		0x100000

#if 0 /* not PAE safe */
/* These hacky macros avoid phys->machine translations. */
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define direct_mk_pte_phys(physpage, pgprot) \
  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
#endif

static int direct_remap_area_pte_fn(pte_t *pte,
				    struct page *pte_page,
				    unsigned long address,
				    void *data)
{
	mmu_update_t **v = (mmu_update_t **)data;

	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
	(*v)++;

	return 0;
}

static int __direct_remap_pfn_range(struct mm_struct *mm,
				    unsigned long address,
				    unsigned long mfn,
				    unsigned long size,
				    pgprot_t prot,
				    domid_t domid)
{
	int rc;
	unsigned long i, start_address;
	mmu_update_t *u, *v, *w;

	u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (u == NULL)
		return -ENOMEM;

	start_address = address;

	flush_cache_all();

	for (i = 0; i < size; i += PAGE_SIZE) {
		if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
			/* Fill in the PTE pointers. */
			rc = generic_page_range(mm, start_address,
						address - start_address,
						direct_remap_area_pte_fn, &w);
			if (rc)
				goto out;
			w = u;
			rc = -EFAULT;
			if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
				goto out;
			v = u;
			start_address = address;
		}

		/*
		 * Fill in the machine address: the PTE pointer is filled in
		 * later by direct_remap_area_pte_fn() via generic_page_range().
		 */
		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));

		mfn++;
		address += PAGE_SIZE;
		v++;
	}

	if (v != u) {
		/* Get the PTE pointers filled in. */
		rc = generic_page_range(mm, start_address,
					address - start_address,
					direct_remap_area_pte_fn, &w);
		if (rc)
			goto out;
		rc = -EFAULT;
		if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
			goto out;
	}

	rc = 0;

 out:
	flush_tlb_all();

	free_page((unsigned long)u);

	return rc;
}

int direct_remap_pfn_range(struct vm_area_struct *vma,
			   unsigned long address,
			   unsigned long mfn,
			   unsigned long size,
			   pgprot_t prot,
			   domid_t domid)
{
	/* Same as remap_pfn_range(). */
	vma->vm_flags |= VM_IO | VM_RESERVED;

	return __direct_remap_pfn_range(
		vma->vm_mm, address, mfn, size, prot, domid);
}
EXPORT_SYMBOL(direct_remap_pfn_range);
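
/*
 * Editor's illustrative sketch, not part of this file: one plausible way a
 * privileged driver's mmap() handler could use direct_remap_pfn_range() to
 * expose another domain's machine frames to user space.  The name
 * example_mmap_foreign and the way mfn/domid are obtained are assumptions.
 */
static int example_mmap_foreign(struct vm_area_struct *vma,
				unsigned long mfn, domid_t domid)
{
	/* Marks the VMA VM_IO|VM_RESERVED and batches the PTE writes
	 * through HYPERVISOR_mmu_update() (see above). */
	return direct_remap_pfn_range(vma, vma->vm_start, mfn,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot, domid);
}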

int direct_kernel_remap_pfn_range(unsigned long address,
				  unsigned long mfn,
				  unsigned long size,
				  pgprot_t prot,
				  domid_t domid)
{
	return __direct_remap_pfn_range(
		&init_mm, address, mfn, size, prot, domid);
}
EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
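
/*
 * Editor's illustrative sketch, not part of this file: mapping a single
 * machine frame into kernel space with direct_kernel_remap_pfn_range(),
 * in the same style __ioremap() uses below.  example_map_machine_page and
 * its arguments are hypothetical.
 */
static void __iomem *example_map_machine_page(unsigned long mfn, domid_t domid)
{
	struct vm_struct *area;

	/* Reserve a page-sized chunk of kernel virtual address space. */
	area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/* Point the new kernel PTE at the given machine frame. */
	if (direct_kernel_remap_pfn_range((unsigned long)area->addr, mfn,
					  PAGE_SIZE, PAGE_KERNEL, domid)) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *)area->addr;
}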

static int lookup_pte_fn(
	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
	uint64_t *ptep = (uint64_t *)data;
	if (ptep)
		*ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
			 PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
	return 0;
}

int create_lookup_pte_addr(struct mm_struct *mm,
			   unsigned long address,
			   uint64_t *ptep)
{
	return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
}

EXPORT_SYMBOL(create_lookup_pte_addr);

static int noop_fn(
	pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
	return 0;
}

int touch_pte_range(struct mm_struct *mm,
		    unsigned long address,
		    unsigned long size)
{
	return generic_page_range(mm, address, size, noop_fn, NULL);
}

EXPORT_SYMBOL(touch_pte_range);
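
/*
 * Editor's illustrative sketch, not part of this file: populating the page
 * tables covering a user buffer with touch_pte_range() and then fetching
 * the machine address of the PTE slot for its first page with
 * create_lookup_pte_addr().  example_prepare_buffer is hypothetical.
 */
static int example_prepare_buffer(struct mm_struct *mm, unsigned long start,
				  unsigned long len, uint64_t *first_pte_maddr)
{
	int rc;

	/* Allocate any missing page-table levels for the whole buffer. */
	rc = touch_pte_range(mm, start, len);
	if (rc)
		return rc;

	/* Machine address of the PTE that will map the first page. */
	return create_lookup_pte_addr(mm, start, first_pte_maddr);
}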

/*
 * Does @address reside within a non-highmem page that is local to this virtual
 * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
 * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
 * why this works.
 */
static inline int is_local_lowmem(unsigned long address)
{
	extern unsigned long max_low_pfn;
	return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;
	domid_t domid = DOMID_IO;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (xen_start_info->flags & SIF_PRIVILEGED &&
	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) isa_bus_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (is_local_lowmem(phys_addr)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = bus_to_virt(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;

		domid = DOMID_SELF;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
#ifdef __x86_64__
	flags |= _PAGE_USER;
#endif
	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
				     phys_addr>>PAGE_SHIFT,
				     size, __pgprot(flags), domid)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (is_local_lowmem(last_addr)) {
		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
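
/*
 * Editor's illustrative sketch, not part of this file: mapping a device's
 * control registers uncached and accessing them with the mmio helpers, as
 * the comment above recommends.  example_toggle_bit0 and the register
 * block address/size are hypothetical.
 */
static void example_toggle_bit0(unsigned long regs_phys)
{
	void __iomem *regs;
	u32 val;

	regs = ioremap_nocache(regs_phys, PAGE_SIZE);
	if (regs == NULL)
		return;

	val = readl(regs);		/* read the first 32-bit register */
	writel(val ^ 1, regs);		/* flip bit 0 and write it back */

	iounmap(regs);			/* must be freed with iounmap */
}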

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
		/* p->size includes the guard page, but cpa doesn't like that */
		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef __i386__

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (xen_start_info->flags & SIF_PRIVILEGED &&
	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return isa_bus_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

#endif /* __i386__ */
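
#ifdef __i386__
/*
 * Editor's illustrative sketch, not part of this file: how early boot code
 * might use bt_ioremap()/bt_iounmap() to inspect a firmware table before
 * the normal ioremap() machinery is usable.  example_table_signature and
 * the table address are hypothetical.
 */
static u8 __init example_table_signature(unsigned long table_phys)
{
	void *table;
	u8 sig;

	table = bt_ioremap(table_phys, PAGE_SIZE);
	if (table == NULL)
		return 0;

	sig = *(u8 *)table;		/* e.g. the table's signature byte */
	bt_iounmap(table, PAGE_SIZE);

	return sig;
}
#endif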

/*
 * Local variables:
 *  c-file-style: "linux"
 *  indent-tabs-mode: t
 *  c-indent-level: 8
 *  c-basic-offset: 8
 *  tab-width: 8
 * End:
 */