direct-io.hg: linux-2.4.30-xen-sparse/arch/xen/mm/ioremap.c @ 5647:6f462a11a08e

Register the portio handler only once.
Signed-off-by: Arun Sharma <arun.sharma@intel.com>

author:   kaf24@firebug.cl.cam.ac.uk
date:     Fri Jul 01 21:25:45 2005 +0000
parents:  85fcf3b1b7a5
children: 56a63f9f378f
/*
 * arch/xen/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Modifications for Xenolinux (c) 2003-2004 Keir Fraser
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/mmu.h>

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)

/* These hacky macros avoid phys->machine translations. */
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
    __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define direct_mk_pte_phys(physpage, pgprot) \
    __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
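
/*
 * For each PTE slot covered by [address, address + size) within a single
 * PMD, queue one pending mmu_update request: the machine address of the
 * PTE slot goes into the request's .ptr field. The new PTE value (.val)
 * has already been filled in by direct_remap_area_pages() below.
 */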
static inline void direct_remap_area_pte(pte_t *pte,
                                         unsigned long address,
                                         unsigned long size,
                                         mmu_update_t **v)
{
    unsigned long end;

    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    if (address >= end)
        BUG();

    do {
        (*v)->ptr = virt_to_machine(pte);
        (*v)++;
        address += PAGE_SIZE;
        pte++;
    } while (address && (address < end));
}
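
/*
 * Allocate (if necessary) the PTE pages backing [address, address + size)
 * within one PGD entry, and hand each PMD-sized chunk to
 * direct_remap_area_pte() so the pending mmu_update requests get their
 * PTE pointers.
 */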
static inline int direct_remap_area_pmd(struct mm_struct *mm,
                                        pmd_t *pmd,
                                        unsigned long address,
                                        unsigned long size,
                                        mmu_update_t **v)
{
    unsigned long end;

    address &= ~PGDIR_MASK;
    end = address + size;
    if (end > PGDIR_SIZE)
        end = PGDIR_SIZE;
    if (address >= end)
        BUG();
    do {
        pte_t *pte = pte_alloc(mm, pmd, address);
        if (!pte)
            return -ENOMEM;
        direct_remap_area_pte(pte, address, end - address, v);

        address = (address + PMD_SIZE) & PMD_MASK;
        pmd++;
    } while (address && (address < end));
    return 0;
}
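
/*
 * Walk the page tables of 'mm' for [address, address + size), allocating
 * PMDs and PTE pages as needed, and fill in the .ptr (PTE machine address)
 * field of each mmu_update request queued in 'v'. Called with the requests'
 * .val fields already populated.
 */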
int __direct_remap_area_pages(struct mm_struct *mm,
                              unsigned long address,
                              unsigned long size,
                              mmu_update_t *v)
{
    pgd_t *dir;
    unsigned long end = address + size;

    dir = pgd_offset(mm, address);
    flush_cache_all();
    if (address >= end)
        BUG();
    spin_lock(&mm->page_table_lock);
    do {
        pmd_t *pmd = pmd_alloc(mm, dir, address);
        if (!pmd)
            return -ENOMEM;
        direct_remap_area_pmd(mm, pmd, address, end - address, &v);
        address = (address + PGDIR_SIZE) & PGDIR_MASK;
        dir++;
    } while (address && (address < end));
    spin_unlock(&mm->page_table_lock);
    flush_tlb_all();
    return 0;
}
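
/*
 * Map 'size' bytes of machine address space, starting at 'machine_addr',
 * at virtual address 'address' in 'mm' on behalf of domain 'domid'.
 * Requests are batched: up to MAX_DIRECTMAP_MMU_QUEUE mmu_update entries
 * are accumulated with .val set to the machine frame plus protection bits,
 * then __direct_remap_area_pages() fills in the PTE pointers and the batch
 * is submitted via HYPERVISOR_mmu_update().
 */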
int direct_remap_area_pages(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long machine_addr,
                            unsigned long size,
                            pgprot_t prot,
                            domid_t domid)
{
    int i;
    unsigned long start_address;
#define MAX_DIRECTMAP_MMU_QUEUE 130
    mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u;

    start_address = address;

    for ( i = 0; i < size; i += PAGE_SIZE )
    {
        if ( (v - u) == MAX_DIRECTMAP_MMU_QUEUE )
        {
            /* Fill in the PTE pointers. */
            __direct_remap_area_pages(mm,
                                      start_address,
                                      address - start_address,
                                      u);

            if ( HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0 )
                return -EFAULT;
            v = u;
            start_address = address;
        }

        /*
         * Fill in the machine address: PTE ptr is done later by
         * __direct_remap_area_pages().
         */
        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);

        machine_addr += PAGE_SIZE;
        address += PAGE_SIZE;
        v++;
    }

    if ( v != u )
    {
        /* Get the PTE pointers filled in. */
        __direct_remap_area_pages(mm,
                                  start_address,
                                  address - start_address,
                                  u);
        if ( unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0) )
            return -EFAULT;
    }

    return 0;
}
#endif /* CONFIG_XEN_PRIVILEGED_GUEST */

/*
 * Remap an arbitrary machine address space into the kernel virtual
 * address space. Needed when a privileged instance of Xenolinux wants
 * to access space outside its world directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long machine_addr,
                 unsigned long size,
                 unsigned long flags)
{
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    void * addr;
    struct vm_struct * area;
    unsigned long offset, last_addr;
    pgprot_t prot;

    /* Don't allow wraparound or zero size */
    last_addr = machine_addr + size - 1;
    if (!size || last_addr < machine_addr)
        return NULL;

    /* Mappings have to be page-aligned */
    offset = machine_addr & ~PAGE_MASK;
    machine_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr+1) - machine_addr;

    /* Ok, go for it */
    area = get_vm_area(size, VM_IOREMAP);
    if (!area)
        return NULL;
    addr = area->addr;
    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
                    _PAGE_ACCESSED | flags);
    if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
                                machine_addr, size, prot, 0)) {
        vfree(addr);
        return NULL;
    }
    return (void *) (offset + (char *)addr);
#else
    return NULL;
#endif
}
void iounmap(void *addr)
{
    vfree((void *)((unsigned long)addr & PAGE_MASK));
}
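
/*
 * Illustrative sketch only: one way a privileged-guest driver might use
 * __ioremap()/iounmap() above. The machine address, length and the
 * example_* names are hypothetical and not part of this file's interface.
 */
#if 0
static void *example_map_device_regs(void)
{
    /* Map one page of device registers, uncached (_PAGE_PCD). */
    void *regs = __ioremap(0xfebff000UL, PAGE_SIZE, _PAGE_PCD);
    if (regs == NULL)
        return NULL;
    /* ... access the device through 'regs' ... */
    return regs;
}

static void example_unmap_device_regs(void *regs)
{
    iounmap(regs);
}
#endif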
/*
 * Implementation of boot-time ioremap, for the purpose of providing access
 * to the VGA console for privileged domains. Unlike boot-time ioremap on
 * other architectures, ours is permanent and not reclaimed when the vmalloc
 * infrastructure is started.
 */
void __init *bt_ioremap(unsigned long machine_addr, unsigned long size)
{
    unsigned long offset, last_addr;
    unsigned int nrpages;
    enum fixed_addresses idx;

    /* Don't allow wraparound or zero size */
    last_addr = machine_addr + size - 1;
    if (!size || last_addr < machine_addr)
        return NULL;

    /*
     * Mappings have to be page-aligned
     */
    offset = machine_addr & ~PAGE_MASK;
    machine_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr) - machine_addr;

    /*
     * Mappings have to fit in the FIX_BTMAP area.
     */
    nrpages = size >> PAGE_SHIFT;
    if (nrpages > NR_FIX_BTMAPS)
        return NULL;

    /*
     * Ok, go for it..
     */
    idx = FIX_BTMAP_BEGIN;
    while (nrpages > 0) {
        __set_fixmap(idx, machine_addr, PAGE_KERNEL);
        machine_addr += PAGE_SIZE;
        --idx;
        --nrpages;
    }

    flush_tlb_all();

    return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
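
/*
 * Illustrative sketch only: boot-time mapping of the VGA text buffer via
 * bt_ioremap() above, as a privileged domain's console code might do.
 * The example_* name is hypothetical; 0xB8000/32kB is the conventional
 * VGA text-mode buffer window.
 */
#if 0
static char __init *example_map_vga_text_buffer(void)
{
    return (char *)bt_ioremap(0xB8000, 0x8000);
}
#endif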
#if 0 /* We don't support these functions. They shouldn't be required. */
void __init bt_iounmap(void *addr, unsigned long size) {}
#endif