ia64/xen-unstable

view linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c @ 1774:131c48baa117

bitkeeper revision 1.1071.1.5 (40f41ae00utn5d2f3tlNLcvG_QhiBA)

Fairly major fixes to the network frontend driver. Much saner now.

author     kaf24@scramble.cl.cam.ac.uk
date       Tue Jul 13 17:24:48 2004 +0000 (2004-07-13)
parents    cbee10dcdd93
children   20d763e472ae 3addc3532bc7

/*
 * arch/xen/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Modifications for Xenolinux (c) 2003-2004 Keir Fraser
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/mmu.h>

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)

/* These hacky macros avoid phys->machine translations. */
#define __direct_pte(x) ((pte_t) { (x) } )
#define __direct_mk_pte(page_nr,pgprot) \
  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
#define direct_mk_pte_phys(physpage, pgprot) \
  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
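
/*
 * Record, in successive mmu_update_t slots, the machine address of each
 * PTE covering [address, address+size) within a single PTE page.  Only
 * the 'ptr' field of each request is set here; the new PTE contents
 * ('val') are filled in beforehand by direct_remap_area_pages() below.
 */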
static inline void direct_remap_area_pte(pte_t *pte,
                                         unsigned long address,
                                         unsigned long size,
                                         mmu_update_t **v)
{
    unsigned long end;

    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    if (address >= end)
        BUG();

    do {
        (*v)->ptr = virt_to_machine(pte);
        (*v)++;
        address += PAGE_SIZE;
        pte++;
    } while (address && (address < end));
}
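
/*
 * Cover one PGD entry's worth of the range: allocate PTE pages as needed
 * with pte_alloc() and let direct_remap_area_pte() record the machine
 * address of each PTE slot in the update queue.
 */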
static inline int direct_remap_area_pmd(struct mm_struct *mm,
                                        pmd_t *pmd,
                                        unsigned long address,
                                        unsigned long size,
                                        mmu_update_t **v)
{
    unsigned long end;

    address &= ~PGDIR_MASK;
    end = address + size;
    if (end > PGDIR_SIZE)
        end = PGDIR_SIZE;
    if (address >= end)
        BUG();

    do {
        pte_t *pte = pte_alloc(mm, pmd, address);
        if (!pte)
            return -ENOMEM;
        direct_remap_area_pte(pte, address, end - address, v);
        address = (address + PMD_SIZE) & PMD_MASK;
        pmd++;
    } while (address && (address < end));

    return 0;
}
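
/*
 * Fill in the 'ptr' (machine address of the target PTE) of every update
 * request in 'v', walking and, where necessary, allocating the page
 * tables of 'mm' under mm->page_table_lock.  The caller supplies one
 * request per page of the range, with 'val' already initialised.
 */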
int __direct_remap_area_pages(struct mm_struct *mm,
                              unsigned long address,
                              unsigned long size,
                              mmu_update_t *v)
{
    pgd_t *dir;
    unsigned long end = address + size;

    dir = pgd_offset(mm, address);
    flush_cache_all();
    if (address >= end)
        BUG();

    spin_lock(&mm->page_table_lock);
    do {
        pmd_t *pmd = pmd_alloc(mm, dir, address);
        if (!pmd) {
            /* Release the page-table lock before bailing out. */
            spin_unlock(&mm->page_table_lock);
            return -ENOMEM;
        }
        if (direct_remap_area_pmd(mm, pmd, address, end - address, &v)) {
            spin_unlock(&mm->page_table_lock);
            return -ENOMEM;
        }
        address = (address + PGDIR_SIZE) & PGDIR_MASK;
        dir++;
    } while (address && (address < end));
    spin_unlock(&mm->page_table_lock);

    flush_tlb_all();
    return 0;
}
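
/*
 * Map 'size' bytes of machine address space at virtual 'address' in 'mm'.
 * Requests are batched: up to MAX_DIRECTMAP_MMU_QUEUE mmu_update_t entries
 * are accumulated (val = machine frame | prot), their PTE pointers are
 * filled in by __direct_remap_area_pages(), and the whole batch is applied
 * with a single HYPERVISOR_mmu_update() hypercall.  For a foreign domain
 * (domid != 0) the first request is an MMUEXT_SET_SUBJECTDOM extended
 * command naming that domain.
 */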
int direct_remap_area_pages(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long machine_addr,
                            unsigned long size,
                            pgprot_t prot,
                            domid_t domid)
{
    int i;
    unsigned long start_address;
#define MAX_DIRECTMAP_MMU_QUEUE 130
    mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;

    if ( domid != 0 )
    {
        /* The first request names the foreign domain the updates apply to. */
        u[0].val  = (unsigned long)(domid<<16) & ~0xFFFFUL;
        u[0].ptr  = (unsigned long)(domid<< 0) & ~0xFFFFUL;
        u[0].ptr |= MMU_EXTENDED_COMMAND;
        u[0].val |= MMUEXT_SET_SUBJECTDOM;
        v = w = &u[1];
    }
    else
    {
        v = w = &u[0];
    }

    start_address = address;

    for ( i = 0; i < size; i += PAGE_SIZE )
    {
        if ( (v - u) == MAX_DIRECTMAP_MMU_QUEUE )
        {
            /* Fill in the PTE pointers. */
            __direct_remap_area_pages(mm,
                                      start_address,
                                      address - start_address,
                                      w);

            if ( HYPERVISOR_mmu_update(u, v - u, NULL) < 0 )
                return -EFAULT;
            v = w;
            start_address = address;
        }

        /*
         * Fill in the machine address: PTE ptr is done later by
         * __direct_remap_area_pages().
         */
        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);

        machine_addr += PAGE_SIZE;
        address += PAGE_SIZE;
        v++;
    }

    if ( v != w )
    {
        /* Get the remaining PTE pointers filled in. */
        __direct_remap_area_pages(mm,
                                  start_address,
                                  address - start_address,
                                  w);
        if ( unlikely(HYPERVISOR_mmu_update(u, v - u, NULL) < 0) )
            return -EFAULT;
    }

    return 0;
}

#endif /* CONFIG_XEN_PRIVILEGED_GUEST */

/*
 * Remap an arbitrary machine address space into the kernel virtual
 * address space. Needed when a privileged instance of Xenolinux wants
 * to access space outside its world directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long machine_addr,
                 unsigned long size,
                 unsigned long flags)
{
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    void * addr;
    struct vm_struct * area;
    unsigned long offset, last_addr;
    pgprot_t prot;

    /* Don't allow wraparound or zero size */
    last_addr = machine_addr + size - 1;
    if (!size || last_addr < machine_addr)
        return NULL;

    /* Mappings have to be page-aligned */
    offset = machine_addr & ~PAGE_MASK;
    machine_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr+1) - machine_addr;

    /* Ok, go for it */
    area = get_vm_area(size, VM_IOREMAP);
    if (!area)
        return NULL;
    addr = area->addr;
    prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
                    _PAGE_ACCESSED | flags);
    if (direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(addr),
                                machine_addr, size, prot, 0)) {
        vfree(addr);
        return NULL;
    }
    return (void *) (offset + (char *)addr);
#else
    return NULL;
#endif
}

void iounmap(void *addr)
{
    vfree((void *)((unsigned long)addr & PAGE_MASK));
}
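
/*
 * Illustrative example (the address and size are hypothetical; the legacy
 * VGA text buffer at machine address 0xB8000 is just a well-known frame):
 *
 *     char *vga = __ioremap(0xB8000, 4096, 0);
 *     if (vga != NULL) {
 *         vga[0] = 'X';
 *         iounmap(vga);
 *     }
 *
 * In a non-privileged guest, __ioremap() simply returns NULL.
 */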

/*
 * Implementation of boot-time ioremap, for the purpose of providing access
 * to the VGA console for privileged domains. Unlike boot-time ioremap on
 * other architectures, ours is permanent and not reclaimed when the vmalloc
 * infrastructure is started.
 */
void __init *bt_ioremap(unsigned long machine_addr, unsigned long size)
{
    unsigned long offset, last_addr;
    unsigned int nrpages;
    enum fixed_addresses idx;

    /* Don't allow wraparound or zero size */
    last_addr = machine_addr + size - 1;
    if (!size || last_addr < machine_addr)
        return NULL;

    /*
     * Mappings have to be page-aligned
     */
    offset = machine_addr & ~PAGE_MASK;
    machine_addr &= PAGE_MASK;
    size = PAGE_ALIGN(last_addr) - machine_addr;

    /*
     * Mappings have to fit in the FIX_BTMAP area.
     */
    nrpages = size >> PAGE_SHIFT;
    if (nrpages > NR_FIX_BTMAPS)
        return NULL;

    /*
     * Ok, go for it..
     */
    idx = FIX_BTMAP_BEGIN;
    while (nrpages > 0) {
        __set_fixmap(idx, machine_addr, PAGE_KERNEL);
        machine_addr += PAGE_SIZE;
        --idx;
        --nrpages;
    }

    flush_tlb_all();

    return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
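
/*
 * Illustrative example (the address and size are hypothetical): mapping
 * the VGA text console from early boot code, before vmalloc is available:
 *
 *     char *screen = bt_ioremap(0xB8000, 80 * 25 * 2);
 *     if (screen != NULL)
 *         screen[0] = 'X';
 *
 * bt_iounmap() below is compiled out; as noted above, these boot-time
 * mappings are permanent.
 */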

#if 0 /* We don't support these functions. They shouldn't be required. */
void __init bt_iounmap(void *addr, unsigned long size) {}
#endif