ia64/xen-unstable

view netbsd-2.0-xen-sparse/sys/arch/xen/x86/bus_space.c @ 6462:af3750d1ec53

Bug fixes from Kevin (x2) and Anthony
Missing prototypes (Kevin)
Bad n_rid_blocks computation (Anthony)
Bad pte when single-entry dtlb lookup is successful (Kevin)
author djm@kirby.fc.hp.com
date Fri Sep 02 11:59:08 2005 -0600 (2005-09-02)
parents 0a4b76b6b5a0
children
line source
1 /* $NetBSD: bus_space.c,v 1.2.2.1 2004/05/22 15:57:25 he Exp $ */
2 /* NetBSD: bus_space.c,v 1.2 2003/03/14 18:47:53 christos Exp */
4 /*-
5 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the NetBSD
23 * Foundation, Inc. and its contributors.
24 * 4. Neither the name of The NetBSD Foundation nor the names of its
25 * contributors may be used to endorse or promote products derived
26 * from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38 * POSSIBILITY OF SUCH DAMAGE.
39 */
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: bus_space.c,v 1.2.2.1 2004/05/22 15:57:25 he Exp $");
44 #include "opt_xen.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/malloc.h>
49 #include <sys/extent.h>
51 #include <uvm/uvm_extern.h>
53 #include <machine/bus.h>
55 #include <dev/isa/isareg.h>
56 #include <machine/isa_machdep.h>
58 #include <machine/hypervisor.h>
59 #include <machine/xenpmap.h>
/*
 * Extent maps to manage I/O and memory space.  Allocate
 * storage for 8 regions in each, initially.  Later, ioport_malloc_safe
 * will indicate that it's safe to use malloc() to dynamically allocate
 * region descriptors.
 *
 * N.B. At least two regions are _always_ allocated from the iomem
 * extent map; (0 -> ISA hole) and (end of ISA hole -> end of RAM).
 *
 * The extent maps are not static!  Machine-dependent ISA and EISA
 * routines need access to them for bus address space allocation.
 */
static long ioport_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
static long iomem_ex_storage[EXTENT_FIXED_STORAGE_SIZE(8) / sizeof(long)];
struct extent *ioport_ex;
struct extent *iomem_ex;
/* Nonzero once malloc() may be used for extent region descriptors. */
static int ioport_malloc_safe;

/* Forward declaration; definition below. */
int x86_mem_add_mapping __P((bus_addr_t, bus_size_t,
	    int, bus_space_handle_t *));
82 void
83 x86_bus_space_init()
84 {
85 /*
86 * Initialize the I/O port and I/O mem extent maps.
87 * Note: we don't have to check the return value since
88 * creation of a fixed extent map will never fail (since
89 * descriptor storage has already been allocated).
90 *
91 * N.B. The iomem extent manages _all_ physical addresses
92 * on the machine. When the amount of RAM is found, the two
93 * extents of RAM are allocated from the map (0 -> ISA hole
94 * and end of ISA hole -> end of RAM).
95 */
96 ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
97 (caddr_t)ioport_ex_storage, sizeof(ioport_ex_storage),
98 EX_NOCOALESCE|EX_NOWAIT);
99 iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
100 (caddr_t)iomem_ex_storage, sizeof(iomem_ex_storage),
101 EX_NOCOALESCE|EX_NOWAIT);
103 /* We are privileged guest os - should have IO privileges. */
104 if (xen_start_info.flags & SIF_PRIVILEGED) {
105 dom0_op_t op;
106 op.cmd = DOM0_IOPL;
107 op.u.iopl.domain = DOMID_SELF;
108 op.u.iopl.iopl = 1;
109 if (HYPERVISOR_dom0_op(&op) != 0)
110 panic("Unable to obtain IOPL, "
111 "despite being SIF_PRIVILEGED");
112 }
113 }
115 void
116 x86_bus_space_mallocok()
117 {
119 ioport_malloc_safe = 1;
120 }
122 int
123 x86_memio_map(t, bpa, size, flags, bshp)
124 bus_space_tag_t t;
125 bus_addr_t bpa;
126 bus_size_t size;
127 int flags;
128 bus_space_handle_t *bshp;
129 {
130 int error;
131 struct extent *ex;
133 /*
134 * Pick the appropriate extent map.
135 */
136 if (t == X86_BUS_SPACE_IO) {
137 if (flags & BUS_SPACE_MAP_LINEAR)
138 return (EOPNOTSUPP);
139 ex = ioport_ex;
140 } else if (t == X86_BUS_SPACE_MEM)
141 ex = iomem_ex;
142 else
143 panic("x86_memio_map: bad bus space tag");
145 /*
146 * Before we go any further, let's make sure that this
147 * region is available.
148 */
149 error = extent_alloc_region(ex, bpa, size,
150 EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0));
151 if (error)
152 return (error);
154 /*
155 * For I/O space, that's all she wrote.
156 */
157 if (t == X86_BUS_SPACE_IO) {
158 *bshp = bpa;
159 return (0);
160 }
162 /*
163 * For memory space, map the bus physical address to
164 * a kernel virtual address.
165 */
166 error = x86_mem_add_mapping(bpa, size,
167 (flags & BUS_SPACE_MAP_CACHEABLE) != 0, bshp);
168 if (error) {
169 if (extent_free(ex, bpa, size, EX_NOWAIT |
170 (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
171 printf("x86_memio_map: pa 0x%lx, size 0x%lx\n",
172 bpa, size);
173 printf("x86_memio_map: can't free region\n");
174 }
175 }
177 return (error);
178 }
180 int
181 _x86_memio_map(t, bpa, size, flags, bshp)
182 bus_space_tag_t t;
183 bus_addr_t bpa;
184 bus_size_t size;
185 int flags;
186 bus_space_handle_t *bshp;
187 {
189 /*
190 * For I/O space, just fill in the handle.
191 */
192 if (t == X86_BUS_SPACE_IO) {
193 if (flags & BUS_SPACE_MAP_LINEAR)
194 return (EOPNOTSUPP);
195 *bshp = bpa;
196 return (0);
197 }
199 /*
200 * For memory space, map the bus physical address to
201 * a kernel virtual address.
202 */
203 return (x86_mem_add_mapping(bpa, size,
204 (flags & BUS_SPACE_MAP_CACHEABLE) != 0, bshp));
205 }
207 int
208 x86_memio_alloc(t, rstart, rend, size, alignment, boundary, flags,
209 bpap, bshp)
210 bus_space_tag_t t;
211 bus_addr_t rstart, rend;
212 bus_size_t size, alignment, boundary;
213 int flags;
214 bus_addr_t *bpap;
215 bus_space_handle_t *bshp;
216 {
217 struct extent *ex;
218 u_long bpa;
219 int error;
221 /*
222 * Pick the appropriate extent map.
223 */
224 if (t == X86_BUS_SPACE_IO) {
225 if (flags & BUS_SPACE_MAP_LINEAR)
226 return (EOPNOTSUPP);
227 ex = ioport_ex;
228 } else if (t == X86_BUS_SPACE_MEM)
229 ex = iomem_ex;
230 else
231 panic("x86_memio_alloc: bad bus space tag");
233 /*
234 * Sanity check the allocation against the extent's boundaries.
235 */
236 if (rstart < ex->ex_start || rend > ex->ex_end)
237 panic("x86_memio_alloc: bad region start/end");
239 /*
240 * Do the requested allocation.
241 */
242 error = extent_alloc_subregion(ex, rstart, rend, size, alignment,
243 boundary,
244 EX_FAST | EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0),
245 &bpa);
247 if (error)
248 return (error);
250 /*
251 * For I/O space, that's all she wrote.
252 */
253 if (t == X86_BUS_SPACE_IO) {
254 *bshp = *bpap = bpa;
255 return (0);
256 }
258 /*
259 * For memory space, map the bus physical address to
260 * a kernel virtual address.
261 */
262 error = x86_mem_add_mapping(bpa, size,
263 (flags & BUS_SPACE_MAP_CACHEABLE) != 0, bshp);
264 if (error) {
265 if (extent_free(iomem_ex, bpa, size, EX_NOWAIT |
266 (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
267 printf("x86_memio_alloc: pa 0x%lx, size 0x%lx\n",
268 bpa, size);
269 printf("x86_memio_alloc: can't free region\n");
270 }
271 }
273 *bpap = bpa;
275 return (error);
276 }
278 int
279 x86_mem_add_mapping(bpa, size, cacheable, bshp)
280 bus_addr_t bpa;
281 bus_size_t size;
282 int cacheable;
283 bus_space_handle_t *bshp;
284 {
285 u_long pa, endpa;
286 vaddr_t va;
287 pt_entry_t *pte;
288 pt_entry_t *maptp;
289 int32_t cpumask = 0;
291 pa = x86_trunc_page(bpa);
292 endpa = x86_round_page(bpa + size);
294 #ifdef DIAGNOSTIC
295 if (endpa <= pa)
296 panic("x86_mem_add_mapping: overflow");
297 #endif
299 if (bpa >= IOM_BEGIN && (bpa + size) <= IOM_END) {
300 va = (vaddr_t)ISA_HOLE_VADDR(pa);
301 } else {
302 va = uvm_km_valloc(kernel_map, endpa - pa);
303 if (va == 0)
304 return (ENOMEM);
305 }
307 *bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));
309 for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
310 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
312 /*
313 * PG_N doesn't exist on 386's, so we assume that
314 * the mainboard has wired up device space non-cacheable
315 * on those machines.
316 *
317 * Note that it's not necessary to use atomic ops to
318 * fiddle with the PTE here, because we don't care
319 * about mod/ref information.
320 *
321 * XXX should hand this bit to pmap_kenter_pa to
322 * save the extra invalidate!
323 *
324 * XXX extreme paranoia suggests tlb shootdown belongs here.
325 */
326 if (pmap_cpu_has_pg_n()) {
327 pte = kvtopte(va);
328 maptp = (pt_entry_t *)vtomach((vaddr_t)pte);
329 if (cacheable)
330 PTE_CLEARBITS(pte, maptp, PG_N);
331 else
332 PTE_SETBITS(pte, maptp, PG_N);
333 pmap_tlb_shootdown(pmap_kernel(), va, *pte,
334 &cpumask);
335 }
336 }
338 pmap_tlb_shootnow(cpumask);
339 pmap_update(pmap_kernel());
341 return 0;
342 }
344 /*
345 * void _x86_memio_unmap(bus_space_tag bst, bus_space_handle bsh,
346 * bus_size_t size, bus_addr_t *adrp)
347 *
348 * This function unmaps memory- or io-space mapped by the function
349 * _x86_memio_map(). This function works nearly as same as
350 * x86_memio_unmap(), but this function does not ask kernel
351 * built-in extents and returns physical address of the bus space,
352 * for the convenience of the extra extent manager.
353 */
354 void
355 _x86_memio_unmap(t, bsh, size, adrp)
356 bus_space_tag_t t;
357 bus_space_handle_t bsh;
358 bus_size_t size;
359 bus_addr_t *adrp;
360 {
361 u_long va, endva;
362 bus_addr_t bpa;
364 /*
365 * Find the correct extent and bus physical address.
366 */
367 if (t == X86_BUS_SPACE_IO) {
368 bpa = bsh;
369 } else if (t == X86_BUS_SPACE_MEM) {
370 if (bsh >= atdevbase && (bsh + size) <= (atdevbase + IOM_SIZE)) {
371 bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
372 } else {
374 va = x86_trunc_page(bsh);
375 endva = x86_round_page(bsh + size);
377 #ifdef DIAGNOSTIC
378 if (endva <= va) {
379 panic("_x86_memio_unmap: overflow");
380 }
381 #endif
383 #if __NetBSD_Version__ > 104050000
384 if (pmap_extract(pmap_kernel(), va, &bpa) == FALSE) {
385 panic("_x86_memio_unmap:"
386 " wrong virtual address");
387 }
388 bpa += (bsh & PGOFSET);
389 #else
390 bpa = pmap_extract(pmap_kernel(), va) + (bsh & PGOFSET);
391 #endif
393 pmap_kremove(va, endva - va);
394 /*
395 * Free the kernel virtual mapping.
396 */
397 uvm_km_free(kernel_map, va, endva - va);
398 }
399 } else {
400 panic("_x86_memio_unmap: bad bus space tag");
401 }
403 if (adrp != NULL) {
404 *adrp = bpa;
405 }
406 }
408 void
409 x86_memio_unmap(t, bsh, size)
410 bus_space_tag_t t;
411 bus_space_handle_t bsh;
412 bus_size_t size;
413 {
414 struct extent *ex;
415 u_long va, endva;
416 bus_addr_t bpa;
418 /*
419 * Find the correct extent and bus physical address.
420 */
421 if (t == X86_BUS_SPACE_IO) {
422 ex = ioport_ex;
423 bpa = bsh;
424 } else if (t == X86_BUS_SPACE_MEM) {
425 ex = iomem_ex;
427 if (bsh >= atdevbase &&
428 (bsh + size) <= (atdevbase + IOM_SIZE)) {
429 bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
430 goto ok;
431 }
433 va = x86_trunc_page(bsh);
434 endva = x86_round_page(bsh + size);
436 #ifdef DIAGNOSTIC
437 if (endva <= va)
438 panic("x86_memio_unmap: overflow");
439 #endif
441 (void) pmap_extract(pmap_kernel(), va, &bpa);
442 bpa += (bsh & PGOFSET);
444 pmap_kremove(va, endva - va);
445 /*
446 * Free the kernel virtual mapping.
447 */
448 uvm_km_free(kernel_map, va, endva - va);
449 } else
450 panic("x86_memio_unmap: bad bus space tag");
452 ok:
453 if (extent_free(ex, bpa, size,
454 EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
455 printf("x86_memio_unmap: %s 0x%lx, size 0x%lx\n",
456 (t == X86_BUS_SPACE_IO) ? "port" : "pa", bpa, size);
457 printf("x86_memio_unmap: can't free region\n");
458 }
459 }
461 void
462 x86_memio_free(t, bsh, size)
463 bus_space_tag_t t;
464 bus_space_handle_t bsh;
465 bus_size_t size;
466 {
468 /* x86_memio_unmap() does all that we need to do. */
469 x86_memio_unmap(t, bsh, size);
470 }
472 int
473 x86_memio_subregion(t, bsh, offset, size, nbshp)
474 bus_space_tag_t t;
475 bus_space_handle_t bsh;
476 bus_size_t offset, size;
477 bus_space_handle_t *nbshp;
478 {
480 *nbshp = bsh + offset;
481 return (0);
482 }
484 paddr_t
485 x86_memio_mmap(t, addr, off, prot, flags)
486 bus_space_tag_t t;
487 bus_addr_t addr;
488 off_t off;
489 int prot;
490 int flags;
491 {
493 /* Can't mmap I/O space. */
494 if (t == X86_BUS_SPACE_IO)
495 return (-1);
497 /*
498 * "addr" is the base address of the device we're mapping.
499 * "off" is the offset into that device.
500 *
501 * Note we are called for each "page" in the device that
502 * the upper layers want to map.
503 */
504 return (x86_btop(addr + off));
505 }