ia64/xen-unstable

changeset 9498:c160c05da8d4

[IA64] dom0 vp model linux part: import io.h from linux-2.6.16-rc3

[note: verified same as linux-2.6.16]

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Fri Apr 07 13:46:23 2006 -0600 (2006-04-07)
parents 119effd15792
children aab421b5ad45
files linux-2.6-xen-sparse/include/asm-ia64/io.h
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/linux-2.6-xen-sparse/include/asm-ia64/io.h	Fri Apr 07 13:46:23 2006 -0600
     1.3 @@ -0,0 +1,477 @@
     1.4 +#ifndef _ASM_IA64_IO_H
     1.5 +#define _ASM_IA64_IO_H
     1.6 +
     1.7 +/*
     1.8 + * This file contains the definitions for the emulated IO instructions
     1.9 + * inb/inw/inl/outb/outw/outl and the "string versions" of the same
    1.10 + * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
    1.11 + * versions of the single-IO instructions (inb_p/inw_p/..).
    1.12 + *
    1.13 + * This file is not meant to be obfuscating: it's just complicated to
    1.14 + * (a) handle it all in a way that makes gcc able to optimize it as
     1.15 + * well as possible and (b) avoid writing the same thing
    1.16 + * over and over again with slight variations and possibly making a
    1.17 + * mistake somewhere.
    1.18 + *
    1.19 + * Copyright (C) 1998-2003 Hewlett-Packard Co
    1.20 + *	David Mosberger-Tang <davidm@hpl.hp.com>
    1.21 + * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
    1.22 + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
    1.23 + */
    1.24 +
    1.25 +/* We don't use IO slowdowns on the ia64, but.. */
    1.26 +#define __SLOW_DOWN_IO	do { } while (0)
    1.27 +#define SLOW_DOWN_IO	do { } while (0)
    1.28 +
    1.29 +#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)
    1.30 +
    1.31 +/*
    1.32 + * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
    1.33 + * large machines may have multiple other I/O spaces so we can't place any a priori limit
    1.34 + * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
    1.35 + */
    1.36 +#define IO_SPACE_LIMIT		0xffffffffffffffffUL
    1.37 +
    1.38 +#define MAX_IO_SPACES_BITS		4
    1.39 +#define MAX_IO_SPACES			(1UL << MAX_IO_SPACES_BITS)
    1.40 +#define IO_SPACE_BITS			24
    1.41 +#define IO_SPACE_SIZE			(1UL << IO_SPACE_BITS)
    1.42 +
    1.43 +#define IO_SPACE_NR(port)		((port) >> IO_SPACE_BITS)
    1.44 +#define IO_SPACE_BASE(space)		((space) << IO_SPACE_BITS)
    1.45 +#define IO_SPACE_PORT(port)		((port) & (IO_SPACE_SIZE - 1))
    1.46 +
    1.47 +#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
    1.48 +
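
For illustration, here is what the decomposition macros above do to a port value, and what the sparse encoding produces: the space number lives in the bits above IO_SPACE_BITS, the port offset in the low 24 bits, and sparse encoding spreads each naturally aligned group of four ports 4KB apart. A self-contained user-space sketch (the port value is hypothetical), not kernel code:

	#include <stdio.h>

	#define IO_SPACE_BITS			24
	#define IO_SPACE_SIZE			(1UL << IO_SPACE_BITS)
	#define IO_SPACE_NR(port)		((port) >> IO_SPACE_BITS)
	#define IO_SPACE_PORT(port)		((port) & (IO_SPACE_SIZE - 1))
	#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))

	int main(void)
	{
		/* hypothetical: port 0x3f8 in I/O space number 2 */
		unsigned long port = (2UL << IO_SPACE_BITS) | 0x3f8;

		printf("space  = %lu\n", IO_SPACE_NR(port));		/* 2 */
		printf("port   = 0x%lx\n", IO_SPACE_PORT(port));	/* 0x3f8 */
		/* 0x3f8 >> 2 = 0xfe, so the sparse offset is 0xfe000 | 0x3f8 */
		printf("sparse = 0x%lx\n", IO_SPACE_SPARSE_ENCODING(0x3f8UL));	/* 0xfe3f8 */
		return 0;
	}
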
    1.49 +struct io_space {
    1.50 +	unsigned long mmio_base;	/* base in MMIO space */
    1.51 +	int sparse;
    1.52 +};
    1.53 +
    1.54 +extern struct io_space io_space[];
    1.55 +extern unsigned int num_io_spaces;
    1.56 +
    1.57 +# ifdef __KERNEL__
    1.58 +
    1.59 +/*
    1.60 + * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
    1.61 + *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
    1.62 + *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
    1.63 + *
    1.64 + * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
    1.65 + * code that uses bare port numbers without the prerequisite pci_iomap().
    1.66 + */
    1.67 +#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
    1.68 +#define PIO_MASK		(PIO_OFFSET - 1)
    1.69 +#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
    1.70 +#define HAVE_ARCH_PIO_SIZE
    1.71 +
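
Concretely, PIO_OFFSET works out to 1 << 28 = 0x10000000, so the cookie for port P in space S is 0x10000000 | (S << 24) | P, while MMIO cookies sit far above, at or beyond __IA64_UNCACHED_OFFSET. A sketch of how such a cookie could be built and recognized (pio_cookie()/is_pio_cookie() are hypothetical helpers, not the actual pci_iomap()/ioport_map() implementation):

	static inline void __iomem *pio_cookie(unsigned int space, unsigned int port)
	{
		return (void __iomem *)
			(PIO_OFFSET | ((unsigned long)space << IO_SPACE_BITS) | port);
	}

	static inline int is_pio_cookie(const void __iomem *addr)
	{
		/* the leading 1 (PIO_OFFSET) marks a PIO cookie; MMIO is in region 6 */
		return ((unsigned long)addr & ~PIO_MASK) == PIO_OFFSET;
	}
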
    1.72 +#include <asm/intrinsics.h>
    1.73 +#include <asm/machvec.h>
    1.74 +#include <asm/page.h>
    1.75 +#include <asm/system.h>
    1.76 +#include <asm-generic/iomap.h>
    1.77 +
    1.78 +/*
     1.79 + * Change virtual addresses to physical addresses and vice versa.
    1.80 + */
    1.81 +static inline unsigned long
    1.82 +virt_to_phys (volatile void *address)
    1.83 +{
    1.84 +	return (unsigned long) address - PAGE_OFFSET;
    1.85 +}
    1.86 +
    1.87 +static inline void*
    1.88 +phys_to_virt (unsigned long address)
    1.89 +{
    1.90 +	return (void *) (address + PAGE_OFFSET);
    1.91 +}
    1.92 +
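
These are plain offsets into the identity-mapped kernel region, so for any identity-mapped address they are exact inverses of each other. A quick sketch (the physical address is hypothetical):

	static void phys_virt_roundtrip(void)
	{
		unsigned long phys = 0x2000UL;		/* hypothetical physical address */
		void *virt = phys_to_virt(phys);	/* == (void *)(PAGE_OFFSET + 0x2000) */

		BUG_ON(virt_to_phys(virt) != phys);	/* pure arithmetic, exact round trip */
	}
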
    1.93 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
    1.94 +extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */
    1.95 +extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count);
    1.96 +
    1.97 +/*
     1.98 + * The following macros are deprecated and scheduled for removal.
    1.99 + * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
   1.100 + */
   1.101 +#define bus_to_virt	phys_to_virt
   1.102 +#define virt_to_bus	virt_to_phys
   1.103 +#define page_to_bus	page_to_phys
   1.104 +
    1.105 +# endif /* __KERNEL__ */
   1.106 +
   1.107 +/*
   1.108 + * Memory fence w/accept.  This should never be used in code that is
   1.109 + * not IA-64 specific.
   1.110 + */
   1.111 +#define __ia64_mf_a()	ia64_mfa()
   1.112 +
   1.113 +/**
   1.114 + * ___ia64_mmiowb - I/O write barrier
   1.115 + *
   1.116 + * Ensure ordering of I/O space writes.  This will make sure that writes
   1.117 + * following the barrier will arrive after all previous writes.  For most
   1.118 + * ia64 platforms, this is a simple 'mf.a' instruction.
   1.119 + *
   1.120 + * See Documentation/DocBook/deviceiobook.tmpl for more information.
   1.121 + */
   1.122 +static inline void ___ia64_mmiowb(void)
   1.123 +{
   1.124 +	ia64_mfa();
   1.125 +}
   1.126 +
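
The canonical use is a driver whose MMIO writes are serialized by a spinlock across CPUs: issuing mmiowb() before the unlock keeps one CPU's writes from reaching the device after the next lock holder's. A sketch under assumed names (struct my_dev and MY_CMD_REG are hypothetical):

	static void my_dev_kick(struct my_dev *dev, u32 cmd)
	{
		spin_lock(&dev->lock);
		writel(cmd, dev->regs + MY_CMD_REG);	/* MMIO write under the lock */
		mmiowb();	/* reaches the device before the next lock holder's writes */
		spin_unlock(&dev->lock);
	}
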
   1.127 +static inline void*
   1.128 +__ia64_mk_io_addr (unsigned long port)
   1.129 +{
   1.130 +	struct io_space *space;
   1.131 +	unsigned long offset;
   1.132 +
   1.133 +	space = &io_space[IO_SPACE_NR(port)];
   1.134 +	port = IO_SPACE_PORT(port);
   1.135 +	if (space->sparse)
   1.136 +		offset = IO_SPACE_SPARSE_ENCODING(port);
   1.137 +	else
   1.138 +		offset = port;
   1.139 +
   1.140 +	return (void *) (space->mmio_base | offset);
   1.141 +}
   1.142 +
   1.143 +#define __ia64_inb	___ia64_inb
   1.144 +#define __ia64_inw	___ia64_inw
   1.145 +#define __ia64_inl	___ia64_inl
   1.146 +#define __ia64_outb	___ia64_outb
   1.147 +#define __ia64_outw	___ia64_outw
   1.148 +#define __ia64_outl	___ia64_outl
   1.149 +#define __ia64_readb	___ia64_readb
   1.150 +#define __ia64_readw	___ia64_readw
   1.151 +#define __ia64_readl	___ia64_readl
   1.152 +#define __ia64_readq	___ia64_readq
   1.153 +#define __ia64_readb_relaxed	___ia64_readb
   1.154 +#define __ia64_readw_relaxed	___ia64_readw
   1.155 +#define __ia64_readl_relaxed	___ia64_readl
   1.156 +#define __ia64_readq_relaxed	___ia64_readq
   1.157 +#define __ia64_writeb	___ia64_writeb
   1.158 +#define __ia64_writew	___ia64_writew
   1.159 +#define __ia64_writel	___ia64_writel
   1.160 +#define __ia64_writeq	___ia64_writeq
   1.161 +#define __ia64_mmiowb	___ia64_mmiowb
   1.162 +
   1.163 +/*
   1.164 + * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
   1.165 + * that the access has completed before executing other I/O accesses.  Since we're doing
   1.166 + * the accesses through an uncachable (UC) translation, the CPU will execute them in
   1.167 + * program order.  However, we still need to tell the compiler not to shuffle them around
   1.168 + * during optimization, which is why we use "volatile" pointers.
   1.169 + */
   1.170 +
   1.171 +static inline unsigned int
   1.172 +___ia64_inb (unsigned long port)
   1.173 +{
   1.174 +	volatile unsigned char *addr = __ia64_mk_io_addr(port);
   1.175 +	unsigned char ret;
   1.176 +
   1.177 +	ret = *addr;
   1.178 +	__ia64_mf_a();
   1.179 +	return ret;
   1.180 +}
   1.181 +
   1.182 +static inline unsigned int
   1.183 +___ia64_inw (unsigned long port)
   1.184 +{
   1.185 +	volatile unsigned short *addr = __ia64_mk_io_addr(port);
   1.186 +	unsigned short ret;
   1.187 +
   1.188 +	ret = *addr;
   1.189 +	__ia64_mf_a();
   1.190 +	return ret;
   1.191 +}
   1.192 +
   1.193 +static inline unsigned int
   1.194 +___ia64_inl (unsigned long port)
   1.195 +{
   1.196 +	volatile unsigned int *addr = __ia64_mk_io_addr(port);
   1.197 +	unsigned int ret;
   1.198 +
   1.199 +	ret = *addr;
   1.200 +	__ia64_mf_a();
   1.201 +	return ret;
   1.202 +}
   1.203 +
   1.204 +static inline void
   1.205 +___ia64_outb (unsigned char val, unsigned long port)
   1.206 +{
   1.207 +	volatile unsigned char *addr = __ia64_mk_io_addr(port);
   1.208 +
   1.209 +	*addr = val;
   1.210 +	__ia64_mf_a();
   1.211 +}
   1.212 +
   1.213 +static inline void
   1.214 +___ia64_outw (unsigned short val, unsigned long port)
   1.215 +{
   1.216 +	volatile unsigned short *addr = __ia64_mk_io_addr(port);
   1.217 +
   1.218 +	*addr = val;
   1.219 +	__ia64_mf_a();
   1.220 +}
   1.221 +
   1.222 +static inline void
   1.223 +___ia64_outl (unsigned int val, unsigned long port)
   1.224 +{
   1.225 +	volatile unsigned int *addr = __ia64_mk_io_addr(port);
   1.226 +
   1.227 +	*addr = val;
   1.228 +	__ia64_mf_a();
   1.229 +}
   1.230 +
   1.231 +static inline void
   1.232 +__insb (unsigned long port, void *dst, unsigned long count)
   1.233 +{
   1.234 +	unsigned char *dp = dst;
   1.235 +
   1.236 +	while (count--)
   1.237 +		*dp++ = platform_inb(port);
   1.238 +}
   1.239 +
   1.240 +static inline void
   1.241 +__insw (unsigned long port, void *dst, unsigned long count)
   1.242 +{
   1.243 +	unsigned short *dp = dst;
   1.244 +
   1.245 +	while (count--)
   1.246 +		*dp++ = platform_inw(port);
   1.247 +}
   1.248 +
   1.249 +static inline void
   1.250 +__insl (unsigned long port, void *dst, unsigned long count)
   1.251 +{
   1.252 +	unsigned int *dp = dst;
   1.253 +
   1.254 +	while (count--)
   1.255 +		*dp++ = platform_inl(port);
   1.256 +}
   1.257 +
   1.258 +static inline void
   1.259 +__outsb (unsigned long port, const void *src, unsigned long count)
   1.260 +{
   1.261 +	const unsigned char *sp = src;
   1.262 +
   1.263 +	while (count--)
   1.264 +		platform_outb(*sp++, port);
   1.265 +}
   1.266 +
   1.267 +static inline void
   1.268 +__outsw (unsigned long port, const void *src, unsigned long count)
   1.269 +{
   1.270 +	const unsigned short *sp = src;
   1.271 +
   1.272 +	while (count--)
   1.273 +		platform_outw(*sp++, port);
   1.274 +}
   1.275 +
   1.276 +static inline void
   1.277 +__outsl (unsigned long port, const void *src, unsigned long count)
   1.278 +{
   1.279 +	const unsigned int *sp = src;
   1.280 +
   1.281 +	while (count--)
   1.282 +		platform_outl(*sp++, port);
   1.283 +}
   1.284 +
   1.285 +/*
   1.286 + * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
   1.287 + * specification regarding legacy I/O support.  Thus, we have to make these operations
   1.288 + * platform dependent...
   1.289 + */
   1.290 +#define __inb		platform_inb
   1.291 +#define __inw		platform_inw
   1.292 +#define __inl		platform_inl
   1.293 +#define __outb		platform_outb
   1.294 +#define __outw		platform_outw
   1.295 +#define __outl		platform_outl
   1.296 +#define __mmiowb	platform_mmiowb
   1.297 +
   1.298 +#define inb(p)		__inb(p)
   1.299 +#define inw(p)		__inw(p)
   1.300 +#define inl(p)		__inl(p)
   1.301 +#define insb(p,d,c)	__insb(p,d,c)
   1.302 +#define insw(p,d,c)	__insw(p,d,c)
   1.303 +#define insl(p,d,c)	__insl(p,d,c)
   1.304 +#define outb(v,p)	__outb(v,p)
   1.305 +#define outw(v,p)	__outw(v,p)
   1.306 +#define outl(v,p)	__outl(v,p)
   1.307 +#define outsb(p,s,c)	__outsb(p,s,c)
   1.308 +#define outsw(p,s,c)	__outsw(p,s,c)
   1.309 +#define outsl(p,s,c)	__outsl(p,s,c)
   1.310 +#define mmiowb()	__mmiowb()
   1.311 +
   1.312 +/*
    1.313 + * The addresses passed to these functions are already ioremap()ed.
   1.314 + *
   1.315 + * We need these to be machine vectors since some platforms don't provide
   1.316 + * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
   1.317 + * a good idea).  Writes are ok though for all existing ia64 platforms (and
   1.318 + * hopefully it'll stay that way).
   1.319 + */
   1.320 +static inline unsigned char
   1.321 +___ia64_readb (const volatile void __iomem *addr)
   1.322 +{
   1.323 +	return *(volatile unsigned char __force *)addr;
   1.324 +}
   1.325 +
   1.326 +static inline unsigned short
   1.327 +___ia64_readw (const volatile void __iomem *addr)
   1.328 +{
   1.329 +	return *(volatile unsigned short __force *)addr;
   1.330 +}
   1.331 +
   1.332 +static inline unsigned int
   1.333 +___ia64_readl (const volatile void __iomem *addr)
   1.334 +{
   1.335 +	return *(volatile unsigned int __force *) addr;
   1.336 +}
   1.337 +
   1.338 +static inline unsigned long
   1.339 +___ia64_readq (const volatile void __iomem *addr)
   1.340 +{
   1.341 +	return *(volatile unsigned long __force *) addr;
   1.342 +}
   1.343 +
   1.344 +static inline void
   1.345 +__writeb (unsigned char val, volatile void __iomem *addr)
   1.346 +{
   1.347 +	*(volatile unsigned char __force *) addr = val;
   1.348 +}
   1.349 +
   1.350 +static inline void
   1.351 +__writew (unsigned short val, volatile void __iomem *addr)
   1.352 +{
   1.353 +	*(volatile unsigned short __force *) addr = val;
   1.354 +}
   1.355 +
   1.356 +static inline void
   1.357 +__writel (unsigned int val, volatile void __iomem *addr)
   1.358 +{
   1.359 +	*(volatile unsigned int __force *) addr = val;
   1.360 +}
   1.361 +
   1.362 +static inline void
   1.363 +__writeq (unsigned long val, volatile void __iomem *addr)
   1.364 +{
   1.365 +	*(volatile unsigned long __force *) addr = val;
   1.366 +}
   1.367 +
   1.368 +#define __readb		platform_readb
   1.369 +#define __readw		platform_readw
   1.370 +#define __readl		platform_readl
   1.371 +#define __readq		platform_readq
   1.372 +#define __readb_relaxed	platform_readb_relaxed
   1.373 +#define __readw_relaxed	platform_readw_relaxed
   1.374 +#define __readl_relaxed	platform_readl_relaxed
   1.375 +#define __readq_relaxed	platform_readq_relaxed
   1.376 +
   1.377 +#define readb(a)	__readb((a))
   1.378 +#define readw(a)	__readw((a))
   1.379 +#define readl(a)	__readl((a))
   1.380 +#define readq(a)	__readq((a))
   1.381 +#define readb_relaxed(a)	__readb_relaxed((a))
   1.382 +#define readw_relaxed(a)	__readw_relaxed((a))
   1.383 +#define readl_relaxed(a)	__readl_relaxed((a))
   1.384 +#define readq_relaxed(a)	__readq_relaxed((a))
   1.385 +#define __raw_readb	readb
   1.386 +#define __raw_readw	readw
   1.387 +#define __raw_readl	readl
   1.388 +#define __raw_readq	readq
   1.389 +#define __raw_readb_relaxed	readb_relaxed
   1.390 +#define __raw_readw_relaxed	readw_relaxed
   1.391 +#define __raw_readl_relaxed	readl_relaxed
   1.392 +#define __raw_readq_relaxed	readq_relaxed
   1.393 +#define writeb(v,a)	__writeb((v), (a))
   1.394 +#define writew(v,a)	__writew((v), (a))
   1.395 +#define writel(v,a)	__writel((v), (a))
   1.396 +#define writeq(v,a)	__writeq((v), (a))
   1.397 +#define __raw_writeb	writeb
   1.398 +#define __raw_writew	writew
   1.399 +#define __raw_writel	writel
   1.400 +#define __raw_writeq	writeq
   1.401 +
   1.402 +#ifndef inb_p
   1.403 +# define inb_p		inb
   1.404 +#endif
   1.405 +#ifndef inw_p
   1.406 +# define inw_p		inw
   1.407 +#endif
   1.408 +#ifndef inl_p
   1.409 +# define inl_p		inl
   1.410 +#endif
   1.411 +
   1.412 +#ifndef outb_p
   1.413 +# define outb_p		outb
   1.414 +#endif
   1.415 +#ifndef outw_p
   1.416 +# define outw_p		outw
   1.417 +#endif
   1.418 +#ifndef outl_p
   1.419 +# define outl_p		outl
   1.420 +#endif
   1.421 +
   1.422 +/*
   1.423 + * An "address" in IO memory space is not clearly either an integer or a pointer. We will
   1.424 + * accept both, thus the casts.
   1.425 + *
    1.426 + * On IA-64, we access the physical I/O memory space through the uncached kernel region.
   1.427 + */
   1.428 +static inline void __iomem *
   1.429 +ioremap (unsigned long offset, unsigned long size)
   1.430 +{
   1.431 +	return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
   1.432 +}
   1.433 +
   1.434 +static inline void
   1.435 +iounmap (volatile void __iomem *addr)
   1.436 +{
   1.437 +}
   1.438 +
   1.439 +#define ioremap_nocache(o,s)	ioremap(o,s)
   1.440 +
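
Since ioremap() just ORs the physical address into the uncached region-6 window, a hypothetical device at physical 0x80000000 maps to 0xc000000080000000, and iounmap() has nothing to tear down. A usage sketch (device address, register offset, and size are made up for illustration):

	static unsigned int mmio_status_poke(void)
	{
		void __iomem *regs = ioremap(0x80000000UL, 0x1000);	/* -> 0xc000000080000000 */
		unsigned int status = readl(regs + 0x10);		/* uncached load */

		writel(status | 1, regs + 0x10);			/* uncached store */
		iounmap(regs);						/* no-op on ia64 */
		return status;
	}
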
   1.441 +# ifdef __KERNEL__
   1.442 +
   1.443 +/*
   1.444 + * String version of IO memory access ops:
   1.445 + */
   1.446 +extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
   1.447 +extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
   1.448 +extern void memset_io(volatile void __iomem *s, int c, long n);
   1.449 +
   1.450 +#define dma_cache_inv(_start,_size)             do { } while (0)
   1.451 +#define dma_cache_wback(_start,_size)           do { } while (0)
   1.452 +#define dma_cache_wback_inv(_start,_size)       do { } while (0)
   1.453 +
   1.454 +# endif /* __KERNEL__ */
   1.455 +
   1.456 +/*
   1.457 + * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
   1.458 + * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
   1.459 + * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
   1.460 + * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
   1.461 + * over BIO-level virtual merging.
   1.462 + */
   1.463 +extern unsigned long ia64_max_iommu_merge_mask;
   1.464 +#if 1
   1.465 +#define BIO_VMERGE_BOUNDARY	0
   1.466 +#else
   1.467 +/*
   1.468 + * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
   1.469 + * replaced by dma_merge_mask() or something of that sort.  Note: the only way
   1.470 + * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
   1.471 + * expanded into:
   1.472 + *
    1.473 + *	(addr & ((ia64_max_iommu_merge_mask + 1) - 1)) == (addr & ia64_max_iommu_merge_mask)
   1.474 + *
   1.475 + * which is precisely what we want.
   1.476 + */
   1.477 +#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
   1.478 +#endif
   1.479 +
   1.480 +#endif /* _ASM_IA64_IO_H */