ia64/xen-unstable

view xen/include/asm-ia64/vmx_uaccess.h @ 5704:9b73afea874e

Certain types of event channel are now auto-bound to vcpu0 by Xen.
Make sure that xenolinux agrees with this.
author sos22@douglas.cl.cam.ac.uk
date Fri Jul 08 15:35:43 2005 +0000 (2005-07-08)

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_uaccess.h: Defines vmx specific macros to transfer memory areas
 * across the domain/hypervisor boundary.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Note: In a VMX-enabled environment, the poor man's policy is actually
 * useless, since the HV resides in a completely different address space
 * from the domain. So the only way to perform the access is to search the
 * vTLB first, and then access the identity-mapped address on a hit.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 */

#ifndef __ASM_IA64_VMX_UACCESS_H__
#define __ASM_IA64_VMX_UACCESS_H__

#include <xen/compiler.h>
#include <xen/errno.h>
#include <xen/sched.h>

#include <asm/intrinsics.h>
#include <asm/vmmu.h>

/* Since the HV never accesses domain space directly, most security checks
 * can be dummies for now.
 */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

/* For backward compatibility */
#define __access_ok(addr, size, segment) 1
#define access_ok(addr, size, segment) __access_ok((addr), (size), (segment))

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful not to
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) __put_user((x), (ptr))
#define get_user(x, ptr) __get_user((x), (ptr))

#define __put_user(x, ptr) __do_put_user((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __do_get_user((x), (ptr), sizeof(*(ptr)))

/* TODO: add specific unaligned-access handling later. Assuming alignment
 * to 1, 2, 4 or 8 bytes for now, an operand cannot span two vTLB entries.
 */
extern long
__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len);
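
/*
 * Inferred contract (not stated in this header): on a vTLB hit,
 * __domain_va_to_ma() returns 0, stores the machine address in *ma, and may
 * shrink *len to the number of bytes contiguous in that mapping; on a miss
 * it returns nonzero. The copy loops below rely on this by retrying with
 * the remaining length.
 */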

#define __do_put_user(x, ptr, size)                                     \
({                                                                      \
    __typeof__ (x) __pu_x = (x);                                        \
    __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);                       \
    __typeof__ (size) __pu_size = (size);                               \
    unsigned long __pu_ma;                                              \
    long __pu_err;                                                      \
                                                                        \
    __pu_err = __domain_va_to_ma((unsigned long)__pu_ptr,               \
                                 &__pu_ma, &__pu_size);                 \
    __pu_err ? (__pu_err = -EFAULT) :                                   \
               (*((__typeof__ (*(ptr)) *)__va(__pu_ma)) = __pu_x);      \
    __pu_err;                                                           \
})

#define __do_get_user(x, ptr, size)                                     \
({                                                                      \
    __typeof__ (x) __gu_x = (x);                                        \
    __typeof__ (*(ptr)) __user *__gu_ptr = (ptr);                       \
    __typeof__ (size) __gu_size = (size);                               \
    unsigned long __gu_ma;                                              \
    long __gu_err;                                                      \
                                                                        \
    __gu_err = __domain_va_to_ma((unsigned long)__gu_ptr,               \
                                 &__gu_ma, &__gu_size);                 \
    __gu_err ? (__gu_err = -EFAULT) :                                   \
               (x = *((__typeof__ (*(ptr)) *)__va(__gu_ma)));           \
    __gu_err;                                                           \
})
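
/*
 * Illustrative sketch only (not part of this header): how an emulation
 * path might use get_user()/put_user() on a guest virtual address. The
 * function names are hypothetical; the macros return 0 on success and
 * -EFAULT on a vTLB miss, per the definitions above.
 *
 *     static inline long read_guest_ulong(unsigned long gva, unsigned long *val)
 *     {
 *         return get_user(*val, (unsigned long *)gva);
 *     }
 *
 *     static inline long write_guest_ulong(unsigned long gva, unsigned long val)
 *     {
 *         return put_user(val, (unsigned long *)gva);
 *     }
 */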

/* More complex copies from/to domain space */
#define copy_from_user(to, from, n) __copy_from_user((to), (from), (n))
#define copy_to_user(to, from, n) __copy_to_user((to), (from), (n))
#define clear_user(to, n) __clear_user((to), (n))

static inline unsigned long
__copy_from_user(void *to, void *from, unsigned long n)
{
    unsigned long ma, i;

    i = n;
    while (!__domain_va_to_ma((unsigned long)from, &ma, &i)) {
        memcpy(to, (void *)__va(ma), i);
        n -= i;
        if (!n)
            break;
        from += i;
        to += i;
        i = n;
    }
    return n;
}

static inline unsigned long
__copy_to_user(void *to, void *from, unsigned long n)
{
    unsigned long ma, i;

    i = n;
    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
        memcpy((void *)__va(ma), from, i);
        n -= i;
        if (!n)
            break;
        from += i;
        to += i;
        i = n;
    }
    return n;
}

static inline unsigned long
__clear_user(void *to, unsigned long n)
{
    unsigned long ma, i;

    i = n;
    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
        memset((void *)__va(ma), 0, i);
        n -= i;
        if (!n)
            break;
        to += i;
        i = n;
    }
    return n;
}
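
/*
 * Illustrative sketch only (not part of this header): copying a guest
 * argument structure with copy_from_user(). The structure and function
 * names are hypothetical; the return convention (number of bytes NOT
 * copied) follows the definitions above.
 *
 *     struct hypothetical_args {
 *         unsigned long op;
 *         unsigned long arg;
 *     };
 *
 *     static inline long fetch_guest_args(void *guest_ptr,
 *                                         struct hypothetical_args *a)
 *     {
 *         if (copy_from_user(a, guest_ptr, sizeof(*a)))
 *             return -EFAULT;
 *         return 0;
 *     }
 */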

#endif // __ASM_IA64_VMX_UACCESS_H__