ia64/xen-unstable

view xen/include/asm-ia64/vmx_uaccess.h @ 6552:a9873d384da4

Merge.
author adsharma@los-vmm.sc.intel.com
date Thu Aug 25 12:24:48 2005 -0700 (2005-08-25)
parents 112d44270733 fa0754a9f64f
children dfaf788ab18c
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_uaccess.h: Defines VMX-specific macros to transfer memory areas
 * across the domain/hypervisor boundary.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Note: In a VMX-enabled environment, the poor man's access policy is
 * useless, since the hypervisor resides in a completely different address
 * space from the domain. The only way to perform an access is to search the
 * vTLB first and, on a hit, use the identity-mapped address.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 */

#ifndef __ASM_IA64_VMX_UACCESS_H__
#define __ASM_IA64_VMX_UACCESS_H__

#include <xen/compiler.h>
#include <xen/errno.h>
#include <xen/sched.h>

#include <asm/intrinsics.h>
#include <asm/vmmu.h>

/* Since the HV never accesses domain space directly, most security checks
 * can be dummies for now.
 */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */
#define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)

/* For backward compatibility */
#define __access_ok(addr, size, segment) 1
#define access_ok(addr, size, segment) __access_ok((addr), (size), (segment))
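
/*
 * Illustrative sketch: IS_VMM_ADDRESS() lets a caller reject guest-supplied
 * pointers that fall into the VMM region before attempting any transfer.
 * 'guest_va' below is a hypothetical guest virtual address taken from a
 * hypercall argument.
 *
 *     if (IS_VMM_ADDRESS((unsigned long)guest_va))
 *         return -EFAULT;    // bit 60 differs from bit 59: VMM space
 */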

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) __put_user((x), (ptr))
#define get_user(x, ptr) __get_user((x), (ptr))

#define __put_user(x, ptr) __do_put_user((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __do_get_user((x), (ptr), sizeof(*(ptr)))

/* TODO: add specific unaligned access support later. Assuming accesses are
 * aligned on 1, 2, 4 or 8 bytes for now, an operand cannot span two vTLB
 * entries.
 */

/* Translate a domain virtual address via the vTLB. The helpers below assume
 * that, on success (return value 0), *ma holds the machine address and *len
 * is clipped to the contiguous length covered by the matching vTLB entry;
 * a non-zero return value indicates a miss.
 */
extern long
__domain_va_to_ma(unsigned long va, unsigned long* ma, unsigned long *len);

#define __do_put_user(x, ptr, size)                                     \
({                                                                      \
    __typeof__ (x) __pu_x = (x);                                        \
    __typeof__ (*(ptr)) __user *__pu_ptr = (ptr);                       \
    unsigned long __pu_size = (size);                                   \
    unsigned long __pu_ma;                                              \
    long __pu_err;                                                      \
                                                                        \
    __pu_err = __domain_va_to_ma((unsigned long)__pu_ptr,               \
                                 &__pu_ma, &__pu_size);                 \
    __pu_err ? (__pu_err = -EFAULT) :                                   \
        (*((__typeof__ (*(ptr)) *)__va(__pu_ma)) = __pu_x);             \
    __pu_err;                                                           \
})

#define __do_get_user(x, ptr, size)                                     \
({                                                                      \
    __typeof__ (*(ptr)) __user *__gu_ptr = (ptr);                       \
    unsigned long __gu_size = (size);                                   \
    unsigned long __gu_ma;                                              \
    long __gu_err;                                                      \
                                                                        \
    __gu_err = __domain_va_to_ma((unsigned long)__gu_ptr,               \
                                 &__gu_ma, &__gu_size);                 \
    __gu_err ? (__gu_err = -EFAULT) :                                   \
        ((x) = *((__typeof__ (*(ptr)) *)__va(__gu_ma)));                \
    __gu_err;                                                           \
})
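
/*
 * Usage sketch (illustrative; 'guest_ptr' is a hypothetical guest virtual
 * address): the single-value transfers return 0 on success and -EFAULT when
 * the guest address misses in the vTLB.
 *
 *     unsigned long val;
 *     if (get_user(val, (unsigned long *)guest_ptr))
 *         return -EFAULT;    // vTLB miss
 *     val |= 1;
 *     if (put_user(val, (unsigned long *)guest_ptr))
 *         return -EFAULT;
 */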

/* More complex copies from/to domain space */
#define copy_from_user(to, from, n) __copy_from_user((to), (from), (n))
#define copy_to_user(to, from, n) __copy_to_user((to), (from), (n))
#define clear_user(to, n) __clear_user((to), (n))

static inline unsigned long
__copy_from_user(void *to, void *from, unsigned long n)
{
    unsigned long ma, i;

    /* Walk the domain buffer one vTLB entry at a time; __domain_va_to_ma()
     * clips 'i' to the length translatable by a single lookup. */
    i = n;
    while (!__domain_va_to_ma((unsigned long)from, &ma, &i)) {
        memcpy(to, (void *)__va(ma), i);
        n -= i;
        if (!n)
            break;
        from += i;
        to += i;
        i = n;
    }
    return n;    /* number of bytes NOT copied */
}

static inline unsigned long
__copy_to_user(void *to, void *from, unsigned long n)
{
    unsigned long ma, i;

    i = n;
    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
        memcpy((void *)__va(ma), from, i);
        n -= i;
        if (!n)
            break;
        from += i;
        to += i;
        i = n;
    }
    return n;
}
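
/*
 * Usage sketch (illustrative; 'struct foo', 'arg' and 'guest_buf' are
 * hypothetical): the copy routines return the number of bytes that could
 * NOT be transferred, so a non-zero result means part of the domain buffer
 * was not mapped in the vTLB.
 *
 *     struct foo arg;
 *     if (copy_from_user(&arg, guest_buf, sizeof(arg)))
 *         return -EFAULT;
 */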

static inline unsigned long
__clear_user(void *to, unsigned long n)
{
    unsigned long ma, i;

    i = n;
    while (!__domain_va_to_ma((unsigned long)to, &ma, &i)) {
        memset((void *)__va(ma), 0, i);
        n -= i;
        if (!n)
            break;
        to += i;
        i = n;
    }
    return n;
}

#endif // __ASM_IA64_VMX_UACCESS_H__