ia64/xen-unstable

view xen/include/asm-x86/guest_access.h @ 16543:46776e65e679

[XEN, 32on64]: Correct continuation translation for large
compat_mmuext hypercalls.

At the point where we translate the continuation "nat_ops" points to
the beginning of the batch of "i" entries, therefore it must be
incremented by the number of entries processed "i - left". At the same
point "cmp_uops" points to the end of the batch of entries and must
therefore be decremented by "left".

The new count value has already been set by do_mmuext_op to "left" and
therefore it is correct to add "count - i" since that is the number of
entries that remain after this batch.

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Dec 06 11:24:02 2007 +0000 (2007-12-06)
parents 45a44a9cbe8d
children 6e92603ed9f2
line source
1 /******************************************************************************
2 * guest_access.h
3 *
4 * Copyright (c) 2006, K A Fraser
5 */
7 #ifndef __ASM_X86_GUEST_ACCESS_H__
8 #define __ASM_X86_GUEST_ACCESS_H__
10 #include <asm/uaccess.h>
11 #include <asm/shadow.h>
12 #include <asm/hvm/support.h>
13 #include <asm/hvm/guest_access.h>
15 /* Is the guest handle a NULL reference? */
/* (A handle's raw guest pointer is stored in member .p.) */
16 #define guest_handle_is_null(hnd) ((hnd).p == NULL)
18 /* Offset the given guest handle into the array it refers to. */
/* Advances by (nr) elements -- .p is a typed pointer, so pointer
 * arithmetic is element-wise, not byte-wise. */
19 #define guest_handle_add_offset(hnd, nr) ((hnd).p += (nr))
/* Inverse of guest_handle_add_offset(): step the handle back (nr) elements. */
20 #define guest_handle_subtract_offset(hnd, nr) ((hnd).p -= (nr))
22 /* Cast a guest handle to the specified type of handle. */
/* Evaluates to a new XEN_GUEST_HANDLE(type) wrapping the same raw pointer.
 * The intermediate "type *_x = (hnd).p" assignment makes the compiler
 * diagnose an incompatible pointer conversion instead of casting blindly. */
23 #define guest_handle_cast(hnd, type) ({ \
24 type *_x = (hnd).p; \
25 (XEN_GUEST_HANDLE(type)) { _x }; \
26 })
/* Wrap a raw pointer in a guest handle of the given type (forced cast --
 * no type compatibility check, unlike guest_handle_cast()).
 * NOTE(review): "ptr" is not parenthesized in the expansion, so a
 * non-trivial expression argument may bind unexpectedly to the cast. */
28 #define guest_handle_from_ptr(ptr, type) \
29 ((XEN_GUEST_HANDLE(type)) { (type *)ptr })
31 /*
32 * Copy an array of objects to guest context via a guest handle,
33 * specifying an offset into the guest array.
34 */
/* NOTE(review): evaluates to copy_to_user{,_hvm}()'s return value --
 * presumably nonzero iff the copy faulted; confirm in asm/uaccess.h.
 * HVM vcpus need the HVM-specific accessor; PV vcpus use copy_to_user(). */
35 #define copy_to_guest_offset(hnd, off, ptr, nr) ({ \
36 const typeof(*(ptr)) *_s = (ptr); \
37 char (*_d)[sizeof(*_s)] = (void *)(hnd).p; /* array-of-bytes element type: _d+(off) steps whole elements */ \
38 ((void)((hnd).p == (ptr))); /* compile-time type compatibility check only; result discarded */ \
39 is_hvm_vcpu(current) ? \
40 copy_to_user_hvm(_d+(off), _s, sizeof(*_s)*(nr)) : \
41 copy_to_user(_d+(off), _s, sizeof(*_s)*(nr)); \
42 })
44 /*
45 * Copy an array of objects from guest context via a guest handle,
46 * specifying an offset into the guest array.
47 */
/* NOTE(review): evaluates to copy_from_user{,_hvm}()'s return value --
 * presumably nonzero iff the copy faulted; confirm in asm/uaccess.h. */
48 #define copy_from_guest_offset(ptr, hnd, off, nr) ({ \
49 const typeof(*(ptr)) *_s = (hnd).p; /* typed assignment doubles as a compile-time type check */ \
50 typeof(*(ptr)) *_d = (ptr); \
51 is_hvm_vcpu(current) ? /* HVM and PV guests need different accessors */ \
52 copy_from_user_hvm(_d, _s+(off), sizeof(*_d)*(nr)) :\
53 copy_from_user(_d, _s+(off), sizeof(*_d)*(nr)); \
54 })
56 /* Copy sub-field of a structure to guest context via a guest handle. */
/* The handle and (ptr) must point at the same structure type; the
 * discarded address comparison below enforces this at compile time. */
57 #define copy_field_to_guest(hnd, ptr, field) ({ \
58 const typeof(&(ptr)->field) _s = &(ptr)->field; \
59 void *_d = &(hnd).p->field; \
60 ((void)(&(hnd).p->field == &(ptr)->field)); /* compile-time type check only; no runtime effect */ \
61 is_hvm_vcpu(current) ? \
62 copy_to_user_hvm(_d, _s, sizeof(*_s)) : \
63 copy_to_user(_d, _s, sizeof(*_s)); \
64 })
66 /* Copy sub-field of a structure from guest context via a guest handle. */
/* Type safety comes from the typed _s/_d declarations: an incompatible
 * field type fails to compile. */
67 #define copy_field_from_guest(ptr, hnd, field) ({ \
68 const typeof(&(ptr)->field) _s = &(hnd).p->field; \
69 typeof(&(ptr)->field) _d = &(ptr)->field; \
70 is_hvm_vcpu(current) ? \
71 copy_from_user_hvm(_d, _s, sizeof(*_d)) : \
72 copy_from_user(_d, _s, sizeof(*_d)); \
73 })
75 /*
76 * Pre-validate a guest handle.
77 * Allows use of faster __copy_* functions.
78 */
/* Checks that (nr) elements starting at (hnd).p lie in an addressable
 * guest range. NOTE(review): shadow_mode_external() domains bypass
 * array_access_ok() entirely -- presumably their accesses are validated
 * by the HVM copy routines instead; confirm. */
79 #define guest_handle_okay(hnd, nr) \
80 (shadow_mode_external(current->domain) || \
81 array_access_ok((hnd).p, (nr), sizeof(*(hnd).p)))
/* As copy_to_guest_offset(), but the PV path uses the unchecked
 * __copy_to_user() -- per the comment above, only valid after
 * guest_handle_okay() has pre-validated the range. The HVM path is
 * unchanged (still copy_to_user_hvm()). */
83 #define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \
84 const typeof(*(ptr)) *_s = (ptr); \
85 char (*_d)[sizeof(*_s)] = (void *)(hnd).p; /* element-sized strides for _d+(off) */ \
86 ((void)((hnd).p == (ptr))); /* compile-time type check only; result discarded */ \
87 is_hvm_vcpu(current) ? \
88 copy_to_user_hvm(_d+(off), _s, sizeof(*_s)*(nr)) : \
89 __copy_to_user(_d+(off), _s, sizeof(*_s)*(nr)); \
90 })
/* As copy_from_guest_offset(), but the PV path uses the unchecked
 * __copy_from_user() -- only valid after guest_handle_okay(). */
92 #define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \
93 const typeof(*(ptr)) *_s = (hnd).p; /* typed assignment is the compile-time type check */ \
94 typeof(*(ptr)) *_d = (ptr); \
95 is_hvm_vcpu(current) ? \
96 copy_from_user_hvm(_d, _s+(off), sizeof(*_d)*(nr)) :\
97 __copy_from_user(_d, _s+(off), sizeof(*_d)*(nr)); \
98 })
/* As copy_field_to_guest(), but the PV path uses the unchecked
 * __copy_to_user() -- only valid after guest_handle_okay(). */
100 #define __copy_field_to_guest(hnd, ptr, field) ({ \
101 const typeof(&(ptr)->field) _s = &(ptr)->field; \
102 void *_d = &(hnd).p->field; \
103 ((void)(&(hnd).p->field == &(ptr)->field)); /* compile-time type check only; no runtime effect */ \
104 is_hvm_vcpu(current) ? \
105 copy_to_user_hvm(_d, _s, sizeof(*_s)) : \
106 __copy_to_user(_d, _s, sizeof(*_s)); \
107 })
/* As copy_field_from_guest(), but the PV path uses the unchecked
 * __copy_from_user() -- only valid after guest_handle_okay(). */
109 #define __copy_field_from_guest(ptr, hnd, field) ({ \
110 const typeof(&(ptr)->field) _s = &(hnd).p->field; \
111 typeof(&(ptr)->field) _d = &(ptr)->field; \
112 is_hvm_vcpu(current) ? \
113 copy_from_user_hvm(_d, _s, sizeof(*_d)) : \
114 __copy_from_user(_d, _s, sizeof(*_d)); \
115 })
117 #endif /* __ASM_X86_GUEST_ACCESS_H__ */