direct-io.hg

view xen/arch/x86/x86_64/compat/mm.c @ 13323:aa127e545b73

[XEN] 32on64: need to take multicall context into consideration
when determining the continuation arguments for mmuext ops.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Emmanuel Ackaouy <ack@xensource.com>
date Wed Jan 10 17:26:58 2007 +0000 (2007-01-10)
parents e5f24d5f71ac
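The change itself is the continuation fixup in compat_mmuext_op() below: when do_mmuext_op() is preempted, the second hypercall argument (the remaining op count, with MMU_UPDATE_PREEMPTED set) must be read from the multicall state if the call arrived via a multicall, rather than from the guest's %ecx. The stand-alone sketch below models only that selection; the mock types and the MOCK_MMU_UPDATE_PREEMPTED value are illustrative placeholders, not Xen's definitions.

/* --- illustrative sketch, not part of mm.c ---------------------------- */
#include <assert.h>
#include <stdio.h>

#define MOCK_MMU_UPDATE_PREEMPTED (1u << 31)   /* placeholder value */

struct mock_regs     { unsigned int ecx; };                       /* guest registers */
struct mock_mc_state { int in_multicall; unsigned int args[6]; }; /* multicall state */

/* Pick the "remaining ops" continuation argument from the right place. */
static unsigned int remaining_ops(const struct mock_regs *regs,
                                  const struct mock_mc_state *mcs)
{
    unsigned int arg = mcs->in_multicall ? mcs->args[1] : regs->ecx;

    assert(arg & MOCK_MMU_UPDATE_PREEMPTED);    /* a continuation is pending */
    return arg & ~MOCK_MMU_UPDATE_PREEMPTED;    /* strip the marker bit */
}

int main(void)
{
    struct mock_regs regs = { .ecx = 5 | MOCK_MMU_UPDATE_PREEMPTED };
    struct mock_mc_state mcs = { .in_multicall = 0,
                                 .args = { 0, 7 | MOCK_MMU_UPDATE_PREEMPTED } };

    /* Plain hypercall: the count comes from %ecx. */
    printf("direct:    %u ops left\n", remaining_ops(&regs, &mcs));

    /* Same call issued through a multicall: use the multicall argument slot. */
    mcs.in_multicall = 1;
    printf("multicall: %u ops left\n", remaining_ops(&regs, &mcs));
    return 0;
}
/* --- end of sketch ----------------------------------------------------- */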
#ifdef CONFIG_COMPAT

#include <xen/event.h>
#include <xen/multicall.h>
#include <compat/memory.h>
#include <compat/xen.h>
int compat_set_gdt(XEN_GUEST_HANDLE(uint) frame_list, unsigned int entries)
{
    unsigned int i, nr_pages = (entries + 511) / 512;
    unsigned long frames[16];
    long ret;

    /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */
    if ( entries > FIRST_RESERVED_GDT_ENTRY )
        return -EINVAL;

    if ( !guest_handle_okay(frame_list, nr_pages) )
        return -EFAULT;

    for ( i = 0; i < nr_pages; ++i )
    {
        unsigned int frame;

        if ( __copy_from_guest(&frame, frame_list, 1) )
            return -EFAULT;
        frames[i] = frame;
        guest_handle_add_offset(frame_list, 1);
    }

    LOCK_BIGLOCK(current->domain);

    if ( (ret = set_gdt(current, frames, entries)) == 0 )
        local_flush_tlb();

    UNLOCK_BIGLOCK(current->domain);

    return ret;
}
int compat_update_descriptor(u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi)
{
    return do_update_descriptor(pa_lo | ((u64)pa_hi << 32),
                                desc_lo | ((u64)desc_hi << 32));
}
int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    struct compat_machphys_mfn_list xmml;
    l2_pgentry_t l2e;
    unsigned long v;
    compat_pfn_t mfn;
    unsigned int i;
    int rc = 0;

    switch ( op )
    {
    case XENMEM_add_to_physmap:
    {
        struct compat_add_to_physmap cmp;
        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

        XLAT_add_to_physmap(nat, &cmp);
        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));

        break;
    }

    case XENMEM_set_memory_map:
    {
        struct compat_foreign_memory_map cmp;
        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_foreign_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));

        break;
    }

    case XENMEM_memory_map:
    case XENMEM_machine_memory_map:
    {
        struct compat_memory_map cmp;
        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);

        if ( copy_from_guest(&cmp, arg, 1) )
            return -EFAULT;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) \
        guest_from_compat_handle((_d_)->buffer, (_s_)->buffer)
        XLAT_memory_map(nat, &cmp);
#undef XLAT_memory_map_HNDL_buffer

        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
        if ( rc < 0 )
            break;

#define XLAT_memory_map_HNDL_buffer(_d_, _s_) ((void)0)
        XLAT_memory_map(&cmp, nat);
#undef XLAT_memory_map_HNDL_buffer
        if ( copy_to_guest(arg, &cmp, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_machphys_mapping:
    {
        struct domain *d = current->domain;
        struct compat_machphys_mapping mapping = {
            .v_start = MACH2PHYS_COMPAT_VIRT_START(d),
            .v_end   = MACH2PHYS_COMPAT_VIRT_END,
            .max_mfn = MACH2PHYS_COMPAT_NR_ENTRIES(d) - 1
        };

        if ( copy_to_guest(arg, &mapping, 1) )
            rc = -EFAULT;

        break;
    }

    case XENMEM_machphys_mfn_list:
        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

        for ( i = 0, v = RDWR_COMPAT_MPT_VIRT_START;
              (i != xmml.max_extents) && (v != RDWR_COMPAT_MPT_VIRT_END);
              i++, v += 1 << L2_PAGETABLE_SHIFT )
        {
            l2e = compat_idle_pg_table_l2[l2_table_offset(v)];
            if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
                break;
            mfn = l2e_get_pfn(l2e) + l1_table_offset(v);
            if ( copy_to_compat_offset(xmml.extent_start, i, &mfn, 1) )
                return -EFAULT;
        }

        xmml.nr_extents = i;
        if ( copy_to_guest(arg, &xmml, 1) )
            rc = -EFAULT;

        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}
int compat_update_va_mapping(unsigned int va, u32 lo, u32 hi,
                             unsigned int flags)
{
    return do_update_va_mapping(va, lo | ((u64)hi << 32), flags);
}

int compat_update_va_mapping_otherdomain(unsigned long va, u32 lo, u32 hi,
                                         unsigned long flags,
                                         domid_t domid)
{
    return do_update_va_mapping_otherdomain(va, lo | ((u64)hi << 32), flags, domid);
}
DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);

int compat_mmuext_op(XEN_GUEST_HANDLE(mmuext_op_compat_t) cmp_uops,
                     unsigned int count,
                     XEN_GUEST_HANDLE(uint) pdone,
                     unsigned int foreigndom)
{
    unsigned int i, preempt_mask;
    int rc = 0;
    XEN_GUEST_HANDLE(mmuext_op_t) nat_ops;

    /* Peel the continuation marker off the count; pass it on to do_mmuext_op(). */
    preempt_mask = count & MMU_UPDATE_PREEMPTED;
    count ^= preempt_mask;

    if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
        return -EFAULT;

    /* Translated (native) ops are built in the per-vCPU argument translation area. */
    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
    for ( ; count; count -= i )
    {
        mmuext_op_t *nat_op = nat_ops.p;
        unsigned int limit;
        int err;

        if ( hypercall_preempt_check() )
        {
            rc = hypercall_create_continuation(
                __HYPERVISOR_mmuext_op, "hihi",
                cmp_uops, count | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
            break;
        }

        /* Number of translated ops that fit into the translation area. */
        limit = COMPAT_ARG_XLAT_SIZE / sizeof(*nat_op);

        for ( i = 0; i < min(limit, count); ++i )
        {
            mmuext_op_compat_t cmp_op;
            enum XLAT_mmuext_op_arg1 arg1;
            enum XLAT_mmuext_op_arg2 arg2;

            if ( unlikely(__copy_from_guest(&cmp_op, cmp_uops, 1) != 0) )
            {
                rc = -EFAULT;
                break;
            }

            switch ( cmp_op.cmd )
            {
            case MMUEXT_PIN_L1_TABLE:
            case MMUEXT_PIN_L2_TABLE:
            case MMUEXT_PIN_L3_TABLE:
            case MMUEXT_PIN_L4_TABLE:
            case MMUEXT_UNPIN_TABLE:
            case MMUEXT_NEW_BASEPTR:
                arg1 = XLAT_mmuext_op_arg1_mfn;
                break;
            default:
                arg1 = XLAT_mmuext_op_arg1_linear_addr;
                break;
            case MMUEXT_NEW_USER_BASEPTR:
                rc = -EINVAL;
                /* fall through */
            case MMUEXT_TLB_FLUSH_LOCAL:
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_TLB_FLUSH_ALL:
            case MMUEXT_FLUSH_CACHE:
                arg1 = -1;
                break;
            }

            if ( rc )
                break;

            switch ( cmp_op.cmd )
            {
            case MMUEXT_SET_LDT:
                arg2 = XLAT_mmuext_op_arg2_nr_ents;
                break;
            case MMUEXT_TLB_FLUSH_MULTI:
            case MMUEXT_INVLPG_MULTI:
                arg2 = XLAT_mmuext_op_arg2_vcpumask;
                break;
            default:
                arg2 = -1;
                break;
            }

            /*
             * A vcpumask handle is widened into an unsigned long placed at
             * the (shrinking) tail of the translation area.
             */
#define XLAT_mmuext_op_HNDL_arg2_vcpumask(_d_, _s_) \
        do \
        { \
            unsigned int vcpumask; \
            if ( i < --limit ) \
            { \
                (_d_)->arg2.vcpumask.p = (void *)(nat_ops.p + limit); \
                if ( copy_from_compat(&vcpumask, (_s_)->arg2.vcpumask, 1) == 0 ) \
                    *(unsigned long *)(_d_)->arg2.vcpumask.p = vcpumask; \
                else \
                    rc = -EFAULT; \
            } \
        } while(0)
            XLAT_mmuext_op(nat_op, &cmp_op);
#undef XLAT_mmuext_op_HNDL_arg2_vcpumask

            if ( rc || i >= limit )
                break;

            guest_handle_add_offset(cmp_uops, 1);
            ++nat_op;
        }
        err = do_mmuext_op(nat_ops, i | preempt_mask, pdone, foreigndom);

        if ( err )
        {
            BUILD_BUG_ON(__HYPERVISOR_mmuext_op <= 0);
            if ( err == __HYPERVISOR_mmuext_op )
            {
                /*
                 * do_mmuext_op() was preempted. The count of remaining ops
                 * (with MMU_UPDATE_PREEMPTED set) sits in %ecx for a plain
                 * hypercall, but in the multicall state when we were entered
                 * via a multicall. Fix up the continuation so its handle and
                 * count refer to the guest's compat op list rather than to
                 * the translated native copy.
                 */
                struct cpu_user_regs *regs = guest_cpu_user_regs();
                struct mc_state *mcs = &this_cpu(mc_state);
                unsigned int arg1 = !test_bit(_MCSF_in_multicall, &mcs->flags)
                                    ? regs->ecx
                                    : mcs->call.args[1];
                unsigned int left = arg1 & ~MMU_UPDATE_PREEMPTED;

                BUG_ON(left == arg1);
                BUG_ON(left > count);
                guest_handle_add_offset(nat_ops, count - left);
                BUG_ON(left + i < count);
                guest_handle_add_offset(cmp_uops, (signed int)(count - left - i));
                left = 1;
                BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops, cmp_uops));
                BUG_ON(left != arg1);
                if (!test_bit(_MCSF_in_multicall, &mcs->flags))
                    regs->_ecx += count - i;
                else
                    mcs->compat_call.args[1] += count - i;
            }
            else
                BUG_ON(err > 0);
            rc = err;
        }

        if ( rc )
            break;

        /* Force do_mmuext_op() to not start counting from zero again. */
        preempt_mask = MMU_UPDATE_PREEMPTED;
    }

    return rc;
}
#endif /* CONFIG_COMPAT */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */