ia64/linux-2.6.18-xen.hg

view arch/ia64/xen/xcom_privcmd.c @ 776:93ea69924241

Sync Xen public memory.h header file.

Remove ia64 translations of removed hypercall structs.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 14 11:44:54 2009 +0000 (2009-01-14)
parents 45c3a3dfa5b5
children 9ab1c319531f
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
17 * Tristan Gingold <tristan.gingold@bull.net>
18 */
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/gfp.h>
23 #include <linux/module.h>
24 #include <xen/interface/xen.h>
25 #include <xen/interface/platform.h>
26 #define __XEN__
27 #include <xen/interface/domctl.h>
28 #include <xen/interface/sysctl.h>
29 #include <xen/interface/memory.h>
30 #include <xen/interface/version.h>
31 #include <xen/interface/event_channel.h>
32 #include <xen/interface/xsm/acm_ops.h>
33 #include <xen/interface/hvm/params.h>
34 #include <xen/interface/arch-ia64/debug_op.h>
35 #include <xen/public/privcmd.h>
36 #include <asm/hypercall.h>
37 #include <asm/page.h>
38 #include <asm/uaccess.h>
39 #include <asm/xen/xencomm.h>
/* Integer division of v by s, rounding up (used to size bitmaps in bytes). */
#define ROUND_DIV(v,s) (((v) + (s) - 1) / (s))
/*
 * Translate a XENPF platform_op privcmd hypercall into xencomm form.
 * The argument struct is copied into kernel space and wrapped in an
 * inline (no-alloc) xencomm descriptor before issuing the hypercall.
 *
 * NOTE(review): the switch below currently contains only a default arm,
 * so every command returns -ENOSYS and the code after the switch is
 * unreachable.  It is kept as scaffolding: platform ops that need
 * nested-pointer translation are expected to be added as cases above
 * the default.
 */
static int
xencomm_privcmd_platform_op(privcmd_hypercall_t *hypercall)
{
	struct xen_platform_op kern_op;
	struct xen_platform_op __user *user_op = (struct xen_platform_op __user *)hypercall->arg[0];
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;	/* nested buffer (none mapped today) */
	int ret = 0;

	if (copy_from_user(&kern_op, user_op, sizeof(struct xen_platform_op)))
		return -EFAULT;

	/* Reject tools built against a different hypervisor ABI. */
	if (kern_op.interface_version != XENPF_INTERFACE_VERSION)
		return -EACCES;

	/* kern_op is on the stack: wrap it without allocating. */
	op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));

	switch (kern_op.cmd) {
	default:
		printk("%s: unknown platform cmd %d\n", __func__, kern_op.cmd);
		return -ENOSYS;
	}

	if (ret) {
		/* error mapping the nested pointer */
		return ret;
	}

	ret = xencomm_arch_hypercall_platform_op(op_desc);

	/* FIXME: should we restore the handle? */
	if (copy_to_user(user_op, &kern_op, sizeof(struct xen_platform_op)))
		ret = -EFAULT;

	xencomm_free(desc);
	return ret;
}
81 static int
82 xencomm_privcmd_sysctl(privcmd_hypercall_t *hypercall)
83 {
84 xen_sysctl_t kern_op;
85 xen_sysctl_t __user *user_op;
86 struct xencomm_handle *op_desc;
87 struct xencomm_handle *desc = NULL;
88 struct xencomm_handle *desc1 = NULL;
89 int ret = 0;
91 user_op = (xen_sysctl_t __user *)hypercall->arg[0];
93 if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
94 return -EFAULT;
96 if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION)
97 return -EACCES;
99 op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
101 switch (kern_op.cmd) {
102 case XEN_SYSCTL_readconsole:
103 desc = xencomm_map(
104 xen_guest_handle(kern_op.u.readconsole.buffer),
105 kern_op.u.readconsole.count);
106 if (xen_guest_handle(kern_op.u.readconsole.buffer) != NULL &&
107 kern_op.u.readconsole.count > 0 && desc == NULL)
108 return -ENOMEM;
109 set_xen_guest_handle(kern_op.u.readconsole.buffer,
110 (void *)desc);
111 break;
112 case XEN_SYSCTL_tbuf_op:
113 case XEN_SYSCTL_sched_id:
114 case XEN_SYSCTL_availheap:
115 break;
116 case XEN_SYSCTL_perfc_op:
117 {
118 struct xencomm_handle *tmp_desc;
119 xen_sysctl_t tmp_op = {
120 .cmd = XEN_SYSCTL_perfc_op,
121 .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
122 .u.perfc_op = {
123 .cmd = XEN_SYSCTL_PERFCOP_query,
124 /* .desc.p = NULL, */
125 /* .val.p = NULL, */
126 },
127 };
129 if (xen_guest_handle(kern_op.u.perfc_op.desc) == NULL) {
130 if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL)
131 return -EINVAL;
132 break;
133 }
135 /* query the buffer size for xencomm */
136 tmp_desc = xencomm_map_no_alloc(&tmp_op, sizeof(tmp_op));
137 ret = xencomm_arch_hypercall_sysctl(tmp_desc);
138 if (ret)
139 return ret;
141 desc = xencomm_map(xen_guest_handle(kern_op.u.perfc_op.desc),
142 tmp_op.u.perfc_op.nr_counters *
143 sizeof(xen_sysctl_perfc_desc_t));
144 if (xen_guest_handle(kern_op.u.perfc_op.desc) != NULL &&
145 tmp_op.u.perfc_op.nr_counters > 0 && desc == NULL)
146 return -ENOMEM;
148 set_xen_guest_handle(kern_op.u.perfc_op.desc, (void *)desc);
150 desc1 = xencomm_map(xen_guest_handle(kern_op.u.perfc_op.val),
151 tmp_op.u.perfc_op.nr_vals *
152 sizeof(xen_sysctl_perfc_val_t));
153 if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL &&
154 tmp_op.u.perfc_op.nr_vals > 0 && desc1 == NULL) {
155 xencomm_free(desc);
156 return -ENOMEM;
157 }
159 set_xen_guest_handle(kern_op.u.perfc_op.val, (void *)desc1);
160 break;
161 }
162 case XEN_SYSCTL_getdomaininfolist:
163 desc = xencomm_map(
164 xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
165 kern_op.u.getdomaininfolist.max_domains *
166 sizeof(xen_domctl_getdomaininfo_t));
167 if (xen_guest_handle(kern_op.u.getdomaininfolist.buffer) !=
168 NULL && kern_op.u.getdomaininfolist.max_domains > 0 &&
169 desc == NULL)
170 return -ENOMEM;
171 set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
172 (void *)desc);
173 break;
174 case XEN_SYSCTL_debug_keys:
175 desc = xencomm_map(
176 xen_guest_handle(kern_op.u.debug_keys.keys),
177 kern_op.u.debug_keys.nr_keys);
178 if (xen_guest_handle(kern_op.u.debug_keys.keys) != NULL &&
179 kern_op.u.debug_keys.nr_keys > 0 && desc == NULL)
180 return -ENOMEM;
181 set_xen_guest_handle(kern_op.u.debug_keys.keys,
182 (void *)desc);
183 break;
185 case XEN_SYSCTL_physinfo:
186 desc = xencomm_map(
187 xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
188 kern_op.u.physinfo.max_cpu_id * sizeof(uint32_t));
189 if (xen_guest_handle(kern_op.u.physinfo.cpu_to_node) != NULL &&
190 kern_op.u.physinfo.max_cpu_id > 0 && desc == NULL)
191 return -ENOMEM;
193 set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
194 (void *)desc);
195 break;
197 case XEN_SYSCTL_get_pmstat:
198 if (kern_op.u.get_pmstat.type == PMSTAT_get_pxstat) {
199 struct pm_px_stat *getpx =
200 &kern_op.u.get_pmstat.u.getpx;
201 desc = xencomm_map(
202 xen_guest_handle(getpx->trans_pt),
203 getpx->total * getpx->total *
204 sizeof(uint64_t));
205 if (xen_guest_handle(getpx->trans_pt) != NULL &&
206 getpx->total > 0 && desc == NULL)
207 return -ENOMEM;
209 set_xen_guest_handle(getpx->trans_pt, (void *)desc);
211 desc1 = xencomm_map(xen_guest_handle(getpx->pt),
212 getpx->total * sizeof(pm_px_val_t));
213 if (xen_guest_handle(getpx->pt) != NULL &&
214 getpx->total > 0 && desc1 == NULL)
215 return -ENOMEM;
217 set_xen_guest_handle(getpx->pt, (void *)desc1);
218 }
219 break;
221 default:
222 printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
223 return -ENOSYS;
224 }
226 if (ret) {
227 /* error mapping the nested pointer */
228 return ret;
229 }
231 ret = xencomm_arch_hypercall_sysctl(op_desc);
233 /* FIXME: should we restore the handles? */
234 if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
235 ret = -EFAULT;
237 xencomm_free(desc);
238 xencomm_free(desc1);
239 return ret;
240 }
/*
 * Translate a XEN_DOMCTL privcmd hypercall into xencomm form.
 *
 * The domctl argument is copied into kernel space and wrapped in an
 * inline xencomm descriptor; commands whose union member carries a
 * guest-handle buffer get that buffer wrapped in its own (allocated)
 * descriptor, with the handle rewritten to point at it.  After the
 * hypercall the argument is copied back and the descriptor is freed.
 *
 * Commands listed with an empty case body carry no nested pointers and
 * need only the top-level translation.
 */
static int
xencomm_privcmd_domctl(privcmd_hypercall_t *hypercall)
{
	xen_domctl_t kern_op;
	xen_domctl_t __user *user_op;
	struct xencomm_handle *op_desc;
	struct xencomm_handle *desc = NULL;	/* nested buffer, if any */
	int ret = 0;

	user_op = (xen_domctl_t __user *)hypercall->arg[0];

	if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
		return -EFAULT;

	/* Reject tools built against a different hypervisor ABI. */
	if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION)
		return -EACCES;

	/* kern_op is on the stack: wrap it without allocating. */
	op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));

	switch (kern_op.cmd) {
	case XEN_DOMCTL_createdomain:
	case XEN_DOMCTL_destroydomain:
	case XEN_DOMCTL_pausedomain:
	case XEN_DOMCTL_unpausedomain:
	case XEN_DOMCTL_resumedomain:
	case XEN_DOMCTL_getdomaininfo:
		break;
	case XEN_DOMCTL_getmemlist:
	{
		unsigned long nr_pages = kern_op.u.getmemlist.max_pfns;

		desc = xencomm_map(
			xen_guest_handle(kern_op.u.getmemlist.buffer),
			nr_pages * sizeof(unsigned long));
		if (xen_guest_handle(kern_op.u.getmemlist.buffer) != NULL &&
		    nr_pages > 0 && desc == NULL)
			return -ENOMEM;
		set_xen_guest_handle(kern_op.u.getmemlist.buffer,
				     (void *)desc);
		break;
	}
	case XEN_DOMCTL_getpageframeinfo:
		break;
	case XEN_DOMCTL_getpageframeinfo2:
		desc = xencomm_map(
			xen_guest_handle(kern_op.u.getpageframeinfo2.array),
			kern_op.u.getpageframeinfo2.num);
		if (xen_guest_handle(kern_op.u.getpageframeinfo2.array) !=
		    NULL && kern_op.u.getpageframeinfo2.num > 0 &&
		    desc == NULL)
			return -ENOMEM;
		set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
				     (void *)desc);
		break;
	case XEN_DOMCTL_shadow_op:
		/* dirty bitmap: one bit per page, rounded up to bytes */
		desc = xencomm_map(
			xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
			ROUND_DIV(kern_op.u.shadow_op.pages, 8));
		if (xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap) != NULL
		    && kern_op.u.shadow_op.pages > 0 && desc == NULL)
			return -ENOMEM;
		set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
				     (void *)desc);
		break;
	case XEN_DOMCTL_max_mem:
		break;
	case XEN_DOMCTL_setvcpucontext:
	case XEN_DOMCTL_getvcpucontext:
		desc = xencomm_map(
			xen_guest_handle(kern_op.u.vcpucontext.ctxt),
			sizeof(vcpu_guest_context_t));
		if (xen_guest_handle(kern_op.u.vcpucontext.ctxt) != NULL &&
		    desc == NULL)
			return -ENOMEM;
		set_xen_guest_handle(kern_op.u.vcpucontext.ctxt, (void *)desc);
		break;
	case XEN_DOMCTL_getvcpuinfo:
		break;
	case XEN_DOMCTL_setvcpuaffinity:
	case XEN_DOMCTL_getvcpuaffinity:
		/* cpumap bitmap: one bit per cpu, rounded up to bytes */
		desc = xencomm_map(
			xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
			ROUND_DIV(kern_op.u.vcpuaffinity.cpumap.nr_cpus, 8));
		if (xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap) !=
		    NULL && kern_op.u.vcpuaffinity.cpumap.nr_cpus > 0 &&
		    desc == NULL)
			return -ENOMEM;
		set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
				     (void *)desc);
		break;
	case XEN_DOMCTL_gethvmcontext:
	case XEN_DOMCTL_sethvmcontext:
		/* size == 0 is a valid "query size" call: no buffer mapped */
		if (kern_op.u.hvmcontext.size > 0)
			desc = xencomm_map(
				xen_guest_handle(kern_op.u.hvmcontext.buffer),
				kern_op.u.hvmcontext.size);
		if (xen_guest_handle(kern_op.u.hvmcontext.buffer) != NULL &&
		    kern_op.u.hvmcontext.size > 0 && desc == NULL)
			return -ENOMEM;
		set_xen_guest_handle(kern_op.u.hvmcontext.buffer, (void*)desc);
		break;
	case XEN_DOMCTL_get_device_group:
	{
		struct xen_domctl_get_device_group *get_device_group =
			&kern_op.u.get_device_group;
		desc = xencomm_map(
			xen_guest_handle(get_device_group->sdev_array),
			get_device_group->max_sdevs * sizeof(uint32_t));
		if (xen_guest_handle(get_device_group->sdev_array) != NULL &&
		    get_device_group->max_sdevs > 0 && desc == NULL)
			return -ENOMEM;
		set_xen_guest_handle(kern_op.u.get_device_group.sdev_array,
				     (void*)desc);
		break;
	}
	case XEN_DOMCTL_max_vcpus:
	case XEN_DOMCTL_scheduler_op:
	case XEN_DOMCTL_setdomainhandle:
	case XEN_DOMCTL_setdebugging:
	case XEN_DOMCTL_irq_permission:
	case XEN_DOMCTL_iomem_permission:
	case XEN_DOMCTL_ioport_permission:
	case XEN_DOMCTL_hypercall_init:
	case XEN_DOMCTL_arch_setup:
	case XEN_DOMCTL_settimeoffset:
	case XEN_DOMCTL_sendtrigger:
	case XEN_DOMCTL_set_opt_feature:
	case XEN_DOMCTL_assign_device:
	case XEN_DOMCTL_subscribe:
	case XEN_DOMCTL_test_assign_device:
	case XEN_DOMCTL_deassign_device:
	case XEN_DOMCTL_bind_pt_irq:
	case XEN_DOMCTL_unbind_pt_irq:
	case XEN_DOMCTL_memory_mapping:
	case XEN_DOMCTL_ioport_mapping:
		break;
	case XEN_DOMCTL_pin_mem_cacheattr:
		/* not supported on ia64: don't bother the hypervisor */
		return -ENOSYS;
	default:
		printk("%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
		return -ENOSYS;
	}

	if (ret) {
		/* error mapping the nested pointer */
		return ret;
	}

	ret = xencomm_arch_hypercall_domctl (op_desc);

	/* FIXME: should we restore the handle? */
	if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
		ret = -EFAULT;

	xencomm_free(desc);
	return ret;
}
400 static int
401 xencomm_privcmd_xsm_op(privcmd_hypercall_t *hypercall)
402 {
403 void __user *arg = (void __user *)hypercall->arg[0];
404 xen_acmctl_t kern_arg;
405 struct xencomm_handle *op_desc;
406 struct xencomm_handle *desc = NULL;
407 int ret;
409 if (copy_from_user(&kern_arg, arg, sizeof(kern_arg)))
410 return -EFAULT;
411 if (kern_arg.interface_version != ACM_INTERFACE_VERSION)
412 return -ENOSYS;
414 switch (kern_arg.cmd) {
415 case ACMOP_getssid: {
416 op_desc = xencomm_map_no_alloc(&kern_arg, sizeof(kern_arg));
418 desc = xencomm_map(
419 xen_guest_handle(kern_arg.u.getssid.ssidbuf),
420 kern_arg.u.getssid.ssidbuf_size);
421 if (xen_guest_handle(kern_arg.u.getssid.ssidbuf) != NULL &&
422 kern_arg.u.getssid.ssidbuf_size > 0 && desc == NULL)
423 return -ENOMEM;
425 set_xen_guest_handle(kern_arg.u.getssid.ssidbuf, (void *)desc);
427 ret = xencomm_arch_hypercall_xsm_op(op_desc);
429 xencomm_free(desc);
431 if (copy_to_user(arg, &kern_arg, sizeof(kern_arg)))
432 return -EFAULT;
433 return ret;
434 }
435 default:
436 printk("%s: unknown acm_op cmd %d\n", __func__, kern_arg.cmd);
437 return -ENOSYS;
438 }
440 return ret;
441 }
/*
 * Translate XENMEM reservation-style hypercalls (increase/decrease
 * reservation, populate_physmap) into xencomm form.
 *
 * If no extent array is supplied the hypercall is issued directly.
 * Otherwise the extent array is mapped and the hypercall issued in
 * chunks of at most MEMORYOP_MAX_EXTENTS, both to fit the xencomm
 * single-page descriptor limit and to keep each hypercall short enough
 * to avoid soft-lockup warnings (see the workaround comments below).
 *
 * Returns the total number of extents processed (possibly fewer than
 * requested, matching the hypervisor's partial-completion semantics),
 * or a negative errno if nothing was processed.
 */
static int
xencomm_privcmd_memory_reservation_op(privcmd_hypercall_t *hypercall)
{
	const unsigned long cmd = hypercall->arg[0];
	int ret = 0;
	xen_memory_reservation_t kern_op;
	xen_memory_reservation_t __user *user_op;
	struct xencomm_handle *desc = NULL;	/* per-chunk extent mapping */
	struct xencomm_handle *desc_op;		/* wraps kern_op itself */

	user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
	if (copy_from_user(&kern_op, user_op,
			   sizeof(xen_memory_reservation_t)))
		return -EFAULT;
	desc_op = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));

	if (!xen_guest_handle(kern_op.extent_start)) {
		/* No extent array: single direct hypercall. */
		ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
		if (ret < 0)
			return ret;
	} else {
		xen_ulong_t nr_done = 0;
		xen_ulong_t nr_extents = kern_op.nr_extents;
		void *addr = xen_guest_handle(kern_op.extent_start);

		/*
		 * Work around.
		 * Xencomm has single page size limit caused
		 * by xencomm_alloc()/xencomm_free() so that
		 * we have to repeat the hypercall.
		 * This limitation can be removed.
		 */
#define MEMORYOP_XENCOMM_LIMIT					\
	(((((PAGE_SIZE - sizeof(struct xencomm_desc)) /		\
	    sizeof(uint64_t)) - 2) * PAGE_SIZE) /		\
	 sizeof(*xen_guest_handle(kern_op.extent_start)))

		/*
		 * Work around.
		 * Even if the above limitation is removed,
		 * the hypercall with large number of extents
		 * may cause the soft lockup warning.
		 * In order to avoid the warning, we limit
		 * the number of extents and repeat the hypercall.
		 * The following value is determined by evaluation.
		 * Time of one hypercall should be smaller than
		 * a vcpu time slice. The time with current
		 * MEMORYOP_MAX_EXTENTS is around 5 msec.
		 * If the following limit causes some issues,
		 * we should decrease this value.
		 *
		 * Another way would be that start with small value and
		 * increase adoptively measuring hypercall time.
		 * It might be over-kill.
		 */
#define MEMORYOP_MAX_EXTENTS	(MEMORYOP_XENCOMM_LIMIT / 512)

		while (nr_extents > 0) {
			xen_ulong_t nr_tmp = nr_extents;
			if (nr_tmp > MEMORYOP_MAX_EXTENTS)
				nr_tmp = MEMORYOP_MAX_EXTENTS;

			kern_op.nr_extents = nr_tmp;
			/* map only this chunk of the extent array */
			desc = xencomm_map
				(addr + nr_done * sizeof(*xen_guest_handle(kern_op.extent_start)),
				 nr_tmp * sizeof(*xen_guest_handle(kern_op.extent_start)));
			if (addr != NULL && nr_tmp > 0 && desc == NULL)
				return nr_done > 0 ? nr_done : -ENOMEM;

			set_xen_guest_handle(kern_op.extent_start,
					     (void *)desc);

			ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
			xencomm_free(desc);
			if (ret < 0)
				return nr_done > 0 ? nr_done : ret;

			/* ret is the number of extents the hypervisor did */
			nr_done += ret;
			nr_extents -= ret;
			if (ret < nr_tmp)
				break;

			/*
			 * prevent softlock up message.
			 * give cpu to soft lockup kernel thread.
			 */
			if (nr_extents > 0)
				schedule();
		}
		ret = nr_done;
		/* restore the caller's handle before copying back */
		set_xen_guest_handle(kern_op.extent_start, addr);
	}

	if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
		return -EFAULT;

	return ret;
}
542 static int
543 xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
544 {
545 const unsigned long cmd = hypercall->arg[0];
546 int ret = 0;
548 switch (cmd) {
549 case XENMEM_increase_reservation:
550 case XENMEM_decrease_reservation:
551 case XENMEM_populate_physmap:
552 return xencomm_privcmd_memory_reservation_op(hypercall);
553 case XENMEM_maximum_gpfn:
554 {
555 domid_t kern_domid;
556 domid_t __user *user_domid;
557 struct xencomm_handle *desc;
559 user_domid = (domid_t __user *)hypercall->arg[1];
560 if (copy_from_user(&kern_domid, user_domid, sizeof(domid_t)))
561 return -EFAULT;
562 desc = xencomm_map_no_alloc(&kern_domid, sizeof(kern_domid));
564 ret = xencomm_arch_hypercall_memory_op(cmd, desc);
566 return ret;
567 }
568 case XENMEM_add_to_physmap:
569 {
570 void __user *arg = (void __user *)hypercall->arg[1];
571 struct xencomm_handle *desc;
573 desc = xencomm_map(arg, sizeof(struct xen_add_to_physmap));
574 if (desc == NULL)
575 return -ENOMEM;
577 ret = xencomm_arch_hypercall_memory_op(cmd, desc);
579 xencomm_free(desc);
580 return ret;
581 }
582 default:
583 printk("%s: unknown memory op %lu\n", __func__, cmd);
584 ret = -ENOSYS;
585 }
586 return ret;
587 }
589 static int
590 xencomm_privcmd_xen_version(privcmd_hypercall_t *hypercall)
591 {
592 int cmd = hypercall->arg[0];
593 void __user *arg = (void __user *)hypercall->arg[1];
594 struct xencomm_handle *desc;
595 size_t argsize;
596 int rc;
598 switch (cmd) {
599 case XENVER_version:
600 /* do not actually pass an argument */
601 return xencomm_arch_hypercall_xen_version(cmd, 0);
602 case XENVER_extraversion:
603 argsize = sizeof(xen_extraversion_t);
604 break;
605 case XENVER_compile_info:
606 argsize = sizeof(xen_compile_info_t);
607 break;
608 case XENVER_capabilities:
609 argsize = sizeof(xen_capabilities_info_t);
610 break;
611 case XENVER_changeset:
612 argsize = sizeof(xen_changeset_info_t);
613 break;
614 case XENVER_platform_parameters:
615 argsize = sizeof(xen_platform_parameters_t);
616 break;
617 case XENVER_pagesize:
618 argsize = (arg == NULL) ? 0 : sizeof(void *);
619 break;
620 case XENVER_get_features:
621 argsize = (arg == NULL) ? 0 : sizeof(xen_feature_info_t);
622 break;
624 default:
625 printk("%s: unknown version op %d\n", __func__, cmd);
626 return -ENOSYS;
627 }
629 desc = xencomm_map(arg, argsize);
630 if (arg != NULL && argsize > 0 && desc == NULL)
631 return -ENOMEM;
633 rc = xencomm_arch_hypercall_xen_version(cmd, desc);
635 xencomm_free(desc);
637 return rc;
638 }
640 static int
641 xencomm_privcmd_event_channel_op(privcmd_hypercall_t *hypercall)
642 {
643 int cmd = hypercall->arg[0];
644 struct xencomm_handle *desc;
645 unsigned int argsize;
646 int ret;
648 switch (cmd) {
649 case EVTCHNOP_alloc_unbound:
650 argsize = sizeof(evtchn_alloc_unbound_t);
651 break;
653 case EVTCHNOP_status:
654 argsize = sizeof(evtchn_status_t);
655 break;
657 default:
658 printk("%s: unknown EVTCHNOP %d\n", __func__, cmd);
659 return -EINVAL;
660 }
662 desc = xencomm_map((void *)hypercall->arg[1], argsize);
663 if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
664 return -ENOMEM;
666 ret = xencomm_arch_hypercall_event_channel_op(cmd, desc);
668 xencomm_free(desc);
669 return ret;
670 }
/*
 * HVMOP_track_dirty_vram handler.  The hypercall is not implemented on
 * xen/ia64, so the live code path simply returns -ENOSYS without
 * entering the hypervisor; the '#else' branch is the prepared
 * translation (map the dirty bitmap through xencomm) to be enabled
 * once the hypervisor side exists.
 */
static int
xencomm_privcmd_hvm_op_track_dirty_vram(privcmd_hypercall_t *hypercall)
{
#if 1
	/*
	 * At this moment HVMOP_track_dirty_vram isn't implemented
	 * on xen/ia64 so that it just returns -ENOSYS.
	 * Don't issue hypercall to get -ENOSYS.
	 * When the hypercall is implemented, enable the following codes.
	 */
	return -ENOSYS;
#else
	int cmd = hypercall->arg[0];
	struct xen_hvm_track_dirty_vram *user_op = (void*)hypercall->arg[1];
	struct xen_hvm_track_dirty_vram kern_op;
	struct xencomm_handle *desc;
	struct xencomm_handle *bitmap_desc;
	int ret;

	BUG_ON(cmd != HVMOP_track_dirty_vram);
	if (copy_from_user(&kern_op, user_op, sizeof(kern_op)))
		return -EFAULT;
	desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
	/* one byte per page in the dirty bitmap */
	bitmap_desc = xencomm_map(xen_guest_handle(kern_op.dirty_bitmap),
				  kern_op.nr * sizeof(uint8_t));
	if (bitmap_desc == NULL)
		return -ENOMEM;
	set_xen_guest_handle(kern_op.dirty_bitmap, (void*)bitmap_desc);
	ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
	xencomm_free(bitmap_desc);

	return ret;
#endif
}
707 static int
708 xencomm_privcmd_hvm_op(privcmd_hypercall_t *hypercall)
709 {
710 int cmd = hypercall->arg[0];
711 struct xencomm_handle *desc;
712 unsigned int argsize;
713 int ret;
715 switch (cmd) {
716 case HVMOP_get_param:
717 case HVMOP_set_param:
718 argsize = sizeof(xen_hvm_param_t);
719 break;
720 case HVMOP_set_pci_intx_level:
721 argsize = sizeof(xen_hvm_set_pci_intx_level_t);
722 break;
723 case HVMOP_set_isa_irq_level:
724 argsize = sizeof(xen_hvm_set_isa_irq_level_t);
725 break;
726 case HVMOP_set_pci_link_route:
727 argsize = sizeof(xen_hvm_set_pci_link_route_t);
728 break;
729 case HVMOP_set_mem_type:
730 argsize = sizeof(xen_hvm_set_mem_type_t);
731 break;
733 case HVMOP_track_dirty_vram:
734 return xencomm_privcmd_hvm_op_track_dirty_vram(hypercall);
736 default:
737 printk("%s: unknown HVMOP %d\n", __func__, cmd);
738 return -EINVAL;
739 }
741 desc = xencomm_map((void *)hypercall->arg[1], argsize);
742 if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
743 return -ENOMEM;
745 ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
747 xencomm_free(desc);
748 return ret;
749 }
751 static int
752 xencomm_privcmd_sched_op(privcmd_hypercall_t *hypercall)
753 {
754 int cmd = hypercall->arg[0];
755 struct xencomm_handle *desc;
756 unsigned int argsize;
757 int ret;
759 switch (cmd) {
760 case SCHEDOP_remote_shutdown:
761 argsize = sizeof(sched_remote_shutdown_t);
762 break;
763 default:
764 printk("%s: unknown SCHEDOP %d\n", __func__, cmd);
765 return -EINVAL;
766 }
768 desc = xencomm_map((void *)hypercall->arg[1], argsize);
769 if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
770 return -ENOMEM;
772 ret = xencomm_arch_hypercall_sched_op(cmd, desc);
774 xencomm_free(desc);
775 return ret;
776 }
778 static int
779 xencomm_privcmd_dom0vp_get_memmap(domid_t domid,
780 char* __user buf, unsigned long bufsize)
781 {
782 int ret;
783 struct xencomm_handle *desc;
785 desc = xencomm_map(buf, bufsize);
786 if (bufsize > 0 && desc == NULL)
787 return -ENOMEM;
789 ret = xencomm_arch_hypercall_get_memmap((domid_t)domid, desc);
791 xencomm_free(desc);
792 return ret;
793 }
795 static int
796 xencomm_privcmd_ia64_dom0vp_op(privcmd_hypercall_t *hypercall)
797 {
798 int cmd = hypercall->arg[0];
799 int ret;
801 switch (cmd) {
802 case IA64_DOM0VP_fpswa_revision: {
803 unsigned int revision;
804 unsigned int __user *revision_user =
805 (unsigned int* __user)hypercall->arg[1];
806 struct xencomm_handle *desc;
807 desc = xencomm_map(&revision, sizeof(revision));
808 if (desc == NULL)
809 return -ENOMEM;
811 ret = xencomm_arch_hypercall_fpswa_revision(desc);
812 xencomm_free(desc);
813 if (ret)
814 break;
815 if (copy_to_user(revision_user, &revision, sizeof(revision)))
816 ret = -EFAULT;
817 break;
818 }
819 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
820 case IA64_DOM0VP_expose_foreign_p2m:
821 ret = xen_foreign_p2m_expose(hypercall);
822 break;
823 #endif
824 case IA64_DOM0VP_get_memmap:
825 ret = xencomm_privcmd_dom0vp_get_memmap(
826 (domid_t)hypercall->arg[1],
827 (char* __user)hypercall->arg[2], hypercall->arg[3]);
828 break;
829 default:
830 printk("%s: unknown IA64 DOM0VP op %d\n", __func__, cmd);
831 ret = -EINVAL;
832 break;
833 }
834 return ret;
835 }
837 static int
838 xencomm_privcmd_ia64_debug_op(privcmd_hypercall_t *hypercall)
839 {
840 int cmd = hypercall->arg[0];
841 unsigned long domain = hypercall->arg[1];
842 struct xencomm_handle *desc;
843 int ret;
845 switch (cmd) {
846 case XEN_IA64_DEBUG_OP_SET_FLAGS:
847 case XEN_IA64_DEBUG_OP_GET_FLAGS:
848 break;
849 default:
850 printk("%s: unknown IA64 DEBUGOP %d\n", __func__, cmd);
851 return -EINVAL;
852 }
854 desc = xencomm_map((void *)hypercall->arg[2],
855 sizeof(xen_ia64_debug_op_t));
856 if (desc == NULL)
857 return -ENOMEM;
859 ret = xencomm_arch_hypercall_ia64_debug_op(cmd, domain, desc);
861 xencomm_free(desc);
862 return ret;
863 }
865 static int
866 xencomm_privcmd_ia64_physdev_op(privcmd_hypercall_t *hypercall)
867 {
868 int cmd = hypercall->arg[0];
869 struct xencomm_handle *desc;
870 unsigned int argsize;
871 int ret;
873 switch (cmd) {
874 case PHYSDEVOP_map_pirq:
875 argsize = sizeof(physdev_map_pirq_t);
876 break;
877 case PHYSDEVOP_unmap_pirq:
878 argsize = sizeof(physdev_unmap_pirq_t);
879 break;
880 default:
881 printk("%s: unknown PHYSDEVOP %d\n", __func__, cmd);
882 return -EINVAL;
883 }
885 desc = xencomm_map((void *)hypercall->arg[1], argsize);
886 if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
887 return -ENOMEM;
889 ret = xencomm_arch_hypercall_physdev_op(cmd, desc);
891 xencomm_free(desc);
892 return ret;
893 }
895 int
896 privcmd_hypercall(privcmd_hypercall_t *hypercall)
897 {
898 switch (hypercall->op) {
899 case __HYPERVISOR_platform_op:
900 return xencomm_privcmd_platform_op(hypercall);
901 case __HYPERVISOR_domctl:
902 return xencomm_privcmd_domctl(hypercall);
903 case __HYPERVISOR_sysctl:
904 return xencomm_privcmd_sysctl(hypercall);
905 case __HYPERVISOR_xsm_op:
906 return xencomm_privcmd_xsm_op(hypercall);
907 case __HYPERVISOR_xen_version:
908 return xencomm_privcmd_xen_version(hypercall);
909 case __HYPERVISOR_memory_op:
910 return xencomm_privcmd_memory_op(hypercall);
911 case __HYPERVISOR_event_channel_op:
912 return xencomm_privcmd_event_channel_op(hypercall);
913 case __HYPERVISOR_hvm_op:
914 return xencomm_privcmd_hvm_op(hypercall);
915 case __HYPERVISOR_sched_op:
916 return xencomm_privcmd_sched_op(hypercall);
917 case __HYPERVISOR_ia64_dom0vp_op:
918 return xencomm_privcmd_ia64_dom0vp_op(hypercall);
919 case __HYPERVISOR_ia64_debug_op:
920 return xencomm_privcmd_ia64_debug_op(hypercall);
921 case __HYPERVISOR_physdev_op:
922 return xencomm_privcmd_ia64_physdev_op(hypercall);
923 default:
924 printk("%s: unknown hcall (%ld)\n", __func__, hypercall->op);
925 return -ENOSYS;
926 }
927 }