ia64/linux-2.6.18-xen.hg

view arch/ia64/xen/xcom_privcmd.c @ 912:dd42cdb0ab89

[IA64] Build blktap2 driver by default in ia64 builds.

add CONFIG_XEN_BLKDEV_TAP2=y to buildconfigs/linux-defconfig_xen_ia64.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 12:09:16 2009 +0900 (2009-06-29)
parents 92a119f247f7
children
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
17 * Tristan Gingold <tristan.gingold@bull.net>
18 */
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/gfp.h>
23 #include <linux/module.h>
24 #include <xen/interface/xen.h>
25 #include <xen/interface/platform.h>
26 #define __XEN__
27 #include <xen/interface/domctl.h>
28 #include <xen/interface/sysctl.h>
29 #include <xen/interface/memory.h>
30 #include <xen/interface/version.h>
31 #include <xen/interface/event_channel.h>
32 #include <xen/interface/xsm/acm_ops.h>
33 #include <xen/interface/hvm/params.h>
34 #include <xen/interface/arch-ia64/debug_op.h>
35 #include <xen/interface/tmem.h>
36 #include <xen/public/privcmd.h>
37 #include <asm/hypercall.h>
38 #include <asm/page.h>
39 #include <asm/uaccess.h>
40 #include <asm/xen/xencomm.h>
/* Ceiling division: smallest integer >= v/s (v, s evaluated once each use). */
#define ROUND_DIV(v, s) (((v) + (s) - 1) / (s))
44 static int
45 xencomm_privcmd_platform_op(privcmd_hypercall_t *hypercall)
46 {
47 struct xen_platform_op kern_op;
48 struct xen_platform_op __user *user_op = (struct xen_platform_op __user *)hypercall->arg[0];
49 struct xencomm_handle *op_desc;
50 struct xencomm_handle *desc = NULL;
51 int ret = 0;
53 if (copy_from_user(&kern_op, user_op, sizeof(struct xen_platform_op)))
54 return -EFAULT;
56 if (kern_op.interface_version != XENPF_INTERFACE_VERSION)
57 return -EACCES;
59 op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
61 switch (kern_op.cmd) {
62 default:
63 printk("%s: unknown platform cmd %d\n", __func__, kern_op.cmd);
64 return -ENOSYS;
65 }
67 if (ret) {
68 /* error mapping the nested pointer */
69 return ret;
70 }
72 ret = xencomm_arch_hypercall_platform_op(op_desc);
74 /* FIXME: should we restore the handle? */
75 if (copy_to_user(user_op, &kern_op, sizeof(struct xen_platform_op)))
76 ret = -EFAULT;
78 xencomm_free(desc);
79 return ret;
80 }
82 static int
83 xencomm_privcmd_sysctl(privcmd_hypercall_t *hypercall)
84 {
85 xen_sysctl_t kern_op;
86 xen_sysctl_t __user *user_op;
87 struct xencomm_handle *op_desc;
88 struct xencomm_handle *desc = NULL;
89 struct xencomm_handle *desc1 = NULL;
90 int ret = 0;
92 user_op = (xen_sysctl_t __user *)hypercall->arg[0];
94 if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
95 return -EFAULT;
97 if (kern_op.interface_version != XEN_SYSCTL_INTERFACE_VERSION)
98 return -EACCES;
100 op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
102 switch (kern_op.cmd) {
103 case XEN_SYSCTL_readconsole:
104 desc = xencomm_map(
105 xen_guest_handle(kern_op.u.readconsole.buffer),
106 kern_op.u.readconsole.count);
107 if (xen_guest_handle(kern_op.u.readconsole.buffer) != NULL &&
108 kern_op.u.readconsole.count > 0 && desc == NULL)
109 return -ENOMEM;
110 set_xen_guest_handle(kern_op.u.readconsole.buffer,
111 (void *)desc);
112 break;
113 case XEN_SYSCTL_tbuf_op:
114 case XEN_SYSCTL_sched_id:
115 case XEN_SYSCTL_availheap:
116 break;
117 case XEN_SYSCTL_perfc_op:
118 {
119 struct xencomm_handle *tmp_desc;
120 xen_sysctl_t tmp_op = {
121 .cmd = XEN_SYSCTL_perfc_op,
122 .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
123 .u.perfc_op = {
124 .cmd = XEN_SYSCTL_PERFCOP_query,
125 /* .desc.p = NULL, */
126 /* .val.p = NULL, */
127 },
128 };
130 if (xen_guest_handle(kern_op.u.perfc_op.desc) == NULL) {
131 if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL)
132 return -EINVAL;
133 break;
134 }
136 /* query the buffer size for xencomm */
137 tmp_desc = xencomm_map_no_alloc(&tmp_op, sizeof(tmp_op));
138 ret = xencomm_arch_hypercall_sysctl(tmp_desc);
139 if (ret)
140 return ret;
142 desc = xencomm_map(xen_guest_handle(kern_op.u.perfc_op.desc),
143 tmp_op.u.perfc_op.nr_counters *
144 sizeof(xen_sysctl_perfc_desc_t));
145 if (xen_guest_handle(kern_op.u.perfc_op.desc) != NULL &&
146 tmp_op.u.perfc_op.nr_counters > 0 && desc == NULL)
147 return -ENOMEM;
149 set_xen_guest_handle(kern_op.u.perfc_op.desc, (void *)desc);
151 desc1 = xencomm_map(xen_guest_handle(kern_op.u.perfc_op.val),
152 tmp_op.u.perfc_op.nr_vals *
153 sizeof(xen_sysctl_perfc_val_t));
154 if (xen_guest_handle(kern_op.u.perfc_op.val) != NULL &&
155 tmp_op.u.perfc_op.nr_vals > 0 && desc1 == NULL) {
156 xencomm_free(desc);
157 return -ENOMEM;
158 }
160 set_xen_guest_handle(kern_op.u.perfc_op.val, (void *)desc1);
161 break;
162 }
163 case XEN_SYSCTL_getdomaininfolist:
164 desc = xencomm_map(
165 xen_guest_handle(kern_op.u.getdomaininfolist.buffer),
166 kern_op.u.getdomaininfolist.max_domains *
167 sizeof(xen_domctl_getdomaininfo_t));
168 if (xen_guest_handle(kern_op.u.getdomaininfolist.buffer) !=
169 NULL && kern_op.u.getdomaininfolist.max_domains > 0 &&
170 desc == NULL)
171 return -ENOMEM;
172 set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
173 (void *)desc);
174 break;
175 case XEN_SYSCTL_debug_keys:
176 desc = xencomm_map(
177 xen_guest_handle(kern_op.u.debug_keys.keys),
178 kern_op.u.debug_keys.nr_keys);
179 if (xen_guest_handle(kern_op.u.debug_keys.keys) != NULL &&
180 kern_op.u.debug_keys.nr_keys > 0 && desc == NULL)
181 return -ENOMEM;
182 set_xen_guest_handle(kern_op.u.debug_keys.keys,
183 (void *)desc);
184 break;
186 case XEN_SYSCTL_physinfo:
187 desc = xencomm_map(
188 xen_guest_handle(kern_op.u.physinfo.cpu_to_node),
189 kern_op.u.physinfo.max_cpu_id * sizeof(uint32_t));
190 if (xen_guest_handle(kern_op.u.physinfo.cpu_to_node) != NULL &&
191 kern_op.u.physinfo.max_cpu_id > 0 && desc == NULL)
192 return -ENOMEM;
194 set_xen_guest_handle(kern_op.u.physinfo.cpu_to_node,
195 (void *)desc);
196 break;
198 case XEN_SYSCTL_get_pmstat:
199 if (kern_op.u.get_pmstat.type == PMSTAT_get_pxstat) {
200 struct pm_px_stat *getpx =
201 &kern_op.u.get_pmstat.u.getpx;
202 desc = xencomm_map(
203 xen_guest_handle(getpx->trans_pt),
204 getpx->total * getpx->total *
205 sizeof(uint64_t));
206 if (xen_guest_handle(getpx->trans_pt) != NULL &&
207 getpx->total > 0 && desc == NULL)
208 return -ENOMEM;
210 set_xen_guest_handle(getpx->trans_pt, (void *)desc);
212 desc1 = xencomm_map(xen_guest_handle(getpx->pt),
213 getpx->total * sizeof(pm_px_val_t));
214 if (xen_guest_handle(getpx->pt) != NULL &&
215 getpx->total > 0 && desc1 == NULL)
216 return -ENOMEM;
218 set_xen_guest_handle(getpx->pt, (void *)desc1);
219 }
220 break;
222 default:
223 printk("%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
224 return -ENOSYS;
225 }
227 if (ret) {
228 /* error mapping the nested pointer */
229 return ret;
230 }
232 ret = xencomm_arch_hypercall_sysctl(op_desc);
234 /* FIXME: should we restore the handles? */
235 if (copy_to_user(user_op, &kern_op, sizeof(xen_sysctl_t)))
236 ret = -EFAULT;
238 xencomm_free(desc);
239 xencomm_free(desc1);
240 return ret;
241 }
243 static int
244 xencomm_privcmd_domctl(privcmd_hypercall_t *hypercall)
245 {
246 xen_domctl_t kern_op;
247 xen_domctl_t __user *user_op;
248 struct xencomm_handle *op_desc;
249 struct xencomm_handle *desc = NULL;
250 int ret = 0;
252 user_op = (xen_domctl_t __user *)hypercall->arg[0];
254 if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
255 return -EFAULT;
257 if (kern_op.interface_version != XEN_DOMCTL_INTERFACE_VERSION)
258 return -EACCES;
260 op_desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
262 switch (kern_op.cmd) {
263 case XEN_DOMCTL_createdomain:
264 case XEN_DOMCTL_destroydomain:
265 case XEN_DOMCTL_pausedomain:
266 case XEN_DOMCTL_unpausedomain:
267 case XEN_DOMCTL_resumedomain:
268 case XEN_DOMCTL_getdomaininfo:
269 break;
270 case XEN_DOMCTL_getmemlist:
271 {
272 unsigned long nr_pages = kern_op.u.getmemlist.max_pfns;
274 desc = xencomm_map(
275 xen_guest_handle(kern_op.u.getmemlist.buffer),
276 nr_pages * sizeof(unsigned long));
277 if (xen_guest_handle(kern_op.u.getmemlist.buffer) != NULL &&
278 nr_pages > 0 && desc == NULL)
279 return -ENOMEM;
280 set_xen_guest_handle(kern_op.u.getmemlist.buffer,
281 (void *)desc);
282 break;
283 }
284 case XEN_DOMCTL_getpageframeinfo:
285 break;
286 case XEN_DOMCTL_getpageframeinfo2:
287 desc = xencomm_map(
288 xen_guest_handle(kern_op.u.getpageframeinfo2.array),
289 kern_op.u.getpageframeinfo2.num);
290 if (xen_guest_handle(kern_op.u.getpageframeinfo2.array) !=
291 NULL && kern_op.u.getpageframeinfo2.num > 0 &&
292 desc == NULL)
293 return -ENOMEM;
294 set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
295 (void *)desc);
296 break;
297 case XEN_DOMCTL_shadow_op:
298 desc = xencomm_map(
299 xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap),
300 ROUND_DIV(kern_op.u.shadow_op.pages, 8));
301 if (xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap) != NULL
302 && kern_op.u.shadow_op.pages > 0 && desc == NULL)
303 return -ENOMEM;
304 set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
305 (void *)desc);
306 break;
307 case XEN_DOMCTL_max_mem:
308 break;
309 case XEN_DOMCTL_setvcpucontext:
310 case XEN_DOMCTL_getvcpucontext:
311 desc = xencomm_map(
312 xen_guest_handle(kern_op.u.vcpucontext.ctxt),
313 sizeof(vcpu_guest_context_t));
314 if (xen_guest_handle(kern_op.u.vcpucontext.ctxt) != NULL &&
315 desc == NULL)
316 return -ENOMEM;
317 set_xen_guest_handle(kern_op.u.vcpucontext.ctxt, (void *)desc);
318 break;
319 case XEN_DOMCTL_getvcpuinfo:
320 break;
321 case XEN_DOMCTL_setvcpuaffinity:
322 case XEN_DOMCTL_getvcpuaffinity:
323 desc = xencomm_map(
324 xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap),
325 ROUND_DIV(kern_op.u.vcpuaffinity.cpumap.nr_cpus, 8));
326 if (xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap) !=
327 NULL && kern_op.u.vcpuaffinity.cpumap.nr_cpus > 0 &&
328 desc == NULL)
329 return -ENOMEM;
330 set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
331 (void *)desc);
332 break;
333 case XEN_DOMCTL_gethvmcontext:
334 case XEN_DOMCTL_sethvmcontext:
335 if (kern_op.u.hvmcontext.size > 0)
336 desc = xencomm_map(
337 xen_guest_handle(kern_op.u.hvmcontext.buffer),
338 kern_op.u.hvmcontext.size);
339 if (xen_guest_handle(kern_op.u.hvmcontext.buffer) != NULL &&
340 kern_op.u.hvmcontext.size > 0 && desc == NULL)
341 return -ENOMEM;
342 set_xen_guest_handle(kern_op.u.hvmcontext.buffer, (void*)desc);
343 break;
344 case XEN_DOMCTL_get_device_group:
345 {
346 struct xen_domctl_get_device_group *get_device_group =
347 &kern_op.u.get_device_group;
348 desc = xencomm_map(
349 xen_guest_handle(get_device_group->sdev_array),
350 get_device_group->max_sdevs * sizeof(uint32_t));
351 if (xen_guest_handle(get_device_group->sdev_array) != NULL &&
352 get_device_group->max_sdevs > 0 && desc == NULL)
353 return -ENOMEM;
354 set_xen_guest_handle(kern_op.u.get_device_group.sdev_array,
355 (void*)desc);
356 break;
357 }
358 case XEN_DOMCTL_max_vcpus:
359 case XEN_DOMCTL_scheduler_op:
360 case XEN_DOMCTL_setdomainhandle:
361 case XEN_DOMCTL_setdebugging:
362 case XEN_DOMCTL_irq_permission:
363 case XEN_DOMCTL_iomem_permission:
364 case XEN_DOMCTL_ioport_permission:
365 case XEN_DOMCTL_hypercall_init:
366 case XEN_DOMCTL_arch_setup:
367 case XEN_DOMCTL_settimeoffset:
368 case XEN_DOMCTL_sendtrigger:
369 case XEN_DOMCTL_set_opt_feature:
370 case XEN_DOMCTL_assign_device:
371 case XEN_DOMCTL_subscribe:
372 case XEN_DOMCTL_test_assign_device:
373 case XEN_DOMCTL_deassign_device:
374 case XEN_DOMCTL_bind_pt_irq:
375 case XEN_DOMCTL_unbind_pt_irq:
376 case XEN_DOMCTL_memory_mapping:
377 case XEN_DOMCTL_ioport_mapping:
378 case XEN_DOMCTL_set_address_size:
379 case XEN_DOMCTL_get_address_size:
380 break;
381 case XEN_DOMCTL_pin_mem_cacheattr:
382 return -ENOSYS;
383 default:
384 printk("%s: unknown domctl cmd %d\n", __func__, kern_op.cmd);
385 return -ENOSYS;
386 }
388 if (ret) {
389 /* error mapping the nested pointer */
390 return ret;
391 }
393 ret = xencomm_arch_hypercall_domctl (op_desc);
395 /* FIXME: should we restore the handle? */
396 if (copy_to_user(user_op, &kern_op, sizeof(xen_domctl_t)))
397 ret = -EFAULT;
399 xencomm_free(desc);
400 return ret;
401 }
403 static int
404 xencomm_privcmd_xsm_op(privcmd_hypercall_t *hypercall)
405 {
406 void __user *arg = (void __user *)hypercall->arg[0];
407 xen_acmctl_t kern_arg;
408 struct xencomm_handle *op_desc;
409 struct xencomm_handle *desc = NULL;
410 int ret;
412 if (copy_from_user(&kern_arg, arg, sizeof(kern_arg)))
413 return -EFAULT;
414 if (kern_arg.interface_version != ACM_INTERFACE_VERSION)
415 return -ENOSYS;
417 switch (kern_arg.cmd) {
418 case ACMOP_getssid: {
419 op_desc = xencomm_map_no_alloc(&kern_arg, sizeof(kern_arg));
421 desc = xencomm_map(
422 xen_guest_handle(kern_arg.u.getssid.ssidbuf),
423 kern_arg.u.getssid.ssidbuf_size);
424 if (xen_guest_handle(kern_arg.u.getssid.ssidbuf) != NULL &&
425 kern_arg.u.getssid.ssidbuf_size > 0 && desc == NULL)
426 return -ENOMEM;
428 set_xen_guest_handle(kern_arg.u.getssid.ssidbuf, (void *)desc);
430 ret = xencomm_arch_hypercall_xsm_op(op_desc);
432 xencomm_free(desc);
434 if (copy_to_user(arg, &kern_arg, sizeof(kern_arg)))
435 return -EFAULT;
436 return ret;
437 }
438 default:
439 printk("%s: unknown acm_op cmd %d\n", __func__, kern_arg.cmd);
440 return -ENOSYS;
441 }
443 return ret;
444 }
446 static int
447 xencomm_privcmd_memory_reservation_op(privcmd_hypercall_t *hypercall)
448 {
449 const unsigned long cmd = hypercall->arg[0];
450 int ret = 0;
451 xen_memory_reservation_t kern_op;
452 xen_memory_reservation_t __user *user_op;
453 struct xencomm_handle *desc = NULL;
454 struct xencomm_handle *desc_op;
456 user_op = (xen_memory_reservation_t __user *)hypercall->arg[1];
457 if (copy_from_user(&kern_op, user_op,
458 sizeof(xen_memory_reservation_t)))
459 return -EFAULT;
460 desc_op = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
462 if (!xen_guest_handle(kern_op.extent_start)) {
463 ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
464 if (ret < 0)
465 return ret;
466 } else {
467 xen_ulong_t nr_done = 0;
468 xen_ulong_t nr_extents = kern_op.nr_extents;
469 void *addr = xen_guest_handle(kern_op.extent_start);
471 /*
472 * Work around.
473 * Xencomm has single page size limit caused
474 * by xencomm_alloc()/xencomm_free() so that
475 * we have to repeat the hypercall.
476 * This limitation can be removed.
477 */
478 #define MEMORYOP_XENCOMM_LIMIT \
479 (((((PAGE_SIZE - sizeof(struct xencomm_desc)) / \
480 sizeof(uint64_t)) - 2) * PAGE_SIZE) / \
481 sizeof(*xen_guest_handle(kern_op.extent_start)))
483 /*
484 * Work around.
485 * Even if the above limitation is removed,
486 * the hypercall with large number of extents
487 * may cause the soft lockup warning.
488 * In order to avoid the warning, we limit
489 * the number of extents and repeat the hypercall.
490 * The following value is determined by evaluation.
491 * Time of one hypercall should be smaller than
492 * a vcpu time slice. The time with current
493 * MEMORYOP_MAX_EXTENTS is around 5 msec.
494 * If the following limit causes some issues,
495 * we should decrease this value.
496 *
497 * Another way would be that start with small value and
498 * increase adoptively measuring hypercall time.
499 * It might be over-kill.
500 */
501 #define MEMORYOP_MAX_EXTENTS (MEMORYOP_XENCOMM_LIMIT / 512)
503 while (nr_extents > 0) {
504 xen_ulong_t nr_tmp = nr_extents;
505 if (nr_tmp > MEMORYOP_MAX_EXTENTS)
506 nr_tmp = MEMORYOP_MAX_EXTENTS;
508 kern_op.nr_extents = nr_tmp;
509 desc = xencomm_map
510 (addr + nr_done * sizeof(*xen_guest_handle(kern_op.extent_start)),
511 nr_tmp * sizeof(*xen_guest_handle(kern_op.extent_start)));
512 if (addr != NULL && nr_tmp > 0 && desc == NULL)
513 return nr_done > 0 ? nr_done : -ENOMEM;
515 set_xen_guest_handle(kern_op.extent_start,
516 (void *)desc);
518 ret = xencomm_arch_hypercall_memory_op(cmd, desc_op);
519 xencomm_free(desc);
520 if (ret < 0)
521 return nr_done > 0 ? nr_done : ret;
523 nr_done += ret;
524 nr_extents -= ret;
525 if (ret < nr_tmp)
526 break;
528 /*
529 * prevent softlock up message.
530 * give cpu to soft lockup kernel thread.
531 */
532 if (nr_extents > 0)
533 schedule();
534 }
535 ret = nr_done;
536 set_xen_guest_handle(kern_op.extent_start, addr);
537 }
539 if (copy_to_user(user_op, &kern_op, sizeof(xen_memory_reservation_t)))
540 return -EFAULT;
542 return ret;
543 }
545 static int
546 xencomm_privcmd_memory_op(privcmd_hypercall_t *hypercall)
547 {
548 const unsigned long cmd = hypercall->arg[0];
549 int ret = 0;
551 switch (cmd) {
552 case XENMEM_increase_reservation:
553 case XENMEM_decrease_reservation:
554 case XENMEM_populate_physmap:
555 return xencomm_privcmd_memory_reservation_op(hypercall);
556 case XENMEM_maximum_gpfn:
557 {
558 domid_t kern_domid;
559 domid_t __user *user_domid;
560 struct xencomm_handle *desc;
562 user_domid = (domid_t __user *)hypercall->arg[1];
563 if (copy_from_user(&kern_domid, user_domid, sizeof(domid_t)))
564 return -EFAULT;
565 desc = xencomm_map_no_alloc(&kern_domid, sizeof(kern_domid));
567 ret = xencomm_arch_hypercall_memory_op(cmd, desc);
569 return ret;
570 }
571 case XENMEM_add_to_physmap:
572 {
573 void __user *arg = (void __user *)hypercall->arg[1];
574 struct xencomm_handle *desc;
576 desc = xencomm_map(arg, sizeof(struct xen_add_to_physmap));
577 if (desc == NULL)
578 return -ENOMEM;
580 ret = xencomm_arch_hypercall_memory_op(cmd, desc);
582 xencomm_free(desc);
583 return ret;
584 }
585 default:
586 printk("%s: unknown memory op %lu\n", __func__, cmd);
587 ret = -ENOSYS;
588 }
589 return ret;
590 }
592 static int
593 xencomm_privcmd_xen_version(privcmd_hypercall_t *hypercall)
594 {
595 int cmd = hypercall->arg[0];
596 void __user *arg = (void __user *)hypercall->arg[1];
597 struct xencomm_handle *desc;
598 size_t argsize;
599 int rc;
601 switch (cmd) {
602 case XENVER_version:
603 /* do not actually pass an argument */
604 return xencomm_arch_hypercall_xen_version(cmd, 0);
605 case XENVER_extraversion:
606 argsize = sizeof(xen_extraversion_t);
607 break;
608 case XENVER_compile_info:
609 argsize = sizeof(xen_compile_info_t);
610 break;
611 case XENVER_capabilities:
612 argsize = sizeof(xen_capabilities_info_t);
613 break;
614 case XENVER_changeset:
615 argsize = sizeof(xen_changeset_info_t);
616 break;
617 case XENVER_platform_parameters:
618 argsize = sizeof(xen_platform_parameters_t);
619 break;
620 case XENVER_pagesize:
621 argsize = (arg == NULL) ? 0 : sizeof(void *);
622 break;
623 case XENVER_get_features:
624 argsize = (arg == NULL) ? 0 : sizeof(xen_feature_info_t);
625 break;
626 case XENVER_commandline:
627 argsize = sizeof(xen_commandline_t);
628 break;
630 default:
631 printk("%s: unknown version op %d\n", __func__, cmd);
632 return -ENOSYS;
633 }
635 desc = xencomm_map(arg, argsize);
636 if (arg != NULL && argsize > 0 && desc == NULL)
637 return -ENOMEM;
639 rc = xencomm_arch_hypercall_xen_version(cmd, desc);
641 xencomm_free(desc);
643 return rc;
644 }
646 static int
647 xencomm_privcmd_event_channel_op(privcmd_hypercall_t *hypercall)
648 {
649 int cmd = hypercall->arg[0];
650 struct xencomm_handle *desc;
651 unsigned int argsize;
652 int ret;
654 switch (cmd) {
655 case EVTCHNOP_alloc_unbound:
656 argsize = sizeof(evtchn_alloc_unbound_t);
657 break;
659 case EVTCHNOP_status:
660 argsize = sizeof(evtchn_status_t);
661 break;
663 default:
664 printk("%s: unknown EVTCHNOP %d\n", __func__, cmd);
665 return -EINVAL;
666 }
668 desc = xencomm_map((void *)hypercall->arg[1], argsize);
669 if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
670 return -ENOMEM;
672 ret = xencomm_arch_hypercall_event_channel_op(cmd, desc);
674 xencomm_free(desc);
675 return ret;
676 }
678 static int
679 xencomm_privcmd_hvm_op_track_dirty_vram(privcmd_hypercall_t *hypercall)
680 {
681 #if 1
682 /*
683 * At this moment HVMOP_track_dirty_vram isn't implemented
684 * on xen/ia64 so that it just returns -ENOSYS.
685 * Don't issue hypercall to get -ENOSYS.
686 * When the hypercall is implemented, enable the following codes.
687 */
688 return -ENOSYS;
689 #else
690 int cmd = hypercall->arg[0];
691 struct xen_hvm_track_dirty_vram *user_op = (void*)hypercall->arg[1];
692 struct xen_hvm_track_dirty_vram kern_op;
693 struct xencomm_handle *desc;
694 struct xencomm_handle *bitmap_desc;
695 int ret;
697 BUG_ON(cmd != HVMOP_track_dirty_vram);
698 if (copy_from_user(&kern_op, user_op, sizeof(kern_op)))
699 return -EFAULT;
700 desc = xencomm_map_no_alloc(&kern_op, sizeof(kern_op));
701 bitmap_desc = xencomm_map(xen_guest_handle(kern_op.dirty_bitmap),
702 kern_op.nr * sizeof(uint8_t));
703 if (bitmap_desc == NULL)
704 return -ENOMEM;
705 set_xen_guest_handle(kern_op.dirty_bitmap, (void*)bitmap_desc);
706 ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
707 xencomm_free(bitmap_desc);
709 return ret;
710 #endif
711 }
713 static int
714 xencomm_privcmd_hvm_op(privcmd_hypercall_t *hypercall)
715 {
716 int cmd = hypercall->arg[0];
717 struct xencomm_handle *desc;
718 unsigned int argsize;
719 int ret;
721 switch (cmd) {
722 case HVMOP_get_param:
723 case HVMOP_set_param:
724 argsize = sizeof(xen_hvm_param_t);
725 break;
726 case HVMOP_set_pci_intx_level:
727 argsize = sizeof(xen_hvm_set_pci_intx_level_t);
728 break;
729 case HVMOP_set_isa_irq_level:
730 argsize = sizeof(xen_hvm_set_isa_irq_level_t);
731 break;
732 case HVMOP_set_pci_link_route:
733 argsize = sizeof(xen_hvm_set_pci_link_route_t);
734 break;
735 case HVMOP_set_mem_type:
736 argsize = sizeof(xen_hvm_set_mem_type_t);
737 break;
739 case HVMOP_track_dirty_vram:
740 return xencomm_privcmd_hvm_op_track_dirty_vram(hypercall);
742 default:
743 printk("%s: unknown HVMOP %d\n", __func__, cmd);
744 return -EINVAL;
745 }
747 desc = xencomm_map((void *)hypercall->arg[1], argsize);
748 if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
749 return -ENOMEM;
751 ret = xencomm_arch_hypercall_hvm_op(cmd, desc);
753 xencomm_free(desc);
754 return ret;
755 }
757 static int
758 xencomm_privcmd_sched_op(privcmd_hypercall_t *hypercall)
759 {
760 int cmd = hypercall->arg[0];
761 struct xencomm_handle *desc;
762 unsigned int argsize;
763 int ret;
765 switch (cmd) {
766 case SCHEDOP_remote_shutdown:
767 argsize = sizeof(sched_remote_shutdown_t);
768 break;
769 default:
770 printk("%s: unknown SCHEDOP %d\n", __func__, cmd);
771 return -EINVAL;
772 }
774 desc = xencomm_map((void *)hypercall->arg[1], argsize);
775 if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
776 return -ENOMEM;
778 ret = xencomm_arch_hypercall_sched_op(cmd, desc);
780 xencomm_free(desc);
781 return ret;
782 }
784 static int
785 xencomm_privcmd_dom0vp_get_memmap(domid_t domid,
786 char* __user buf, unsigned long bufsize)
787 {
788 int ret;
789 struct xencomm_handle *desc;
791 desc = xencomm_map(buf, bufsize);
792 if (bufsize > 0 && desc == NULL)
793 return -ENOMEM;
795 ret = xencomm_arch_hypercall_get_memmap((domid_t)domid, desc);
797 xencomm_free(desc);
798 return ret;
799 }
801 static int
802 xencomm_privcmd_ia64_dom0vp_op(privcmd_hypercall_t *hypercall)
803 {
804 int cmd = hypercall->arg[0];
805 int ret;
807 switch (cmd) {
808 case IA64_DOM0VP_fpswa_revision: {
809 unsigned int revision;
810 unsigned int __user *revision_user =
811 (unsigned int* __user)hypercall->arg[1];
812 struct xencomm_handle *desc;
813 desc = xencomm_map(&revision, sizeof(revision));
814 if (desc == NULL)
815 return -ENOMEM;
817 ret = xencomm_arch_hypercall_fpswa_revision(desc);
818 xencomm_free(desc);
819 if (ret)
820 break;
821 if (copy_to_user(revision_user, &revision, sizeof(revision)))
822 ret = -EFAULT;
823 break;
824 }
825 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
826 case IA64_DOM0VP_expose_foreign_p2m:
827 ret = xen_foreign_p2m_expose(hypercall);
828 break;
829 #endif
830 case IA64_DOM0VP_get_memmap:
831 ret = xencomm_privcmd_dom0vp_get_memmap(
832 (domid_t)hypercall->arg[1],
833 (char* __user)hypercall->arg[2], hypercall->arg[3]);
834 break;
835 default:
836 printk("%s: unknown IA64 DOM0VP op %d\n", __func__, cmd);
837 ret = -EINVAL;
838 break;
839 }
840 return ret;
841 }
843 static int
844 xencomm_privcmd_ia64_debug_op(privcmd_hypercall_t *hypercall)
845 {
846 int cmd = hypercall->arg[0];
847 unsigned long domain = hypercall->arg[1];
848 struct xencomm_handle *desc;
849 int ret;
851 switch (cmd) {
852 case XEN_IA64_DEBUG_OP_SET_FLAGS:
853 case XEN_IA64_DEBUG_OP_GET_FLAGS:
854 break;
855 default:
856 printk("%s: unknown IA64 DEBUGOP %d\n", __func__, cmd);
857 return -EINVAL;
858 }
860 desc = xencomm_map((void *)hypercall->arg[2],
861 sizeof(xen_ia64_debug_op_t));
862 if (desc == NULL)
863 return -ENOMEM;
865 ret = xencomm_arch_hypercall_ia64_debug_op(cmd, domain, desc);
867 xencomm_free(desc);
868 return ret;
869 }
871 static int
872 xencomm_privcmd_ia64_physdev_op(privcmd_hypercall_t *hypercall)
873 {
874 int cmd = hypercall->arg[0];
875 struct xencomm_handle *desc;
876 unsigned int argsize;
877 int ret;
879 switch (cmd) {
880 case PHYSDEVOP_map_pirq:
881 argsize = sizeof(physdev_map_pirq_t);
882 break;
883 case PHYSDEVOP_unmap_pirq:
884 argsize = sizeof(physdev_unmap_pirq_t);
885 break;
886 default:
887 printk("%s: unknown PHYSDEVOP %d\n", __func__, cmd);
888 return -EINVAL;
889 }
891 desc = xencomm_map((void *)hypercall->arg[1], argsize);
892 if ((void *)hypercall->arg[1] != NULL && argsize > 0 && desc == NULL)
893 return -ENOMEM;
895 ret = xencomm_arch_hypercall_physdev_op(cmd, desc);
897 xencomm_free(desc);
898 return ret;
899 }
901 static int
902 xencomm_privcmd_tmem_op(privcmd_hypercall_t *hypercall)
903 {
904 struct xencomm_handle *desc;
905 int ret;
907 desc = xencomm_map((void *)hypercall->arg[0], sizeof(struct tmem_op));
908 if (desc == NULL)
909 return -ENOMEM;
911 ret = xencomm_arch_hypercall_tmem_op(desc);
913 xencomm_free(desc);
914 return ret;
915 }
917 int
918 privcmd_hypercall(privcmd_hypercall_t *hypercall)
919 {
920 switch (hypercall->op) {
921 case __HYPERVISOR_platform_op:
922 return xencomm_privcmd_platform_op(hypercall);
923 case __HYPERVISOR_domctl:
924 return xencomm_privcmd_domctl(hypercall);
925 case __HYPERVISOR_sysctl:
926 return xencomm_privcmd_sysctl(hypercall);
927 case __HYPERVISOR_xsm_op:
928 return xencomm_privcmd_xsm_op(hypercall);
929 case __HYPERVISOR_xen_version:
930 return xencomm_privcmd_xen_version(hypercall);
931 case __HYPERVISOR_memory_op:
932 return xencomm_privcmd_memory_op(hypercall);
933 case __HYPERVISOR_event_channel_op:
934 return xencomm_privcmd_event_channel_op(hypercall);
935 case __HYPERVISOR_hvm_op:
936 return xencomm_privcmd_hvm_op(hypercall);
937 case __HYPERVISOR_sched_op:
938 return xencomm_privcmd_sched_op(hypercall);
939 case __HYPERVISOR_ia64_dom0vp_op:
940 return xencomm_privcmd_ia64_dom0vp_op(hypercall);
941 case __HYPERVISOR_ia64_debug_op:
942 return xencomm_privcmd_ia64_debug_op(hypercall);
943 case __HYPERVISOR_physdev_op:
944 return xencomm_privcmd_ia64_physdev_op(hypercall);
945 case __HYPERVISOR_tmem_op:
946 return xencomm_privcmd_tmem_op(hypercall);
947 default:
948 printk("%s: unknown hcall (%ld)\n", __func__, hypercall->op);
949 return -ENOSYS;
950 }
951 }