int HYPERVISOR_console_io(int cmd, int count, char *str)
{
- void *desc;
+ struct xencomm_handle *desc;
int rc;
desc = xencomm_map_no_alloc(str, count);
{
int rc;
- void *desc = xencomm_map_no_alloc(op, sizeof(evtchn_op_t));
+ struct xencomm_handle *desc =
+ xencomm_map_no_alloc(op, sizeof(evtchn_op_t));
if (desc == NULL)
return -EINVAL;
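Every wrapper converted in this patch follows the same shape: wrap the in-kernel argument in a xencomm descriptor, bail out with -EINVAL if the mapping returned NULL, and hand the resulting handle, which is really a guest-physical address, to the hypervisor. A minimal user-space sketch of that control flow, with fake_map_no_alloc() and fake_hcall() as invented stand-ins for the real xencomm and hypercall layers:

#include <stdio.h>
#include <string.h>

struct xencomm_handle;              /* opaque; in the kernel this is a guest-physical address */

/* Stand-in for xencomm_map_no_alloc(): identity mapping, NULL on "failure". */
static struct xencomm_handle *fake_map_no_alloc(void *ptr, unsigned long bytes)
{
	return bytes ? (struct xencomm_handle *)ptr : NULL;
}

/* Stand-in for the low-level hypercall that consumes the handle. */
static int fake_hcall(struct xencomm_handle *desc, int count)
{
	printf("hypercall gets handle %p for %d bytes\n", (void *)desc, count);
	return count;
}

/* Same control flow as HYPERVISOR_console_io() above, simplified. */
static int demo_console_io(int count, char *str)
{
	struct xencomm_handle *desc = fake_map_no_alloc(str, count);

	if (desc == NULL)
		return -22;		/* -EINVAL */

	return fake_hcall(desc, count);
}

int main(void)
{
	char msg[] = "hello";
	return demo_console_io((int)strlen(msg), msg) > 0 ? 0 : 1;
}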
int HYPERVISOR_xen_version(int cmd, void *arg)
{
- void *desc;
+ struct xencomm_handle *desc;
const unsigned long hcall = __HYPERVISOR_xen_version;
int argsize;
int rc;
int HYPERVISOR_physdev_op(int cmd, void *op)
{
- void *desc = xencomm_map_no_alloc(op, sizeof(physdev_op_t));
+ struct xencomm_handle *desc =
+ xencomm_map_no_alloc(op, sizeof(physdev_op_t));
int rc;
if (desc == NULL)
{
int argsize = 0;
int rc = -EINVAL;
- void *desc;
- evtchn_port_t *ports = NULL;
+ struct xencomm_handle *desc;
+ struct xencomm_handle *ports = NULL;
switch (cmd) {
case SCHEDOP_yield:
if (ports == NULL)
return -ENOMEM;
- set_xen_guest_handle(sched_poll.ports, ports);
+ set_xen_guest_handle(sched_poll.ports, (evtchn_port_t *)ports);
memcpy(arg, &sched_poll, sizeof(sched_poll));
}
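In the SCHEDOP_poll case the argument struct itself carries a guest handle (sched_poll.ports), so two wrappings happen: the ports array is mapped first and its handle is stored into the struct, then the struct as a whole is mapped for the hypercall. A simplified, runnable user-space sketch of that two-level wrapping; demo_map(), sched_poll_demo and the identity mapping are stand-ins, not the kernel API:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t evtchn_port_t;

struct xencomm_handle;			/* opaque pseudo-physical address */

struct sched_poll_demo {
	evtchn_port_t *ports;		/* guest handle embedded in the argument */
	unsigned int nr_ports;
};

/* Stand-in for xencomm_map_no_alloc(): identity mapping for the demo. */
static struct xencomm_handle *demo_map(void *ptr, unsigned long bytes)
{
	return bytes ? (struct xencomm_handle *)ptr : NULL;
}

int main(void)
{
	evtchn_port_t port_buf[2] = { 3, 5 };
	struct sched_poll_demo poll = { .nr_ports = 2 };

	/* First wrap the ports array and store its handle in the struct... */
	struct xencomm_handle *ports = demo_map(port_buf, sizeof(port_buf));
	poll.ports = (evtchn_port_t *)ports;

	/* ...then wrap the struct itself before issuing the hypercall. */
	struct xencomm_handle *desc = demo_map(&poll, sizeof(poll));

	printf("ports handle %p inside struct handle %p\n",
	       (void *)poll.ports, (void *)desc);
	return 0;
}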
struct sched_shutdown sched_shutdown = {
.reason = SHUTDOWN_suspend,
};
- void *desc;
+ struct xencomm_handle *desc;
desc = xencomm_map_no_alloc(&sched_shutdown, sizeof(struct sched_shutdown));
int HYPERVISOR_kexec_op(unsigned long op, void *args)
{
unsigned long argsize;
- void *desc;
+ struct xencomm_handle *desc;
switch (op) {
case KEXEC_CMD_kexec_get_range:
{
xen_domctl_t kern_op;
xen_domctl_t __user *user_op = (xen_domctl_t __user *)hypercall->arg[0];
- void *op_desc;
- void *desc = NULL;
+ struct xencomm_handle *op_desc;
+ struct xencomm_handle *desc = NULL;
int ret = 0;
if (copy_from_user(&kern_op, user_op, sizeof(xen_domctl_t)))
ret = -ENOMEM;
set_xen_guest_handle(kern_op.u.getmemlist.buffer,
- desc);
+ (void *)desc);
break;
case XEN_DOMCTL_getpageframeinfo:
break;
ret = -ENOMEM;
set_xen_guest_handle(kern_op.u.getpageframeinfo2.array,
- desc);
+ (void *)desc);
break;
case XEN_DOMCTL_shadow_op:
ret = -ENOMEM;
set_xen_guest_handle(kern_op.u.shadow_op.dirty_bitmap,
- desc);
+ (void *)desc);
}
break;
case XEN_DOMCTL_max_mem:
ret = -ENOMEM;
set_xen_guest_handle(kern_op.u.vcpucontext.ctxt,
- desc);
+ (void *)desc);
break;
case XEN_DOMCTL_getvcpuinfo:
break;
ret = -ENOMEM;
set_xen_guest_handle(kern_op.u.vcpuaffinity.cpumap.bitmap,
- desc);
+ (void *)desc);
break;
case XEN_DOMCTL_max_vcpus:
case XEN_DOMCTL_scheduler_op:
{
xen_sysctl_t kern_op;
xen_sysctl_t __user *user_op = (xen_sysctl_t __user *)hypercall->arg[0];
- struct xencomm_desc *op_desc;
- void *desc = NULL;
+ struct xencomm_handle *op_desc;
+ struct xencomm_handle *desc = NULL;
int ret = 0;
if (copy_from_user(&kern_op, user_op, sizeof(xen_sysctl_t)))
ret = -ENOMEM;
set_xen_guest_handle(kern_op.u.readconsole.buffer,
- desc);
+ (void *)desc);
break;
case XEN_SYSCTL_tbuf_op:
case XEN_SYSCTL_physinfo:
ret = -ENOMEM;
set_xen_guest_handle(kern_op.u.getdomaininfolist.buffer,
- desc);
+ (void *)desc);
break;
default:
printk(KERN_ERR "%s: unknown sysctl cmd %d\n", __func__, kern_op.cmd);
xen_platform_op_t kern_op;
xen_platform_op_t __user *user_op =
(xen_platform_op_t __user *)hypercall->arg[0];
- void *op_desc;
- void *desc = NULL;
+ struct xencomm_handle *op_desc;
+ struct xencomm_handle *desc = NULL;
int ret = 0;
if (copy_from_user(&kern_op, user_op, sizeof(xen_platform_op_t)))
int HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
int ret;
- void *op_desc;
+ struct xencomm_handle *op_desc;
xen_memory_reservation_t *mop;
case XENMEM_increase_reservation:
case XENMEM_decrease_reservation:
case XENMEM_populate_physmap: {
- void *desc = NULL;
+ struct xencomm_handle *desc = NULL;
if (xen_guest_handle(mop->extent_start)) {
desc = xencomm_map(
}
set_xen_guest_handle(mop->extent_start,
- desc);
+ (void *)desc);
}
ret = plpar_hcall_norets(XEN_MARK(__HYPERVISOR_memory_op),
static int xenppc_privcmd_event_channel_op(privcmd_hypercall_t *hypercall)
{
- struct xencomm_desc *desc;
+ struct xencomm_handle *desc;
unsigned int argsize;
int ret;
{
int argsize;
const unsigned long hcall = __HYPERVISOR_vcpu_op;
- void *desc;
+ struct xencomm_handle *desc;
int rc;
switch (cmd) {
return desc;
}
-void xencomm_free(void *desc)
+void xencomm_free(struct xencomm_handle *desc)
{
if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG))
free_page((unsigned long)__va(desc));
rc = xencomm_init(desc, buffer, bytes);
if (rc) {
printk("%s failure: %d\n", "xencomm_init", rc);
- xencomm_free((void *)__pa(desc));
+ xencomm_free((struct xencomm_handle *)__pa(desc));
return rc;
}
return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
}
-static void *xencomm_create_inline(void *ptr)
+static struct xencomm_handle *xencomm_create_inline(void *ptr)
{
unsigned long paddr;
paddr = (unsigned long)xencomm_pa(ptr);
BUG_ON(paddr & XENCOMM_INLINE_FLAG);
- return (void *)(paddr | XENCOMM_INLINE_FLAG);
+ return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
}
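Both xencomm_free() and xencomm_create_inline() treat the handle value as a physical address with one reserved bit, XENCOMM_INLINE_FLAG: a flagged handle points directly at the data, while an unflagged one points at an allocated xencomm_desc that must be freed. A small runnable illustration of that tagging idea; DEMO_INLINE_FLAG and the helper names are invented for the demo, and the real flag value comes from the Xen interface headers:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Demo value only; the real XENCOMM_INLINE_FLAG is defined by the Xen headers. */
#define DEMO_INLINE_FLAG	(UINT64_C(1) << 63)

/* Tag a (pseudo-)physical address as an "inline" handle. */
static uint64_t make_inline_handle(uint64_t paddr)
{
	assert(!(paddr & DEMO_INLINE_FLAG));	/* mirrors the BUG_ON() above */
	return paddr | DEMO_INLINE_FLAG;
}

/* An inline handle points straight at the data; only unflagged handles
 * refer to a separately allocated descriptor (see xencomm_free() above). */
static int handle_is_inline(uint64_t handle)
{
	return (handle & DEMO_INLINE_FLAG) != 0;
}

int main(void)
{
	uint64_t paddr = 0x12345000;
	uint64_t handle = make_inline_handle(paddr);

	printf("handle %#llx inline=%d address %#llx\n",
	       (unsigned long long)handle, handle_is_inline(handle),
	       (unsigned long long)(handle & ~DEMO_INLINE_FLAG));
	return 0;
}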
/* "mini" routine, for stack-based communications: */
return rc;
}
-void *xencomm_map(void *ptr, unsigned long bytes)
+struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
{
int rc;
struct xencomm_desc *desc;
return xencomm_pa(desc);
}
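The difference between the two mapping paths is only who owns the descriptor storage: xencomm_map() allocates it, while __xencomm_map_no_alloc() fills in storage supplied by the caller (the stack-based struct xencomm_mini mentioned in the comment above), which is what lets hypercall wrappers avoid allocation. A hedged sketch of that split, using a simplified demo_desc in place of the kernel's real xencomm_desc/xencomm_mini layout:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Simplified descriptor: the real struct xencomm_desc records the physical
 * addresses of the pages backing the buffer. */
struct demo_desc {
	uint32_t nr_addrs;
	uint64_t address[4];
};

/* Allocating variant: descriptor comes from the heap (kernel: a free page). */
static struct demo_desc *demo_map(void *ptr, unsigned long bytes)
{
	struct demo_desc *desc = calloc(1, sizeof(*desc));

	if (!desc)
		return NULL;
	desc->nr_addrs = 1;
	desc->address[0] = (uintptr_t)ptr;	/* the kernel stores __pa(ptr) */
	(void)bytes;
	return desc;
}

/* No-alloc variant: the caller supplies the descriptor, e.g. on the stack. */
static struct demo_desc *demo_map_no_alloc(void *ptr, unsigned long bytes,
					   struct demo_desc *desc)
{
	desc->nr_addrs = 1;
	desc->address[0] = (uintptr_t)ptr;
	(void)bytes;
	return desc;
}

int main(void)
{
	char buf[32];
	struct demo_desc on_stack;

	struct demo_desc *a = demo_map(buf, sizeof(buf));
	struct demo_desc *b = demo_map_no_alloc(buf, sizeof(buf), &on_stack);

	printf("heap desc %p, stack desc %p\n", (void *)a, (void *)b);
	free(a);
	return 0;
}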
-void *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
+struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
struct xencomm_mini *xc_desc)
{
int rc;