Rename the sched_op hypercall to sched_op_compat, and the sched_op_new
hypercall to sched_op, after Christian's interface cleanup.
Signed-off-by: Keir Fraser <keir@xensource.com>
#include <asm/dom_fw.h>
#include <xen/domain.h>
-extern long do_sched_op(int cmd, unsigned long arg);
+extern long do_sched_op_compat(int cmd, unsigned long arg);
extern unsigned long domain_mpa_to_imva(struct domain *,unsigned long mpaddr);
void hyper_not_support(void)
}
-void hyper_sched_op(void)
+void hyper_sched_op_compat(void)
{
VCPU *vcpu=current;
u64 r32,r33,ret;
vcpu_get_gr_nat(vcpu,16,&r32);
vcpu_get_gr_nat(vcpu,17,&r33);
- ret=do_sched_op(r32,r33);
+ ret=do_sched_op_compat(r32,r33);
vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
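Note that the VMX dispatch table below keeps routing slot 6 to this compat
wrapper only: the two-register convention used here maps directly onto
do_sched_op_compat(int, unsigned long), whereas the new interface takes a
guest handle. An annotated restatement of the wrapper body, with the
calling convention spelled out in comments (the code is the patch's own;
only the comments are added for orientation):

    /* ia64 VMX hypercall wrapper convention:
     *   - guest r16/r17 carry the two hypercall arguments,
     *   - the result goes back to the guest in r8,
     *   - vmx_vcpu_increment_iip() advances the guest IP past the
     *     hypercall instruction so it is not re-executed on resume. */
    vcpu_get_gr_nat(vcpu, 16, &r32);     /* cmd (first argument)   */
    vcpu_get_gr_nat(vcpu, 17, &r33);     /* arg (second argument)  */
    ret = do_sched_op_compat(r32, r33);  /* common scheduler entry */
    vcpu_set_gr(vcpu, 8, ret, 0);        /* return value in r8     */
    vmx_vcpu_increment_iip(vcpu);        /* step past the hypercall */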
data8 hyper_not_support //hyper_stack_switch
data8 hyper_not_support //hyper_set_callbacks
data8 hyper_not_support //hyper_fpu_taskswitch /* 5 */
- data8 hyper_sched_op
+ data8 hyper_sched_op_compat
data8 hyper_dom0_op
data8 hyper_not_support //hyper_set_debugreg
data8 hyper_not_support //hyper_get_debugreg
do {
if (!test_bit(port,
&d->shared_info->evtchn_pending[0]))
- do_sched_op(SCHEDOP_block, 0);
+ do_sched_op_compat(SCHEDOP_block, 0);
/* Unblocked when some event is coming. Clear pending indication
* immediately if deciding to go for io assist
(hypercall_t)do_ni_hypercall, /* do_stack_switch */
(hypercall_t)do_ni_hypercall, /* do_set_callbacks */
(hypercall_t)do_ni_hypercall, /* do_fpu_taskswitch */ /* 5 */
- (hypercall_t)do_ni_hypercall, /* do_sched_op */
+ (hypercall_t)do_ni_hypercall, /* do_sched_op_compat */
(hypercall_t)do_dom0_op,
(hypercall_t)do_ni_hypercall, /* do_set_debugreg */
(hypercall_t)do_ni_hypercall, /* do_get_debugreg */
}
else {
pal_halt_light_count++;
- do_sched_op(SCHEDOP_yield, 0);
+ do_sched_op_compat(SCHEDOP_yield, 0);
}
regs->r8 = 0;
regs->r9 = 0;
if ( !test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
break;
- do_sched_op(SCHEDOP_block, 0);
+ do_sched_op_compat(SCHEDOP_block, 0);
}
/*
if ( test_bit(port, &d->shared_info->evtchn_pending[0]) )
break;
- do_sched_op(SCHEDOP_block, 0);
+ do_sched_op_compat(SCHEDOP_block, 0);
}
/* Reflect pending event in selector and master flags. */
addl $16,%esp
ret
-do_arch_sched_op:
+do_arch_sched_op_compat:
# Ensure we return success even if we return via schedule_tail()
xorl %eax,%eax
GET_GUEST_REGS(%ecx)
movl %eax,UREGS_eax(%ecx)
- jmp do_sched_op
+ jmp do_sched_op_compat
-do_arch_sched_op_new:
+do_arch_sched_op:
# Ensure we return success even if we return via schedule_tail()
xorl %eax,%eax
GET_GUEST_REGS(%ecx)
movl %eax,UREGS_eax(%ecx)
- jmp do_sched_op_new
+ jmp do_sched_op
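The reason for zeroing %eax into the saved frame deserves spelling out:
SCHEDOP_block and friends may deschedule the vcpu inside do_sched_op*(),
and when the vcpu runs again it resumes the guest via schedule_tail(),
restoring the saved register frame without ever travelling back through
this return path, so the success code must already be in place. A minimal
C sketch of the stub above (illustrative only; the real code stays in
assembly, and guest_cpu_user_regs() is assumed to be the usual Xen
accessor for the saved guest frame):

    /* Hypothetical C equivalent of the do_arch_sched_op_compat stub. */
    static long do_arch_sched_op_compat(int cmd, unsigned long arg)
    {
        struct cpu_user_regs *regs = guest_cpu_user_regs();

        /* Pre-store "success" in the saved frame: if the call below
         * blocks, the vcpu resumes via schedule_tail() and this frame
         * is what the guest sees. */
        regs->eax = 0;

        return do_sched_op_compat(cmd, arg);
    }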
.data
.long do_stack_switch
.long do_set_callbacks
.long do_fpu_taskswitch /* 5 */
- .long do_arch_sched_op
+ .long do_arch_sched_op_compat
.long do_dom0_op
.long do_set_debugreg
.long do_get_debugreg
.long do_mmuext_op
.long do_acm_op
.long do_nmi_op
- .long do_arch_sched_op_new
+ .long do_arch_sched_op
.rept NR_hypercalls-((.-hypercall_table)/4)
.long do_ni_hypercall
.endr
.byte 2 /* do_stack_switch */
.byte 4 /* do_set_callbacks */
.byte 1 /* do_fpu_taskswitch */ /* 5 */
- .byte 2 /* do_arch_sched_op */
+ .byte 2 /* do_arch_sched_op_compat */
.byte 1 /* do_dom0_op */
.byte 2 /* do_set_debugreg */
.byte 1 /* do_get_debugreg */
.byte 4 /* do_mmuext_op */
.byte 1 /* do_acm_op */
.byte 2 /* do_nmi_op */
- .byte 2 /* do_arch_sched_op_new */
+ .byte 2 /* do_arch_sched_op */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
call do_nmi
jmp ret_from_intr
-do_arch_sched_op:
+do_arch_sched_op_compat:
# Ensure we return success even if we return via schedule_tail()
xorl %eax,%eax
GET_GUEST_REGS(%r10)
movq %rax,UREGS_rax(%r10)
- jmp do_sched_op
+ jmp do_sched_op_compat
-do_arch_sched_op_new:
+do_arch_sched_op:
# Ensure we return success even if we return via schedule_tail()
xorl %eax,%eax
GET_GUEST_REGS(%r10)
movq %rax,UREGS_rax(%r10)
- jmp do_sched_op_new
+ jmp do_sched_op
.data
.quad do_stack_switch
.quad do_set_callbacks
.quad do_fpu_taskswitch /* 5 */
- .quad do_arch_sched_op
+ .quad do_arch_sched_op_compat
.quad do_dom0_op
.quad do_set_debugreg
.quad do_get_debugreg
.quad do_mmuext_op
.quad do_acm_op
.quad do_nmi_op
- .quad do_arch_sched_op_new
+ .quad do_arch_sched_op
.rept NR_hypercalls-((.-hypercall_table)/8)
.quad do_ni_hypercall
.endr
.byte 2 /* do_stack_switch */
.byte 3 /* do_set_callbacks */
.byte 1 /* do_fpu_taskswitch */ /* 5 */
- .byte 2 /* do_arch_sched_op */
+ .byte 2 /* do_arch_sched_op_compat */
.byte 1 /* do_dom0_op */
.byte 2 /* do_set_debugreg */
.byte 1 /* do_get_debugreg */
.byte 4 /* do_mmuext_op */
.byte 1 /* do_acm_op */
.byte 2 /* do_nmi_op */
- .byte 2 /* do_arch_sched_op_new */
+ .byte 2 /* do_arch_sched_op */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
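For readers unfamiliar with the two tables: hypercall_table maps a
hypercall number to its handler, with do_ni_hypercall padding every
unimplemented slot up to NR_hypercalls, while hypercall_args_table records
how many argument registers each hypercall consumes (debug builds can use
this to deliberately clobber the unused ones). After this patch, slot 6
dispatches the legacy do_arch_sched_op_compat, and the new guest-handle
interface sits in the later do_arch_sched_op slot. A hedged C model of the
dispatch, with illustrative names (the real lookup is done in the assembly
entry path):

    typedef long (*hypercall_fn_t)(unsigned long, unsigned long,
                                   unsigned long, unsigned long,
                                   unsigned long);

    extern const hypercall_fn_t hypercall_table[NR_hypercalls];
    extern const unsigned char  hypercall_args_table[NR_hypercalls];

    static long dispatch_hypercall(unsigned int nr, unsigned long a[5])
    {
        if (nr >= NR_hypercalls)
            return -ENOSYS;  /* same effect as the do_ni_hypercall filler */

        /* hypercall_args_table[nr] says how many of a[0..4] are live;
         * a debug build can poison the rest to catch guest bugs. */
        return hypercall_table[nr](a[0], a[1], a[2], a[3], a[4]);
    }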
return 0;
}
-long do_sched_op(int cmd, unsigned long arg)
+long do_sched_op_compat(int cmd, unsigned long arg)
{
long ret = 0;
return ret;
}
-long do_sched_op_new(int cmd, GUEST_HANDLE(void) arg)
+long do_sched_op(int cmd, GUEST_HANDLE(void) arg)
{
long ret = 0;
/*
* The prototype for this hypercall is:
- * long sched_op_new(int cmd, void *arg)
+ * long sched_op(int cmd, void *arg)
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == Operation-specific extra argument(s), as described below.
*
- * **NOTE**:
- * Versions of Xen prior to 3.0.2 provide only the following legacy version
+ * Versions of Xen prior to 3.0.2 provided only the following legacy version
* of this hypercall, supporting only the commands yield, block and shutdown:
* long sched_op(int cmd, unsigned long arg)
* @cmd == SCHEDOP_??? (scheduler operation).
* @arg == 0 (SCHEDOP_yield and SCHEDOP_block)
* == SHUTDOWN_* code (SCHEDOP_shutdown)
+ * This legacy version is available to new guests as sched_op_compat().
*/
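To make the renamed pair concrete from a guest's point of view, here is a
hedged sketch assuming Linux-style hypercall stubs (_hypercall2()) and the
usual public headers; everything outside this patch is illustrative, not
part of the change:

    #include <xen/interface/xen.h>   /* __HYPERVISOR_sched_op{,_compat} */
    #include <xen/interface/sched.h> /* SCHEDOP_*, struct sched_shutdown */

    static long shutdown_via_sched_op(void)
    {
        /* New interface: @arg is a pointer to an operation-specific
         * structure, here carrying the shutdown reason. */
        struct sched_shutdown shutdown = { .reason = SHUTDOWN_poweroff };
        long rc = _hypercall2(long, sched_op, SCHEDOP_shutdown, &shutdown);

        /* Legacy interface, still reachable as sched_op_compat: @arg is
         * the SHUTDOWN_* code itself, passed as an unsigned long. */
        if (rc == -ENOSYS)
            rc = _hypercall2(long, sched_op_compat, SCHEDOP_shutdown,
                             SHUTDOWN_poweroff);
        return rc;
    }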
/*
void);
extern long
-do_sched_op(
+do_sched_op_compat(
int cmd,
unsigned long arg);
+extern long
+do_sched_op(
+ int cmd,
+ GUEST_HANDLE(void) arg);
+
extern long
do_dom0_op(
GUEST_HANDLE(dom0_op_t) u_dom0_op);