Add a new callback_op hypercall through which the guest OS registers each
callback address.
This new hypercall incorporates the functionality of the existing set_callbacks
hypercall in a more flexible manner. set_callbacks is retained for compatibility.
Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
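
For illustration, the legacy registration

    HYPERVISOR_set_callbacks(
        __KERNEL_CS, (unsigned long)hypervisor_callback,
        __KERNEL_CS, (unsigned long)failsafe_callback);

maps onto the new interface as two independent registrations, one per
callback type (a minimal sketch using the definitions this patch adds in
public/callback.h and the HYPERVISOR_callback_op wrapper below):

    callback_register_t event = {
        .type    = CALLBACKTYPE_event,
        .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
    };
    callback_register_t failsafe = {
        .type    = CALLBACKTYPE_failsafe,
        .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
    };

    HYPERVISOR_callback_op(CALLBACKOP_register, &event);
    HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);

Each call returns 0 on success, -EINVAL for an unsupported callback type,
or -EFAULT if the argument cannot be copied from guest memory.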
return _hypercall2(int, nmi_op, op, arg);
}
+static inline int
+HYPERVISOR_callback_op(
+ int cmd,
+ void *arg)
+{
+ return _hypercall2(int, callback_op, cmd, arg);
+}
+
#endif /* __HYPERCALL_H__ */
/*
* use of all of the static functions.
**/
+#include <xen/interface/callback.h>
+
static char * __init machine_specific_memory_setup(void)
{
unsigned long max_pfn = xen_start_info->nr_pages;
static void __init machine_specific_arch_setup(void)
{
struct xen_platform_parameters pp;
+ callback_register_t event = {
+ .type = CALLBACKTYPE_event,
+ .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
+ };
+ callback_register_t failsafe = {
+ .type = CALLBACKTYPE_failsafe,
+ .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
+ };
struct xennmi_callback cb;
if (xen_feature(XENFEAT_auto_translated_physmap) &&
memset(empty_zero_page, 0, sizeof(empty_zero_page));
}
- HYPERVISOR_set_callbacks(
- __KERNEL_CS, (unsigned long)hypervisor_callback,
- __KERNEL_CS, (unsigned long)failsafe_callback);
+ HYPERVISOR_callback_op(CALLBACKOP_register, &event);
+ HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
cb.handler_address = (unsigned long)&nmi;
HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
return _hypercall2(int, nmi_op, op, arg);
}
+static inline int
+HYPERVISOR_callback_op(
+ int cmd,
+ void *arg)
+{
+ return _hypercall2(int, callback_op, cmd, arg);
+}
+
#endif /* __HYPERCALL_H__ */
/*
* use of all of the static functions.
**/
+#include <xen/interface/callback.h>
+
extern void hypervisor_callback(void);
extern void failsafe_callback(void);
extern void nmi(void);
static void __init machine_specific_arch_setup(void)
{
+ callback_register_t event = {
+ .type = CALLBACKTYPE_event,
+ .address = (unsigned long)hypervisor_callback,
+ };
+ callback_register_t failsafe = {
+ .type = CALLBACKTYPE_failsafe,
+ .address = (unsigned long)failsafe_callback,
+ };
+ callback_register_t syscall = {
+ .type = CALLBACKTYPE_syscall,
+ .address = (unsigned long)system_call,
+ };
#ifdef CONFIG_X86_LOCAL_APIC
struct xennmi_callback cb;
#endif
- HYPERVISOR_set_callbacks(
- (unsigned long) hypervisor_callback,
- (unsigned long) failsafe_callback,
- (unsigned long) system_call);
+ HYPERVISOR_callback_op(CALLBACKOP_register, &event);
+ HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
+ HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
#ifdef CONFIG_X86_LOCAL_APIC
cb.handler_address = (unsigned long)&nmi;
.long do_acm_op
.long do_nmi_op
.long do_arch_sched_op
+ .long do_callback_op /* 30 */
.rept NR_hypercalls-((.-hypercall_table)/4)
.long do_ni_hypercall
.endr
.byte 1 /* do_acm_op */
.byte 2 /* do_nmi_op */
.byte 2 /* do_arch_sched_op */
+ .byte 2 /* do_callback_op */ /* 30 */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
+#include <public/callback.h>
+
/* All CPUs have their own IDT to allow int80 direct trap. */
idt_entry_t *idt_tables[NR_CPUS] = { 0 };
set_int80_direct_trap(v);
}
+static long register_guest_callback(struct callback_register *reg)
+{
+ long ret = 0;
+ struct vcpu *v = current;
+
+ if ( reg->address.cs )
+ fixup_guest_code_selector(reg->address.cs);
+
+ switch ( reg->type )
+ {
+ case CALLBACKTYPE_event:
+ v->arch.guest_context.event_callback_cs = reg->address.cs;
+ v->arch.guest_context.event_callback_eip = reg->address.eip;
+ break;
+
+ case CALLBACKTYPE_failsafe:
+ v->arch.guest_context.failsafe_callback_cs = reg->address.cs;
+ v->arch.guest_context.failsafe_callback_eip = reg->address.eip;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static long unregister_guest_callback(struct callback_unregister *unreg)
+{
+ long ret;
+
+ switch ( unreg->type )
+ {
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+long do_callback_op(int cmd, GUEST_HANDLE(void) arg)
+{
+ long ret;
+
+ switch ( cmd )
+ {
+ case CALLBACKOP_register:
+ {
+ struct callback_register reg;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&reg, arg, 1) )
+ break;
+
+ ret = register_guest_callback(&reg);
+ }
+ break;
+
+ case CALLBACKOP_unregister:
+ {
+ struct callback_unregister unreg;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&unreg, arg, 1) )
+ break;
+
+ ret = unregister_guest_callback(&unreg);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
long do_set_callbacks(unsigned long event_selector,
unsigned long event_address,
unsigned long failsafe_selector,
unsigned long failsafe_address)
{
- struct vcpu *d = current;
-
- fixup_guest_code_selector(event_selector);
- fixup_guest_code_selector(failsafe_selector);
-
- d->arch.guest_context.event_callback_cs = event_selector;
- d->arch.guest_context.event_callback_eip = event_address;
- d->arch.guest_context.failsafe_callback_cs = failsafe_selector;
- d->arch.guest_context.failsafe_callback_eip = failsafe_address;
+ struct callback_register event = {
+ .type = CALLBACKTYPE_event,
+ .address = { event_selector, event_address },
+ };
+ struct callback_register failsafe = {
+ .type = CALLBACKTYPE_failsafe,
+ .address = { failsafe_selector, failsafe_address },
+ };
+
+ register_guest_callback(&event);
+ register_guest_callback(&failsafe);
return 0;
}
.quad do_acm_op
.quad do_nmi_op
.quad do_arch_sched_op
+ .quad do_callback_op /* 30 */
.rept NR_hypercalls-((.-hypercall_table)/8)
.quad do_ni_hypercall
.endr
.byte 1 /* do_acm_op */
.byte 2 /* do_nmi_op */
.byte 2 /* do_arch_sched_op */
+ .byte 2 /* do_callback_op */ /* 30 */
.rept NR_hypercalls-(.-hypercall_args_table)
.byte 0 /* do_ni_hypercall */
.endr
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
+#include <public/callback.h>
+
void show_registers(struct cpu_user_regs *regs)
{
struct cpu_user_regs fault_regs = *regs;
wrmsr(MSR_SYSCALL_MASK, EF_VM|EF_RF|EF_NT|EF_DF|EF_IE|EF_TF, 0U);
}
+static long register_guest_callback(struct callback_register *reg)
+{
+ long ret = 0;
+ struct vcpu *v = current;
+
+ switch ( reg->type )
+ {
+ case CALLBACKTYPE_event:
+ v->arch.guest_context.event_callback_eip = reg->address;
+ break;
+
+ case CALLBACKTYPE_failsafe:
+ v->arch.guest_context.failsafe_callback_eip = reg->address;
+ break;
+
+ case CALLBACKTYPE_syscall:
+ v->arch.guest_context.syscall_callback_eip = reg->address;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static long unregister_guest_callback(struct callback_unregister *unreg)
+{
+ long ret;
+
+ switch ( unreg->type )
+ {
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+long do_callback_op(int cmd, GUEST_HANDLE(void) arg)
+{
+ long ret;
+
+ switch ( cmd )
+ {
+ case CALLBACKOP_register:
+ {
+ struct callback_register reg;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&reg, arg, 1) )
+ break;
+
+ ret = register_guest_callback(&reg);
+ }
+ break;
+
+ case CALLBACKOP_unregister:
+ {
+ struct callback_unregister unreg;
+
+ ret = -EFAULT;
+ if ( copy_from_guest(&unreg, arg, 1) )
+ break;
+
+ ret = unregister_guest_callback(&unreg);
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
long do_set_callbacks(unsigned long event_address,
unsigned long failsafe_address,
unsigned long syscall_address)
{
- struct vcpu *d = current;
-
- d->arch.guest_context.event_callback_eip = event_address;
- d->arch.guest_context.failsafe_callback_eip = failsafe_address;
- d->arch.guest_context.syscall_callback_eip = syscall_address;
+ callback_register_t event = {
+ .type = CALLBACKTYPE_event,
+ .address = event_address,
+ };
+ callback_register_t failsafe = {
+ .type = CALLBACKTYPE_failsafe,
+ .address = failsafe_address,
+ };
+ callback_register_t syscall = {
+ .type = CALLBACKTYPE_syscall,
+ .address = syscall_address,
+ };
+
+ register_guest_callback(&event);
+ register_guest_callback(&failsafe);
+ register_guest_callback(&syscall);
return 0;
}
unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
} arch_vcpu_info_t;
+typedef struct {
+ unsigned long cs;
+ unsigned long eip;
+} xen_callback_t;
+
#endif /* !__ASSEMBLY__ */
/*
unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
} arch_vcpu_info_t;
+typedef unsigned long xen_callback_t;
+
#endif /* !__ASSEMBLY__ */
/*
--- /dev/null
+/******************************************************************************
+ * callback.h
+ *
+ * Register guest OS callbacks with Xen.
+ *
+ * Copyright (c) 2006, Ian Campbell
+ */
+
+#ifndef __XEN_PUBLIC_CALLBACK_H__
+#define __XEN_PUBLIC_CALLBACK_H__
+
+#include "xen.h"
+
+/*
+ * Prototype for this hypercall is:
+ * long callback_op(int cmd, void *extra_args)
+ * @cmd == CALLBACKOP_??? (callback operation).
+ * @extra_args == Operation-specific extra arguments (NULL if none).
+ */
+
+#define CALLBACKTYPE_event 0
+#define CALLBACKTYPE_failsafe 1
+#define CALLBACKTYPE_syscall 2 /* x86_64 only */
+
+/*
+ * Register a callback.
+ */
+#define CALLBACKOP_register 0
+typedef struct callback_register {
+ int type;
+ xen_callback_t address;
+} callback_register_t;
+DEFINE_GUEST_HANDLE(callback_register_t);
+
+/*
+ * Unregister a callback.
+ *
+ * Not all callbacks can be unregistered. -EINVAL will be returned if
+ * you attempt to unregister such a callback.
+ */
+#define CALLBACKOP_unregister 1
+typedef struct callback_unregister {
+ int type;
+} callback_unregister_t;
+DEFINE_GUEST_HANDLE(callback_unregister_t);
+
+#endif /* __XEN_PUBLIC_CALLBACK_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
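
As a usage note (a sketch, not itself part of the patch): on x86_64, where
xen_callback_t above is a bare address, a guest registers its syscall entry
point with

    callback_register_t syscall = {
        .type    = CALLBACKTYPE_syscall,   /* x86_64 only */
        .address = (unsigned long)system_call,
    };
    HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);

No callback type currently implements CALLBACKOP_unregister, so

    callback_unregister_t unreg = { .type = CALLBACKTYPE_event };
    HYPERVISOR_callback_op(CALLBACKOP_unregister, &unreg);

returns -EINVAL today; the operation is defined so that future callback
types can support unregistration without requiring a new hypercall.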
#define __HYPERVISOR_acm_op 27
#define __HYPERVISOR_nmi_op 28
#define __HYPERVISOR_sched_op 29
+#define __HYPERVISOR_callback_op 30
/*
* VIRTUAL INTERRUPTS