int xc_monitor_debug_exceptions(xc_interface *xch, domid_t domain_id,
                                bool enable, bool sync);
int xc_monitor_cpuid(xc_interface *xch, domid_t domain_id, bool enable);
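+/**
+ * This function enables / disables the privileged call (e.g. ARM SMC)
+ * monitoring event for the given domain.
+ */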
+int xc_monitor_privileged_call(xc_interface *xch, domid_t domain_id,
+                               bool enable);
/**
 * This function enables / disables emulation for each REP of a
 * REP-compatible instruction.
    return do_domctl(xch, &domctl);
}
+int xc_monitor_privileged_call(xc_interface *xch, domid_t domain_id,
+                               bool enable)
+{
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_monitor_op;
+    domctl.domain = domain_id;
+    domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
+                                    : XEN_DOMCTL_MONITOR_OP_DISABLE;
+    domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL;
+
+    return do_domctl(xch, &domctl);
+}
+
/*
* Local variables:
* mode: C
fprintf(stderr, "Usage: %s [-m] <domain_id> write|exec", progname);
#if defined(__i386__) || defined(__x86_64__)
fprintf(stderr, "|breakpoint|altp2m_write|altp2m_exec|debug|cpuid");
+#elif defined(__arm__) || defined(__aarch64__)
+ fprintf(stderr, "|privcall");
#endif
fprintf(stderr,
"\n"
    int required = 0;
    int breakpoint = 0;
    int shutting_down = 0;
+    int privcall = 0;
    int altp2m = 0;
    int debug = 0;
    int cpuid = 0;
    {
        cpuid = 1;
    }
+#elif defined(__arm__) || defined(__aarch64__)
+    else if ( !strcmp(argv[0], "privcall") )
+    {
+        privcall = 1;
+    }
#endif
else
{
}
}
+    if ( privcall )
+    {
+        rc = xc_monitor_privileged_call(xch, domain_id, 1);
+        if ( rc < 0 )
+        {
+            ERROR("Error %d setting privileged call trapping with vm_event\n", rc);
+            goto exit;
+        }
+    }
+
    /* Wait for access */
    for (;;)
    {
            if ( cpuid )
                rc = xc_monitor_cpuid(xch, domain_id, 0);
+            if ( privcall )
+                rc = xc_monitor_privileged_call(xch, domain_id, 0);
+
            if ( altp2m )
            {
                rc = xc_altp2m_switch_to_view( xch, domain_id, 0 );
continue;
}
break;
+            case VM_EVENT_REASON_PRIVILEGED_CALL:
+                printf("Privileged call: pc=%"PRIx64" (vcpu %d)\n",
+                       req.data.regs.arm.pc,
+                       req.vcpu_id);
+
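+                /*
+                 * Copy back the reported register state and advance the PC
+                 * over the 4-byte SMC instruction so the guest does not
+                 * re-execute the trapped call when the vCPU resumes.
+                 */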
+                rsp.data.regs.arm = req.data.regs.arm;
+                rsp.data.regs.arm.pc += 4;
+                rsp.flags |= VM_EVENT_FLAG_SET_REGISTERS;
+                break;
case VM_EVENT_REASON_SINGLESTEP:
printf("Singlestep: rip=%016"PRIx64", vcpu %d, altp2m %u\n",
req.data.regs.x86.rip,
obj-y += kernel.o
obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-y += mm.o
+obj-y += monitor.o
obj-y += p2m.o
obj-y += percpu.o
obj-y += platform.o
--- /dev/null
+++ b/xen/arch/arm/monitor.c
+/*
+ * arch/arm/monitor.c
+ *
+ * Arch-specific monitor_op domctl handler.
+ *
+ * Copyright (c) 2016 Tamas K Lengyel (tamas.lengyel@zentific.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/vm_event.h>
+#include <xen/monitor.h>
+#include <asm/monitor.h>
+#include <asm/vm_event.h>
+#include <public/vm_event.h>
+
+int arch_monitor_domctl_event(struct domain *d,
+                              struct xen_domctl_monitor_op *mop)
+{
+    struct arch_domain *ad = &d->arch;
+    bool_t requested_status = (XEN_DOMCTL_MONITOR_OP_ENABLE == mop->op);
+
+    switch ( mop->event )
+    {
+    case XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL:
+    {
+        bool_t old_status = ad->monitor.privileged_call_enabled;
+
+        if ( unlikely(old_status == requested_status) )
+            return -EEXIST;
+
+        domain_pause(d);
+        ad->monitor.privileged_call_enabled = requested_status;
+        domain_unpause(d);
+        break;
+    }
+
+    default:
+        /*
+         * Should not be reached unless arch_monitor_get_capabilities() is
+         * not properly implemented.
+         */
+        ASSERT_UNREACHABLE();
+        return -EOPNOTSUPP;
+    }
+
+    return 0;
+}
+
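+/*
+ * Forward a privileged call (SMC) to the monitor subscriber as a
+ * synchronous vm_event; the vCPU remains paused until the subscriber
+ * responds to the request.
+ */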
+int monitor_smc(void)
+{
+    vm_event_request_t req = {
+        .reason = VM_EVENT_REASON_PRIVILEGED_CALL
+    };
+
+    return monitor_traps(current, 1, &req);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
#include <asm/mmio.h>
#include <asm/cpufeature.h>
#include <asm/flushtlb.h>
+#include <asm/monitor.h>
#include "decode.h"
#include "vtimer.h"
inject_dabt_exception(regs, info.gva, hsr.len);
}
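+/*
+ * SMCs raised by a guest are forwarded to the monitor subscriber when
+ * privileged call monitoring is enabled for the domain. If the event
+ * cannot be delivered, an undefined instruction exception is injected
+ * instead, preserving the previous behaviour.
+ */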
+static void do_trap_smc(struct cpu_user_regs *regs, const union hsr hsr)
+{
+    int rc = 0;
+
+    if ( current->domain->arch.monitor.privileged_call_enabled )
+        rc = monitor_smc();
+
+    if ( rc != 1 )
+        inject_undef_exception(regs, hsr);
+}
+
static void enter_hypervisor_head(struct cpu_user_regs *regs)
{
if ( guest_mode(regs) )
*/
GUEST_BUG_ON(!psr_mode_is_32bit(regs->cpsr));
perfc_incr(trap_smc32);
-        inject_undef32_exception(regs);
+        do_trap_smc(regs, hsr);
break;
case HSR_EC_HVC32:
GUEST_BUG_ON(!psr_mode_is_32bit(regs->cpsr));
*/
GUEST_BUG_ON(psr_mode_is_32bit(regs->cpsr));
perfc_incr(trap_smc64);
-        inject_undef64_exception(regs, hsr.len);
+        do_trap_smc(regs, hsr);
break;
case HSR_EC_SYSREG:
GUEST_BUG_ON(psr_mode_is_32bit(regs->cpsr));
    paddr_t efi_acpi_gpa;
    paddr_t efi_acpi_len;
#endif
+
+    /* Monitor options */
+    struct {
+        uint8_t privileged_call_enabled : 1;
+    } monitor;
} __cacheline_aligned;
struct arch_vcpu
return -EOPNOTSUPP;
}
-static inline
int arch_monitor_domctl_event(struct domain *d,
-                              struct xen_domctl_monitor_op *mop)
-{
-    /*
-     * No arch-specific monitor vm-events on ARM.
-     *
-     * Should not be reached unless arch_monitor_get_capabilities() is not
-     * properly implemented.
-     */
-    ASSERT_UNREACHABLE();
-    return -EOPNOTSUPP;
-}
+                              struct xen_domctl_monitor_op *mop);
static inline
int arch_monitor_init_domain(struct domain *d)
{
    uint32_t capabilities = 0;
-    capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST);
+    capabilities = (1U << XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST |
+                    1U << XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL);
    return capabilities;
}
+int monitor_smc(void);
+
#endif /* __ASM_ARM_MONITOR_H__ */
#define XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST 4
#define XEN_DOMCTL_MONITOR_EVENT_DEBUG_EXCEPTION 5
#define XEN_DOMCTL_MONITOR_EVENT_CPUID 6
+#define XEN_DOMCTL_MONITOR_EVENT_PRIVILEGED_CALL 7
struct xen_domctl_monitor_op {
uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
#define VM_EVENT_REASON_DEBUG_EXCEPTION 9
/* CPUID executed */
#define VM_EVENT_REASON_CPUID 10
+/*
+ * Privileged call executed (e.g. SMC).
+ * Note: event may be generated even if SMC condition check fails on some CPUs.
+ *       As this behavior is CPU-specific, users are advised to not rely on it.
+ *       These kinds of events will be filtered out in future versions.
+ */
+#define VM_EVENT_REASON_PRIVILEGED_CALL 11
/* Supported values for the vm_event_write_ctrlreg index. */
#define VM_EVENT_X86_CR0 0