break;
case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
printf("Breakpoint: rip=%016"PRIx64", gfn=%"PRIx64" (vcpu %d)\n",
- req.regs.x86.rip,
+ req.data.regs.x86.rip,
req.u.software_breakpoint.gfn,
req.vcpu_id);
void vcpu_destroy(struct vcpu *v)
{
+ xfree(v->arch.vm_event.emul_read_data);
+ v->arch.vm_event.emul_read_data = NULL;
+
if ( is_pv_32bit_vcpu(v) )
{
free_compat_arg_xlat(v);
return X86EMUL_OKAY;
}
+static int set_context_data(void *buffer, unsigned int size)
+{
+ struct vcpu *curr = current;
+
+ if ( curr->arch.vm_event.emul_read_data )
+ {
+ unsigned int safe_size =
+ min(size, curr->arch.vm_event.emul_read_data->size);
+
+ memcpy(buffer, curr->arch.vm_event.emul_read_data->data, safe_size);
+ memset(buffer + safe_size, 0, size - safe_size);
+ return X86EMUL_OKAY;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
+}
+
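The copy-and-pad logic above is the core of the feature: an emulated read returns exactly what the monitor supplied, and any remainder is zero-filled so stale bytes never reach the guest. The behaviour can be modelled in isolation (a standalone sketch, not Xen code; fill_read_buffer is a made-up name):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mirrors set_context_data(): copy what was supplied, zero the rest. */
    static void fill_read_buffer(uint8_t *buffer, unsigned int size,
                                 const uint8_t *supplied,
                                 unsigned int supplied_size)
    {
        unsigned int safe_size = size < supplied_size ? size : supplied_size;

        memcpy(buffer, supplied, safe_size);
        memset(buffer + safe_size, 0, size - safe_size);
    }

    int main(void)
    {
        const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
        uint8_t buf[8];
        unsigned int i;

        fill_read_buffer(buf, sizeof(buf), payload, sizeof(payload));

        for ( i = 0; i < sizeof(buf); i++ )
            printf("%02x ", buf[i]);   /* de ad be ef 00 00 00 00 */
        printf("\n");

        return 0;
    }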
static const struct hvm_io_ops null_ops = {
.read = null_read,
.write = null_write
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
+ struct hvm_emulate_ctxt *hvmemul_ctxt =
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
+ if ( unlikely(hvmemul_ctxt->set_context) )
+ return set_context_data(p_data, bytes);
+
return __hvmemul_read(
seg, offset, p_data, bytes, hvm_access_read,
container_of(ctxt, struct hvm_emulate_ctxt, ctxt));
unsigned int bytes,
struct x86_emulate_ctxt *ctxt)
{
+ struct hvm_emulate_ctxt *hvmemul_ctxt =
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
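+ /*
+ * With set_context active, the bytes supplied by the monitor replace the
+ * guest's "new" operand; hvmemul_write() below then stores them.
+ */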
+ if ( unlikely(hvmemul_ctxt->set_context) )
+ {
+ int rc = set_context_data(p_new, bytes);
+
+ if ( rc != X86EMUL_OKAY )
+ return rc;
+ }
+
/* Fix this in case the guest is really relying on r-m-w atomicity. */
return hvmemul_write(seg, offset, p_new, bytes, ctxt);
}
!!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
}
+static int hvmemul_rep_outs_set_context(
+ enum x86_segment src_seg,
+ unsigned long src_offset,
+ uint16_t dst_port,
+ unsigned int bytes_per_rep,
+ unsigned long *reps,
+ struct x86_emulate_ctxt *ctxt)
+{
+ unsigned int bytes = *reps * bytes_per_rep;
+ char *buf;
+ int rc;
+
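+ /*
+ * Bounce buffer: the whole rep-OUTS collapses into a single buffered
+ * port write, filled from monitor-supplied data rather than guest memory.
+ */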
+ buf = xmalloc_array(char, bytes);
+
+ if ( buf == NULL )
+ return X86EMUL_UNHANDLEABLE;
+
+ rc = set_context_data(buf, bytes);
+
+ if ( rc == X86EMUL_OKAY )
+ rc = hvmemul_do_pio_buffer(dst_port, bytes, IOREQ_WRITE, buf);
+
+ xfree(buf);
+
+ return rc;
+}
+
static int hvmemul_rep_outs(
enum x86_segment src_seg,
unsigned long src_offset,
p2m_type_t p2mt;
int rc;
+ if ( unlikely(hvmemul_ctxt->set_context) )
+ return hvmemul_rep_outs_set_context(src_seg, src_offset, dst_port,
+ bytes_per_rep, reps, ctxt);
+
rc = hvmemul_virtual_to_linear(
src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
hvmemul_ctxt, &addr);
if ( buf == NULL )
return X86EMUL_UNHANDLEABLE;
- /*
- * We do a modicum of checking here, just for paranoia's sake and to
- * definitely avoid copying an unitialised buffer into guest address space.
- */
- rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
+ if ( unlikely(hvmemul_ctxt->set_context) )
+ {
+ rc = set_context_data(buf, bytes);
+
+ if ( rc != X86EMUL_OKAY )
+ {
+ xfree(buf);
+ return rc;
+ }
+
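+ /*
+ * Fold emulator success into the HVMCOPY status that the common
+ * tail below expects before copying to the destination gpa.
+ */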
+ rc = HVMCOPY_okay;
+ }
+ else
+ /*
+ * We do a modicum of checking here, just for paranoia's sake and to
+ * definitely avoid copying an uninitialised buffer into guest address
+ * space.
+ */
+ rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
+
if ( rc == HVMCOPY_okay )
rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
unsigned long *val,
struct x86_emulate_ctxt *ctxt)
{
+ struct hvm_emulate_ctxt *hvmemul_ctxt =
+ container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+
*val = 0;
+
+ if ( unlikely(hvmemul_ctxt->set_context) )
+ return set_context_data(val, bytes);
+
return hvmemul_do_pio_buffer(port, bytes, IOREQ_READ, val);
}
return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops_no_write);
}
-void hvm_mem_access_emulate_one(bool_t nowrite, unsigned int trapnr,
+void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
unsigned int errcode)
{
struct hvm_emulate_ctxt ctx = {{ 0 }};
hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
- if ( nowrite )
+ switch ( kind )
+ {
+ case EMUL_KIND_NOWRITE:
rc = hvm_emulate_one_no_write(&ctx);
- else
+ break;
+ case EMUL_KIND_SET_CONTEXT:
+ ctx.set_context = 1;
+ /* Intentional fall-through. */
+ default:
rc = hvm_emulate_one(&ctx);
+ }
switch ( rc )
{
hvmemul_ctxt->ctxt.force_writeback = 1;
hvmemul_ctxt->seg_reg_accessed = 0;
hvmemul_ctxt->seg_reg_dirty = 0;
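+ /* set_context is opt-in per emulation; it must not leak across calls. */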
+ hvmemul_ctxt->set_context = 0;
hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
}
const struct cpu_user_regs *regs = guest_cpu_user_regs();
const struct vcpu *curr = current;
- req->regs.x86.rax = regs->eax;
- req->regs.x86.rcx = regs->ecx;
- req->regs.x86.rdx = regs->edx;
- req->regs.x86.rbx = regs->ebx;
- req->regs.x86.rsp = regs->esp;
- req->regs.x86.rbp = regs->ebp;
- req->regs.x86.rsi = regs->esi;
- req->regs.x86.rdi = regs->edi;
-
- req->regs.x86.r8 = regs->r8;
- req->regs.x86.r9 = regs->r9;
- req->regs.x86.r10 = regs->r10;
- req->regs.x86.r11 = regs->r11;
- req->regs.x86.r12 = regs->r12;
- req->regs.x86.r13 = regs->r13;
- req->regs.x86.r14 = regs->r14;
- req->regs.x86.r15 = regs->r15;
-
- req->regs.x86.rflags = regs->eflags;
- req->regs.x86.rip = regs->eip;
-
- req->regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
- req->regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
- req->regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
- req->regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
+ req->data.regs.x86.rax = regs->eax;
+ req->data.regs.x86.rcx = regs->ecx;
+ req->data.regs.x86.rdx = regs->edx;
+ req->data.regs.x86.rbx = regs->ebx;
+ req->data.regs.x86.rsp = regs->esp;
+ req->data.regs.x86.rbp = regs->ebp;
+ req->data.regs.x86.rsi = regs->esi;
+ req->data.regs.x86.rdi = regs->edi;
+
+ req->data.regs.x86.r8 = regs->r8;
+ req->data.regs.x86.r9 = regs->r9;
+ req->data.regs.x86.r10 = regs->r10;
+ req->data.regs.x86.r11 = regs->r11;
+ req->data.regs.x86.r12 = regs->r12;
+ req->data.regs.x86.r13 = regs->r13;
+ req->data.regs.x86.r14 = regs->r14;
+ req->data.regs.x86.r15 = regs->r15;
+
+ req->data.regs.x86.rflags = regs->eflags;
+ req->data.regs.x86.rip = regs->eip;
+
+ req->data.regs.x86.msr_efer = curr->arch.hvm_vcpu.guest_efer;
+ req->data.regs.x86.cr0 = curr->arch.hvm_vcpu.guest_cr[0];
+ req->data.regs.x86.cr3 = curr->arch.hvm_vcpu.guest_cr[3];
+ req->data.regs.x86.cr4 = curr->arch.hvm_vcpu.guest_cr[4];
}
static int hvm_event_traps(uint8_t sync, vm_event_request_t *req)
/* Architecture-specific vmcs/vmcb bits */
hvm_funcs.save_cpu_ctxt(curr, &ctxt);
- req->regs.x86.rax = regs->eax;
- req->regs.x86.rcx = regs->ecx;
- req->regs.x86.rdx = regs->edx;
- req->regs.x86.rbx = regs->ebx;
- req->regs.x86.rsp = regs->esp;
- req->regs.x86.rbp = regs->ebp;
- req->regs.x86.rsi = regs->esi;
- req->regs.x86.rdi = regs->edi;
-
- req->regs.x86.r8 = regs->r8;
- req->regs.x86.r9 = regs->r9;
- req->regs.x86.r10 = regs->r10;
- req->regs.x86.r11 = regs->r11;
- req->regs.x86.r12 = regs->r12;
- req->regs.x86.r13 = regs->r13;
- req->regs.x86.r14 = regs->r14;
- req->regs.x86.r15 = regs->r15;
-
- req->regs.x86.rflags = regs->eflags;
- req->regs.x86.rip = regs->eip;
-
- req->regs.x86.dr7 = curr->arch.debugreg[7];
- req->regs.x86.cr0 = ctxt.cr0;
- req->regs.x86.cr2 = ctxt.cr2;
- req->regs.x86.cr3 = ctxt.cr3;
- req->regs.x86.cr4 = ctxt.cr4;
-
- req->regs.x86.sysenter_cs = ctxt.sysenter_cs;
- req->regs.x86.sysenter_esp = ctxt.sysenter_esp;
- req->regs.x86.sysenter_eip = ctxt.sysenter_eip;
-
- req->regs.x86.msr_efer = ctxt.msr_efer;
- req->regs.x86.msr_star = ctxt.msr_star;
- req->regs.x86.msr_lstar = ctxt.msr_lstar;
+ req->data.regs.x86.rax = regs->eax;
+ req->data.regs.x86.rcx = regs->ecx;
+ req->data.regs.x86.rdx = regs->edx;
+ req->data.regs.x86.rbx = regs->ebx;
+ req->data.regs.x86.rsp = regs->esp;
+ req->data.regs.x86.rbp = regs->ebp;
+ req->data.regs.x86.rsi = regs->esi;
+ req->data.regs.x86.rdi = regs->edi;
+
+ req->data.regs.x86.r8 = regs->r8;
+ req->data.regs.x86.r9 = regs->r9;
+ req->data.regs.x86.r10 = regs->r10;
+ req->data.regs.x86.r11 = regs->r11;
+ req->data.regs.x86.r12 = regs->r12;
+ req->data.regs.x86.r13 = regs->r13;
+ req->data.regs.x86.r14 = regs->r14;
+ req->data.regs.x86.r15 = regs->r15;
+
+ req->data.regs.x86.rflags = regs->eflags;
+ req->data.regs.x86.rip = regs->eip;
+
+ req->data.regs.x86.dr7 = curr->arch.debugreg[7];
+ req->data.regs.x86.cr0 = ctxt.cr0;
+ req->data.regs.x86.cr2 = ctxt.cr2;
+ req->data.regs.x86.cr3 = ctxt.cr3;
+ req->data.regs.x86.cr4 = ctxt.cr4;
+
+ req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
+ req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
+ req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
+
+ req->data.regs.x86.msr_efer = ctxt.msr_efer;
+ req->data.regs.x86.msr_star = ctxt.msr_star;
+ req->data.regs.x86.msr_lstar = ctxt.msr_lstar;
hvm_get_segment_register(curr, x86_seg_fs, &seg);
- req->regs.x86.fs_base = seg.base;
+ req->data.regs.x86.fs_base = seg.base;
hvm_get_segment_register(curr, x86_seg_gs, &seg);
- req->regs.x86.gs_base = seg.base;
+ req->data.regs.x86.gs_base = seg.base;
hvm_get_segment_register(curr, x86_seg_cs, &seg);
- req->regs.x86.cs_arbytes = seg.attr.bytes;
+ req->data.regs.x86.cs_arbytes = seg.attr.bytes;
}
void p2m_mem_access_emulate_check(struct vcpu *v,
}
v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
+
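+ /*
+ * Latch the monitor-supplied bytes; set_context_data() will hand
+ * them back to the emulator on the subsequent emulation pass.
+ */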
+ if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) &&
+ v->arch.vm_event.emul_read_data )
+ *v->arch.vm_event.emul_read_data = rsp->data.emul_read_data;
}
}
if ( v->arch.vm_event.emulate_flags )
{
- hvm_mem_access_emulate_one((v->arch.vm_event.emulate_flags &
- VM_EVENT_FLAG_EMULATE_NOWRITE) != 0,
- TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+ enum emul_kind kind = EMUL_KIND_NORMAL;
+
+ if ( v->arch.vm_event.emulate_flags &
+ VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+ kind = EMUL_KIND_SET_CONTEXT;
+ else if ( v->arch.vm_event.emulate_flags &
+ VM_EVENT_FLAG_EMULATE_NOWRITE )
+ kind = EMUL_KIND_NOWRITE;
+
+ hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
+ HVM_DELIVER_NO_ERROR_CODE);
v->arch.vm_event.emulate_flags = 0;
return 1;
#include <xen/sched.h>
#include <asm/hvm/hvm.h>
+/* Implicitly serialized by the domctl lock. */
+int vm_event_init_domain(struct domain *d)
+{
+ struct vcpu *v;
+
+ for_each_vcpu ( d, v )
+ {
+ if ( v->arch.vm_event.emul_read_data )
+ continue;
+
+ v->arch.vm_event.emul_read_data =
+ xzalloc(struct vm_event_emul_read_data);
+
+ if ( !v->arch.vm_event.emul_read_data )
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Implicitly serialized by the domctl lock, or called only from
+ * domain cleanup paths.
+ */
+void vm_event_cleanup_domain(struct domain *d)
+{
+ struct vcpu *v;
+
+ for_each_vcpu ( d, v )
+ {
+ xfree(v->arch.vm_event.emul_read_data);
+ v->arch.vm_event.emul_read_data = NULL;
+ }
+}
+
void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
{
if ( !is_hvm_domain(d) || !atomic_read(&v->vm_event_pause_count) )
vm_event_ring_lock_init(ved);
vm_event_ring_lock(ved);
+ rc = vm_event_init_domain(d);
+
+ if ( rc < 0 )
+ goto err;
+
rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
&ved->ring_page);
if ( rc < 0 )
destroy_ring_for_helper(&ved->ring_page,
ved->ring_pg_struct);
+
+ vm_event_cleanup_domain(d);
+
vm_event_ring_unlock(ved);
}
#include <xen/sched.h>
+static inline
+int vm_event_init_domain(struct domain *d)
+{
+ /* Not supported on ARM. */
+ return 0;
+}
+
+static inline
+void vm_event_cleanup_domain(struct domain *d)
+{
+ /* Not supported on ARM. */
+}
+
static inline
void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
{
uint32_t emulate_flags;
unsigned long gpa;
unsigned long eip;
+ struct vm_event_emul_read_data *emul_read_data;
} vm_event;
};
struct hvm_trap trap;
uint32_t intr_shadow;
+
+ bool_t set_context;
+};
+
+enum emul_kind {
+ EMUL_KIND_NORMAL,
+ EMUL_KIND_NOWRITE,
+ EMUL_KIND_SET_CONTEXT
};
int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt);
int hvm_emulate_one_no_write(
struct hvm_emulate_ctxt *hvmemul_ctxt);
-void hvm_mem_access_emulate_one(bool_t nowrite,
+void hvm_mem_access_emulate_one(enum emul_kind kind,
unsigned int trapnr,
unsigned int errcode);
void hvm_emulate_prepare(
#include <xen/sched.h>
+int vm_event_init_domain(struct domain *d);
+
+void vm_event_cleanup_domain(struct domain *d);
+
void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v);
#endif /* __ASM_X86_VM_EVENT_H__ */
* paused
* VCPU_PAUSED in a response signals to unpause the vCPU
*/
-#define VM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
+#define VM_EVENT_FLAG_VCPU_PAUSED (1 << 0)
/* Flags to aid debugging vm_event */
-#define VM_EVENT_FLAG_FOREIGN (1 << 1)
+#define VM_EVENT_FLAG_FOREIGN (1 << 1)
/*
* The following flags can be set in response to a mem_access event.
*
* This will allow the guest to continue execution without lifting the page
* access restrictions.
*/
-#define VM_EVENT_FLAG_EMULATE (1 << 2)
+#define VM_EVENT_FLAG_EMULATE (1 << 2)
/*
- * Same as MEM_ACCESS_EMULATE, but with write operations or operations
+ * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
* potentially having side effects (like memory mapped or port I/O) disabled.
*/
-#define VM_EVENT_FLAG_EMULATE_NOWRITE (1 << 3)
+#define VM_EVENT_FLAG_EMULATE_NOWRITE (1 << 3)
/*
* Toggle singlestepping on vm_event response.
* Requires the vCPU to be paused already (synchronous events only).
*/
-#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP (1 << 4)
+#define VM_EVENT_FLAG_TOGGLE_SINGLESTEP (1 << 4)
+/*
+ * The response carries data that the hypervisor will return from the
+ * emulator's read function when emulating an instruction.
+ * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
+ * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
+ * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
+ * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
+ */
+#define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
/*
* Reasons for the vm event request
uint32_t _pad;
};
+struct vm_event_emul_read_data {
+ uint32_t size;
+ /* The struct is used in a union with vm_event_regs_x86. */
+ uint8_t data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
+};
+
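Sizing data[] as sizeof(struct vm_event_regs_x86) - sizeof(uint32_t) keeps emul_read_data from growing the union it shares with the regs member below. Xen-internal code could pin that invariant down at build time along these lines (a sketch; the public header itself cannot use BUILD_BUG_ON):

    BUILD_BUG_ON(sizeof(struct vm_event_emul_read_data) >
                 sizeof(struct vm_event_regs_x86));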
typedef struct vm_event_st {
uint32_t version; /* VM_EVENT_INTERFACE_VERSION */
uint32_t flags; /* VM_EVENT_FLAG_* */
} u;
union {
- struct vm_event_regs_x86 x86;
- } regs;
+ union {
+ struct vm_event_regs_x86 x86;
+ } regs;
+
+ struct vm_event_emul_read_data emul_read_data;
+ } data;
} vm_event_request_t, vm_event_response_t;
DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
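From the monitor side, answering a mem_access event with substitute read data would look roughly like this (a sketch against the ABI above; put_response() stands in for the usual ring plumbing, and the 4-byte payload is arbitrary):

    vm_event_response_t rsp;
    uint32_t val = 0xdeadbeef;  /* bytes the emulated read should observe */

    memset(&rsp, 0, sizeof(rsp));
    rsp.version = VM_EVENT_INTERFACE_VERSION;
    rsp.vcpu_id = req.vcpu_id;
    rsp.flags = VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_EMULATE |
                VM_EVENT_FLAG_SET_EMUL_READ_DATA;

    /* Hand the hypervisor the bytes to return from the emulated read. */
    rsp.data.emul_read_data.size = sizeof(val);
    memcpy(rsp.data.emul_read_data.data, &val, sizeof(val));

    put_response(&rsp);         /* hypothetical ring helper */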