v->arch.flags = TF_kernel_mode;
- /* By default, do not emulate */
- v->arch.vm_event.emulate_flags = 0;
-
rc = mapcache_vcpu_init(v);
if ( rc )
return rc;
void vcpu_destroy(struct vcpu *v)
{
- xfree(v->arch.vm_event.emul_read_data);
- v->arch.vm_event.emul_read_data = NULL;
+ xfree(v->arch.vm_event);
+ v->arch.vm_event = NULL;
if ( is_pv_32bit_vcpu(v) )
{
void arch_domain_destroy(struct domain *d)
{
- vfree(d->arch.event_write_data);
- d->arch.event_write_data = NULL;
-
if ( has_hvm_container_domain(d) )
hvm_domain_destroy(d);
#include <asm/hvm/trace.h>
#include <asm/hvm/support.h>
#include <asm/hvm/svm/svm.h>
+#include <asm/vm_event.h>
static void hvmtrace_io_assist(const ioreq_t *p)
{
{
struct vcpu *curr = current;
- if ( curr->arch.vm_event.emul_read_data )
+ if ( curr->arch.vm_event )
{
unsigned int safe_size =
- min(size, curr->arch.vm_event.emul_read_data->size);
+ min(size, curr->arch.vm_event->emul_read_data.size);
- memcpy(buffer, curr->arch.vm_event.emul_read_data->data, safe_size);
+ memcpy(buffer, curr->arch.vm_event->emul_read_data.data, safe_size);
memset(buffer + safe_size, 0, size - safe_size);
return X86EMUL_OKAY;
}
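
For illustration only (not part of the patch): the min()/memset() pair above ensures an emulated read never consumes more bytes than the monitor actually supplied, zero-filling the remainder of the destination buffer. The same pattern as a standalone sketch, with hypothetical names:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Clamp a read to the monitor-supplied buffer and pad with zeros. */
static void read_from_event_buffer(uint8_t *dst, size_t size,
                                   const uint8_t *ev_data, size_t ev_size)
{
    size_t safe_size = size < ev_size ? size : ev_size;

    memcpy(dst, ev_data, safe_size);              /* supplied bytes */
    memset(dst + safe_size, 0, size - safe_size); /* zero-fill the rest */
}
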
#include <asm/altp2m.h>
#include <asm/mtrr.h>
#include <asm/apic.h>
+#include <asm/vm_event.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
break;
}
- if ( unlikely(d->arch.event_write_data) )
+ if ( unlikely(v->arch.vm_event) )
{
- struct monitor_write_data *w = &d->arch.event_write_data[v->vcpu_id];
+ struct monitor_write_data *w = &v->arch.vm_event->write_data;
if ( w->do_write.msr )
{
struct domain *d = v->domain;
unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
struct page_info *page;
- struct arch_domain *currad = &v->domain->arch;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
goto gpf;
}
- if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) )
{
- ASSERT(currad->event_write_data != NULL);
+ ASSERT(v->arch.vm_event);
if ( hvm_event_crX(CR0, value, old_value) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
- currad->event_write_data[v->vcpu_id].do_write.cr0 = 1;
- currad->event_write_data[v->vcpu_id].cr0 = value;
+ v->arch.vm_event->write_data.do_write.cr0 = 1;
+ v->arch.vm_event->write_data.cr0 = value;
return X86EMUL_OKAY;
}
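
A sketch of the control flow this hunk enables (hypothetical names, not the actual Xen API): the intercept records the intended value in write_data and returns X86EMUL_OKAY without touching guest state; hvm_do_resume() later commits the value unless the monitor replied with VM_EVENT_FLAG_DENY.

#include <stdbool.h>

struct pending_write {
    bool pending;
    unsigned long value;
};

/* In the write intercept: record intent, leave guest state untouched. */
static void defer_write(struct pending_write *w, unsigned long value)
{
    w->pending = true;
    w->value = value;
}

/* On VCPU resume: commit unless the monitor denied the write. */
static void apply_deferred_write(struct pending_write *w, bool denied,
                                 void (*commit)(unsigned long))
{
    if ( w->pending && !denied )
        commit(w->value);
    w->pending = false;
}
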
struct vcpu *v = current;
struct page_info *page;
unsigned long old = v->arch.hvm_vcpu.guest_cr[3];
- struct arch_domain *currad = &v->domain->arch;
- if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) )
{
- ASSERT(currad->event_write_data != NULL);
+ ASSERT(v->arch.vm_event);
if ( hvm_event_crX(CR3, value, old) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
- currad->event_write_data[v->vcpu_id].do_write.cr3 = 1;
- currad->event_write_data[v->vcpu_id].cr3 = value;
+ v->arch.vm_event->write_data.do_write.cr3 = 1;
+ v->arch.vm_event->write_data.cr3 = value;
return X86EMUL_OKAY;
}
{
struct vcpu *v = current;
unsigned long old_cr;
- struct arch_domain *currad = &v->domain->arch;
if ( value & hvm_cr4_guest_reserved_bits(v, 0) )
{
goto gpf;
}
- if ( may_defer && unlikely(currad->monitor.write_ctrlreg_enabled &
+ if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) )
{
- ASSERT(currad->event_write_data != NULL);
+ ASSERT(v->arch.vm_event);
if ( hvm_event_crX(CR4, value, old_cr) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
- currad->event_write_data[v->vcpu_id].do_write.cr4 = 1;
- currad->event_write_data[v->vcpu_id].cr4 = value;
+ v->arch.vm_event->write_data.do_write.cr4 = 1;
+ v->arch.vm_event->write_data.cr4 = value;
return X86EMUL_OKAY;
}
- if ( may_defer && unlikely(currad->monitor.mov_to_msr_enabled) )
+ if ( may_defer && unlikely(v->domain->arch.monitor.mov_to_msr_enabled) )
{
- ASSERT(currad->event_write_data != NULL);
+ ASSERT(v->arch.vm_event);
/* The actual write will occur in hvm_do_resume() (if permitted). */
- currad->event_write_data[v->vcpu_id].do_write.msr = 1;
- currad->event_write_data[v->vcpu_id].msr = msr;
- currad->event_write_data[v->vcpu_id].value = msr_content;
+ v->arch.vm_event->write_data.do_write.msr = 1;
+ v->arch.vm_event->write_data.msr = msr;
+ v->arch.vm_event->write_data.value = msr_content;
hvm_event_msr(msr, msr_content);
return X86EMUL_OKAY;
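
For reference, the struct monitor_write_data populated by the CR0/CR3/CR4/MSR hunks above looked roughly like this in asm-x86/domain.h at the time (shown for context only; field layout may differ slightly):

struct monitor_write_data {
    struct {
        unsigned int msr : 1;
        unsigned int cr0 : 1;
        unsigned int cr3 : 1;
        unsigned int cr4 : 1;
    } do_write;

    uint32_t msr;
    uint64_t value;   /* MSR payload */
    uint64_t cr0;
    uint64_t cr3;
    uint64_t cr4;
};

Since each vCPU now carries exactly one such slot, at most one deferred write per register can be pending per vCPU at any time.
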
#include <asm/hvm/nestedhvm.h>
#include <asm/altp2m.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
+#include <asm/vm_event.h>
#include <xsm/xsm.h>
#include "mm-locks.h"
}
}
- v->arch.vm_event.emulate_flags = violation ? rsp->flags : 0;
+ v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
- if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) &&
- v->arch.vm_event.emul_read_data )
- *v->arch.vm_event.emul_read_data = rsp->data.emul_read_data;
+ if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+ v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
}
}
}
/* The previous vm_event reply does not match the current state. */
- if ( v->arch.vm_event.gpa != gpa || v->arch.vm_event.eip != eip )
+ if ( unlikely(v->arch.vm_event) &&
+ (v->arch.vm_event->gpa != gpa || v->arch.vm_event->eip != eip) )
{
/* Don't emulate the current instruction, send a new vm_event. */
- v->arch.vm_event.emulate_flags = 0;
+ v->arch.vm_event->emulate_flags = 0;
/*
* Make sure to mark the current state to match it again against
* the new vm_event about to be sent.
*/
- v->arch.vm_event.gpa = gpa;
- v->arch.vm_event.eip = eip;
+ v->arch.vm_event->gpa = gpa;
+ v->arch.vm_event->eip = eip;
}
- if ( v->arch.vm_event.emulate_flags )
+ if ( unlikely(v->arch.vm_event) && v->arch.vm_event->emulate_flags )
{
enum emul_kind kind = EMUL_KIND_NORMAL;
- if ( v->arch.vm_event.emulate_flags &
+ if ( v->arch.vm_event->emulate_flags &
VM_EVENT_FLAG_SET_EMUL_READ_DATA )
kind = EMUL_KIND_SET_CONTEXT;
- else if ( v->arch.vm_event.emulate_flags &
+ else if ( v->arch.vm_event->emulate_flags &
VM_EVENT_FLAG_EMULATE_NOWRITE )
kind = EMUL_KIND_NOWRITE;
hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
HVM_DELIVER_NO_ERROR_CODE);
- v->arch.vm_event.emulate_flags = 0;
+ v->arch.vm_event->emulate_flags = 0;
return 1;
}
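
The flag-to-kind mapping above is equivalent to the following hypothetical helper (reusing the identifiers from the hunk):

static enum emul_kind flags_to_kind(uint32_t flags)
{
    if ( flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
        return EMUL_KIND_SET_CONTEXT; /* serve reads from emul_read_data */
    if ( flags & VM_EVENT_FLAG_EMULATE_NOWRITE )
        return EMUL_KIND_NOWRITE;     /* emulate but discard writes */
    return EMUL_KIND_NORMAL;
}

Note that SET_EMUL_READ_DATA takes precedence over EMULATE_NOWRITE when both flags are set.
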
{
struct vcpu *v;
- if ( !d->arch.event_write_data )
- d->arch.event_write_data =
- vzalloc(sizeof(struct monitor_write_data) * d->max_vcpus);
-
- if ( !d->arch.event_write_data )
- return -ENOMEM;
-
for_each_vcpu ( d, v )
{
- if ( v->arch.vm_event.emul_read_data )
+ if ( v->arch.vm_event )
continue;
- v->arch.vm_event.emul_read_data =
- xzalloc(struct vm_event_emul_read_data);
+ v->arch.vm_event = xzalloc(struct arch_vm_event);
- if ( !v->arch.vm_event.emul_read_data )
+ if ( !v->arch.vm_event )
return -ENOMEM;
}
{
struct vcpu *v;
- vfree(d->arch.event_write_data);
- d->arch.event_write_data = NULL;
-
for_each_vcpu ( d, v )
{
- xfree(v->arch.vm_event.emul_read_data);
- v->arch.vm_event.emul_read_data = NULL;
+ xfree(v->arch.vm_event);
+ v->arch.vm_event = NULL;
}
}
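
A plausible caller-side pattern for these two functions (hypothetical sketch; the real enablement path lives in the common vm_event code): a failed init can always be unwound by cleanup, because already-allocated vCPUs are skipped on re-init and xfree() tolerates the NULL left in vCPUs that were never reached.

static int enable_arch_vm_event(struct domain *d)
{
    int rc = vm_event_init_domain(d);

    if ( rc )                        /* partial allocation on -ENOMEM */
        vm_event_cleanup_domain(d);  /* frees what was allocated */
    return rc;
}
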
{
if ( rsp->flags & VM_EVENT_FLAG_DENY )
{
- struct monitor_write_data *w =
- &v->domain->arch.event_write_data[v->vcpu_id];
+ struct monitor_write_data *w = &v->arch.vm_event->write_data;
- ASSERT(v->domain->arch.event_write_data != NULL);
+ ASSERT(v->arch.vm_event);
switch ( rsp->reason )
{
/* Mem_access emulation control */
bool_t mem_access_emulate_enabled;
-
- struct monitor_write_data *event_write_data;
} __cacheline_aligned;
#define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list))
/* A secondary copy of the vcpu time info. */
XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
- /*
- * Should we emulate the next matching instruction on VCPU resume
- * after a vm_event?
- */
- struct {
- uint32_t emulate_flags;
- unsigned long gpa;
- unsigned long eip;
- struct vm_event_emul_read_data *emul_read_data;
- } vm_event;
+ struct arch_vm_event *vm_event;
};
smap_check_policy_t smap_policy_change(struct vcpu *v,
#include <xen/sched.h>
#include <xen/vm_event.h>
+/*
+ * Per-vCPU vm_event state: whether (and how) to emulate the next
+ * matching instruction on VCPU resume after a vm_event, and any
+ * register write deferred until hvm_do_resume().
+ */
+struct arch_vm_event {
+ uint32_t emulate_flags;
+ unsigned long gpa;
+ unsigned long eip;
+ struct vm_event_emul_read_data emul_read_data;
+ struct monitor_write_data write_data;
+};
+
int vm_event_init_domain(struct domain *d);
void vm_event_cleanup_domain(struct domain *d);
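
Design note: the single xzalloc() of struct arch_vm_event replaces two separate heap objects (the per-vCPU emul_read_data and the per-domain event_write_data array), and the pointer itself doubles as the 'monitoring enabled' flag. A hypothetical usage sketch:

static void arch_vm_event_reset(struct vcpu *v)
{
    if ( !v->arch.vm_event )    /* vm_event not enabled for this vCPU */
        return;

    /* One NULL check guards all embedded per-vCPU vm_event state. */
    v->arch.vm_event->emulate_flags = 0;
    memset(&v->arch.vm_event->write_data, 0,
           sizeof(v->arch.vm_event->write_data));
}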