#include <xen/mem_access.h>
#include <xen/monitor.h>
#include <asm/hvm/monitor.h>
+#include <asm/hvm/nestedhvm.h>
#include <asm/altp2m.h>
#include <asm/monitor.h>
#include <asm/p2m.h>
#include <asm/vm_event.h>
#include <public/vm_event.h>
+/*
+ * If nested virtualization is enabled for the domain and the vCPU is
+ * currently executing an L2 (nested) guest, mark the vm_event request as
+ * originating from a nested VM and record the guest physical address of
+ * the L1 hypervisor's nested p2m (EPT/NPT) base for the introspection
+ * agent.  Otherwise the request is left untouched.
+ */
+static void set_npt_base(struct vcpu *v, vm_event_request_t *req)
+{
+ if ( nestedhvm_enabled(v->domain) && nestedhvm_vcpu_in_guestmode(v) )
+ {
+ req->flags |= VM_EVENT_FLAG_NESTED_P2M;
+ req->data.regs.x86.npt_base = nhvm_vcpu_p2m_base(v);
+ }
+}
+
bool hvm_monitor_cr(unsigned int index, unsigned long value, unsigned long old)
{
struct vcpu *curr = current;
.u.write_ctrlreg.old_value = old
};
+ set_npt_base(curr, &req);
+
return monitor_traps(curr, sync, &req) >= 0 &&
curr->domain->arch.monitor.control_register_values;
}
.vcpu_id = curr->vcpu_id,
};
+ set_npt_base(curr, &req);
+
return curr->domain->arch.monitor.emul_unimplemented_enabled &&
monitor_traps(curr, true, &req) == 1;
}
.u.mov_to_msr.old_value = old_value
};
+ set_npt_base(curr, &req);
+
return monitor_traps(curr, 1, &req) >= 0 &&
curr->domain->arch.monitor.control_register_values;
}
uint64_t vmx_exit_qualification,
uint8_t descriptor, bool is_write)
{
+ struct vcpu *curr = current;
vm_event_request_t req = {
.reason = VM_EVENT_REASON_DESCRIPTOR_ACCESS,
.u.desc_access.descriptor = descriptor,
req.u.desc_access.arch.vmx.exit_qualification = vmx_exit_qualification;
}
- monitor_traps(current, true, &req);
+ set_npt_base(curr, &req);
+
+ monitor_traps(curr, true, &req);
}
static inline unsigned long gfn_of_rip(unsigned long rip)
return -EOPNOTSUPP;
}
+ set_npt_base(curr, &req);
+
return monitor_traps(curr, sync, &req);
}
req.u.cpuid.leaf = leaf;
req.u.cpuid.subleaf = subleaf;
+ set_npt_base(curr, &req);
+
return monitor_traps(curr, 1, &req);
}
void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
unsigned int err, uint64_t cr2)
{
+ struct vcpu *curr = current;
vm_event_request_t req = {
.reason = VM_EVENT_REASON_INTERRUPT,
.u.interrupt.x86.vector = vector,
.u.interrupt.x86.cr2 = cr2,
};
- monitor_traps(current, 1, &req);
+ set_npt_base(curr, &req);
+
+ monitor_traps(curr, 1, &req);
}
/*
req.u.mem_access.gla = gla;
req.u.mem_access.offset = gpa & ~PAGE_MASK;
+ set_npt_base(curr, &req);
+
return monitor_traps(curr, true, &req) >= 0;
}
#include "xen.h"
-#define VM_EVENT_INTERFACE_VERSION 0x00000006
+#define VM_EVENT_INTERFACE_VERSION 0x00000007
#if defined(__XEN__) || defined(__XEN_TOOLS__)
* which singlestep gets automatically disabled.
*/
#define VM_EVENT_FLAG_FAST_SINGLESTEP (1 << 11)
+/*
+ * Set if the event comes from a nested VM and thus npt_base is valid.
+ */
+#define VM_EVENT_FLAG_NESTED_P2M (1 << 12)
/*
* Reasons for the vm event request
uint64_t msr_star;
uint64_t msr_lstar;
uint64_t gdtr_base;
+
+ /*
+ * When VM_EVENT_FLAG_NESTED_P2M is set, this event comes from a nested
+ * VM. npt_base is the guest physical address of the L1 hypervisor's
+ * EPT/NPT tables for the nested guest.
+ *
+ * All bits outside of architectural address ranges are reserved for
+ * future metadata.
+ */
+ uint64_t npt_base;
+
uint32_t cs_base;
uint32_t ss_base;
uint32_t ds_base;