static bool nvmm_allowed;
static struct qemu_machine qemu_mach;
-static AccelCPUState *
-get_qemu_vcpu(CPUState *cpu)
-{
-    return cpu->accel;
-}
-
static struct nvmm_machine *
get_nvmm_mach(void)
{
{
    CPUX86State *env = cpu->env_ptr;
    struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_x64_state *state = vcpu->state;
    uint64_t bitmap;
{
    CPUX86State *env = cpu->env_ptr;
    struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_x64_state *state = vcpu->state;
nvmm_can_take_int(CPUState *cpu)
{
    CPUX86State *env = cpu->env_ptr;
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    struct nvmm_machine *mach = get_nvmm_mach();
static bool
nvmm_can_take_nmi(CPUState *cpu)
{
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    /*
     * Contrary to INTs, NMIs always schedule an exit when they are
{
    CPUX86State *env = cpu->env_ptr;
    struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_x64_state *state = vcpu->state;
static void
nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit)
{
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    CPUX86State *env = cpu->env_ptr;
    X86CPU *x86_cpu = X86_CPU(cpu);
    uint64_t tpr;
nvmm_handle_rdmsr(struct nvmm_machine *mach, CPUState *cpu,
    struct nvmm_vcpu_exit *exit)
{
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_x64_state *state = vcpu->state;
nvmm_handle_wrmsr(struct nvmm_machine *mach, CPUState *cpu,
    struct nvmm_vcpu_exit *exit)
{
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_x64_state *state = vcpu->state;
{
    CPUX86State *env = cpu->env_ptr;
    struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct nvmm_vcpu_exit *exit = vcpu->exit;
nvmm_ipi_signal(int sigcpu)
{
    if (current_cpu) {
-        AccelCPUState *qcpu = get_qemu_vcpu(current_cpu);
+        AccelCPUState *qcpu = current_cpu->accel;
#if NVMM_USER_VERSION >= 2
        struct nvmm_vcpu *vcpu = &qcpu->vcpu;
        nvmm_vcpu_stop(vcpu);
nvmm_destroy_vcpu(CPUState *cpu)
{
    struct nvmm_machine *mach = get_nvmm_mach();
-    AccelCPUState *qcpu = get_qemu_vcpu(cpu);
+    AccelCPUState *qcpu = cpu->accel;
    nvmm_vcpu_destroy(mach, &qcpu->vcpu);
    g_free(cpu->accel);