static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content);
static void vmx_invlpg(struct vcpu *v, unsigned long vaddr);
+/* Values for domain's ->arch.hvm_domain.pi_ops.flags. */
+#define PI_CSW_FROM (1u << 0)
+#define PI_CSW_TO (1u << 1)
+
struct vmx_pi_blocking_vcpu {
struct list_head list;
spinlock_t lock;
 * This can make sure the PI (especially the NDST field) is
* in proper state when we call vmx_vcpu_block().
*/
- d->arch.hvm_domain.pi_ops.switch_from = vmx_pi_switch_from;
- d->arch.hvm_domain.pi_ops.switch_to = vmx_pi_switch_to;
+ d->arch.hvm_domain.pi_ops.flags = PI_CSW_FROM | PI_CSW_TO;
for_each_vcpu ( d, v )
{
}
d->arch.hvm_domain.pi_ops.vcpu_block = vmx_vcpu_block;
- d->arch.hvm_domain.pi_ops.do_resume = vmx_pi_do_resume;
}
/* This function is called when pcidevs_lock is held */
* 'switch_to' hook function.
*/
d->arch.hvm_domain.pi_ops.vcpu_block = NULL;
- d->arch.hvm_domain.pi_ops.switch_from = NULL;
- d->arch.hvm_domain.pi_ops.do_resume = NULL;
+ d->arch.hvm_domain.pi_ops.flags = PI_CSW_TO;
for_each_vcpu ( d, v )
vmx_pi_unblock_vcpu(v);
vmx_restore_host_msrs();
vmx_save_dr(v);
- if ( v->domain->arch.hvm_domain.pi_ops.switch_from )
- v->domain->arch.hvm_domain.pi_ops.switch_from(v);
+ if ( v->domain->arch.hvm_domain.pi_ops.flags & PI_CSW_FROM )
+ vmx_pi_switch_from(v);
}
static void vmx_ctxt_switch_to(struct vcpu *v)
vmx_restore_guest_msrs(v);
vmx_restore_dr(v);
- if ( v->domain->arch.hvm_domain.pi_ops.switch_to )
- v->domain->arch.hvm_domain.pi_ops.switch_to(v);
+ if ( v->domain->arch.hvm_domain.pi_ops.flags & PI_CSW_TO )
+ vmx_pi_switch_to(v);
}
if ( nestedhvm_vcpu_in_guestmode(curr) && vcpu_nestedhvm(curr).stale_np2m )
return false;
- if ( curr->domain->arch.hvm_domain.pi_ops.do_resume )
- curr->domain->arch.hvm_domain.pi_ops.do_resume(curr);
+ if ( curr->domain->arch.hvm_domain.pi_ops.vcpu_block )
+ vmx_pi_do_resume(curr);
if ( !cpu_has_vmx_vpid )
goto out;
 * and actually has a physical device assigned.
*/
struct hvm_pi_ops {
- /* Hook into ctx_switch_from. */
- void (*switch_from)(struct vcpu *v);
-
- /* Hook into ctx_switch_to. */
- void (*switch_to)(struct vcpu *v);
+ unsigned int flags;
/*
* Hook into arch_vcpu_block(), which is called
* from vcpu_block() and vcpu_do_poll().
*/
void (*vcpu_block)(struct vcpu *);
-
- /* Hook into the vmentry path. */
- void (*do_resume)(struct vcpu *v);
};
#define MAX_NR_IOREQ_SERVERS 8