data->msr_cstar = vmcb->cstar;
data->msr_syscall_mask = vmcb->sfmask;
data->msr_efer = v->arch.hvm_vcpu.guest_efer;
- data->msr_flags = -1ULL;
+ data->msr_flags = 0;
}
vmx_disable_intercept_for_msr(v, MSR_IA32_BNDCFGS, MSR_TYPE_R | MSR_TYPE_W);
}
+ /* All guest MSR state is dirty: reload everything at the next context switch. */
+ v->arch.hvm_vmx.msr_state.flags = ((1u << VMX_MSR_COUNT) - 1);
+
/* I/O access bitmap. */
__vmwrite(IO_BITMAP_A, __pa(d->arch.hvm_domain.io_bitmap));
__vmwrite(IO_BITMAP_B, __pa(d->arch.hvm_domain.io_bitmap) + PAGE_SIZE);
static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
- unsigned long guest_flags = guest_state->flags;
data->shadow_gs = v->arch.hvm_vmx.shadow_gs;
data->msr_cstar = v->arch.hvm_vmx.cstar;
/* save msrs */
- data->msr_flags = guest_flags;
+ data->msr_flags = 0;
data->msr_lstar = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
data->msr_star = guest_state->msrs[VMX_INDEX_MSR_STAR];
data->msr_syscall_mask = guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK];
struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
/* restore msrs */
- guest_state->flags = data->msr_flags & 7;
+ guest_state->flags = ((1u << VMX_MSR_COUNT) - 1);
guest_state->msrs[VMX_INDEX_MSR_LSTAR] = data->msr_lstar;
guest_state->msrs[VMX_INDEX_MSR_STAR] = data->msr_star;
guest_state->msrs[VMX_INDEX_MSR_SYSCALL_MASK] = data->msr_syscall_mask;
uint64_t shadow_gs;
/* msr content saved/restored. */
- uint64_t msr_flags;
+ uint64_t msr_flags; /* Obsolete, ignored. */
uint64_t msr_lstar;
uint64_t msr_star;
uint64_t msr_cstar;
uint64_t shadow_gs;
/* msr content saved/restored. */
- uint64_t msr_flags;
+ uint64_t msr_flags; /* Obsolete, ignored. */
uint64_t msr_lstar;
uint64_t msr_star;
uint64_t msr_cstar;
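
Not part of the patch: a minimal standalone sketch of the behaviour the hunks above introduce, assuming the three-entry VMX MSR list (LSTAR, STAR, SYSCALL_MASK) implied by the VMX_INDEX_MSR_* names. The toy_hw_cpu record and toy_load_cpu_state() helper are hypothetical stand-ins for the real hvm_hw_cpu and load path, trimmed to the fields shown here. The point: the loader no longer consults the incoming msr_flags at all, and the all-dirty mask ((1u << VMX_MSR_COUNT) - 1) equals 7, the most the old "msr_flags & 7" masking could ever have set.

/* Standalone sketch, not Xen code: toy model of the new restore logic. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum {
    VMX_INDEX_MSR_LSTAR = 0,
    VMX_INDEX_MSR_STAR,
    VMX_INDEX_MSR_SYSCALL_MASK,
    VMX_MSR_COUNT                      /* == 3 */
};

struct vmx_msr_state {
    unsigned long flags;               /* bit n set => msrs[n] needs (re)loading */
    uint64_t msrs[VMX_MSR_COUNT];
};

/* Hypothetical stand-in for the hvm_hw_cpu fields used in these hunks. */
struct toy_hw_cpu {
    uint64_t msr_flags;                /* Obsolete, ignored on load. */
    uint64_t msr_lstar, msr_star, msr_syscall_mask;
};

/* Toy equivalent of the new load path: data->msr_flags is never read. */
static void toy_load_cpu_state(struct vmx_msr_state *gs,
                               const struct toy_hw_cpu *data)
{
    gs->flags = (1u << VMX_MSR_COUNT) - 1;        /* mark everything dirty */
    gs->msrs[VMX_INDEX_MSR_LSTAR]        = data->msr_lstar;
    gs->msrs[VMX_INDEX_MSR_STAR]         = data->msr_star;
    gs->msrs[VMX_INDEX_MSR_SYSCALL_MASK] = data->msr_syscall_mask;
}

int main(void)
{
    struct vmx_msr_state gs = { 0 };
    struct toy_hw_cpu rec = {
        .msr_flags = 7,                /* value an old saver might have written */
        .msr_lstar = 1, .msr_star = 2, .msr_syscall_mask = 3,
    };

    toy_load_cpu_state(&gs, &rec);
    assert(gs.flags == 7);             /* all-dirty mask == old "& 7" maximum */

    rec.msr_flags = 0;                 /* value the new saver writes */
    toy_load_cpu_state(&gs, &rec);
    assert(gs.flags == 7);             /* identical result: the field is ignored */

    printf("dirty mask = %#lx\n", gs.flags);
    return 0;
}

Compiling and running this prints "dirty mask = 0x7" whether the record carries the old-style value 7 or the new-style 0, which is the compatibility property the header comment "Obsolete, ignored." is documenting.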