    MSR_AMD64_DR2_ADDRESS_MASK,
    MSR_AMD64_DR3_ADDRESS_MASK,
};
-static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
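With the init_msr() hook gone (removed at the end of this patch), vendor code can no longer append entries to the MSR record at runtime, so the dynamic msr_count_max is redundant: the worst-case record size is just the length of the static msrs_to_send[] table. A minimal standalone sketch of why that count needs no runtime bookkeeping, assuming Xen's usual ARRAY_SIZE() definition from xen/include/xen/lib.h (the table values below are illustrative stand-ins, not the real msrs_to_send[] contents):

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as Xen's ARRAY_SIZE() from xen/include/xen/lib.h. */
    #define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))

    /* Stand-in for the real table of MSR indexes to migrate. */
    static const uint32_t msrs_to_send[] = { 0x10, 0x1b, 0xc001101b };

    int main(void)
    {
        /*
         * sizeof() is evaluated at compile time, so the element count is
         * a constant expression: once nothing can grow the table after
         * build time, no runtime counter is needed.
         */
        printf("max MSR records: %zu\n", ARRAY_SIZE(msrs_to_send));
        return 0;
    }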
static int hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h)
{
    int err;
    err = _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
-                          HVM_CPU_MSR_SIZE(msr_count_max));
+                          HVM_CPU_MSR_SIZE(ARRAY_SIZE(msrs_to_send)));
    if ( err )
        return err;
    ctxt = (struct hvm_msr *)&h->data[h->cur];
        ctxt->msr[ctxt->count++].val = val;
    }
-    if ( hvm_funcs.save_msr )
-        hvm_funcs.save_msr(v, ctxt);
-
-    ASSERT(ctxt->count <= msr_count_max);
+    ASSERT(ctxt->count <= ARRAY_SIZE(msrs_to_send));
    for ( i = 0; i < ctxt->count; ++i )
        ctxt->msr[i]._rsvd = 0;
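The record sized above is variable-length: space is reserved for the worst case up front, the (elided) loop appends an entry for each MSR with a non-default value, and the remainder of the function rewrites the record length to match the final ctxt->count. A sketch of the wire format and size macro, assuming they match the shapes in Xen's public save-format header (public/arch-x86/hvm/save.h; check the tree for the authoritative definitions):

    #include <stddef.h>
    #include <stdint.h>

    /* Approximate shape of the migration record. */
    struct hvm_msr {
        uint32_t count;          /* Number of valid msr[] entries. */
        struct hvm_one_msr {
            uint32_t index;      /* MSR index. */
            uint32_t _rsvd;      /* Zeroed on the wire, as in the loop above. */
            uint64_t val;        /* MSR value. */
        } msr[1 /* variable length */];
    };

    /* Bytes needed for a record holding 'cnt' MSRs. */
    #define HVM_CPU_MSR_SIZE(cnt) \
        (offsetof(struct hvm_msr, msr) + (cnt) * sizeof(struct hvm_one_msr))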
        return -EOPNOTSUPP;
    /* Checking finished */
-    if ( hvm_funcs.load_msr )
-        err = hvm_funcs.load_msr(v, ctxt);
-
    for ( i = 0; !err && i < ctxt->count; ++i )
    {
        switch ( ctxt->msr[i].index )
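On the restore side, dropping the load_msr() fallback means every index in an incoming record must be matched by this switch: an unrecognised MSR now fails the load instead of being offered to vendor code. A sketch of the dispatch shape, written as a fragment in the style of the surrounding function (the case labels and error value are illustrative, and guest_wrmsr() is Xen's common MSR write path; the authoritative list lives in hvm_load_cpu_msrs()):

    for ( i = 0; !err && i < ctxt->count; ++i )
    {
        switch ( ctxt->msr[i].index )
        {
        case MSR_AMD64_DR0_ADDRESS_MASK:
        case MSR_AMD64_DR1_ADDRESS_MASK ... MSR_AMD64_DR3_ADDRESS_MASK:
            /* Recognised MSR: write it back via the common path. */
            if ( guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val) !=
                 X86EMUL_OKAY )
                err = -ENXIO;
            break;

        default:
            /* No vendor hook left to defer to: reject the record. */
            err = -ENXIO;
            break;
        }
    }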
                        sizeof(struct hvm_save_descriptor),
                        HVMSR_PER_VCPU);
-    if ( hvm_funcs.init_msr )
-        msr_count_max += hvm_funcs.init_msr();
-
-    if ( msr_count_max )
-        hvm_register_savevm(CPU_MSR_CODE,
-                            "CPU_MSR",
-                            hvm_save_cpu_msrs,
-                            hvm_load_cpu_msrs,
-                            HVM_CPU_MSR_SIZE(msr_count_max) +
-                            sizeof(struct hvm_save_descriptor),
-                            HVMSR_PER_VCPU);
+    hvm_register_savevm(CPU_MSR_CODE,
+                        "CPU_MSR",
+                        hvm_save_cpu_msrs,
+                        hvm_load_cpu_msrs,
+                        HVM_CPU_MSR_SIZE(ARRAY_SIZE(msrs_to_send)) +
+                        sizeof(struct hvm_save_descriptor),
+                        HVMSR_PER_VCPU);
    return 0;
}
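Registration of the CPU_MSR record is now unconditional: the old code only registered it when init_msr() and/or the static table produced a non-zero count, whereas the size argument is now a constant expression and the table is the whole story. The practical consequence is that migrating an additional MSR means extending the table and the common read/write paths rather than adding vendor hooks; a fragment sketching that workflow (NEW_MSR_INDEX is hypothetical):

    /*
     * Hypothetical: to migrate one more MSR after this change, list it
     * in msrs_to_send[] and ensure the common guest_rdmsr()/guest_wrmsr()
     * paths handle it. No per-vendor save/load code is involved.
     */
    static const uint32_t msrs_to_send[] = {
        /* ... existing entries ... */
        MSR_AMD64_DR2_ADDRESS_MASK,
        MSR_AMD64_DR3_ADDRESS_MASK,
        /* NEW_MSR_INDEX, */
    };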
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
-    unsigned int (*init_msr)(void);
-    void (*save_msr)(struct vcpu *, struct hvm_msr *);
-    int (*load_msr)(struct vcpu *, struct hvm_msr *);
-
    /* Examine specifics of the guest state. */
    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
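The final hunk drops the hook declarations from struct hvm_function_table (presumably in xen/include/asm-x86/hvm/hvm.h). Judging by the AMD DR address-mask MSRs in msrs_to_send[] above, the SVM implementations were the last users of these hooks, so the indirection can go entirely; after this patch a vendor function table simply omits them, along these lines (the member values are illustrative, not taken from this patch):

    static struct hvm_function_table __initdata svm_function_table = {
        .name          = "SVM",
        .save_cpu_ctxt = svm_save_vmcb_ctxt,
        .load_cpu_ctxt = svm_load_vmcb_ctxt,
        /* .init_msr / .save_msr / .load_msr no longer exist. */
    };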