DEFINE_PER_CPU_READ_MOSTLY(void *, hv_vp_assist); /* per-CPU VP assist page pointer */
DEFINE_PER_CPU_READ_MOSTLY(unsigned int, hv_vp_index); /* per-CPU Hyper-V VP index (from HV_X64_MSR_VP_INDEX) */
+unsigned int __read_mostly hv_max_vp_index; /* highest VP index observed across all CPUs */
static bool __read_mostly hcall_page_ready; /* NOTE(review): presumably set once the hypercall page is mapped — confirm elsewhere in file */
static uint64_t generate_guest_id(void)
rdmsrl(HV_X64_MSR_VP_INDEX, vp_index_msr);
this_cpu(hv_vp_index) = vp_index_msr;
+ if ( vp_index_msr > hv_max_vp_index )
+ hv_max_vp_index = vp_index_msr;
+
return 0;
}
DECLARE_PER_CPU(void *, hv_input_page); /* per-CPU hypercall input page */
DECLARE_PER_CPU(void *, hv_vp_assist); /* per-CPU VP assist page pointer */
DECLARE_PER_CPU(unsigned int, hv_vp_index); /* per-CPU Hyper-V VP index */
+extern unsigned int hv_max_vp_index; /* highest VP index observed; maintained during per-CPU setup */
static inline unsigned int hv_vp_index(unsigned int cpu)
{
{
unsigned int vpid = hv_vp_index(cpu);
- if ( vpid >= ms_hyperv.max_vp_index )
+ if ( vpid > hv_max_vp_index )
{
local_irq_restore(irq_flags);
return -ENXIO;
{
int nr = 1;
unsigned int cpu, vcpu_bank, vcpu_offset;
- unsigned int max_banks = ms_hyperv.max_vp_index / 64;
+ unsigned int max_banks = hv_max_vp_index / 64;
/* Up to 64 banks can be represented by valid_bank_mask */
if ( max_banks > 64 )