Switches of altp2m-s always expect a valid altp2m to be in place (and
indeed altp2m_vcpu_initialise() sets the active one to be at index 0).
The compiler, however, cannot know that, and hence it cannot eliminate
p2m_get_altp2m()'s case of returning (literal) NULL. If then the compiler
decides to special case that code path in the caller, the dereference in
instances of
atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
can, to the code generator, appear to be NULL dereferences, leading to
In function 'atomic_dec',
inlined from '...' at ...:
./arch/x86/include/asm/atomic.h:182:5: error: array subscript 0 is outside array bounds of 'int[0]' [-Werror=array-bounds=]
Aid the compiler by adding a BUG_ON() checking the return value of the
problematic p2m_get_altp2m(). Since, with the use of the local variable,
the 2nd p2m_get_altp2m() would look questionable at first glance
(why is the local variable not used here?), open-code the only relevant
piece of p2m_get_altp2m() there.
To avoid repeatedly doing these transformations, and also to limit how
"bad" the open-coding really is, convert the entire operation to an
inline helper, used by all three instances (and accepting the redundant
BUG_ON(idx >= MAX_ALTP2M) in two of the three cases).
Reported-by: Charles Arnold <carnold@suse.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit:
be62b1fc2aa7375d553603fca07299da765a89fe
master date: 2023-03-13 15:16:21 +0100
}
}
- if ( idx != vcpu_altp2m(v).p2midx )
- {
- BUG_ON(idx >= MAX_ALTP2M);
- atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
- vcpu_altp2m(v).p2midx = idx;
- atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
- }
+ p2m_set_altp2m(v, idx);
}
/* XXX: This looks ugly, but we need a mechanism to ensure
if ( d->arch.altp2m_eptp[idx] != mfn_x(INVALID_MFN) )
{
- if ( idx != vcpu_altp2m(v).p2midx )
- {
- atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
- vcpu_altp2m(v).p2midx = idx;
- atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
+ if ( p2m_set_altp2m(v, idx) )
altp2m_vcpu_update_p2m(v);
- }
rc = 1;
}
if ( d->arch.altp2m_visible_eptp[idx] != mfn_x(INVALID_MFN) )
{
for_each_vcpu( d, v )
- if ( idx != vcpu_altp2m(v).p2midx )
- {
- atomic_dec(&p2m_get_altp2m(v)->active_vcpus);
- vcpu_altp2m(v).p2midx = idx;
- atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
+ if ( p2m_set_altp2m(v, idx) )
altp2m_vcpu_update_p2m(v);
- }
rc = 0;
}
return v->domain->arch.altp2m_p2m[index];
}
+/* set current alternate p2m table */
+static inline bool p2m_set_altp2m(struct vcpu *v, unsigned int idx)
+{
+ struct p2m_domain *orig;
+
+ BUG_ON(idx >= MAX_ALTP2M);
+
+ if ( idx == vcpu_altp2m(v).p2midx )
+ return false;
+
+ orig = p2m_get_altp2m(v);
+ BUG_ON(!orig);
+ atomic_dec(&orig->active_vcpus);
+
+ vcpu_altp2m(v).p2midx = idx;
+ atomic_inc(&v->domain->arch.altp2m_p2m[idx]->active_vcpus);
+
+ return true;
+}
+
/* Switch alternate p2m for a single vcpu */
bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx);