Dynamically allocated CPU mask objects may be smaller than cpumask_t, so
copying has to be restricted to the actual allocation size. This is
particularly important since the function doesn't bail early when tracing
is not active, so even production builds would be affected by potential
misbehavior here.
Take the opportunity and also
- use initializers instead of assignment + memset(),
- constify the cpumask_t input pointer,
- u32 -> uint32_t.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
master commit:
6fafb8befa99620a2d7323b9eca5c387bad1f59f
master date: 2019-05-13 16:41:03 +0200
spin_unlock(&vector_lock);
}
-static void trace_irq_mask(u32 event, int irq, int vector, cpumask_t *mask)
+static void trace_irq_mask(uint32_t event, int irq, int vector,
+ const cpumask_t *mask)
{
struct {
unsigned int irq:16, vec:16;
unsigned int mask[6];
- } d;
- d.irq = irq;
- d.vec = vector;
- memset(d.mask, 0, sizeof(d.mask));
- memcpy(d.mask, mask, min(sizeof(d.mask), sizeof(cpumask_t)));
+ } d = {
+ .irq = irq,
+ .vec = vector,
+ };
+
+ memcpy(d.mask, mask,
+ min(sizeof(d.mask), BITS_TO_LONGS(nr_cpu_ids) * sizeof(long)));
trace_var(event, 1, sizeof(d), &d);
}