if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
return -EINVAL;
+ /*
+ * Make sure the guest-controlled value virq is bounded even during
+ * speculative execution.
+ */
+ virq = array_index_nospec(virq, ARRAY_SIZE(v->virq_to_evtchn));
+
if ( virq_is_global(virq) && (vcpu != 0) )
return -EINVAL;
- if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
- ((v = d->vcpu[vcpu]) == NULL) )
+ if ( (v = domain_vcpu(d, vcpu)) == NULL )
return -ENOENT;
spin_lock(&d->event_lock);
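For reference, array_index_nospec(index, size) evaluates to index when index < size and to 0 otherwise, and it does so without a conditional branch, so the result holds even on a speculatively executed path. A minimal sketch of the generic mask-based fallback, modeled on Linux's include/linux/nospec.h (which Xen's xen/include/xen/nospec.h mirrors); the real macro avoids double evaluation via a statement expression, and BITS_PER_LONG is Xen's word-size constant:

    /* All-ones mask when index < size, all-zeroes otherwise -- no branch. */
    static inline unsigned long array_index_mask_nospec(unsigned long index,
                                                        unsigned long size)
    {
        /*
         * If index < size, both index and (size - 1 - index) have a clear
         * top bit, so the negated OR arithmetic-shifts to all ones.  If
         * index >= size, the subtraction wraps and sets the top bit,
         * yielding an all-zeroes mask.
         */
        return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
    }

    /* Simplified sketch: clamp index to [0, size) even under speculation. */
    #define array_index_nospec(index, size) \
        ((typeof(index))((index) & array_index_mask_nospec((index), (size))))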
int port, vcpu = bind->vcpu;
long rc = 0;
- if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
- (d->vcpu[vcpu] == NULL) )
+ if ( domain_vcpu(d, vcpu) == NULL )
return -ENOENT;
spin_lock(&d->event_lock);
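The domain_vcpu() helper centralizes this pattern: it clamps the guest-supplied id before indexing d->vcpu[] and returns NULL for out-of-range ids. Roughly, following its definition in xen/include/xen/sched.h:

    static inline struct vcpu *domain_vcpu(const struct domain *d,
                                           unsigned int vcpu_id)
    {
        /* Clamp first, so even a mispredicted path stays within d->vcpu[]. */
        unsigned int idx = array_index_nospec(vcpu_id, d->max_vcpus);

        return vcpu_id >= d->max_vcpus ? NULL : d->vcpu[idx];
    }

Since the helper takes an unsigned id, the old (vcpu < 0) test folds into the single upper-bound comparison.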
struct domain *d = current->domain;
struct evtchn *chn;
long rc = 0;
+ struct vcpu *v;
- if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
+ /* Use the vcpu info to prevent speculative out-of-bounds accesses */
+ if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
return -ENOENT;
spin_lock(&d->event_lock);
{
case ECS_VIRQ:
if ( virq_is_global(chn->u.virq) )
- chn->notify_vcpu_id = vcpu_id;
+ chn->notify_vcpu_id = v->vcpu_id;
else
rc = -EINVAL;
break;
case ECS_UNBOUND:
case ECS_INTERDOMAIN:
- chn->notify_vcpu_id = vcpu_id;
+ chn->notify_vcpu_id = v->vcpu_id;
break;
case ECS_PIRQ:
- if ( chn->notify_vcpu_id == vcpu_id )
+ if ( chn->notify_vcpu_id == v->vcpu_id )
break;
unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]);
- chn->notify_vcpu_id = vcpu_id;
+ chn->notify_vcpu_id = v->vcpu_id;
pirq_set_affinity(d, chn->u.pirq.irq,
- cpumask_of(d->vcpu[vcpu_id]->processor));
- link_pirq_port(port, chn, d->vcpu[vcpu_id]);
+ cpumask_of(v->processor));
+ link_pirq_port(port, chn, v);
break;
default:
rc = -EINVAL;
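Note the pattern throughout the switch: once the bounded lookup has produced v, every subsequent use reads v->vcpu_id instead of the raw, guest-supplied vcpu_id, so the unsanitized value never reaches an array subscript (or a cpumask lookup) again.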
*/
smp_rmb();
- p = port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
+ /* Clamp port itself: num_evtchns counts event words, not pages, so
+ * it is no valid bound for the page index p. */
+ p = array_index_nospec(port, d->evtchn_fifo->num_evtchns) /
+ EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
w = port % EVTCHN_FIFO_EVENT_WORDS_PER_PAGE;
return d->evtchn_fifo->event_array[p] + w;
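Clamping port itself, rather than the derived page index, matters because num_evtchns counts event words while event_array only holds page pointers. Assuming the usual FIFO ABI constants (4 KiB pages and 32-bit event words, so EVTCHN_FIFO_EVENT_WORDS_PER_PAGE == 1024, with at most 1 << 17 channels and therefore 128 event-array pages), the arithmetic works out as:

    /* Worked example with num_evtchns == 131072, its maximum value: */
    port = array_index_nospec(port, 131072); /* port <= 131071, even speculatively */
    p = port / 1024;                         /* p <= 127: within event_array */
    w = port % 1024;                         /* w <= 1023: within the page   */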
gfn = init_control->control_gfn;
offset = init_control->offset;
- if ( vcpu_id >= d->max_vcpus || !d->vcpu[vcpu_id] )
+ if ( (v = domain_vcpu(d, vcpu_id)) == NULL )
return -ENOENT;
- v = d->vcpu[vcpu_id];
/* Must not cross page boundary. */
if ( offset > (PAGE_SIZE - sizeof(evtchn_fifo_control_block_t)) )
return -EINVAL;
+ /*
+ * Make sure the guest-controlled value offset is bounded even during
+ * speculative execution. The bound is one past the largest valid
+ * offset, since array_index_nospec() treats its size argument as an
+ * exclusive limit.
+ */
+ offset = array_index_nospec(offset,
+ PAGE_SIZE - sizeof(evtchn_fifo_control_block_t) + 1);
+
/* Must be 8-bytes aligned. */
if ( offset & (8 - 1) )
return -EINVAL;
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/bitops.h>
+#include <xen/nospec.h>
#include <asm/event.h>
/*
* The first bucket is directly accessed via d->evtchn.
*/
#define group_from_port(d, p) \
- ((d)->evtchn_group[(p) / EVTCHNS_PER_GROUP])
+ array_access_nospec((d)->evtchn_group, (p) / EVTCHNS_PER_GROUP)
#define bucket_from_port(d, p) \
((group_from_port(d, p))[((p) % EVTCHNS_PER_GROUP) / EVTCHNS_PER_BUCKET])
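array_access_nospec() bundles the clamp and the subscript for arrays whose size is known at compile time; roughly, per xen/include/xen/nospec.h:

    #define array_access_nospec(array, index) \
        ((array)[array_index_nospec((index), ARRAY_SIZE(array))])

That works for the fixed-size evtchn_group[] member; the dynamically allocated d->vcpu[] has no ARRAY_SIZE, which is why the vcpu lookups above go through domain_vcpu() and d->max_vcpus instead.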
static inline struct evtchn *evtchn_from_port(struct domain *d, unsigned int p)
{
if ( p < EVTCHNS_PER_BUCKET )
- return &d->evtchn[p];
+ return &d->evtchn[array_index_nospec(p, EVTCHNS_PER_BUCKET)];
return bucket_from_port(d, p) + (p % EVTCHNS_PER_BUCKET);
}
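The explicit clamp inside the p < EVTCHNS_PER_BUCKET fast path is not redundant with the branch above it: the branch itself can be mispredicted, so the array access must stay in bounds independently of which way the comparison actually resolved. That is the rationale behind every array_index_nospec() addition in this patch: the architectural range checks already exist; the clamps only constrain what a speculatively executing CPU can touch.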