From: Norbert Manthey Date: Tue, 26 Feb 2019 15:57:18 +0000 (+0100) Subject: evtchn: block speculative out-of-bound accesses X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=443d3ab6daee9bf77ec1cb2ea7e252fb0ce616a8;p=people%2Fpauldu%2Fxen.git evtchn: block speculative out-of-bound accesses Guests can issue event channel interaction with guest specified data. To avoid speculative out-of-bound accesses, we use the nospec macros, or the domain_vcpu function. Where appropriate, we use the vcpu_id of the selected vcpu instead of the parameter that can be influenced by the guest, so that only one access needs to be protected. This is part of the speculative hardening effort. Signed-off-by: Norbert Manthey Reviewed-by: Jan Beulich Release-acked-by: Juergen Gross --- diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c index 6fbe346490..e86e2bfab0 100644 --- a/xen/common/event_channel.c +++ b/xen/common/event_channel.c @@ -365,11 +365,16 @@ int evtchn_bind_virq(evtchn_bind_virq_t *bind, evtchn_port_t port) if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) ) return -EINVAL; + /* + * Make sure the guest controlled value virq is bounded even during + * speculative execution. 
+ */ + virq = array_index_nospec(virq, ARRAY_SIZE(v->virq_to_evtchn)); + if ( virq_is_global(virq) && (vcpu != 0) ) return -EINVAL; - if ( (vcpu < 0) || (vcpu >= d->max_vcpus) || - ((v = d->vcpu[vcpu]) == NULL) ) + if ( (v = domain_vcpu(d, vcpu)) == NULL ) return -ENOENT; spin_lock(&d->event_lock); @@ -418,8 +423,7 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) int port, vcpu = bind->vcpu; long rc = 0; - if ( (vcpu < 0) || (vcpu >= d->max_vcpus) || - (d->vcpu[vcpu] == NULL) ) + if ( domain_vcpu(d, vcpu) == NULL ) return -ENOENT; spin_lock(&d->event_lock); @@ -930,8 +934,10 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) struct domain *d = current->domain; struct evtchn *chn; long rc = 0; + struct vcpu *v; - if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) ) + /* Use the vcpu info to prevent speculative out-of-bound accesses */ + if ( (v = domain_vcpu(d, vcpu_id)) == NULL ) return -ENOENT; spin_lock(&d->event_lock); @@ -955,22 +961,22 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) { case ECS_VIRQ: if ( virq_is_global(chn->u.virq) ) - chn->notify_vcpu_id = vcpu_id; + chn->notify_vcpu_id = v->vcpu_id; else rc = -EINVAL; break; case ECS_UNBOUND: case ECS_INTERDOMAIN: - chn->notify_vcpu_id = vcpu_id; + chn->notify_vcpu_id = v->vcpu_id; break; case ECS_PIRQ: - if ( chn->notify_vcpu_id == vcpu_id ) + if ( chn->notify_vcpu_id == v->vcpu_id ) break; unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]); - chn->notify_vcpu_id = vcpu_id; + chn->notify_vcpu_id = v->vcpu_id; pirq_set_affinity(d, chn->u.pirq.irq, - cpumask_of(d->vcpu[vcpu_id]->processor)); - link_pirq_port(port, chn, d->vcpu[vcpu_id]); + cpumask_of(v->processor)); + link_pirq_port(port, chn, v); break; default: rc = -EINVAL; diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c index c49f446754..3eecab3f22 100644 --- a/xen/common/event_fifo.c +++ b/xen/common/event_fifo.c @@ -33,7 +33,8 @@ static inline event_word_t 
*evtchn_fifo_word_from_port(const struct domain *d, */ smp_rmb(); - p = port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE; + p = array_index_nospec(port / EVTCHN_FIFO_EVENT_WORDS_PER_PAGE, + d->evtchn_fifo->num_evtchns); w = port % EVTCHN_FIFO_EVENT_WORDS_PER_PAGE; return d->evtchn_fifo->event_array[p] + w; @@ -516,14 +517,20 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control) gfn = init_control->control_gfn; offset = init_control->offset; - if ( vcpu_id >= d->max_vcpus || !d->vcpu[vcpu_id] ) + if ( (v = domain_vcpu(d, vcpu_id)) == NULL ) return -ENOENT; - v = d->vcpu[vcpu_id]; /* Must not cross page boundary. */ if ( offset > (PAGE_SIZE - sizeof(evtchn_fifo_control_block_t)) ) return -EINVAL; + /* + * Make sure the guest controlled value offset is bounded even during + * speculative execution. + */ + offset = array_index_nospec(offset, + PAGE_SIZE - sizeof(evtchn_fifo_control_block_t) + 1); + /* Must be 8-bytes aligned. */ if ( offset & (8 - 1) ) return -EINVAL; diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h index 46508878bb..e91097d77e 100644 --- a/xen/include/xen/event.h +++ b/xen/include/xen/event.h @@ -13,6 +13,7 @@ #include #include #include +#include #include /* @@ -103,7 +104,7 @@ void arch_evtchn_inject(struct vcpu *v); * The first bucket is directly accessed via d->evtchn. */ #define group_from_port(d, p) \ - ((d)->evtchn_group[(p) / EVTCHNS_PER_GROUP]) + array_access_nospec((d)->evtchn_group, (p) / EVTCHNS_PER_GROUP) #define bucket_from_port(d, p) \ ((group_from_port(d, p))[((p) % EVTCHNS_PER_GROUP) / EVTCHNS_PER_BUCKET]) @@ -117,7 +118,7 @@ static inline bool_t port_is_valid(struct domain *d, unsigned int p) static inline struct evtchn *evtchn_from_port(struct domain *d, unsigned int p) { if ( p < EVTCHNS_PER_BUCKET ) - return &d->evtchn[p]; + return &d->evtchn[array_index_nospec(p, EVTCHNS_PER_BUCKET)]; return bucket_from_port(d, p) + (p % EVTCHNS_PER_BUCKET); }