xenbits.xensource.com Git - xen.git/commitdiff
x86/vlapic: don't silently accept bad vectors
author Jan Beulich <jbeulich@suse.com>
Thu, 25 Sep 2014 12:10:01 +0000 (14:10 +0200)
committer Jan Beulich <jbeulich@suse.com>
Thu, 25 Sep 2014 12:10:01 +0000 (14:10 +0200)
Vectors 0-15 are reserved, and a physical LAPIC - upon sending or
receiving one - would generate an APIC error instead of doing the
requested action. Make our emulation behave similarly.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
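
For context on what this change makes guest-visible: an IPI or injected interrupt with a vector below 16 is now dropped, the corresponding "Illegal Vector" bit is latched in the ESR, and the LVT error interrupt is raised if unmasked, matching a physical LAPIC. Below is a minimal, hedged sketch of how a guest could probe the new behaviour with a fixed-mode self-IPI. The register offsets and ESR bit values follow the standard xAPIC layout; the MMIO base constant, the apic_read/apic_write helpers, and the probe_illegal_vector function are illustrative assumptions, not part of the patch.

/*
 * Illustrative guest-side sketch (not part of the patch).  Register
 * offsets and ESR bits follow the xAPIC layout; the MMIO base and the
 * accessor helpers are assumptions for this example.
 */
#include <stdint.h>

#define APIC_BASE        0xFEE00000u   /* default xAPIC MMIO base */
#define APIC_ESR         0x280u        /* Error Status Register */
#define APIC_ICR_LOW     0x300u        /* Interrupt Command Register, low half */
#define APIC_DEST_SELF   0x40000u      /* destination shorthand: self */
#define APIC_ESR_SENDILL 0x20u         /* Send Illegal Vector (bit 5) */
#define APIC_ESR_RECVILL 0x40u         /* Receive Illegal Vector (bit 6) */

/* Assumed helpers: 32-bit MMIO accessors for the mapped APIC page. */
static inline uint32_t apic_read(uint32_t reg)
{
    return *(volatile uint32_t *)(uintptr_t)(APIC_BASE + reg);
}

static inline void apic_write(uint32_t reg, uint32_t val)
{
    *(volatile uint32_t *)(uintptr_t)(APIC_BASE + reg) = val;
}

/* Send a fixed-mode self-IPI with a reserved vector (< 16). */
static uint32_t probe_illegal_vector(void)
{
    apic_write(APIC_ICR_LOW, APIC_DEST_SELF | 0x0f);

    /* Per the SDM, write the ESR before reading it to latch its contents. */
    apic_write(APIC_ESR, 0);

    /* With this patch applied, Send Illegal Vector should now be reported. */
    return apic_read(APIC_ESR) & APIC_ESR_SENDILL;
}

Before this patch, the same sequence would have delivered the reserved vector and left the ESR clear; afterwards the IPI is suppressed and the error is reported instead.
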
xen/arch/x86/hvm/vlapic.c
xen/include/asm-x86/hvm/vlapic.h

diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 99ae1be00b5923abdccc4ff1ed61874b4ebcf851..388e656baf7c92bf73cf3b091b3038dc1ff78122 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -123,10 +123,34 @@ static int vlapic_find_highest_irr(struct vlapic *vlapic)
     return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
 }
 
+static void vlapic_error(struct vlapic *vlapic, unsigned int errmask)
+{
+    unsigned long flags;
+    uint32_t esr;
+
+    spin_lock_irqsave(&vlapic->esr_lock, flags);
+    esr = vlapic_get_reg(vlapic, APIC_ESR);
+    if ( (esr & errmask) != errmask )
+    {
+        uint32_t lvterr = vlapic_get_reg(vlapic, APIC_LVTERR);
+
+        vlapic_set_reg(vlapic, APIC_ESR, esr | errmask);
+        if ( !(lvterr & APIC_LVT_MASKED) )
+            vlapic_set_irq(vlapic, lvterr & APIC_VECTOR_MASK, 0);
+    }
+    spin_unlock_irqrestore(&vlapic->esr_lock, flags);
+}
+
 void vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
 {
     struct vcpu *target = vlapic_vcpu(vlapic);
 
+    if ( unlikely(vec < 16) )
+    {
+        vlapic_error(vlapic, APIC_ESR_RECVILL);
+        return;
+    }
+
     if ( trig )
         vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]);
 
@@ -460,7 +484,12 @@ void vlapic_ipi(
         struct vlapic *target = vlapic_lowest_prio(
             vlapic_domain(vlapic), vlapic, short_hand, dest, dest_mode);
         if ( target != NULL )
-            vlapic_accept_irq(vlapic_vcpu(target), icr_low);
+        {
+            if ( likely((icr_low & APIC_VECTOR_MASK) >= 16) )
+                vlapic_accept_irq(vlapic_vcpu(target), icr_low);
+            else
+                vlapic_error(vlapic, APIC_ESR_SENDILL);
+        }
         break;
     }
 
@@ -468,6 +497,11 @@ void vlapic_ipi(
         struct vcpu *v;
         bool_t batch = is_multicast_dest(vlapic, short_hand, dest, dest_mode);
 
+        if ( unlikely((icr_low & APIC_VECTOR_MASK) < 16) )
+        {
+            vlapic_error(vlapic, APIC_ESR_SENDILL);
+            break;
+        }
         if ( batch )
             cpu_raise_softirq_batch_begin();
         for_each_vcpu ( vlapic_domain(vlapic), v )
@@ -1404,6 +1438,8 @@ int vlapic_init(struct vcpu *v)
     if ( v->vcpu_id == 0 )
         vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;
 
+    spin_lock_init(&vlapic->esr_lock);
+
     tasklet_init(&vlapic->init_sipi.tasklet,
                  vlapic_init_sipi_action,
                  (unsigned long)v);
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index bf59b9594cd401eed77140e75673e66a537e3915..16752b51957b43e0408d02f9c992a5778d39506a 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -77,6 +77,7 @@ struct vlapic {
         bool_t               hw, regs;
         uint32_t             id, ldr;
     }                        loaded;
+    spinlock_t               esr_lock;
     struct periodic_time     pt;
     s_time_t                 timer_last_update;
     struct page_info         *regs_page;