xenbits.xensource.com Git - xen.git/commitdiff
xen/vm-event: Fix interactions with the vcpu list
author Andrew Cooper <andrew.cooper3@citrix.com>
Fri, 31 May 2019 19:29:27 +0000 (12:29 -0700)
committer Andrew Cooper <andrew.cooper3@citrix.com>
Tue, 4 Jun 2019 13:43:51 +0000 (14:43 +0100)
vm_event_resume() should use domain_vcpu(), rather than opencoding it
without its Spectre v1 safety.
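
For reference, domain_vcpu() is the existing helper (declared in
xen/include/xen/sched.h) which both bounds-checks the vcpu id and clamps
the array index under speculation via array_index_nospec().  A minimal
sketch of its shape, not a verbatim copy of the tree:

    /* Bounds-check vcpu_id and clamp the index against Spectre v1. */
    static inline struct vcpu *domain_vcpu(const struct domain *d,
                                           unsigned int vcpu_id)
    {
        unsigned int idx = array_index_nospec(vcpu_id, d->max_vcpus);

        return vcpu_id >= d->max_vcpus ? NULL : d->vcpu[idx];
    }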

vm_event_wake_blocked() can't ever be invoked in a case where d->vcpu is
NULL, so drop the outer if() and reindent, fixing up style issues.

The comment, which is left alone, is false.  This algorithm still has
starvation issues when there is an asymmetric rate of generated events.

However, the existing logic is sufficiently complicated and fragile that
I don't think I've followed it fully, and because we're trying to
obsolete this interface, the safest course of action is to leave it
alone rather than risk making things subtly different.

Therefore, there is no practical change that callers would notice.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index dcba98cef784fcdf4bb4f3e2054e905465ae2bf5..72f42b408ac72e5670c4b1619d85424585f48f1c 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -119,34 +119,29 @@ static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
 static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved)
 {
     struct vcpu *v;
-    unsigned int avail_req = vm_event_ring_available(ved);
+    unsigned int i, j, k, avail_req = vm_event_ring_available(ved);
 
     if ( avail_req == 0 || ved->blocked == 0 )
         return;
 
     /* We remember which vcpu last woke up to avoid scanning always linearly
      * from zero and starving higher-numbered vcpus under high load */
-    if ( d->vcpu )
+    for ( i = ved->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++ )
     {
-        int i, j, k;
-
-        for (i = ved->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++)
-        {
-            k = i % d->max_vcpus;
-            v = d->vcpu[k];
-            if ( !v )
-                continue;
+        k = i % d->max_vcpus;
+        v = d->vcpu[k];
+        if ( !v )
+            continue;
 
-            if ( !(ved->blocked) || avail_req == 0 )
-               break;
+        if ( !ved->blocked || avail_req == 0 )
+            break;
 
-            if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
-            {
-                vcpu_unpause(v);
-                avail_req--;
-                ved->blocked--;
-                ved->last_vcpu_wake_up = k;
-            }
+        if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
+        {
+            vcpu_unpause(v);
+            avail_req--;
+            ved->blocked--;
+            ved->last_vcpu_wake_up = k;
         }
     }
 }
@@ -382,11 +377,10 @@ static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
         }
 
         /* Validate the vcpu_id in the response. */
-        if ( (rsp.vcpu_id >= d->max_vcpus) || !d->vcpu[rsp.vcpu_id] )
+        v = domain_vcpu(d, rsp.vcpu_id);
+        if ( !v )
             continue;
 
-        v = d->vcpu[rsp.vcpu_id];
-
         /*
          * In some cases the response type needs extra handling, so here
          * we call the appropriate handlers.
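
For context on the Spectre v1 aspect: the open-coded
"rsp.vcpu_id >= d->max_vcpus" check can be bypassed under speculative
execution, letting d->vcpu[rsp.vcpu_id] be read out of bounds before the
mispredicted branch is unwound.  domain_vcpu() avoids that by clamping
the index with array_index_nospec() before the array access.  A
simplified, self-contained sketch of the clamping idiom, modelled on the
generic nospec helpers rather than Xen's exact implementation:

    /*
     * Illustrative only: return 'index' when index < size, and 0
     * otherwise, computed arithmetically so there is no branch for the
     * CPU to mispredict.  Xen's real helper is array_index_nospec() in
     * xen/include/xen/nospec.h.
     */
    static inline unsigned long example_index_nospec(unsigned long index,
                                                     unsigned long size)
    {
        /* All-ones when index < size, all-zeroes when index >= size. */
        unsigned long mask =
            ~(long)(index | (size - 1 - index)) >> (sizeof(long) * 8 - 1);

        return index & mask;
    }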