xenbits.xensource.com Git - people/andrewcoop/qemu-traditional.git/commitdiff
HVM: atomically access pointers in bufioreq handling
author: Ian Jackson <ian.jackson@eu.citrix.com>
Fri, 28 Aug 2015 14:53:40 +0000 (15:53 +0100)
committer: Ian Jackson <Ian.Jackson@eu.citrix.com>
Fri, 28 Aug 2015 14:53:40 +0000 (15:53 +0100)
The number of slots per page being 511 (i.e. not a power of two) means
that the (32-bit) read and write indexes going beyond 2^32 will likely
disturb operation. The hypervisor side gets I/O req server creation
extended so we can indicate that we're using suitable atomic accesses
where needed, allowing it to atomically canonicalize both pointers when
both have gone through at least one cycle.

The Xen side counterpart (which is not a functional prereq to this
change, albeit the intention is for Xen to assume default servers
always use suitable atomic accesses) went in already (commit
b7007bc6f9).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
---
v2: Adjust description.

i386-dm/helper2.c

index 63a938bd82063e1b66a214ee9388c56d97971dd4..ede3105e5f183249b9bd37d290afd157f4eed764 100644 (file)
@@ -493,10 +493,19 @@ static int __handle_buffered_iopage(CPUState *env)
 
     memset(&req, 0x00, sizeof(req));
 
-    while (buffered_io_page->read_pointer !=
-           buffered_io_page->write_pointer) {
-        buf_req = &buffered_io_page->buf_ioreq[
-            buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
+    for (;;) {
+        uint32_t rdptr = buffered_io_page->read_pointer, wrptr;
+
+        xen_rmb();
+        wrptr = buffered_io_page->write_pointer;
+        xen_rmb();
+        if (rdptr != buffered_io_page->read_pointer) {
+            continue;
+        }
+        if (rdptr == wrptr) {
+            break;
+        }
+        buf_req = &buffered_io_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
         req.size = 1UL << buf_req->size;
         req.count = 1;
         req.addr = buf_req->addr;
@@ -508,15 +517,14 @@ static int __handle_buffered_iopage(CPUState *env)
         req.data_is_ptr = 0;
         qw = (req.size == 8);
         if (qw) {
-            buf_req = &buffered_io_page->buf_ioreq[
-                (buffered_io_page->read_pointer+1) % IOREQ_BUFFER_SLOT_NUM];
+            buf_req = &buffered_io_page->buf_ioreq[(rdptr + 1) %
+                                                   IOREQ_BUFFER_SLOT_NUM];
             req.data |= ((uint64_t)buf_req->data) << 32;
         }
 
         __handle_ioreq(env, &req);
 
-        xen_mb();
-        buffered_io_page->read_pointer += qw ? 2 : 1;
+        __sync_fetch_and_add(&buffered_io_page->read_pointer, qw + 1);
     }
 
     return req.count;