ia64/xen-unstable
changeset 2207:1ee88bcf9566
bitkeeper revision 1.1159.25.1 (411b92dbFatpuCFS9px_DggrIbxQsg)
Extra barriers in async shared-memory comms code.
| author | kaf24@scramble.cl.cam.ac.uk |
|---|---|
| date | Thu Aug 12 15:55:07 2004 +0000 (2004-08-12) |
| parents | be6e22d4e208 |
| children | 0a0e66e9edc2 |
| files | linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c, linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c, linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c, linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c, linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c, linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c, tools/python/xen/lowlevel/xu/xu.c |
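All seven diffs below apply the same two-sided ring discipline: the producer must make a message body globally visible (wmb()) before it advances the shared producer index, and the consumer must snapshot the producer index and issue rmb() before it dereferences any slot up to that snapshot. A minimal sketch of the pattern, with illustrative names only (`struct ring`, `produce`, `consume` are not the actual Xen types) and compiler-only barriers standing in for the arch-specific ones:

```c
/* Sketch of the ring discipline this changeset enforces; names are
 * illustrative, and real kernel code uses the architecture's rmb()/wmb()
 * rather than these compiler-only stand-ins. */
#define RING_SIZE 64                      /* must be a power of two */
#define MASK(idx) ((idx) & (RING_SIZE - 1))
#define wmb() __asm__ __volatile__ ( "" : : : "memory" )
#define rmb() __asm__ __volatile__ ( "" : : : "memory" )

struct request { int op; };

struct ring {
    volatile unsigned int req_prod;       /* advanced by the producer */
    volatile unsigned int req_cons;       /* advanced by the consumer */
    struct request slots[RING_SIZE];      /* shared payload slots     */
};

/* Producer: fill the slot first, then publish the new index. */
static void produce(struct ring *r, struct request req)
{
    r->slots[MASK(r->req_prod)] = req;
    wmb();  /* slot contents must be visible before the index update */
    r->req_prod++;
}

/* Consumer: snapshot the index, then order slot reads after it. */
static void consume(struct ring *r, void (*handle)(struct request *))
{
    unsigned int i, rp = r->req_prod;
    rmb();  /* do not read slots beyond what 'rp' has published */
    for ( i = r->req_cons; i != rp; i++ )
        handle(&r->slots[MASK(i)]);
    r->req_cons = i;
}
```

On x86, stores are not reordered with other stores, so wmb() can collapse to a pure compiler barrier; that is exactly how the xu.c hunk at the end of this changeset defines it.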
line diff
```diff
--- a/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c  Thu Aug 12 09:26:13 2004 +0000
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/blkif/frontend/main.c  Thu Aug 12 15:55:07 2004 +0000
@@ -16,8 +16,6 @@
 #include <scsi/scsi.h>
 #include <asm/ctrl_if.h>
 
-
-
 typedef unsigned char byte; /* from linux/ide.h */
 
 #define BLKIF_STATE_CLOSED 0
@@ -95,6 +93,7 @@ static inline void translate_req_to_mfn(
 static inline void flush_requests(void)
 {
     DISABLE_SCATTERGATHER();
+    wmb(); /* Ensure that the frontend can see the requests. */
     blk_ring->req_prod = req_prod;
     notify_via_evtchn(blkif_evtchn);
 }
@@ -533,7 +532,7 @@ static void kick_pending_request_queues(
 
 static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
 {
-    BLKIF_RING_IDX i;
+    BLKIF_RING_IDX i, rp;
     unsigned long flags;
     struct buffer_head *bh, *next_bh;
 
@@ -541,13 +540,14 @@ static void blkif_int(int irq, void *dev
 
     if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
     {
-        printk("Bailed out\n");
-
         spin_unlock_irqrestore(&io_request_lock, flags);
         return;
     }
 
-    for ( i = resp_cons; i != blk_ring->resp_prod; i++ )
+    rp = blk_ring->resp_prod;
+    rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+    for ( i = resp_cons; i != rp; i++ )
     {
         blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
         switch ( bret->operation )
```
```diff
--- a/linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c  Thu Aug 12 09:26:13 2004 +0000
+++ b/linux-2.6.7-xen-sparse/arch/xen/kernel/ctrl_if.c  Thu Aug 12 15:55:07 2004 +0000
@@ -93,8 +93,12 @@ static void __ctrl_if_tx_tasklet(unsigne
     control_if_t *ctrl_if = get_ctrl_if();
     ctrl_msg_t *msg;
     int was_full = TX_FULL(ctrl_if);
+    CONTROL_RING_IDX rp;
 
-    while ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
+    rp = ctrl_if->tx_resp_prod;
+    rmb(); /* Ensure we see all requests up to 'rp'. */
+
+    while ( ctrl_if_tx_resp_cons != rp )
     {
         msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
 
@@ -132,8 +136,12 @@ static void __ctrl_if_tx_tasklet(unsigne
 static void __ctrl_if_rxmsg_deferred(void *unused)
 {
     ctrl_msg_t *msg;
+    CONTROL_RING_IDX dp;
 
-    while ( ctrl_if_rxmsg_deferred_cons != ctrl_if_rxmsg_deferred_prod )
+    dp = ctrl_if_rxmsg_deferred_prod;
+    rmb(); /* Ensure we see all deferred requests up to 'dp'. */
+
+    while ( ctrl_if_rxmsg_deferred_cons != dp )
     {
         msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
             ctrl_if_rxmsg_deferred_cons++)];
@@ -145,8 +153,13 @@ static void __ctrl_if_rx_tasklet(unsigne
 {
     control_if_t *ctrl_if = get_ctrl_if();
     ctrl_msg_t msg, *pmsg;
+    CONTROL_RING_IDX rp, dp;
 
-    while ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
+    dp = ctrl_if_rxmsg_deferred_prod;
+    rp = ctrl_if->rx_req_prod;
+    rmb(); /* Ensure we see all requests up to 'rp'. */
+
+    while ( ctrl_if_rx_req_cons != rp )
     {
         pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
         memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
@@ -161,20 +174,21 @@ static void __ctrl_if_rx_tasklet(unsigne
 
         if ( test_bit(msg.type,
                       (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
-        {
-            pmsg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
-                ctrl_if_rxmsg_deferred_prod++)];
-            memcpy(pmsg, &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+            memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
+                   &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+        else
+            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
+    }
+
+    if ( dp != ctrl_if_rxmsg_deferred_prod )
+    {
+        wmb();
+        ctrl_if_rxmsg_deferred_prod = dp;
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-            schedule_task(&ctrl_if_rxmsg_deferred_tq);
+        schedule_task(&ctrl_if_rxmsg_deferred_tq);
 #else
-            schedule_work(&ctrl_if_rxmsg_deferred_work);
+        schedule_work(&ctrl_if_rxmsg_deferred_work);
 #endif
-        }
-        else
-        {
-            (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
-        }
     }
 }
 
```
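The __ctrl_if_rx_tasklet hunk above also shows the producer half in batched form: deferred messages are accumulated through the local index `dp`, and `ctrl_if_rxmsg_deferred_prod` is advanced once at the end, behind a wmb(). A condensed sketch of that batch-then-publish step, reusing the illustrative MASK()/wmb() definitions from the sketch above (again, not the real ctrl_if structures):

```c
struct msg { int type; };   /* illustrative payload type */

/* Batch-then-publish: copy every deferred message through a local
 * index, then advance the shared producer index exactly once. */
static void publish_deferred(struct msg *slots, volatile unsigned int *prod,
                             const struct msg *batch, unsigned int n)
{
    unsigned int k, dp = *prod;

    for ( k = 0; k < n; k++ )
        slots[MASK(dp++)] = batch[k];

    if ( dp != *prod )
    {
        wmb();       /* message bodies visible before the index */
        *prod = dp;  /* single shared write covers all n messages */
    }
}
```

Publishing once per batch instead of once per message means the consumer side is woken at most once, and only ever observes a fully written batch.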
```diff
--- a/linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c  Thu Aug 12 09:26:13 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c  Thu Aug 12 15:55:07 2004 +0000
@@ -268,13 +268,15 @@ static int do_block_io_op(blkif_t *blkif
 {
     blkif_ring_t *blk_ring = blkif->blk_ring_base;
     blkif_request_t *req;
-    BLKIF_RING_IDX i;
+    BLKIF_RING_IDX i, rp;
     int more_to_do = 0;
 
+    rp = blk_ring->req_prod;
+    rmb(); /* Ensure we see queued requests up to 'rp'. */
+
     /* Take items off the comms ring, taking care not to overflow. */
     for ( i = blkif->blk_req_cons;
-          (i != blk_ring->req_prod) && ((i-blkif->blk_resp_prod) !=
-                                        BLKIF_RING_SIZE);
+          (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
           i++ )
     {
         if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
@@ -533,7 +535,7 @@ static void make_response(blkif_t *blkif
     resp->id = id;
     resp->operation = op;
     resp->status = st;
-    wmb();
+    wmb(); /* Ensure other side can see the response fields. */
     blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
     spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 
```
```diff
--- a/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c  Thu Aug 12 09:26:13 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c  Thu Aug 12 15:55:07 2004 +0000
@@ -82,6 +82,7 @@ static inline void translate_req_to_mfn(
 
 static inline void flush_requests(void)
 {
+    wmb(); /* Ensure that the frontend can see the requests. */
     blk_ring->req_prod = req_prod;
     notify_via_evtchn(blkif_evtchn);
 }
@@ -363,34 +364,39 @@ static irqreturn_t blkif_int(int irq, vo
 {
     struct request *req;
     blkif_response_t *bret;
-    BLKIF_RING_IDX i;
+    BLKIF_RING_IDX i, rp;
     unsigned long flags;
 
     spin_lock_irqsave(&blkif_io_lock, flags);
 
-    if (unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery)) {
-        printk("Bailed out\n");
-
+    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) ||
+         unlikely(recovery) )
+    {
         spin_unlock_irqrestore(&blkif_io_lock, flags);
         return IRQ_HANDLED;
     }
 
-    for (i = resp_cons; i != blk_ring->resp_prod; i++) {
+    rp = blk_ring->resp_prod;
+    rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+    for ( i = resp_cons; i != rp; i++ )
+    {
         bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
-        switch (bret->operation) {
+        switch ( bret->operation )
+        {
         case BLKIF_OP_READ:
         case BLKIF_OP_WRITE:
-            if (unlikely(bret->status != BLKIF_RSP_OKAY))
+            if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
                 DPRINTK("Bad return from blkdev data request: %lx\n",
                         bret->status);
             req = (struct request *)bret->id;
-            /* XXXcl pass up status */
-            if (unlikely(end_that_request_first(req, 1,
-                                                req->hard_nr_sectors)))
+            if ( unlikely(end_that_request_first
+                          (req,
+                           (bret->status != BLKIF_RSP_OKAY),
+                           req->hard_nr_sectors)) )
                 BUG();
-
             end_that_request_last(req);
-            blkif_completion( bret, req );
+            blkif_completion(bret, req);
             break;
         case BLKIF_OP_PROBE:
             memcpy(&blkif_control_rsp, bret, sizeof(*bret));
@@ -404,8 +410,9 @@ static irqreturn_t blkif_int(int irq, vo
     resp_cons = i;
     resp_cons_rec = i;
 
-    if (xlbd_blk_queue &&
-        test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags)) {
+    if ( (xlbd_blk_queue != NULL) &&
+         test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
+    {
         blk_start_queue(xlbd_blk_queue);
         /* XXXcl call to request_fn should not be needed but
          * we get stuck without... needs investigating
```
```diff
--- a/linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c  Thu Aug 12 09:26:13 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c  Thu Aug 12 15:55:07 2004 +0000
@@ -446,6 +446,7 @@ static void net_tx_action(unsigned long
             netif_put(netif);
             continue;
         }
+        rmb(); /* Ensure that we see the request. */
         memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
                sizeof(txreq));
         netif->tx_req_cons++;
```
```diff
--- a/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c  Thu Aug 12 09:26:13 2004 +0000
+++ b/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c  Thu Aug 12 15:55:07 2004 +0000
@@ -118,10 +118,8 @@ static void netctrl_init(void)
  */
 static int netctrl_err(int err)
 {
-    if(err < 0 && !netctrl.err){
+    if ( (err < 0) && !netctrl.err )
         netctrl.err = err;
-        printk(KERN_WARNING "%s> err=%d\n", __FUNCTION__, err);
-    }
     return netctrl.err;
 }
 
@@ -177,7 +175,6 @@ static int network_open(struct net_devic
     return 0;
 }
 
-
 static void network_tx_buf_gc(struct net_device *dev)
 {
     NETIF_RING_IDX i, prod;
@@ -190,6 +187,7 @@ static void network_tx_buf_gc(struct net
 
     do {
         prod = np->tx->resp_prod;
+        rmb(); /* Ensure we see responses up to 'rp'. */
 
         for ( i = np->tx_resp_cons; i != prod; i++ )
         {
@@ -295,6 +293,7 @@ static void network_alloc_rx_buffers(str
     if ( rx_mcl[nr_pfns].args[5] != nr_pfns )
         panic("Unable to reduce memory reservation\n");
 
+    /* Above is a suitable barrier to ensure backend will see requests. */
     np->rx->req_prod = i;
 }
 
@@ -344,7 +343,7 @@ static int network_start_xmit(struct sk_
     tx->addr = virt_to_machine(skb->data);
     tx->size = skb->len;
 
-    wmb();
+    wmb(); /* Ensure that backend will see the request. */
     np->tx->req_prod = i + 1;
 
     network_tx_buf_gc(dev);
@@ -392,7 +391,7 @@ static int netif_poll(struct net_device
     struct net_private *np = dev->priv;
     struct sk_buff *skb;
     netif_rx_response_t *rx;
-    NETIF_RING_IDX i;
+    NETIF_RING_IDX i, rp;
     mmu_update_t *mmu = rx_mmu;
     multicall_entry_t *mcl = rx_mcl;
     int work_done, budget, more_to_do = 1;
@@ -412,8 +411,11 @@ static int netif_poll(struct net_device
     if ( (budget = *pbudget) > dev->quota )
         budget = dev->quota;
 
+    rp = np->rx->resp_prod;
+    rmb(); /* Ensure we see queued responses up to 'rp'. */
+
     for ( i = np->rx_resp_cons, work_done = 0;
-          (i != np->rx->resp_prod) && (work_done < budget);
+          (i != rp) && (work_done < budget);
           i++, work_done++ )
     {
         rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
@@ -904,9 +906,8 @@ void netif_suspend(void)
 
 void netif_resume(void)
 {
-    ctrl_msg_t                   cmsg;
-    netif_fe_interface_connect_t up;
-//  netif_fe_driver_status_changed_t st;
+    ctrl_msg_t cmsg;
+    netif_fe_interface_connect_t up;
     struct net_device *dev = NULL;
     struct net_private *np = NULL;
     int i;
```
```diff
--- a/tools/python/xen/lowlevel/xu/xu.c  Thu Aug 12 09:26:13 2004 +0000
+++ b/tools/python/xen/lowlevel/xu/xu.c  Thu Aug 12 15:55:07 2004 +0000
@@ -49,6 +49,13 @@
 /* Size of a machine page frame. */
 #define PAGE_SIZE 4096
 
+#if defined(__i386__)
+#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
+#define wmb() __asm__ __volatile__ ( "" : : : "memory" )
+#else
+#error "Define barriers"
+#endif
+
 
 /*
  * *********************** NOTIFIER ***********************
@@ -710,6 +717,9 @@ static PyObject *xu_port_read_request(Py
         return NULL;
     }
 
+    /* Need to ensure we see the request, despite seeing the index update.*/
+    rmb();
+
     cmsg = &cif->tx_ring[MASK_CONTROL_IDX(c)];
     xum = PyObject_New(xu_message_object, &xu_message_type);
     memcpy(&xum->msg, cmsg, sizeof(*cmsg));
@@ -745,6 +755,7 @@ static PyObject *xu_port_write_request(P
     cmsg = &cif->rx_ring[MASK_CONTROL_IDX(p)];
     memcpy(cmsg, &xum->msg, sizeof(*cmsg));
 
+    wmb();
     xup->rx_req_prod = cif->rx_req_prod = p + 1;
 
     Py_INCREF(Py_None);
@@ -768,6 +779,9 @@ static PyObject *xu_port_read_response(P
         return NULL;
     }
 
+    /* Need to ensure we see the response, despite seeing the index update.*/
+    rmb();
+
     cmsg = &cif->rx_ring[MASK_CONTROL_IDX(c)];
     xum = PyObject_New(xu_message_object, &xu_message_type);
     memcpy(&xum->msg, cmsg, sizeof(*cmsg));
@@ -803,6 +817,7 @@ static PyObject *xu_port_write_response(
     cmsg = &cif->tx_ring[MASK_CONTROL_IDX(p)];
     memcpy(cmsg, &xum->msg, sizeof(*cmsg));
 
+    wmb();
     xup->tx_resp_prod = cif->tx_resp_prod = p + 1;
 
     Py_INCREF(Py_None);
```
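One portability note on the xu.c hunk: the user-space barrier macros are defined only for __i386__, and the build fails elsewhere via #error. A hedged sketch of how the same block might be extended for x86_64 — an assumption of mine, not part of this changeset — relying on the fact that x86_64 architecturally guarantees the lfence/sfence instructions:

```c
#if defined(__i386__)
#define rmb() __asm__ __volatile__ ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define wmb() __asm__ __volatile__ ( "" : : : "memory" )
#elif defined(__x86_64__)
/* Hypothetical extension: SSE2 fences are always available on x86_64. */
#define rmb() __asm__ __volatile__ ( "lfence" : : : "memory" )
#define wmb() __asm__ __volatile__ ( "sfence" : : : "memory" )
#else
#error "Define barriers"
#endif
```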