ia64/xen-unstable
changeset 1615:f9bbf7aa1596
bitkeeper revision 1.1026.2.1 (40e1764d1ndRTs9hmUyiBLEHi5_V3A)
Fix network backend bugs. It isn't safe to use skb->cb[] for our own
purposes after all. :-(
| author | kaf24@scramble.cl.cam.ac.uk |
| --- | --- |
| date | Tue Jun 29 14:01:49 2004 +0000 (2004-06-29) |
| parents | cf2f7da0af83 |
| children | 9ba1d5f8219c 954bece440ef |
| files | linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/main.c linux-2.4.26-xen-sparse/arch/xen/drivers/netif/frontend/main.c |
line diff
--- a/linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/main.c Tue Jun 29 00:51:10 2004 +0000
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/netif/backend/main.c Tue Jun 29 14:01:49 2004 +0000
@@ -28,12 +28,6 @@ static DECLARE_TASKLET(net_tx_tasklet, n
 static void net_rx_action(unsigned long unused);
 static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);

-typedef struct {
-    u16 id;
-    unsigned long old_mach_ptr;
-    unsigned long new_mach_pfn;
-    netif_t *netif;
-} rx_info_t;
 static struct sk_buff_head rx_queue;
 static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
 static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
@@ -48,8 +42,10 @@ static unsigned long mmap_vstart;

 #define PKT_PROT_LEN (ETH_HLEN + 20)

-static u16 pending_id[MAX_PENDING_REQS];
-static netif_t *pending_netif[MAX_PENDING_REQS];
+static struct {
+    netif_tx_request_t req;
+    netif_t *netif;
+} pending_tx_info[MAX_PENDING_REQS];
 static u16 pending_ring[MAX_PENDING_REQS];
 typedef unsigned int PEND_RING_IDX;
 #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
@@ -61,11 +57,6 @@ static u16 dealloc_ring[MAX_PENDING_REQS
 static spinlock_t dealloc_lock = SPIN_LOCK_UNLOCKED;
 static PEND_RING_IDX dealloc_prod, dealloc_cons;

-typedef struct {
-    u16 idx;
-    netif_tx_request_t req;
-    netif_t *netif;
-} tx_info_t;
 static struct sk_buff_head tx_queue;
 static multicall_entry_t tx_mcl[MAX_PENDING_REQS];

@@ -127,6 +118,8 @@ int netif_be_start_xmit(struct sk_buff *
 {
     netif_t *netif = (netif_t *)dev->priv;

+    ASSERT(skb->dev == dev);
+
     /* Drop the packet if the target domain has no receive buffers. */
     if ( (netif->rx_req_cons == netif->rx->req_prod) ||
          ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
@@ -152,15 +145,14 @@ int netif_be_start_xmit(struct sk_buff *
         skb_reserve(nskb, hlen);
         __skb_put(nskb, skb->len);
         (void)skb_copy_bits(skb, -hlen, nskb->head, hlen + skb->len);
+        nskb->dev = skb->dev;
         dev_kfree_skb(skb);
         skb = nskb;
     }

-    ((rx_info_t *)&skb->cb[0])->id =
-        netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_req_cons++)].req.id;
-    ((rx_info_t *)&skb->cb[0])->netif = netif;
-
-    __skb_queue_tail(&rx_queue, skb);
+    netif->rx_req_cons++;
+
+    skb_queue_tail(&rx_queue, skb);
     tasklet_schedule(&net_rx_tasklet);

     return 0;
@@ -195,7 +187,7 @@ static void net_rx_action(unsigned long
     netif_t *netif;
     s8 status;
     u16 size, id, evtchn;
-    mmu_update_t *mmu = rx_mmu;
+    mmu_update_t *mmu;
     multicall_entry_t *mcl;
     unsigned long vdata, mdata, new_mfn;
     struct sk_buff_head rxq;
@@ -206,9 +198,10 @@ static void net_rx_action(unsigned long
     skb_queue_head_init(&rxq);

     mcl = rx_mcl;
-    while ( (skb = __skb_dequeue(&rx_queue)) != NULL )
+    mmu = rx_mmu;
+    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
     {
-        netif = ((rx_info_t *)&skb->cb[0])->netif;
+        netif = (netif_t *)skb->dev->priv;
         vdata = (unsigned long)skb->data;
         mdata = virt_to_machine(vdata);
         new_mfn = get_new_mfn();
@@ -231,11 +224,9 @@ static void net_rx_action(unsigned long
         mcl[1].args[1] = 3;
         mcl[1].args[2] = 0;

+        mcl += 2;
         mmu += 3;
-        mcl += 2;

-        ((rx_info_t *)&skb->cb[0])->old_mach_ptr = mdata;
-        ((rx_info_t *)&skb->cb[0])->new_mach_pfn = new_mfn;
         __skb_queue_tail(&rxq, skb);

         /* Filled the batch queue? */
@@ -250,14 +241,17 @@ static void net_rx_action(unsigned long
     (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);

     mcl = rx_mcl;
+    mmu = rx_mmu;
     while ( (skb = __skb_dequeue(&rxq)) != NULL )
     {
-        netif = ((rx_info_t *)&skb->cb[0])->netif;
+        netif = (netif_t *)skb->dev->priv;
         size = skb->tail - skb->data;
-        id = ((rx_info_t *)&skb->cb[0])->id;
-        new_mfn = ((rx_info_t *)&skb->cb[0])->new_mach_pfn;
-        mdata = ((rx_info_t *)&skb->cb[0])->old_mach_ptr;

+        /* Rederive the machine addresses. */
+        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
+        mdata = ((mmu[2].ptr & PAGE_MASK) |
+                 ((unsigned long)skb->data & ~PAGE_MASK));
+
         /* Check the reassignment error code. */
         if ( unlikely(mcl[1].args[5] != 0) )
         {
@@ -285,6 +279,7 @@ static void net_rx_action(unsigned long
         }

         evtchn = netif->evtchn;
+        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
         if ( make_rx_response(netif, id, status, mdata, size) &&
              (rx_notify[evtchn] == 0) )
         {
@@ -295,6 +290,7 @@ static void net_rx_action(unsigned long
         dev_kfree_skb(skb);

         mcl += 2;
+        mmu += 3;
     }

     while ( notify_nr != 0 )
@@ -406,10 +402,11 @@ static void net_tx_action(unsigned long
     {
         pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];

-        netif = pending_netif[pending_idx];
+        netif = pending_tx_info[pending_idx].netif;

         spin_lock(&netif->tx_lock);
-        make_tx_response(netif, pending_id[pending_idx], NETIF_RSP_OKAY);
+        make_tx_response(netif, pending_tx_info[pending_idx].req.id,
+                         NETIF_RSP_OKAY);
         spin_unlock(&netif->tx_lock);

         pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
@@ -512,10 +509,11 @@ static void net_tx_action(unsigned long
         mcl[0].args[2] = 0;
         mcl[0].args[3] = netif->domid;
         mcl++;
-
-        ((tx_info_t *)&skb->cb[0])->idx = pending_idx;
-        ((tx_info_t *)&skb->cb[0])->netif = netif;
-        memcpy(&((tx_info_t *)&skb->cb[0])->req, &txreq, sizeof(txreq));
+
+        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
+        pending_tx_info[pending_idx].netif = netif;
+        *((u16 *)skb->data) = pending_idx;
+
         __skb_queue_tail(&tx_queue, skb);

         pending_cons++;
@@ -533,9 +531,9 @@ static void net_tx_action(unsigned long
     mcl = tx_mcl;
     while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
     {
-        pending_idx = ((tx_info_t *)&skb->cb[0])->idx;
-        netif = ((tx_info_t *)&skb->cb[0])->netif;
-        memcpy(&txreq, &((tx_info_t *)&skb->cb[0])->req, sizeof(txreq));
+        pending_idx = *((u16 *)skb->data);
+        netif = pending_tx_info[pending_idx].netif;
+        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));

         /* Check the remap error code. */
         if ( unlikely(mcl[0].args[5] != 0) )
@@ -581,8 +579,6 @@ static void net_tx_action(unsigned long
          */
         page->mapping = (struct address_space *)netif_page_release;
         atomic_set(&page->count, 1);
-        pending_id[pending_idx] = txreq.id;
-        pending_netif[pending_idx] = netif;

         netif->stats.tx_bytes += txreq.size;
         netif->stats.tx_packets++;
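The backend hunks above all serve the change described in the commit message: the `rx_info_t`/`tx_info_t` structures that used to be overlaid on `skb->cb[]` are gone, because that control buffer cannot be treated as private storage once the skb is queued and handed to other code. Per-request transmit state now lives in the driver-owned `pending_tx_info[]` table, indexed by a pending-ring slot, and only the 16-bit slot index travels with the packet; the receive path keeps nothing at all, recovering the owning interface from `skb->dev->priv` and rederiving the machine addresses from the `rx_mcl`/`rx_mmu` entries filled in the first pass. The following is a minimal user-space sketch of that side-table pattern; the struct layouts, names, and ring initialisation here are simplified stand-ins, not the real netback definitions.

```c
/*
 * Sketch only: per-request state in a driver-owned table keyed by a
 * pending-ring slot, with just a u16 index carried in the packet data.
 * All types and helpers below are simplified stand-ins (assumptions),
 * not the actual Xen netif backend definitions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PENDING_REQS 64                     /* must be a power of two */
#define MASK_PEND_IDX(i) ((i) & (MAX_PENDING_REQS - 1))

struct tx_request { uint16_t id; uint16_t size; };  /* stand-in for netif_tx_request_t */
struct netif      { int domid; };                   /* stand-in for netif_t */

/* Side table: one slot of private state per in-flight request. */
static struct {
    struct tx_request req;
    struct netif *netif;
} pending_tx_info[MAX_PENDING_REQS];

/* Ring of free slot indices, analogous to pending_ring[]. */
static uint16_t pending_ring[MAX_PENDING_REQS];
static unsigned int pending_prod, pending_cons;

/* "Packet" that carries only the slot index in its first two bytes. */
struct packet { uint8_t data[64]; };

static void queue_packet(struct packet *pkt, struct netif *vif,
                         const struct tx_request *txreq)
{
    uint16_t idx = pending_ring[MASK_PEND_IDX(pending_cons++)];

    memcpy(&pending_tx_info[idx].req, txreq, sizeof(*txreq));
    pending_tx_info[idx].netif = vif;
    memcpy(pkt->data, &idx, sizeof(idx));       /* only the index rides along */
}

static void complete_packet(const struct packet *pkt)
{
    uint16_t idx;
    memcpy(&idx, pkt->data, sizeof(idx));

    printf("complete: dom%d, req id %u, %u bytes; slot %u freed\n",
           pending_tx_info[idx].netif->domid,
           (unsigned)pending_tx_info[idx].req.id,
           (unsigned)pending_tx_info[idx].req.size, (unsigned)idx);
    pending_ring[MASK_PEND_IDX(pending_prod++)] = idx;  /* return the slot */
}

int main(void)
{
    struct netif vif = { .domid = 3 };
    struct tx_request txreq = { .id = 7, .size = 1514 };
    struct packet pkt;
    unsigned int i;

    for ( i = 0; i < MAX_PENDING_REQS; i++ )    /* all slots start free */
        pending_ring[i] = (uint16_t)i;
    pending_prod = MAX_PENDING_REQS;

    queue_packet(&pkt, &vif, &txreq);
    complete_packet(&pkt);
    return 0;
}
```

The point of the indirection is that nothing else touching the skb while it sits on the backend's internal queues can clobber this state, which is exactly the property `skb->cb[]` failed to provide here.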
--- a/linux-2.4.26-xen-sparse/arch/xen/drivers/netif/frontend/main.c Tue Jun 29 00:51:10 2004 +0000
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/netif/frontend/main.c Tue Jun 29 14:01:49 2004 +0000
@@ -99,8 +99,6 @@ static struct net_device *find_dev_by_ha
     return NULL;
 }

-#define MULTIVIF
-
 /** Network interface info. */
 struct netif_ctrl {
     /** Number of interfaces. */
@@ -385,14 +383,8 @@ static void netif_int(int irq, void *dev
     unsigned long flags;

     spin_lock_irqsave(&np->tx_lock, flags);
-
-    if( !netif_carrier_ok(dev) )
-    {
-        spin_unlock_irqrestore(&np->tx_lock, flags);
-        return;
-    }
-
-    network_tx_buf_gc(dev);
+    if ( likely(netif_carrier_ok(dev)) )
+        network_tx_buf_gc(dev);
     spin_unlock_irqrestore(&np->tx_lock, flags);

     if ( np->rx_resp_cons != np->rx->resp_prod )
@@ -414,7 +406,7 @@ static int netif_poll(struct net_device

     spin_lock(&np->rx_lock);

-    /* if the device is undergoing recovery then don't do anything */
+    /* If the device is undergoing recovery then don't do anything. */
     if ( !netif_carrier_ok(dev) )
     {
         spin_unlock(&np->rx_lock);
@@ -721,20 +713,17 @@ static void netif_status_change(netif_fe

         memcpy(dev->dev_addr, status->mac, ETH_ALEN);

-        if(netif_carrier_ok(dev)){
+        if ( netif_carrier_ok(dev) )
             np->state = NETIF_STATE_CONNECTED;
-        } else {
+        else
             network_reconnect(dev, status);
-        }

         np->evtchn = status->evtchn;
         np->irq = bind_evtchn_to_irq(np->evtchn);
         (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM,
                           dev->name, dev);

-#ifdef MULTIVIF
         netctrl_connected_count();
-#endif
         break;

     default:
@@ -744,13 +733,13 @@ static void netif_status_change(netif_fe
     }
 }

-/** Create a network devices.
- *
+/** Create a network device.
  * @param handle device handle
  * @param val return parameter for created device
  * @return 0 on success, error code otherwise
  */
-static int create_netdev(int handle, struct net_device **val){
+static int create_netdev(int handle, struct net_device **val)
+{
     int err = 0;
     struct net_device *dev = NULL;
     struct net_private *np = NULL;
@@ -847,11 +836,7 @@ static int __init init_module(void)
 {
     ctrl_msg_t cmsg;
     netif_fe_driver_status_changed_t st;
-    int err = 0;
-#ifdef MULTIVIF
-    int wait_n = 20;
-    int wait_i;
-#endif
+    int err = 0, wait_i, wait_n = 20;

     if ( (start_info.flags & SIF_INITDOMAIN) ||
          (start_info.flags & SIF_NET_BE_DOMAIN) )
@@ -860,9 +845,8 @@ static int __init init_module(void)
     printk("Initialising Xen virtual ethernet frontend driver");

     INIT_LIST_HEAD(&dev_list);
-#ifdef MULTIVIF
+
     netctrl_init();
-#endif

     (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
                                     CALLBACK_IN_BLOCKING_CONTEXT);
@@ -876,7 +860,6 @@ static int __init init_module(void)
     memcpy(cmsg.msg, &st, sizeof(st));
     ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);

-#ifdef MULTIVIF
     /* Wait for all interfaces to be connected. */
     for ( wait_i = 0; ; wait_i++)
     {
@@ -888,7 +871,6 @@ static int __init init_module(void)
         set_current_state(TASK_INTERRUPTIBLE);
         schedule_timeout(1);
     }
-#endif

     if ( err )
         ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
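On the frontend side the substantive change is the removal of the `MULTIVIF` conditionals: `netctrl_init()`, `netctrl_connected_count()` and the start-up wait for all interfaces to connect are now always compiled in, and `netif_int()` now garbage-collects the transmit ring only while the carrier is up (i.e. the device is not mid-recovery). The sketch below is a plain user-space rendering of such a bounded "wait until everything reports connected" loop; the real loop's exit test falls between the two hunks above and is not shown, so the break condition and the semantics of `netctrl_connected()` here are assumptions made for illustration.

```c
/*
 * Sketch only: bounded polling loop in the spirit of the frontend's
 * init_module() wait.  netctrl_connected(), its return convention and
 * the break condition are assumptions for illustration; the real exit
 * test is not visible in the hunk context above.
 */
#include <stdio.h>
#include <unistd.h>

static int connected_ifs;

/* Stand-in: >0 once every expected interface has connected,
 * 0 while still waiting, <0 on a hard failure. */
static int netctrl_connected(void)
{
    connected_ifs++;            /* pretend one interface connects per poll */
    return (connected_ifs >= 2) ? 1 : 0;
}

int main(void)
{
    int err = 0, wait_i, wait_n = 20;

    for ( wait_i = 0; ; wait_i++ )
    {
        if ( ((err = netctrl_connected()) != 0) || (wait_i == wait_n) )
            break;
        usleep(10 * 1000);      /* stand-in for schedule_timeout(1) */
    }

    if ( err > 0 )
        printf("all interfaces connected after %d polls\n", wait_i);
    else
        fprintf(stderr, "gave up waiting for interfaces (err=%d)\n", err);

    return (err > 0) ? 0 : 1;
}
```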