ia64/xen-unstable

changeset 7119:1a82995a017c

Fix netif save/restore: carry the checksum-offload flag (tx->csum_blank) across a restore when requeueing in-flight TX packets, and restructure the TX/RX requeue loops in network_connect() to skip unused ring slots early.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Sep 28 17:51:24 2005 +0100 (2005-09-28)
parents 1643f6110469
children c7f58e86446f
files linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Sep 28 17:25:08 2005 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Wed Sep 28 17:51:24 2005 +0100
@@ -700,6 +700,7 @@ static void network_connect(struct net_d
 	struct net_private *np;
 	int i, requeue_idx;
 	netif_tx_request_t *tx;
+	struct sk_buff *skb;
 
 	np = netdev_priv(dev);
 	spin_lock_irq(&np->tx_lock);
@@ -711,7 +712,8 @@ static void network_connect(struct net_d
 	np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
 	np->rx->event = np->tx->event = 1;
 
-	/* Step 2: Rebuild the RX and TX ring contents.
+	/*
+	 * Step 2: Rebuild the RX and TX ring contents.
 	 * NB. We could just free the queued TX packets now but we hope
 	 * that sending them out might do some good.  We have to rebuild
 	 * the RX ring because some of our pages are currently flipped out
@@ -722,49 +724,52 @@ static void network_connect(struct net_d
 	 * them.
 	 */
 
-	/* Rebuild the TX buffer freelist and the TX ring itself.
+	/*
+	 * Rebuild the TX buffer freelist and the TX ring itself.
 	 * NB. This reorders packets.  We could keep more private state
 	 * to avoid this but maybe it doesn't matter so much given the
 	 * interface has been down.
 	 */
 	for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
-		if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
-			struct sk_buff *skb = np->tx_skbs[i];
+		if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
+			continue;
 
-			tx = &np->tx->ring[requeue_idx++].req;
+		skb = np->tx_skbs[i];
+
+		tx = &np->tx->ring[requeue_idx++].req;
 
-			tx->id   = i;
-			gnttab_grant_foreign_access_ref(
-				np->grant_tx_ref[i], np->backend_id, 
-				virt_to_mfn(np->tx_skbs[i]->data),
-				GNTMAP_readonly); 
-			tx->gref = np->grant_tx_ref[i];
-			tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
-			tx->size = skb->len;
+		tx->id = i;
+		gnttab_grant_foreign_access_ref(
+			np->grant_tx_ref[i], np->backend_id, 
+			virt_to_mfn(np->tx_skbs[i]->data),
+			GNTMAP_readonly); 
+		tx->gref = np->grant_tx_ref[i];
+		tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
+		tx->size = skb->len;
+		tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
 
-			np->stats.tx_bytes += skb->len;
-			np->stats.tx_packets++;
-		}
+		np->stats.tx_bytes += skb->len;
+		np->stats.tx_packets++;
 	}
 	wmb();
 	np->tx->req_prod = requeue_idx;
 
 	/* Rebuild the RX buffer freelist and the RX ring itself. */
 	for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
-		if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
-			gnttab_grant_foreign_transfer_ref(
-				np->grant_rx_ref[i], np->backend_id);
-			np->rx->ring[requeue_idx].req.gref =
-				np->grant_rx_ref[i];
-			np->rx->ring[requeue_idx].req.id = i;
-			requeue_idx++; 
-		}
+		if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
+			continue;
+		gnttab_grant_foreign_transfer_ref(
+			np->grant_rx_ref[i], np->backend_id);
+		np->rx->ring[requeue_idx].req.gref =
+			np->grant_rx_ref[i];
+		np->rx->ring[requeue_idx].req.id = i;
+		requeue_idx++; 
 	}
-
 	wmb();                
 	np->rx->req_prod = requeue_idx;
 
-	/* Step 3: All public and private state should now be sane.  Get
+	/*
+	 * Step 3: All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
 	 * domain a kick because we've probably just requeued some
 	 * packets.
@@ -798,7 +803,8 @@ static void show_device(struct net_priva
 #endif
 }
 
-/* Move the vif into connected state.
+/*
+ * Move the vif into connected state.
 * Sets the mac and event channel from the message.
 * Binds the irq to the event channel.
 */
@@ -1053,8 +1059,7 @@ static void netif_free(struct netfront_i
 	info->evtchn = 0;
 }
 
-/* Stop network device and free tx/rx queues and irq.
- */
+/* Stop network device and free tx/rx queues and irq. */
 static void shutdown_device(struct net_private *np)
 {
 	/* Stop old i/f to prevent errors whilst we rebuild the state. */
@@ -1182,10 +1187,11 @@ again:
 	return err;
 }
 
-/* Setup supplies the backend dir, virtual device.
-
-   We place an event channel and shared frame entries.
-   We watch backend to wait if it's ok. */
+/*
+ * Setup supplies the backend dir, virtual device.
+ * We place an event channel and shared frame entries.
+ * We watch backend to wait if it's ok.
+ */
 static int netfront_probe(struct xenbus_device *dev,
 			  const struct xenbus_device_id *id)
 {
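
For context, the substantive change in this patch is the new tx->csum_blank assignment; the rest is comment and control-flow cleanup. Before the fix, TX packets still queued at save time were requeued after restore without their checksum-offload marking, losing the hint that their checksums still had to be filled in. The standalone C sketch below mirrors the requeue pattern the patch settles on: skip unused ring slots early with continue, rebuild each request from the saved skb, and carry the checksum flag across. Every type, constant, and helper in it is a simplified stand-in for the real kernel/Xen definitions (struct sk_buff, netif_tx_request_t, __PAGE_OFFSET, the grant-table calls), not the actual API.

#include <stdio.h>
#include <stdint.h>

#define TX_RING_SIZE 8
#define CHECKSUM_HW  1                /* stand-in for the kernel constant */

/*
 * Stand-in for the __PAGE_OFFSET test: in the real driver, tx_skbs[]
 * entries below __PAGE_OFFSET are freelist indices, not skb pointers.
 * Here freelist links are small integers, so any valid pointer (always
 * above the unmapped first page) counts as a queued packet.
 */
#define FREELIST_LIMIT 4096UL

struct sk_buff {                      /* stand-in for struct sk_buff */
	unsigned char *data;
	unsigned int len;
	int ip_summed;
};

struct tx_request {                   /* stand-in for netif_tx_request_t */
	uint16_t id;
	uint16_t offset;
	uint16_t size;
	uint8_t csum_blank;
};

static struct sk_buff *tx_skbs[TX_RING_SIZE + 1];
static struct tx_request ring[TX_RING_SIZE];

static unsigned int requeue_tx(void)
{
	unsigned int i, requeue_idx = 0;

	for (i = 1; i <= TX_RING_SIZE; i++) {
		/* Skip slots holding freelist links rather than packets. */
		if ((unsigned long)tx_skbs[i] < FREELIST_LIMIT)
			continue;

		struct sk_buff *skb = tx_skbs[i];
		struct tx_request *tx = &ring[requeue_idx++];

		tx->id = i;
		/* The real code re-grants skb->data to the backend here. */
		tx->offset = (uintptr_t)skb->data & 0xFFF;
		tx->size = skb->len;
		/*
		 * The fix: without this line, packets queued before the
		 * save lose their checksum-offload marking on restore.
		 */
		tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
	}
	return requeue_idx;           /* becomes the new req_prod */
}

int main(void)
{
	static unsigned char payload[64];
	static struct sk_buff skb = { payload, sizeof payload, CHECKSUM_HW };

	tx_skbs[1] = (struct sk_buff *)3UL;   /* freelist link: skipped */
	tx_skbs[2] = &skb;                    /* a real queued packet */

	printf("requeued %u packet(s), csum_blank=%u\n",
	       requeue_tx(), (unsigned)ring[0].csum_blank);
	return 0;
}

The patch applies the same early-continue restructuring to the RX loop as well; the RX side only re-grants pages back to the backend, so it needed no equivalent flag.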