ia64/xen-unstable

changeset 6909:8bb3f2567b8c

Clean up and re-indent netfront.c

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Sep 16 13:06:49 2005 +0000 (2005-09-16)
parents 3a7c0b00da8a
children 7fbaf67a0af5
files linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
line diff
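
The patch below drops the GRANT_DEBUG dump_packet()/GDPRINTK helpers, collapses a couple of printk()+BUG() error checks into BUG_ON(), and converts netfront.c from 4-space indentation to the kernel's tab-based style, rewrapping comments and statements to fit 80 columns and splitting assignments out of if() conditions (as with the alloc_xen_skb() call in network_alloc_rx_buffers()). A minimal, self-contained userspace sketch of that style conversion follows; struct buf, fill_old(), fill_new() and free_all() are hypothetical illustrations, not code from the driver.

#include <stdlib.h>

struct buf {
	struct buf *next;
};

/* Old style (pre-patch): 4-space indents, assignment buried in the if(). */
static struct buf *fill_old(int n)
{
    struct buf *head = NULL, *b;
    int i;

    for (i = 0; i < n; i++) {
        if ((b = calloc(1, sizeof(*b))) == NULL)
            break;
        b->next = head;
        head = b;
    }
    return head;
}

/* New style (post-patch): hard tabs, allocation split from the test. */
static struct buf *fill_new(int n)
{
	struct buf *head = NULL, *b;
	int i;

	for (i = 0; i < n; i++) {
		b = calloc(1, sizeof(*b));
		if (b == NULL)
			break;
		b->next = head;
		head = b;
	}
	return head;
}

static void free_all(struct buf *b)
{
	while (b != NULL) {
		struct buf *next = b->next;

		free(b);
		b = next;
	}
}

int main(void)
{
	free_all(fill_old(4));
	free_all(fill_new(4));
	return 0;
}
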
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 16 12:47:40 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 16 13:06:49 2005 +0000
     1.3 @@ -54,36 +54,11 @@
     1.4  #include <asm-xen/balloon.h>
     1.5  #include <asm/page.h>
     1.6  #include <asm/uaccess.h>
     1.7 -
     1.8 -#ifdef CONFIG_XEN_NETDEV_GRANT
     1.9  #include <asm-xen/xen-public/grant_table.h>
    1.10  #include <asm-xen/gnttab.h>
    1.11  
    1.12  #define GRANT_INVALID_REF	(0xFFFF)
    1.13  
    1.14 -#ifdef GRANT_DEBUG
    1.15 -static void
    1.16 -dump_packet(int tag, void *addr, u32 ap)
    1.17 -{
    1.18 -    unsigned char *p = (unsigned char *)ap;
    1.19 -    int i;
    1.20 -    
    1.21 -    printk(KERN_ALERT "#### rx_poll   %c %08x ", tag & 0xff, addr);
    1.22 -    for (i = 0; i < 20; i++) {
    1.23 -        printk("%02x", p[i]);
    1.24 -    }
    1.25 -    printk("\n");
    1.26 -}
    1.27 -
    1.28 -#define GDPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
    1.29 -                           __FILE__ , __LINE__ , ## _a )
    1.30 -#else 
    1.31 -#define dump_packet(x,y,z)  ((void)0)  
    1.32 -#define GDPRINTK(_f, _a...) ((void)0)
    1.33 -#endif
    1.34 -
    1.35 -#endif
    1.36 -
    1.37  #ifndef __GFP_NOWARN
    1.38  #define __GFP_NOWARN 0
    1.39  #endif
    1.40 @@ -116,7 +91,6 @@ dump_packet(int tag, void *addr, u32 ap)
    1.41  #define NETIF_STATE_DISCONNECTED 0
    1.42  #define NETIF_STATE_CONNECTED    1
    1.43  
    1.44 -
    1.45  static unsigned int netif_state = NETIF_STATE_DISCONNECTED;
    1.46  
    1.47  static void network_tx_buf_gc(struct net_device *dev);
    1.48 @@ -139,50 +113,50 @@ static void xennet_proc_delif(struct net
    1.49  #define netfront_info net_private
    1.50  struct net_private
    1.51  {
    1.52 -    struct list_head list;
    1.53 -    struct net_device *netdev;
    1.54 +	struct list_head list;
    1.55 +	struct net_device *netdev;
    1.56  
    1.57 -    struct net_device_stats stats;
    1.58 -    NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
    1.59 -    unsigned int tx_full;
    1.60 +	struct net_device_stats stats;
    1.61 +	NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
    1.62 +	unsigned int tx_full;
    1.63      
    1.64 -    netif_tx_interface_t *tx;
    1.65 -    netif_rx_interface_t *rx;
    1.66 +	netif_tx_interface_t *tx;
    1.67 +	netif_rx_interface_t *rx;
    1.68  
    1.69 -    spinlock_t   tx_lock;
    1.70 -    spinlock_t   rx_lock;
    1.71 +	spinlock_t   tx_lock;
    1.72 +	spinlock_t   rx_lock;
    1.73  
    1.74 -    unsigned int handle;
    1.75 -    unsigned int evtchn;
    1.76 +	unsigned int handle;
    1.77 +	unsigned int evtchn;
    1.78  
    1.79 -    /* What is the status of our connection to the remote backend? */
    1.80 +	/* What is the status of our connection to the remote backend? */
    1.81  #define BEST_CLOSED       0
    1.82  #define BEST_DISCONNECTED 1
    1.83  #define BEST_CONNECTED    2
    1.84 -    unsigned int backend_state;
    1.85 +	unsigned int backend_state;
    1.86  
    1.87 -    /* Is this interface open or closed (down or up)? */
    1.88 +	/* Is this interface open or closed (down or up)? */
    1.89  #define UST_CLOSED        0
    1.90  #define UST_OPEN          1
    1.91 -    unsigned int user_state;
    1.92 +	unsigned int user_state;
    1.93  
    1.94 -    /* Receive-ring batched refills. */
    1.95 +	/* Receive-ring batched refills. */
    1.96  #define RX_MIN_TARGET 8
    1.97  #define RX_MAX_TARGET NETIF_RX_RING_SIZE
    1.98 -    int rx_min_target, rx_max_target, rx_target;
    1.99 -    struct sk_buff_head rx_batch;
   1.100 +	int rx_min_target, rx_max_target, rx_target;
   1.101 +	struct sk_buff_head rx_batch;
   1.102  
   1.103 -    /*
   1.104 -     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
   1.105 -     * array is an index into a chain of free entries.
   1.106 -     */
   1.107 -    struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
   1.108 -    struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
   1.109 +	/*
   1.110 +	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
   1.111 +	 * array is an index into a chain of free entries.
   1.112 +	 */
   1.113 +	struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
   1.114 +	struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
   1.115  
   1.116 -    grant_ref_t gref_tx_head;
   1.117 -    grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 
   1.118 -    grant_ref_t gref_rx_head;
   1.119 -    grant_ref_t grant_rx_ref[NETIF_TX_RING_SIZE + 1]; 
   1.120 +	grant_ref_t gref_tx_head;
   1.121 +	grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 
   1.122 +	grant_ref_t gref_rx_head;
   1.123 +	grant_ref_t grant_rx_ref[NETIF_TX_RING_SIZE + 1]; 
   1.124  
   1.125  	struct xenbus_device *xbdev;
   1.126  	char *backend;
   1.127 @@ -194,32 +168,32 @@ struct net_private
   1.128  };
   1.129  
    1.130  /* Access macros for acquiring/freeing slots in {tx,rx}_skbs[]. */
   1.131 -#define ADD_ID_TO_FREELIST(_list, _id)             \
   1.132 -    (_list)[(_id)] = (_list)[0];                   \
   1.133 -    (_list)[0]     = (void *)(unsigned long)(_id);
   1.134 -#define GET_ID_FROM_FREELIST(_list)                \
   1.135 - ({ unsigned long _id = (unsigned long)(_list)[0]; \
   1.136 -    (_list)[0]  = (_list)[_id];                    \
   1.137 -    (unsigned short)_id; })
   1.138 +#define ADD_ID_TO_FREELIST(_list, _id)			\
   1.139 +	(_list)[(_id)] = (_list)[0];			\
   1.140 +	(_list)[0]     = (void *)(unsigned long)(_id);
   1.141 +#define GET_ID_FROM_FREELIST(_list)				\
   1.142 +	({ unsigned long _id = (unsigned long)(_list)[0];	\
   1.143 +	   (_list)[0]  = (_list)[_id];				\
   1.144 +	   (unsigned short)_id; })
   1.145  
   1.146  #ifdef DEBUG
   1.147  static char *be_state_name[] = {
   1.148 -    [BEST_CLOSED]       = "closed",
   1.149 -    [BEST_DISCONNECTED] = "disconnected",
   1.150 -    [BEST_CONNECTED]    = "connected",
   1.151 +	[BEST_CLOSED]       = "closed",
   1.152 +	[BEST_DISCONNECTED] = "disconnected",
   1.153 +	[BEST_CONNECTED]    = "connected",
   1.154  };
   1.155  #endif
   1.156  
   1.157  #ifdef DEBUG
   1.158  #define DPRINTK(fmt, args...) \
   1.159 -    printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
   1.160 +	printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
   1.161  #else
   1.162  #define DPRINTK(fmt, args...) ((void)0)
   1.163  #endif
   1.164  #define IPRINTK(fmt, args...) \
   1.165 -    printk(KERN_INFO "xen_net: " fmt, ##args)
   1.166 +	printk(KERN_INFO "xen_net: " fmt, ##args)
   1.167  #define WPRINTK(fmt, args...) \
   1.168 -    printk(KERN_WARNING "xen_net: " fmt, ##args)
   1.169 +	printk(KERN_WARNING "xen_net: " fmt, ##args)
   1.170  
   1.171  /** Send a packet on a net device to encourage switches to learn the
   1.172   * MAC. We send a fake ARP request.
   1.173 @@ -229,625 +203,627 @@ static char *be_state_name[] = {
   1.174   */
   1.175  static int send_fake_arp(struct net_device *dev)
   1.176  {
   1.177 -    struct sk_buff *skb;
   1.178 -    u32             src_ip, dst_ip;
   1.179 +	struct sk_buff *skb;
   1.180 +	u32             src_ip, dst_ip;
   1.181  
   1.182 -    dst_ip = INADDR_BROADCAST;
   1.183 -    src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
   1.184 +	dst_ip = INADDR_BROADCAST;
   1.185 +	src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
   1.186  
   1.187 -    /* No IP? Then nothing to do. */
   1.188 -    if (src_ip == 0)
   1.189 -        return 0;
   1.190 +	/* No IP? Then nothing to do. */
   1.191 +	if (src_ip == 0)
   1.192 +		return 0;
   1.193  
   1.194 -    skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
   1.195 -                     dst_ip, dev, src_ip,
   1.196 -                     /*dst_hw*/ NULL, /*src_hw*/ NULL, 
   1.197 -                     /*target_hw*/ dev->dev_addr);
   1.198 -    if (skb == NULL)
   1.199 -        return -ENOMEM;
   1.200 +	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
   1.201 +			 dst_ip, dev, src_ip,
   1.202 +			 /*dst_hw*/ NULL, /*src_hw*/ NULL, 
   1.203 +			 /*target_hw*/ dev->dev_addr);
   1.204 +	if (skb == NULL)
   1.205 +		return -ENOMEM;
   1.206  
   1.207 -    return dev_queue_xmit(skb);
   1.208 +	return dev_queue_xmit(skb);
   1.209  }
   1.210  
   1.211  static int network_open(struct net_device *dev)
   1.212  {
   1.213 -    struct net_private *np = netdev_priv(dev);
   1.214 +	struct net_private *np = netdev_priv(dev);
   1.215  
   1.216 -    memset(&np->stats, 0, sizeof(np->stats));
   1.217 +	memset(&np->stats, 0, sizeof(np->stats));
   1.218  
   1.219 -    np->user_state = UST_OPEN;
   1.220 +	np->user_state = UST_OPEN;
   1.221  
   1.222 -    network_alloc_rx_buffers(dev);
   1.223 -    np->rx->event = np->rx_resp_cons + 1;
   1.224 +	network_alloc_rx_buffers(dev);
   1.225 +	np->rx->event = np->rx_resp_cons + 1;
   1.226  
   1.227 -    netif_start_queue(dev);
   1.228 +	netif_start_queue(dev);
   1.229  
   1.230 -    return 0;
   1.231 +	return 0;
   1.232  }
   1.233  
   1.234  static void network_tx_buf_gc(struct net_device *dev)
   1.235  {
   1.236 -    NETIF_RING_IDX i, prod;
   1.237 -    unsigned short id;
   1.238 -    struct net_private *np = netdev_priv(dev);
   1.239 -    struct sk_buff *skb;
   1.240 +	NETIF_RING_IDX i, prod;
   1.241 +	unsigned short id;
   1.242 +	struct net_private *np = netdev_priv(dev);
   1.243 +	struct sk_buff *skb;
   1.244  
   1.245 -    if (np->backend_state != BEST_CONNECTED)
   1.246 -        return;
   1.247 +	if (np->backend_state != BEST_CONNECTED)
   1.248 +		return;
   1.249  
   1.250 -    do {
   1.251 -        prod = np->tx->resp_prod;
   1.252 -        rmb(); /* Ensure we see responses up to 'rp'. */
   1.253 +	do {
   1.254 +		prod = np->tx->resp_prod;
   1.255 +		rmb(); /* Ensure we see responses up to 'rp'. */
   1.256  
   1.257 -        for (i = np->tx_resp_cons; i != prod; i++) {
   1.258 -            id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
   1.259 -            skb = np->tx_skbs[id];
   1.260 +		for (i = np->tx_resp_cons; i != prod; i++) {
   1.261 +			id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
   1.262 +			skb = np->tx_skbs[id];
   1.263  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.264 -            if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) {
   1.265 -                /* other domain is still using this grant - shouldn't happen
   1.266 -                   but if it does, we'll try to reclaim the grant later */
   1.267 -                printk(KERN_ALERT "network_tx_buf_gc: warning -- grant "
   1.268 -                       "still in use by backend domain.\n");
   1.269 -                goto out; 
   1.270 -            }
   1.271 -            gnttab_end_foreign_access_ref(np->grant_tx_ref[id], GNTMAP_readonly);
   1.272 -            gnttab_release_grant_reference(&np->gref_tx_head, np->grant_tx_ref[id]);
   1.273 -            np->grant_tx_ref[id] = GRANT_INVALID_REF;
   1.274 +			if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) {
   1.275 +				printk(KERN_ALERT "network_tx_buf_gc: warning "
   1.276 +				       "-- grant still in use by backend "
   1.277 +				       "domain.\n");
   1.278 +				goto out; 
   1.279 +			}
   1.280 +			gnttab_end_foreign_access_ref(
   1.281 +				np->grant_tx_ref[id], GNTMAP_readonly);
   1.282 +			gnttab_release_grant_reference(
   1.283 +				&np->gref_tx_head, np->grant_tx_ref[id]);
   1.284 +			np->grant_tx_ref[id] = GRANT_INVALID_REF;
   1.285  #endif
   1.286 -            ADD_ID_TO_FREELIST(np->tx_skbs, id);
   1.287 -            dev_kfree_skb_irq(skb);
   1.288 -        }
   1.289 +			ADD_ID_TO_FREELIST(np->tx_skbs, id);
   1.290 +			dev_kfree_skb_irq(skb);
   1.291 +		}
   1.292          
   1.293 -        np->tx_resp_cons = prod;
   1.294 +		np->tx_resp_cons = prod;
   1.295          
   1.296 -        /*
   1.297 -         * Set a new event, then check for race with update of tx_cons. Note
   1.298 -         * that it is essential to schedule a callback, no matter how few
   1.299 -         * buffers are pending. Even if there is space in the transmit ring,
   1.300 -         * higher layers may be blocked because too much data is outstanding:
   1.301 -         * in such cases notification from Xen is likely to be the only kick
   1.302 -         * that we'll get.
   1.303 -         */
   1.304 -        np->tx->event = 
   1.305 -            prod + ((np->tx->req_prod - prod) >> 1) + 1;
   1.306 -        mb();
   1.307 -    } while (prod != np->tx->resp_prod);
   1.308 +		/*
   1.309 +		 * Set a new event, then check for race with update of tx_cons.
   1.310 +		 * Note that it is essential to schedule a callback, no matter
   1.311 +		 * how few buffers are pending. Even if there is space in the
   1.312 +		 * transmit ring, higher layers may be blocked because too much
   1.313 +		 * data is outstanding: in such cases notification from Xen is
   1.314 +		 * likely to be the only kick that we'll get.
   1.315 +		 */
   1.316 +		np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
   1.317 +		mb();
   1.318 +	} while (prod != np->tx->resp_prod);
   1.319  
   1.320  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.321 -  out: 
   1.322 + out: 
   1.323  #endif
   1.324  
   1.325 -    if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
   1.326 -        np->tx_full = 0;
   1.327 -        if (np->user_state == UST_OPEN)
   1.328 -            netif_wake_queue(dev);
   1.329 -    }
   1.330 +	if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
   1.331 +		np->tx_full = 0;
   1.332 +		if (np->user_state == UST_OPEN)
   1.333 +			netif_wake_queue(dev);
   1.334 +	}
   1.335  }
   1.336  
   1.337  
   1.338  static void network_alloc_rx_buffers(struct net_device *dev)
   1.339  {
   1.340 -    unsigned short id;
   1.341 -    struct net_private *np = netdev_priv(dev);
   1.342 -    struct sk_buff *skb;
   1.343 -    int i, batch_target;
   1.344 -    NETIF_RING_IDX req_prod = np->rx->req_prod;
   1.345 -    struct xen_memory_reservation reservation;
   1.346 +	unsigned short id;
   1.347 +	struct net_private *np = netdev_priv(dev);
   1.348 +	struct sk_buff *skb;
   1.349 +	int i, batch_target;
   1.350 +	NETIF_RING_IDX req_prod = np->rx->req_prod;
   1.351 +	struct xen_memory_reservation reservation;
   1.352  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.353 -    grant_ref_t ref;
   1.354 +	grant_ref_t ref;
   1.355  #endif
   1.356  
   1.357 -    if (unlikely(np->backend_state != BEST_CONNECTED))
   1.358 -        return;
   1.359 +	if (unlikely(np->backend_state != BEST_CONNECTED))
   1.360 +		return;
   1.361  
   1.362 -    /*
   1.363 -     * Allocate skbuffs greedily, even though we batch updates to the
   1.364 -     * receive ring. This creates a less bursty demand on the memory allocator,
   1.365 -     * so should reduce the chance of failed allocation requests both for
   1.366 -     * ourself and for other kernel subsystems.
   1.367 -     */
   1.368 -    batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
   1.369 -    for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
   1.370 -        if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
   1.371 -            break;
   1.372 -        __skb_queue_tail(&np->rx_batch, skb);
   1.373 -    }
   1.374 -
   1.375 -    /* Is the batch large enough to be worthwhile? */
   1.376 -    if (i < (np->rx_target/2))
   1.377 -        return;
   1.378 +	/*
   1.379 +	 * Allocate skbuffs greedily, even though we batch updates to the
   1.380 +	 * receive ring. This creates a less bursty demand on the memory
   1.381 +	 * allocator, so should reduce the chance of failed allocation requests
    1.382 +	 * both for ourself and for other kernel subsystems.
   1.383 +	 */
   1.384 +	batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
   1.385 +	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
   1.386 +		skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
   1.387 +		if (skb == NULL)
   1.388 +			break;
   1.389 +		__skb_queue_tail(&np->rx_batch, skb);
   1.390 +	}
   1.391  
   1.392 -    for (i = 0; ; i++) {
   1.393 -        if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
   1.394 -            break;
   1.395 +	/* Is the batch large enough to be worthwhile? */
   1.396 +	if (i < (np->rx_target/2))
   1.397 +		return;
   1.398  
   1.399 -        skb->dev = dev;
   1.400 +	for (i = 0; ; i++) {
   1.401 +		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
   1.402 +			break;
   1.403  
   1.404 -        id = GET_ID_FROM_FREELIST(np->rx_skbs);
   1.405 +		skb->dev = dev;
   1.406  
   1.407 -        np->rx_skbs[id] = skb;
   1.408 +		id = GET_ID_FROM_FREELIST(np->rx_skbs);
   1.409 +
   1.410 +		np->rx_skbs[id] = skb;
   1.411          
   1.412 -        np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
   1.413 +		np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
   1.414  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.415 -	ref = gnttab_claim_grant_reference(&np->gref_rx_head);
   1.416 -        if (unlikely((signed short)ref < 0)) {
   1.417 -            printk(KERN_ALERT "#### netfront can't claim rx reference\n");
   1.418 -            BUG();
   1.419 -        }
   1.420 -        np->grant_rx_ref[id] = ref;
   1.421 -        gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
   1.422 -        np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
   1.423 +		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
   1.424 +		BUG_ON((signed short)ref < 0);
   1.425 +		np->grant_rx_ref[id] = ref;
   1.426 +		gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
   1.427 +		np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
   1.428  #endif
   1.429 -        rx_pfn_array[i] = virt_to_mfn(skb->head);
   1.430 +		rx_pfn_array[i] = virt_to_mfn(skb->head);
   1.431  
   1.432 -	/* Remove this page from pseudo phys map before passing back to Xen. */
   1.433 -	phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
   1.434 -	    = INVALID_P2M_ENTRY;
   1.435 +		/* Remove this page from map before passing back to Xen. */
   1.436 +		phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
   1.437 +			= INVALID_P2M_ENTRY;
   1.438  
   1.439 -	MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
   1.440 -				__pte(0), 0);
   1.441 -    }
   1.442 +		MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
   1.443 +					__pte(0), 0);
   1.444 +	}
   1.445  
   1.446 -    /* After all PTEs have been zapped we blow away stale TLB entries. */
   1.447 -    rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   1.448 +	/* After all PTEs have been zapped we blow away stale TLB entries. */
   1.449 +	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   1.450  
   1.451 -    /* Give away a batch of pages. */
   1.452 -    rx_mcl[i].op = __HYPERVISOR_memory_op;
   1.453 -    rx_mcl[i].args[0] = XENMEM_decrease_reservation;
   1.454 -    rx_mcl[i].args[1] = (unsigned long)&reservation;
   1.455 +	/* Give away a batch of pages. */
   1.456 +	rx_mcl[i].op = __HYPERVISOR_memory_op;
   1.457 +	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
   1.458 +	rx_mcl[i].args[1] = (unsigned long)&reservation;
   1.459  
   1.460 -    reservation.extent_start = rx_pfn_array;
   1.461 -    reservation.nr_extents   = i;
   1.462 -    reservation.extent_order = 0;
   1.463 -    reservation.address_bits = 0;
   1.464 -    reservation.domid        = DOMID_SELF;
   1.465 +	reservation.extent_start = rx_pfn_array;
   1.466 +	reservation.nr_extents   = i;
   1.467 +	reservation.extent_order = 0;
   1.468 +	reservation.address_bits = 0;
   1.469 +	reservation.domid        = DOMID_SELF;
   1.470  
    1.471 -    /* Tell the balloon driver what is going on. */
   1.472 -    balloon_update_driver_allowance(i);
    1.473 +	/* Tell the balloon driver what is going on. */
   1.474 +	balloon_update_driver_allowance(i);
   1.475  
   1.476 -    /* Zap PTEs and give away pages in one big multicall. */
   1.477 -    (void)HYPERVISOR_multicall(rx_mcl, i+1);
   1.478 +	/* Zap PTEs and give away pages in one big multicall. */
   1.479 +	(void)HYPERVISOR_multicall(rx_mcl, i+1);
   1.480  
   1.481 -    /* Check return status of HYPERVISOR_memory_op(). */
   1.482 -    if (unlikely(rx_mcl[i].result != i))
   1.483 -        panic("Unable to reduce memory reservation\n");
   1.484 +	/* Check return status of HYPERVISOR_memory_op(). */
   1.485 +	if (unlikely(rx_mcl[i].result != i))
   1.486 +		panic("Unable to reduce memory reservation\n");
   1.487  
   1.488 -    /* Above is a suitable barrier to ensure backend will see requests. */
   1.489 -    np->rx->req_prod = req_prod + i;
   1.490 +	/* Above is a suitable barrier to ensure backend will see requests. */
   1.491 +	np->rx->req_prod = req_prod + i;
   1.492  
   1.493 -    /* Adjust our floating fill target if we risked running out of buffers. */
   1.494 -    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   1.495 -         ((np->rx_target *= 2) > np->rx_max_target))
   1.496 -        np->rx_target = np->rx_max_target;
   1.497 +	/* Adjust our fill target if we risked running out of buffers. */
   1.498 +	if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   1.499 +	    ((np->rx_target *= 2) > np->rx_max_target))
   1.500 +		np->rx_target = np->rx_max_target;
   1.501  }
   1.502  
   1.503  
   1.504  static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
   1.505  {
   1.506 -    unsigned short id;
   1.507 -    struct net_private *np = netdev_priv(dev);
   1.508 -    netif_tx_request_t *tx;
   1.509 -    NETIF_RING_IDX i;
   1.510 +	unsigned short id;
   1.511 +	struct net_private *np = netdev_priv(dev);
   1.512 +	netif_tx_request_t *tx;
   1.513 +	NETIF_RING_IDX i;
   1.514  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.515 -    grant_ref_t ref;
   1.516 -    unsigned long mfn;
   1.517 +	grant_ref_t ref;
   1.518 +	unsigned long mfn;
   1.519  #endif
   1.520  
   1.521 -    if (unlikely(np->tx_full)) {
   1.522 -        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
   1.523 -        netif_stop_queue(dev);
   1.524 -        goto drop;
   1.525 -    }
   1.526 +	if (unlikely(np->tx_full)) {
   1.527 +		printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
   1.528 +		       dev->name);
   1.529 +		netif_stop_queue(dev);
   1.530 +		goto drop;
   1.531 +	}
   1.532  
   1.533 -    if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   1.534 -                  PAGE_SIZE)) {
   1.535 -        struct sk_buff *nskb;
   1.536 -        if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   1.537 -            goto drop;
   1.538 -        skb_put(nskb, skb->len);
   1.539 -        memcpy(nskb->data, skb->data, skb->len);
   1.540 -        nskb->dev = skb->dev;
   1.541 -        dev_kfree_skb(skb);
   1.542 -        skb = nskb;
   1.543 -    }
   1.544 +	if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   1.545 +		     PAGE_SIZE)) {
   1.546 +		struct sk_buff *nskb;
   1.547 +		if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   1.548 +			goto drop;
   1.549 +		skb_put(nskb, skb->len);
   1.550 +		memcpy(nskb->data, skb->data, skb->len);
   1.551 +		nskb->dev = skb->dev;
   1.552 +		dev_kfree_skb(skb);
   1.553 +		skb = nskb;
   1.554 +	}
   1.555      
   1.556 -    spin_lock_irq(&np->tx_lock);
   1.557 +	spin_lock_irq(&np->tx_lock);
   1.558  
   1.559 -    if (np->backend_state != BEST_CONNECTED) {
   1.560 -        spin_unlock_irq(&np->tx_lock);
   1.561 -        goto drop;
   1.562 -    }
   1.563 +	if (np->backend_state != BEST_CONNECTED) {
   1.564 +		spin_unlock_irq(&np->tx_lock);
   1.565 +		goto drop;
   1.566 +	}
   1.567  
   1.568 -    i = np->tx->req_prod;
   1.569 +	i = np->tx->req_prod;
   1.570  
   1.571 -    id = GET_ID_FROM_FREELIST(np->tx_skbs);
   1.572 -    np->tx_skbs[id] = skb;
   1.573 +	id = GET_ID_FROM_FREELIST(np->tx_skbs);
   1.574 +	np->tx_skbs[id] = skb;
   1.575  
   1.576 -    tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
   1.577 +	tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
   1.578  
   1.579 -    tx->id   = id;
   1.580 +	tx->id   = id;
   1.581  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.582 -    ref = gnttab_claim_grant_reference(&np->gref_tx_head);
   1.583 -    if (unlikely((signed short)ref < 0)) {
   1.584 -        printk(KERN_ALERT "#### netfront can't claim tx grant reference\n");
   1.585 -        BUG();
   1.586 -    }
   1.587 -    mfn = virt_to_mfn(skb->data);
   1.588 -    gnttab_grant_foreign_access_ref(ref, np->backend_id, mfn, GNTMAP_readonly);
   1.589 -    tx->addr = ref << PAGE_SHIFT;
   1.590 -    np->grant_tx_ref[id] = ref;
   1.591 +	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
   1.592 +	BUG_ON((signed short)ref < 0);
   1.593 +	mfn = virt_to_mfn(skb->data);
   1.594 +	gnttab_grant_foreign_access_ref(
   1.595 +		ref, np->backend_id, mfn, GNTMAP_readonly);
   1.596 +	tx->addr = ref << PAGE_SHIFT;
   1.597 +	np->grant_tx_ref[id] = ref;
   1.598  #else
   1.599 -    tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
   1.600 +	tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
   1.601  #endif
   1.602 -    tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
   1.603 -    tx->size = skb->len;
   1.604 -    tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
   1.605 +	tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
   1.606 +	tx->size = skb->len;
   1.607 +	tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
   1.608  
   1.609 -    wmb(); /* Ensure that backend will see the request. */
   1.610 -    np->tx->req_prod = i + 1;
   1.611 +	wmb(); /* Ensure that backend will see the request. */
   1.612 +	np->tx->req_prod = i + 1;
   1.613  
   1.614 -    network_tx_buf_gc(dev);
   1.615 +	network_tx_buf_gc(dev);
   1.616  
   1.617 -    if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
   1.618 -        np->tx_full = 1;
   1.619 -        netif_stop_queue(dev);
   1.620 -    }
   1.621 +	if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
   1.622 +		np->tx_full = 1;
   1.623 +		netif_stop_queue(dev);
   1.624 +	}
   1.625  
   1.626 -    spin_unlock_irq(&np->tx_lock);
   1.627 +	spin_unlock_irq(&np->tx_lock);
   1.628  
   1.629 -    np->stats.tx_bytes += skb->len;
   1.630 -    np->stats.tx_packets++;
   1.631 +	np->stats.tx_bytes += skb->len;
   1.632 +	np->stats.tx_packets++;
   1.633  
   1.634 -    /* Only notify Xen if we really have to. */
   1.635 -    mb();
   1.636 -    if (np->tx->TX_TEST_IDX == i)
   1.637 -        notify_via_evtchn(np->evtchn);
   1.638 +	/* Only notify Xen if we really have to. */
   1.639 +	mb();
   1.640 +	if (np->tx->TX_TEST_IDX == i)
   1.641 +		notify_via_evtchn(np->evtchn);
   1.642  
   1.643 -    return 0;
   1.644 +	return 0;
   1.645  
   1.646   drop:
   1.647 -    np->stats.tx_dropped++;
   1.648 -    dev_kfree_skb(skb);
   1.649 -    return 0;
   1.650 +	np->stats.tx_dropped++;
   1.651 +	dev_kfree_skb(skb);
   1.652 +	return 0;
   1.653  }
   1.654  
   1.655  static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
   1.656  {
   1.657 -    struct net_device *dev = dev_id;
   1.658 -    struct net_private *np = netdev_priv(dev);
   1.659 -    unsigned long flags;
   1.660 +	struct net_device *dev = dev_id;
   1.661 +	struct net_private *np = netdev_priv(dev);
   1.662 +	unsigned long flags;
   1.663  
   1.664 -    spin_lock_irqsave(&np->tx_lock, flags);
   1.665 -    network_tx_buf_gc(dev);
   1.666 -    spin_unlock_irqrestore(&np->tx_lock, flags);
   1.667 +	spin_lock_irqsave(&np->tx_lock, flags);
   1.668 +	network_tx_buf_gc(dev);
   1.669 +	spin_unlock_irqrestore(&np->tx_lock, flags);
   1.670  
   1.671 -    if((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
   1.672 -        netif_rx_schedule(dev);
   1.673 +	if ((np->rx_resp_cons != np->rx->resp_prod) &&
   1.674 +	    (np->user_state == UST_OPEN))
   1.675 +		netif_rx_schedule(dev);
   1.676  
   1.677 -    return IRQ_HANDLED;
   1.678 +	return IRQ_HANDLED;
   1.679  }
   1.680  
   1.681  
   1.682  static int netif_poll(struct net_device *dev, int *pbudget)
   1.683  {
   1.684 -    struct net_private *np = netdev_priv(dev);
   1.685 -    struct sk_buff *skb, *nskb;
   1.686 -    netif_rx_response_t *rx;
   1.687 -    NETIF_RING_IDX i, rp;
   1.688 -    mmu_update_t *mmu = rx_mmu;
   1.689 -    multicall_entry_t *mcl = rx_mcl;
   1.690 -    int work_done, budget, more_to_do = 1;
   1.691 -    struct sk_buff_head rxq;
   1.692 -    unsigned long flags;
   1.693 +	struct net_private *np = netdev_priv(dev);
   1.694 +	struct sk_buff *skb, *nskb;
   1.695 +	netif_rx_response_t *rx;
   1.696 +	NETIF_RING_IDX i, rp;
   1.697 +	mmu_update_t *mmu = rx_mmu;
   1.698 +	multicall_entry_t *mcl = rx_mcl;
   1.699 +	int work_done, budget, more_to_do = 1;
   1.700 +	struct sk_buff_head rxq;
   1.701 +	unsigned long flags;
   1.702  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.703 -    unsigned long mfn;
   1.704 -    grant_ref_t ref;
   1.705 +	unsigned long mfn;
   1.706 +	grant_ref_t ref;
   1.707  #endif
   1.708  
   1.709 -    spin_lock(&np->rx_lock);
   1.710 +	spin_lock(&np->rx_lock);
   1.711  
   1.712 -    if (np->backend_state != BEST_CONNECTED) {
   1.713 -        spin_unlock(&np->rx_lock);
   1.714 -        return 0;
   1.715 -    }
   1.716 +	if (np->backend_state != BEST_CONNECTED) {
   1.717 +		spin_unlock(&np->rx_lock);
   1.718 +		return 0;
   1.719 +	}
   1.720  
   1.721 -    skb_queue_head_init(&rxq);
   1.722 +	skb_queue_head_init(&rxq);
   1.723  
   1.724 -    if ((budget = *pbudget) > dev->quota)
   1.725 -        budget = dev->quota;
   1.726 -    rp = np->rx->resp_prod;
   1.727 -    rmb(); /* Ensure we see queued responses up to 'rp'. */
   1.728 +	if ((budget = *pbudget) > dev->quota)
   1.729 +		budget = dev->quota;
   1.730 +	rp = np->rx->resp_prod;
   1.731 +	rmb(); /* Ensure we see queued responses up to 'rp'. */
   1.732  
   1.733 -    for (i = np->rx_resp_cons, work_done = 0; 
   1.734 -		    (i != rp) && (work_done < budget);
   1.735 -		    i++, work_done++) {
   1.736 -        rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
   1.737 -        /*
   1.738 -         * An error here is very odd. Usually indicates a backend bug,
   1.739 -         * low-memory condition, or that we didn't have reservation headroom.
   1.740 -         */
   1.741 -        if (unlikely(rx->status <= 0)) {
   1.742 -            if (net_ratelimit())
   1.743 -                printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
   1.744 -            np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
   1.745 -            wmb();
   1.746 -            np->rx->req_prod++;
   1.747 -            work_done--;
   1.748 -            continue;
   1.749 -        }
   1.750 +	for (i = np->rx_resp_cons, work_done = 0; 
   1.751 +	     (i != rp) && (work_done < budget);
   1.752 +	     i++, work_done++) {
   1.753 +		rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
   1.754 +		/*
   1.755 +		 * An error here is very odd. Usually indicates a backend bug,
   1.756 +		 * low-mem condition, or we didn't have reservation headroom.
   1.757 +		 */
   1.758 +		if (unlikely(rx->status <= 0)) {
   1.759 +			if (net_ratelimit())
   1.760 +				printk(KERN_WARNING "Bad rx buffer "
   1.761 +				       "(memory squeeze?).\n");
   1.762 +			np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
   1.763 +				req.id = rx->id;
   1.764 +			wmb();
   1.765 +			np->rx->req_prod++;
   1.766 +			work_done--;
   1.767 +			continue;
   1.768 +		}
   1.769  
   1.770  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.771 -        ref = np->grant_rx_ref[rx->id]; 
   1.772 +		ref = np->grant_rx_ref[rx->id]; 
   1.773  
   1.774 -        if(ref == GRANT_INVALID_REF) { 
   1.775 -            printk(KERN_WARNING "Bad rx grant reference %d from dom %d.\n",
   1.776 -                   ref, np->backend_id);
   1.777 -            np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
   1.778 -            wmb();
   1.779 -            np->rx->req_prod++;
   1.780 -            work_done--;
   1.781 -            continue;
   1.782 -        }
   1.783 +		if(ref == GRANT_INVALID_REF) { 
   1.784 +			printk(KERN_WARNING "Bad rx grant reference %d "
   1.785 +			       "from dom %d.\n",
   1.786 +			       ref, np->backend_id);
   1.787 +			np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
   1.788 +				req.id = rx->id;
   1.789 +			wmb();
   1.790 +			np->rx->req_prod++;
   1.791 +			work_done--;
   1.792 +			continue;
   1.793 +		}
   1.794  
   1.795 -        np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
   1.796 -        mfn = gnttab_end_foreign_transfer_ref(ref);
   1.797 -        gnttab_release_grant_reference(&np->gref_rx_head, ref);
   1.798 +		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
   1.799 +		mfn = gnttab_end_foreign_transfer_ref(ref);
   1.800 +		gnttab_release_grant_reference(&np->gref_rx_head, ref);
   1.801  #endif
   1.802  
   1.803 -        skb = np->rx_skbs[rx->id];
   1.804 -        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
   1.805 +		skb = np->rx_skbs[rx->id];
   1.806 +		ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
   1.807  
   1.808 -        /* NB. We handle skb overflow later. */
   1.809 +		/* NB. We handle skb overflow later. */
   1.810  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.811 -        skb->data = skb->head + rx->addr;
   1.812 +		skb->data = skb->head + rx->addr;
   1.813  #else
   1.814 -        skb->data = skb->head + (rx->addr & ~PAGE_MASK);
   1.815 +		skb->data = skb->head + (rx->addr & ~PAGE_MASK);
   1.816  #endif
   1.817 -        skb->len  = rx->status;
   1.818 -        skb->tail = skb->data + skb->len;
   1.819 +		skb->len  = rx->status;
   1.820 +		skb->tail = skb->data + skb->len;
   1.821  
   1.822 -        if ( rx->csum_valid )
   1.823 -            skb->ip_summed = CHECKSUM_UNNECESSARY;
   1.824 +		if ( rx->csum_valid )
   1.825 +			skb->ip_summed = CHECKSUM_UNNECESSARY;
   1.826  
   1.827 -        np->stats.rx_packets++;
   1.828 -        np->stats.rx_bytes += rx->status;
   1.829 +		np->stats.rx_packets++;
   1.830 +		np->stats.rx_bytes += rx->status;
   1.831  
   1.832 -        /* Remap the page. */
   1.833 +		/* Remap the page. */
   1.834  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.835 -        mmu->ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   1.836 +		mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   1.837  #else
   1.838 -        mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
   1.839 +		mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
   1.840  #endif
   1.841 -        mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
   1.842 -        mmu++;
   1.843 +		mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
   1.844 +		mmu++;
   1.845  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.846 -	MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   1.847 -				pfn_pte_ma(mfn, PAGE_KERNEL), 0);
   1.848 +		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   1.849 +					pfn_pte_ma(mfn, PAGE_KERNEL), 0);
   1.850  #else
   1.851 -	MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   1.852 -				pfn_pte_ma(rx->addr >> PAGE_SHIFT, 
   1.853 -                                           PAGE_KERNEL), 0);
   1.854 +		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   1.855 +					pfn_pte_ma(rx->addr >> PAGE_SHIFT, 
   1.856 +						   PAGE_KERNEL), 0);
   1.857  #endif
   1.858 -        mcl++;
   1.859 +		mcl++;
   1.860  
   1.861  #ifdef CONFIG_XEN_NETDEV_GRANT
   1.862 -        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
   1.863 -        GDPRINTK("#### rx_poll     enqueue vdata=%p mfn=%lu ref=%x\n",
   1.864 -                skb->data, mfn, ref);
   1.865 +		phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
   1.866  #else
   1.867 -        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
   1.868 -            rx->addr >> PAGE_SHIFT;
   1.869 +		phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
   1.870 +			rx->addr >> PAGE_SHIFT;
   1.871  #endif 
   1.872  
   1.873  
   1.874 -        __skb_queue_tail(&rxq, skb);
   1.875 -    }
   1.876 +		__skb_queue_tail(&rxq, skb);
   1.877 +	}
   1.878  
   1.879 -
   1.880 -    /* Some pages are no longer absent... */
   1.881 -    balloon_update_driver_allowance(-work_done);
   1.882 +	/* Some pages are no longer absent... */
   1.883 +	balloon_update_driver_allowance(-work_done);
   1.884  
   1.885 -    /* Do all the remapping work, and M->P updates, in one big hypercall. */
   1.886 -    if (likely((mcl - rx_mcl) != 0)) {
   1.887 -        mcl->op = __HYPERVISOR_mmu_update;
   1.888 -        mcl->args[0] = (unsigned long)rx_mmu;
   1.889 -        mcl->args[1] = mmu - rx_mmu;
   1.890 -        mcl->args[2] = 0;
   1.891 -        mcl->args[3] = DOMID_SELF;
   1.892 -        mcl++;
   1.893 -        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   1.894 -    }
   1.895 +	/* Do all the remapping work, and M2P updates, in one big hypercall. */
   1.896 +	if (likely((mcl - rx_mcl) != 0)) {
   1.897 +		mcl->op = __HYPERVISOR_mmu_update;
   1.898 +		mcl->args[0] = (unsigned long)rx_mmu;
   1.899 +		mcl->args[1] = mmu - rx_mmu;
   1.900 +		mcl->args[2] = 0;
   1.901 +		mcl->args[3] = DOMID_SELF;
   1.902 +		mcl++;
   1.903 +		(void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   1.904 +	}
   1.905  
   1.906 -    while ((skb = __skb_dequeue(&rxq)) != NULL) {
   1.907 -#ifdef CONFIG_XEN_NETDEV_GRANT
   1.908 -        GDPRINTK("#### rx_poll     dequeue vdata=%p mfn=%lu\n",
   1.909 -                skb->data, virt_to_mfn(skb->data));
   1.910 -        dump_packet('d', skb->data, (unsigned long)skb->data);
   1.911 -#endif
   1.912 -        /*
   1.913 -         * Enough room in skbuff for the data we were passed? Also, Linux 
   1.914 -         * expects at least 16 bytes headroom in each receive buffer.
   1.915 -         */
   1.916 -        if (unlikely(skb->tail > skb->end) || 
   1.917 -			unlikely((skb->data - skb->head) < 16)) {
   1.918 -            nskb = NULL;
   1.919 -
   1.920 +	while ((skb = __skb_dequeue(&rxq)) != NULL) {
   1.921 +		/*
   1.922 +		 * Enough room in skbuff for the data we were passed? Also,
   1.923 +		 * Linux expects at least 16 bytes headroom in each rx buffer.
   1.924 +		 */
   1.925 +		if (unlikely(skb->tail > skb->end) || 
   1.926 +		    unlikely((skb->data - skb->head) < 16)) {
   1.927 +			nskb = NULL;
   1.928  
   1.929 -            /* Only copy the packet if it fits in the current MTU. */
   1.930 -            if (skb->len <= (dev->mtu + ETH_HLEN)) {
   1.931 -                if ((skb->tail > skb->end) && net_ratelimit())
   1.932 -                    printk(KERN_INFO "Received packet needs %zd bytes more "
   1.933 -                           "headroom.\n", skb->tail - skb->end);
   1.934 +			/* Only copy the packet if it fits in the MTU. */
   1.935 +			if (skb->len <= (dev->mtu + ETH_HLEN)) {
   1.936 +				if ((skb->tail > skb->end) && net_ratelimit())
   1.937 +					printk(KERN_INFO "Received packet "
   1.938 +					       "needs %zd bytes more "
   1.939 +					       "headroom.\n",
   1.940 +					       skb->tail - skb->end);
   1.941  
   1.942 -                if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
   1.943 -                    skb_reserve(nskb, 2);
   1.944 -                    skb_put(nskb, skb->len);
   1.945 -                    memcpy(nskb->data, skb->data, skb->len);
   1.946 -                    nskb->dev = skb->dev;
   1.947 -                }
   1.948 -            }
   1.949 -            else if (net_ratelimit())
   1.950 -                printk(KERN_INFO "Received packet too big for MTU "
   1.951 -                       "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
   1.952 +				nskb = alloc_xen_skb(skb->len + 2);
   1.953 +				if (nskb != NULL) {
   1.954 +					skb_reserve(nskb, 2);
   1.955 +					skb_put(nskb, skb->len);
   1.956 +					memcpy(nskb->data,
   1.957 +					       skb->data,
   1.958 +					       skb->len);
   1.959 +					nskb->dev = skb->dev;
   1.960 +				}
   1.961 +			}
   1.962 +			else if (net_ratelimit())
   1.963 +				printk(KERN_INFO "Received packet too big for "
   1.964 +				       "MTU (%d > %d)\n",
   1.965 +				       skb->len - ETH_HLEN, dev->mtu);
   1.966  
   1.967 -            /* Reinitialise and then destroy the old skbuff. */
   1.968 -            skb->len  = 0;
   1.969 -            skb->tail = skb->data;
   1.970 -            init_skb_shinfo(skb);
   1.971 -            dev_kfree_skb(skb);
   1.972 +			/* Reinitialise and then destroy the old skbuff. */
   1.973 +			skb->len  = 0;
   1.974 +			skb->tail = skb->data;
   1.975 +			init_skb_shinfo(skb);
   1.976 +			dev_kfree_skb(skb);
   1.977  
   1.978 -            /* Switch old for new, if we copied the buffer. */
   1.979 -            if ((skb = nskb) == NULL)
   1.980 -                continue;
   1.981 -        }
   1.982 +			/* Switch old for new, if we copied the buffer. */
   1.983 +			if ((skb = nskb) == NULL)
   1.984 +				continue;
   1.985 +		}
   1.986          
   1.987 -        /* Set the shared-info area, which is hidden behind the real data. */
   1.988 -        init_skb_shinfo(skb);
   1.989 -        /* Ethernet-specific work. Delayed to here as it peeks the header. */
   1.990 -        skb->protocol = eth_type_trans(skb, dev);
   1.991 +		/* Set the shinfo area, which is hidden behind the data. */
   1.992 +		init_skb_shinfo(skb);
   1.993 +		/* Ethernet work: Delayed to here as it peeks the header. */
   1.994 +		skb->protocol = eth_type_trans(skb, dev);
   1.995  
   1.996 -        /* Pass it up. */
   1.997 -        netif_receive_skb(skb);
   1.998 -        dev->last_rx = jiffies;
   1.999 -    }
  1.1000 +		/* Pass it up. */
  1.1001 +		netif_receive_skb(skb);
  1.1002 +		dev->last_rx = jiffies;
  1.1003 +	}
  1.1004  
  1.1005 -    np->rx_resp_cons = i;
  1.1006 +	np->rx_resp_cons = i;
  1.1007  
  1.1008 -    /* If we get a callback with very few responses, reduce fill target. */
  1.1009 -    /* NB. Note exponential increase, linear decrease. */
  1.1010 -    if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
  1.1011 -         (--np->rx_target < np->rx_min_target))
  1.1012 -        np->rx_target = np->rx_min_target;
  1.1013 +	/* If we get a callback with very few responses, reduce fill target. */
  1.1014 +	/* NB. Note exponential increase, linear decrease. */
  1.1015 +	if (((np->rx->req_prod - np->rx->resp_prod) >
  1.1016 +	     ((3*np->rx_target) / 4)) &&
  1.1017 +	    (--np->rx_target < np->rx_min_target))
  1.1018 +		np->rx_target = np->rx_min_target;
  1.1019  
  1.1020 -    network_alloc_rx_buffers(dev);
  1.1021 +	network_alloc_rx_buffers(dev);
  1.1022  
  1.1023 -    *pbudget   -= work_done;
  1.1024 -    dev->quota -= work_done;
  1.1025 +	*pbudget   -= work_done;
  1.1026 +	dev->quota -= work_done;
  1.1027  
  1.1028 -    if (work_done < budget) {
  1.1029 -        local_irq_save(flags);
  1.1030 +	if (work_done < budget) {
  1.1031 +		local_irq_save(flags);
  1.1032  
  1.1033 -        np->rx->event = i + 1;
  1.1034 +		np->rx->event = i + 1;
  1.1035      
  1.1036 -        /* Deal with hypervisor racing our resetting of rx_event. */
  1.1037 -        mb();
  1.1038 -        if (np->rx->resp_prod == i) {
  1.1039 -            __netif_rx_complete(dev);
  1.1040 -            more_to_do = 0;
  1.1041 -        }
  1.1042 +		/* Deal with hypervisor racing our resetting of rx_event. */
  1.1043 +		mb();
  1.1044 +		if (np->rx->resp_prod == i) {
  1.1045 +			__netif_rx_complete(dev);
  1.1046 +			more_to_do = 0;
  1.1047 +		}
  1.1048  
  1.1049 -        local_irq_restore(flags);
  1.1050 -    }
  1.1051 +		local_irq_restore(flags);
  1.1052 +	}
  1.1053  
  1.1054 -    spin_unlock(&np->rx_lock);
  1.1055 +	spin_unlock(&np->rx_lock);
  1.1056  
  1.1057 -    return more_to_do;
  1.1058 +	return more_to_do;
  1.1059  }
  1.1060  
  1.1061  
  1.1062  static int network_close(struct net_device *dev)
  1.1063  {
  1.1064 -    struct net_private *np = netdev_priv(dev);
  1.1065 -    np->user_state = UST_CLOSED;
  1.1066 -    netif_stop_queue(np->netdev);
  1.1067 -    return 0;
  1.1068 +	struct net_private *np = netdev_priv(dev);
  1.1069 +	np->user_state = UST_CLOSED;
  1.1070 +	netif_stop_queue(np->netdev);
  1.1071 +	return 0;
  1.1072  }
  1.1073  
  1.1074  
  1.1075  static struct net_device_stats *network_get_stats(struct net_device *dev)
  1.1076  {
  1.1077 -    struct net_private *np = netdev_priv(dev);
  1.1078 -    return &np->stats;
  1.1079 +	struct net_private *np = netdev_priv(dev);
  1.1080 +	return &np->stats;
  1.1081  }
  1.1082  
  1.1083  static void network_connect(struct net_device *dev)
  1.1084  {
  1.1085 -    struct net_private *np;
  1.1086 -    int i, requeue_idx;
  1.1087 -    netif_tx_request_t *tx;
  1.1088 +	struct net_private *np;
  1.1089 +	int i, requeue_idx;
  1.1090 +	netif_tx_request_t *tx;
  1.1091  
  1.1092 -    np = netdev_priv(dev);
  1.1093 -    spin_lock_irq(&np->tx_lock);
  1.1094 -    spin_lock(&np->rx_lock);
  1.1095 +	np = netdev_priv(dev);
  1.1096 +	spin_lock_irq(&np->tx_lock);
  1.1097 +	spin_lock(&np->rx_lock);
  1.1098  
  1.1099 -    /* Recovery procedure: */
  1.1100 +	/* Recovery procedure: */
  1.1101  
  1.1102 -    /* Step 1: Reinitialise variables. */
  1.1103 -    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
  1.1104 -    np->rx->event = np->tx->event = 1;
  1.1105 +	/* Step 1: Reinitialise variables. */
  1.1106 +	np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
  1.1107 +	np->rx->event = np->tx->event = 1;
  1.1108  
  1.1109 -    /* Step 2: Rebuild the RX and TX ring contents.
  1.1110 -     * NB. We could just free the queued TX packets now but we hope
  1.1111 -     * that sending them out might do some good.  We have to rebuild
  1.1112 -     * the RX ring because some of our pages are currently flipped out
  1.1113 -     * so we can't just free the RX skbs.
  1.1114 -     * NB2. Freelist index entries are always going to be less than
  1.1115 -     *  __PAGE_OFFSET, whereas pointers to skbs will always be equal or
  1.1116 -     * greater than __PAGE_OFFSET: we use this property to distinguish
  1.1117 -     * them.
  1.1118 -     */
  1.1119 +	/* Step 2: Rebuild the RX and TX ring contents.
  1.1120 +	 * NB. We could just free the queued TX packets now but we hope
  1.1121 +	 * that sending them out might do some good.  We have to rebuild
  1.1122 +	 * the RX ring because some of our pages are currently flipped out
  1.1123 +	 * so we can't just free the RX skbs.
  1.1124 +	 * NB2. Freelist index entries are always going to be less than
  1.1125 +	 *  __PAGE_OFFSET, whereas pointers to skbs will always be equal or
  1.1126 +	 * greater than __PAGE_OFFSET: we use this property to distinguish
  1.1127 +	 * them.
  1.1128 +	 */
  1.1129  
  1.1130 -    /* Rebuild the TX buffer freelist and the TX ring itself.
  1.1131 -     * NB. This reorders packets.  We could keep more private state
  1.1132 -     * to avoid this but maybe it doesn't matter so much given the
  1.1133 -     * interface has been down.
  1.1134 -     */
  1.1135 -    for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
  1.1136 -        if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
  1.1137 -            struct sk_buff *skb = np->tx_skbs[i];
  1.1138 +	/* Rebuild the TX buffer freelist and the TX ring itself.
  1.1139 +	 * NB. This reorders packets.  We could keep more private state
  1.1140 +	 * to avoid this but maybe it doesn't matter so much given the
  1.1141 +	 * interface has been down.
  1.1142 +	 */
  1.1143 +	for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
  1.1144 +		if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
  1.1145 +			struct sk_buff *skb = np->tx_skbs[i];
  1.1146  
  1.1147 -            tx = &np->tx->ring[requeue_idx++].req;
  1.1148 +			tx = &np->tx->ring[requeue_idx++].req;
  1.1149  
  1.1150 -            tx->id   = i;
  1.1151 +			tx->id   = i;
  1.1152  #ifdef CONFIG_XEN_NETDEV_GRANT
  1.1153 -            gnttab_grant_foreign_access_ref(np->grant_tx_ref[i], np->backend_id, 
  1.1154 -                                            virt_to_mfn(np->tx_skbs[i]->data),
  1.1155 -                                            GNTMAP_readonly); 
  1.1156 -            tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT; 
  1.1157 +			gnttab_grant_foreign_access_ref(
  1.1158 +				np->grant_tx_ref[i], np->backend_id, 
  1.1159 +				virt_to_mfn(np->tx_skbs[i]->data),
  1.1160 +				GNTMAP_readonly); 
  1.1161 +			tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT; 
  1.1162  #else
  1.1163 -            tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
  1.1164 +			tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
  1.1165  #endif
  1.1166 -            tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
  1.1167 -            tx->size = skb->len;
  1.1168 +			tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
  1.1169 +			tx->size = skb->len;
  1.1170  
  1.1171 -            np->stats.tx_bytes += skb->len;
  1.1172 -            np->stats.tx_packets++;
  1.1173 -        }
  1.1174 -    }
  1.1175 -    wmb();
  1.1176 -    np->tx->req_prod = requeue_idx;
  1.1177 +			np->stats.tx_bytes += skb->len;
  1.1178 +			np->stats.tx_packets++;
  1.1179 +		}
  1.1180 +	}
  1.1181 +	wmb();
  1.1182 +	np->tx->req_prod = requeue_idx;
  1.1183  
  1.1184 -    /* Rebuild the RX buffer freelist and the RX ring itself. */
  1.1185 -    for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
  1.1186 -        if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
  1.1187 +	/* Rebuild the RX buffer freelist and the RX ring itself. */
  1.1188 +	for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
  1.1189 +		if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
  1.1190  #ifdef CONFIG_XEN_NETDEV_GRANT 
  1.1191 -            /* Reinstate the grant ref so backend can transfer mfn to us. */
  1.1192 -            gnttab_grant_foreign_transfer_ref(np->grant_rx_ref[i], np->backend_id);
  1.1193 -            np->rx->ring[requeue_idx].req.gref = np->grant_rx_ref[i];
  1.1194 +			gnttab_grant_foreign_transfer_ref(
  1.1195 +				np->grant_rx_ref[i], np->backend_id);
  1.1196 +			np->rx->ring[requeue_idx].req.gref =
  1.1197 +				np->grant_rx_ref[i];
  1.1198  #endif
  1.1199 -            np->rx->ring[requeue_idx].req.id   = i;
  1.1200 -            requeue_idx++; 
  1.1201 -        }
  1.1202 -    }
  1.1203 +			np->rx->ring[requeue_idx].req.id = i;
  1.1204 +			requeue_idx++; 
  1.1205 +		}
  1.1206 +	}
  1.1207  
  1.1208 -    wmb();                
  1.1209 -    np->rx->req_prod = requeue_idx;
  1.1210 +	wmb();                
  1.1211 +	np->rx->req_prod = requeue_idx;
  1.1212  
  1.1213 -    /* Step 3: All public and private state should now be sane.  Get
  1.1214 -     * ready to start sending and receiving packets and give the driver
  1.1215 -     * domain a kick because we've probably just requeued some
  1.1216 -     * packets.
  1.1217 -     */
  1.1218 -    np->backend_state = BEST_CONNECTED;
  1.1219 -    wmb();
  1.1220 -    notify_via_evtchn(np->evtchn);  
  1.1221 -    network_tx_buf_gc(dev);
  1.1222 +	/* Step 3: All public and private state should now be sane.  Get
  1.1223 +	 * ready to start sending and receiving packets and give the driver
  1.1224 +	 * domain a kick because we've probably just requeued some
  1.1225 +	 * packets.
  1.1226 +	 */
  1.1227 +	np->backend_state = BEST_CONNECTED;
  1.1228 +	wmb();
  1.1229 +	notify_via_evtchn(np->evtchn);  
  1.1230 +	network_tx_buf_gc(dev);
  1.1231  
  1.1232 -    if (np->user_state == UST_OPEN)
  1.1233 -        netif_start_queue(dev);
  1.1234 +	if (np->user_state == UST_OPEN)
  1.1235 +		netif_start_queue(dev);
  1.1236  
  1.1237 -    spin_unlock(&np->rx_lock);
  1.1238 -    spin_unlock_irq(&np->tx_lock);
  1.1239 +	spin_unlock(&np->rx_lock);
  1.1240 +	spin_unlock_irq(&np->tx_lock);
  1.1241  }
  1.1242  
  1.1243  static void show_device(struct net_private *np)
  1.1244 @@ -887,9 +863,9 @@ connect_device(struct net_private *np, u
  1.1245  static void netif_uninit(struct net_device *dev)
  1.1246  {
  1.1247  #ifdef CONFIG_XEN_NETDEV_GRANT
  1.1248 -    struct net_private *np = netdev_priv(dev);
  1.1249 -    gnttab_free_grant_references(np->gref_tx_head);
  1.1250 -    gnttab_free_grant_references(np->gref_rx_head);
  1.1251 +	struct net_private *np = netdev_priv(dev);
  1.1252 +	gnttab_free_grant_references(np->gref_tx_head);
  1.1253 +	gnttab_free_grant_references(np->gref_rx_head);
  1.1254  #endif
  1.1255  }
  1.1256  
  1.1257 @@ -907,97 +883,99 @@ static struct ethtool_ops network_ethtoo
  1.1258  static int create_netdev(int handle, struct xenbus_device *dev,
  1.1259  			 struct net_device **val)
  1.1260  {
  1.1261 -    int i, err = 0;
  1.1262 -    struct net_device *netdev = NULL;
  1.1263 -    struct net_private *np = NULL;
  1.1264 +	int i, err = 0;
  1.1265 +	struct net_device *netdev = NULL;
  1.1266 +	struct net_private *np = NULL;
  1.1267  
  1.1268 -    if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
  1.1269 -        printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
  1.1270 -        err = -ENOMEM;
  1.1271 -        goto exit;
  1.1272 -    }
  1.1273 +	if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
  1.1274 +		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
  1.1275 +		       __FUNCTION__);
  1.1276 +		err = -ENOMEM;
  1.1277 +		goto exit;
  1.1278 +	}
  1.1279  
  1.1280 -    np                = netdev_priv(netdev);
  1.1281 -    np->backend_state = BEST_CLOSED;
  1.1282 -    np->user_state    = UST_CLOSED;
  1.1283 -    np->handle        = handle;
  1.1284 -    np->xbdev         = dev;
  1.1285 +	np                = netdev_priv(netdev);
  1.1286 +	np->backend_state = BEST_CLOSED;
  1.1287 +	np->user_state    = UST_CLOSED;
  1.1288 +	np->handle        = handle;
  1.1289 +	np->xbdev         = dev;
  1.1290      
  1.1291 -    spin_lock_init(&np->tx_lock);
  1.1292 -    spin_lock_init(&np->rx_lock);
  1.1293 +	spin_lock_init(&np->tx_lock);
  1.1294 +	spin_lock_init(&np->rx_lock);
  1.1295  
  1.1296 -    skb_queue_head_init(&np->rx_batch);
  1.1297 -    np->rx_target     = RX_MIN_TARGET;
  1.1298 -    np->rx_min_target = RX_MIN_TARGET;
  1.1299 -    np->rx_max_target = RX_MAX_TARGET;
  1.1300 +	skb_queue_head_init(&np->rx_batch);
  1.1301 +	np->rx_target     = RX_MIN_TARGET;
  1.1302 +	np->rx_min_target = RX_MIN_TARGET;
  1.1303 +	np->rx_max_target = RX_MAX_TARGET;
  1.1304  
  1.1305 -    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
  1.1306 -    for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
  1.1307 -        np->tx_skbs[i] = (void *)((unsigned long) i+1);
  1.1308 +	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
  1.1309 +	for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
  1.1310 +		np->tx_skbs[i] = (void *)((unsigned long) i+1);
  1.1311  #ifdef CONFIG_XEN_NETDEV_GRANT
  1.1312 -        np->grant_tx_ref[i] = GRANT_INVALID_REF;
  1.1313 +		np->grant_tx_ref[i] = GRANT_INVALID_REF;
  1.1314  #endif
  1.1315 -    }
  1.1316 +	}
  1.1317  
  1.1318 -    for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
  1.1319 -        np->rx_skbs[i] = (void *)((unsigned long) i+1);
  1.1320 +	for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
  1.1321 +		np->rx_skbs[i] = (void *)((unsigned long) i+1);
  1.1322  #ifdef CONFIG_XEN_NETDEV_GRANT
  1.1323 -        np->grant_rx_ref[i] = GRANT_INVALID_REF;
  1.1324 +		np->grant_rx_ref[i] = GRANT_INVALID_REF;
  1.1325  #endif
  1.1326 -    }
  1.1327 +	}
  1.1328  
  1.1329  #ifdef CONFIG_XEN_NETDEV_GRANT
  1.1330 -    /* A grant for every tx ring slot */
  1.1331 -    if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
  1.1332 -                                      &np->gref_tx_head) < 0) {
  1.1333 -        printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
  1.1334 -        goto exit;
  1.1335 -    }
  1.1336 -    /* A grant for every rx ring slot */
  1.1337 -    if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
  1.1338 -                                      &np->gref_rx_head) < 0) {
  1.1339 -        printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
  1.1340 -        gnttab_free_grant_references(np->gref_tx_head);
  1.1341 -        goto exit;
  1.1342 -    }
  1.1343 +	/* A grant for every tx ring slot */
  1.1344 +	if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
  1.1345 +					  &np->gref_tx_head) < 0) {
  1.1346 +		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
  1.1347 +		goto exit;
  1.1348 +	}
  1.1349 +	/* A grant for every rx ring slot */
  1.1350 +	if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
  1.1351 +					  &np->gref_rx_head) < 0) {
  1.1352 +		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
  1.1353 +		gnttab_free_grant_references(np->gref_tx_head);
  1.1354 +		goto exit;
  1.1355 +	}
  1.1356  #endif
  1.1357  
  1.1358 -    netdev->open            = network_open;
  1.1359 -    netdev->hard_start_xmit = network_start_xmit;
  1.1360 -    netdev->stop            = network_close;
  1.1361 -    netdev->get_stats       = network_get_stats;
  1.1362 -    netdev->poll            = netif_poll;
  1.1363 -    netdev->uninit          = netif_uninit;
  1.1364 -    netdev->weight          = 64;
  1.1365 -    netdev->features        = NETIF_F_IP_CSUM;
  1.1366 +	netdev->open            = network_open;
  1.1367 +	netdev->hard_start_xmit = network_start_xmit;
  1.1368 +	netdev->stop            = network_close;
  1.1369 +	netdev->get_stats       = network_get_stats;
  1.1370 +	netdev->poll            = netif_poll;
  1.1371 +	netdev->uninit          = netif_uninit;
  1.1372 +	netdev->weight          = 64;
  1.1373 +	netdev->features        = NETIF_F_IP_CSUM;
  1.1374  
  1.1375 -    SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
  1.1376 +	SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
  1.1377  
  1.1378 -    if ((err = register_netdev(netdev)) != 0) {
  1.1379 -        printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
  1.1380 -        goto exit_free_grefs;
  1.1381 -    }
  1.1382 +	if ((err = register_netdev(netdev)) != 0) {
  1.1383 +		printk(KERN_WARNING "%s> register_netdev err=%d\n",
  1.1384 +		       __FUNCTION__, err);
  1.1385 +		goto exit_free_grefs;
  1.1386 +	}
  1.1387  
  1.1388 -    if ((err = xennet_proc_addif(netdev)) != 0) {
  1.1389 -        unregister_netdev(netdev);
  1.1390 -        goto exit_free_grefs;
  1.1391 -    }
  1.1392 +	if ((err = xennet_proc_addif(netdev)) != 0) {
  1.1393 +		unregister_netdev(netdev);
  1.1394 +		goto exit_free_grefs;
  1.1395 +	}
  1.1396  
  1.1397 -    np->netdev = netdev;
  1.1398 +	np->netdev = netdev;
  1.1399  
  1.1400 -  exit:
  1.1401 -    if ((err != 0) && (netdev != NULL))
  1.1402 -        kfree(netdev);
  1.1403 -    else if (val != NULL)
  1.1404 -        *val = netdev;
  1.1405 -    return err;
  1.1406 + exit:
  1.1407 +	if ((err != 0) && (netdev != NULL))
  1.1408 +		kfree(netdev);
  1.1409 +	else if (val != NULL)
  1.1410 +		*val = netdev;
  1.1411 +	return err;
  1.1412  
  1.1413   exit_free_grefs:
  1.1414  #ifdef CONFIG_XEN_NETDEV_GRANT
  1.1415 -    gnttab_free_grant_references(np->gref_tx_head);
  1.1416 -    gnttab_free_grant_references(np->gref_rx_head);
  1.1417 +	gnttab_free_grant_references(np->gref_tx_head);
  1.1418 +	gnttab_free_grant_references(np->gref_rx_head);
  1.1419  #endif
  1.1420 -    goto exit;
  1.1421 +	goto exit;
  1.1422  }
  1.1423  
  1.1424  static int destroy_netdev(struct net_device *netdev)
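
Note on the {tx,rx}_skbs initialisation in the hunk above: the loops thread a free list through the arrays themselves, so a slot that is not holding an skb instead holds the index of the next free slot, cast to a pointer, and no separate free-list structure is needed. The standalone sketch below is illustrative only; it is not part of netfront.c, the pool_* names are invented, and the choice of entry 0 as the chain head is an assumption made for the sketch rather than something visible in this hunk.

#include <stdio.h>

#define POOL_SIZE 8

/* One extra entry, as in the loops above; entry 0 doubles as the chain head. */
static void *slots[POOL_SIZE + 1];

static void pool_init(void)
{
	unsigned long i;

	/* Thread every entry onto the free chain: slot i points at slot i+1. */
	for (i = 0; i <= POOL_SIZE; i++)
		slots[i] = (void *)(i + 1);
}

static unsigned long pool_get(void *data)
{
	unsigned long id = (unsigned long)slots[0];

	/* Pop the chain head, then let the slot carry real data.
	 * (No exhaustion check in this sketch.) */
	slots[0] = slots[id];
	slots[id] = data;
	return id;
}

static void pool_put(unsigned long id)
{
	/* Push the slot back onto the chain. */
	slots[id] = slots[0];
	slots[0] = (void *)id;
}

int main(void)
{
	int payload = 42;
	unsigned long id;

	pool_init();
	id = pool_get(&payload);
	printf("payload %d stored in slot %lu\n", *(int *)slots[id], id);
	pool_put(id);
	return 0;
}

Allocation pops the head of the chain and overwrites the slot with real data; freeing pushes the slot back, which is how the ring ids are recycled without any extra bookkeeping memory.
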
  1.1425 @@ -1016,20 +994,20 @@ static int destroy_netdev(struct net_dev
  1.1426  static int 
  1.1427  inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
  1.1428  {
  1.1429 -    struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
  1.1430 -    struct net_device *dev = ifa->ifa_dev->dev;
  1.1431 +	struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
  1.1432 +	struct net_device *dev = ifa->ifa_dev->dev;
  1.1433  
  1.1434 -    /* UP event and is it one of our devices? */
  1.1435 -    if (event == NETDEV_UP && dev->open == network_open)
  1.1436 -        (void)send_fake_arp(dev);
  1.1437 +	/* UP event and is it one of our devices? */
  1.1438 +	if (event == NETDEV_UP && dev->open == network_open)
  1.1439 +		(void)send_fake_arp(dev);
  1.1440          
  1.1441 -    return NOTIFY_DONE;
  1.1442 +	return NOTIFY_DONE;
  1.1443  }
  1.1444  
  1.1445  static struct notifier_block notifier_inetdev = {
  1.1446 -    .notifier_call  = inetdev_notify,
  1.1447 -    .next           = NULL,
  1.1448 -    .priority       = 0
  1.1449 +	.notifier_call  = inetdev_notify,
  1.1450 +	.next           = NULL,
  1.1451 +	.priority       = 0
  1.1452  };
  1.1453  
  1.1454  static struct xenbus_device_id netfront_ids[] = {
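
The hunk above re-indents inetdev_notify(), which hooks the kernel's IPv4 address notifier chain so a fake (gratuitous) ARP can be sent whenever one of the driver's interfaces comes up, letting peers relearn where the MAC lives. A minimal out-of-tree module using the same notifier API might look like the sketch below; this is a hedged illustration only, the demo_* names are invented, and error handling is omitted.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>

/* Called for every IPv4 address event; ptr is the affected in_ifaddr. */
static int demo_inet_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "demo: %s acquired an address\n",
		       ifa->ifa_dev->dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_inet_event,
};

static int __init demo_init(void)
{
	return register_inetaddr_notifier(&demo_notifier);
}

static void __exit demo_exit(void)
{
	unregister_inetaddr_notifier(&demo_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

register_inetaddr_notifier() and the NETDEV_UP event are the same hooks used by netif_init() and inetdev_notify() in this changeset; the netfront callback simply reacts by transmitting the fake ARP instead of printing.
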
  1.1455 @@ -1368,46 +1346,46 @@ static void __init init_net_xenbus(void)
  1.1456  
  1.1457  static int wait_for_netif(void)
  1.1458  {
  1.1459 -    int err = 0;
  1.1460 -    int i;
  1.1461 +	int err = 0;
  1.1462 +	int i;
  1.1463  
  1.1464 -    /*
  1.1465 -     * We should figure out how many and which devices we need to
  1.1466 -     * proceed and only wait for those.  For now, continue once the
  1.1467 -     * first device is around.
  1.1468 -     */
  1.1469 -    for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ )
  1.1470 -    {
  1.1471 -        set_current_state(TASK_INTERRUPTIBLE);
  1.1472 -        schedule_timeout(1);
  1.1473 -    }
  1.1474 +	/*
  1.1475 +	 * We should figure out how many and which devices we need to
  1.1476 +	 * proceed and only wait for those.  For now, continue once the
  1.1477 +	 * first device is around.
  1.1478 +	 */
  1.1479 +	for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ )
  1.1480 +	{
  1.1481 +		set_current_state(TASK_INTERRUPTIBLE);
  1.1482 +		schedule_timeout(1);
  1.1483 +	}
  1.1484  
  1.1485 -    if (netif_state != NETIF_STATE_CONNECTED) {
  1.1486 -        WPRINTK("Timeout connecting to device!\n");
  1.1487 -        err = -ENOSYS;
  1.1488 -    }
  1.1489 -    return err;
  1.1490 +	if (netif_state != NETIF_STATE_CONNECTED) {
  1.1491 +		WPRINTK("Timeout connecting to device!\n");
  1.1492 +		err = -ENOSYS;
  1.1493 +	}
  1.1494 +	return err;
  1.1495  }
  1.1496  
  1.1497  static int __init netif_init(void)
  1.1498  {
  1.1499 -    int err = 0;
  1.1500 +	int err = 0;
  1.1501  
  1.1502 -    if (xen_start_info->flags & SIF_INITDOMAIN)
  1.1503 -        return 0;
  1.1504 +	if (xen_start_info->flags & SIF_INITDOMAIN)
  1.1505 +		return 0;
  1.1506  
  1.1507 -    if ((err = xennet_proc_init()) != 0)
  1.1508 -        return err;
  1.1509 +	if ((err = xennet_proc_init()) != 0)
  1.1510 +		return err;
  1.1511  
  1.1512 -    IPRINTK("Initialising virtual ethernet driver.\n");
  1.1513 +	IPRINTK("Initialising virtual ethernet driver.\n");
  1.1514  
  1.1515 -    (void)register_inetaddr_notifier(&notifier_inetdev);
  1.1516 +	(void)register_inetaddr_notifier(&notifier_inetdev);
  1.1517  
  1.1518 -    init_net_xenbus();
  1.1519 +	init_net_xenbus();
  1.1520  
  1.1521 -    wait_for_netif();
  1.1522 +	wait_for_netif();
  1.1523  
  1.1524 -    return err;
  1.1525 +	return err;
  1.1526  }
  1.1527  
  1.1528  static void netif_exit(void)
  1.1529 @@ -1421,147 +1399,159 @@ static void netif_exit(void)
  1.1530  #define TARGET_CUR 2UL
  1.1531  
  1.1532  static int xennet_proc_read(
  1.1533 -    char *page, char **start, off_t off, int count, int *eof, void *data)
  1.1534 +	char *page, char **start, off_t off, int count, int *eof, void *data)
  1.1535  {
  1.1536 -    struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
  1.1537 -    struct net_private *np = netdev_priv(dev);
  1.1538 -    int len = 0, which_target = (long)data & 3;
  1.1539 +	struct net_device *dev =
  1.1540 +		(struct net_device *)((unsigned long)data & ~3UL);
  1.1541 +	struct net_private *np = netdev_priv(dev);
  1.1542 +	int len = 0, which_target = (long)data & 3;
  1.1543      
  1.1544 -    switch (which_target)
  1.1545 -    {
  1.1546 -    case TARGET_MIN:
  1.1547 -        len = sprintf(page, "%d\n", np->rx_min_target);
  1.1548 -        break;
  1.1549 -    case TARGET_MAX:
  1.1550 -        len = sprintf(page, "%d\n", np->rx_max_target);
  1.1551 -        break;
  1.1552 -    case TARGET_CUR:
  1.1553 -        len = sprintf(page, "%d\n", np->rx_target);
  1.1554 -        break;
  1.1555 -    }
  1.1556 +	switch (which_target)
  1.1557 +	{
  1.1558 +	case TARGET_MIN:
  1.1559 +		len = sprintf(page, "%d\n", np->rx_min_target);
  1.1560 +		break;
  1.1561 +	case TARGET_MAX:
  1.1562 +		len = sprintf(page, "%d\n", np->rx_max_target);
  1.1563 +		break;
  1.1564 +	case TARGET_CUR:
  1.1565 +		len = sprintf(page, "%d\n", np->rx_target);
  1.1566 +		break;
  1.1567 +	}
  1.1568  
  1.1569 -    *eof = 1;
  1.1570 -    return len;
  1.1571 +	*eof = 1;
  1.1572 +	return len;
  1.1573  }
  1.1574  
  1.1575  static int xennet_proc_write(
  1.1576 -    struct file *file, const char __user *buffer,
  1.1577 -    unsigned long count, void *data)
  1.1578 +	struct file *file, const char __user *buffer,
  1.1579 +	unsigned long count, void *data)
  1.1580  {
  1.1581 -    struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
  1.1582 -    struct net_private *np = netdev_priv(dev);
  1.1583 -    int which_target = (long)data & 3;
  1.1584 -    char string[64];
  1.1585 -    long target;
  1.1586 +	struct net_device *dev =
  1.1587 +		(struct net_device *)((unsigned long)data & ~3UL);
  1.1588 +	struct net_private *np = netdev_priv(dev);
  1.1589 +	int which_target = (long)data & 3;
  1.1590 +	char string[64];
  1.1591 +	long target;
  1.1592  
  1.1593 -    if (!capable(CAP_SYS_ADMIN))
  1.1594 -        return -EPERM;
  1.1595 +	if (!capable(CAP_SYS_ADMIN))
  1.1596 +		return -EPERM;
  1.1597  
  1.1598 -    if (count <= 1)
  1.1599 -        return -EBADMSG; /* runt */
  1.1600 -    if (count > sizeof(string))
  1.1601 -        return -EFBIG;   /* too long */
  1.1602 +	if (count <= 1)
  1.1603 +		return -EBADMSG; /* runt */
  1.1604 +	if (count > sizeof(string))
  1.1605 +		return -EFBIG;   /* too long */
  1.1606  
  1.1607 -    if (copy_from_user(string, buffer, count))
  1.1608 -        return -EFAULT;
  1.1609 -    string[sizeof(string)-1] = '\0';
  1.1610 +	if (copy_from_user(string, buffer, count))
  1.1611 +		return -EFAULT;
  1.1612 +	string[sizeof(string)-1] = '\0';
  1.1613  
  1.1614 -    target = simple_strtol(string, NULL, 10);
  1.1615 -    if (target < RX_MIN_TARGET)
  1.1616 -        target = RX_MIN_TARGET;
  1.1617 -    if (target > RX_MAX_TARGET)
  1.1618 -        target = RX_MAX_TARGET;
  1.1619 +	target = simple_strtol(string, NULL, 10);
  1.1620 +	if (target < RX_MIN_TARGET)
  1.1621 +		target = RX_MIN_TARGET;
  1.1622 +	if (target > RX_MAX_TARGET)
  1.1623 +		target = RX_MAX_TARGET;
  1.1624  
  1.1625 -    spin_lock(&np->rx_lock);
  1.1626 +	spin_lock(&np->rx_lock);
  1.1627  
  1.1628 -    switch (which_target)
  1.1629 -    {
  1.1630 -    case TARGET_MIN:
  1.1631 -        if (target > np->rx_max_target)
  1.1632 -            np->rx_max_target = target;
  1.1633 -        np->rx_min_target = target;
  1.1634 -        if (target > np->rx_target)
  1.1635 -            np->rx_target = target;
  1.1636 -        break;
  1.1637 -    case TARGET_MAX:
  1.1638 -        if (target < np->rx_min_target)
  1.1639 -            np->rx_min_target = target;
  1.1640 -        np->rx_max_target = target;
  1.1641 -        if (target < np->rx_target)
  1.1642 -            np->rx_target = target;
  1.1643 -        break;
  1.1644 -    case TARGET_CUR:
  1.1645 -        break;
  1.1646 -    }
  1.1647 +	switch (which_target)
  1.1648 +	{
  1.1649 +	case TARGET_MIN:
  1.1650 +		if (target > np->rx_max_target)
  1.1651 +			np->rx_max_target = target;
  1.1652 +		np->rx_min_target = target;
  1.1653 +		if (target > np->rx_target)
  1.1654 +			np->rx_target = target;
  1.1655 +		break;
  1.1656 +	case TARGET_MAX:
  1.1657 +		if (target < np->rx_min_target)
  1.1658 +			np->rx_min_target = target;
  1.1659 +		np->rx_max_target = target;
  1.1660 +		if (target < np->rx_target)
  1.1661 +			np->rx_target = target;
  1.1662 +		break;
  1.1663 +	case TARGET_CUR:
  1.1664 +		break;
  1.1665 +	}
  1.1666  
  1.1667 -    network_alloc_rx_buffers(dev);
  1.1668 +	network_alloc_rx_buffers(dev);
  1.1669  
  1.1670 -    spin_unlock(&np->rx_lock);
  1.1671 +	spin_unlock(&np->rx_lock);
  1.1672  
  1.1673 -    return count;
  1.1674 +	return count;
  1.1675  }
  1.1676  
  1.1677  static int xennet_proc_init(void)
  1.1678  {
  1.1679 -    if (proc_mkdir("xen/net", NULL) == NULL)
  1.1680 -        return -ENOMEM;
  1.1681 -    return 0;
  1.1682 +	if (proc_mkdir("xen/net", NULL) == NULL)
  1.1683 +		return -ENOMEM;
  1.1684 +	return 0;
  1.1685  }
  1.1686  
  1.1687  static int xennet_proc_addif(struct net_device *dev)
  1.1688  {
  1.1689 -    struct proc_dir_entry *dir, *min, *max, *cur;
  1.1690 -    char name[30];
  1.1691 +	struct proc_dir_entry *dir, *min, *max, *cur;
  1.1692 +	char name[30];
  1.1693  
  1.1694 -    sprintf(name, "xen/net/%s", dev->name);
  1.1695 +	sprintf(name, "xen/net/%s", dev->name);
  1.1696  
  1.1697 -    dir = proc_mkdir(name, NULL);
  1.1698 -    if (!dir)
  1.1699 -        goto nomem;
  1.1700 +	dir = proc_mkdir(name, NULL);
  1.1701 +	if (!dir)
  1.1702 +		goto nomem;
  1.1703  
  1.1704 -    min = create_proc_entry("rxbuf_min", 0644, dir);
  1.1705 -    max = create_proc_entry("rxbuf_max", 0644, dir);
  1.1706 -    cur = create_proc_entry("rxbuf_cur", 0444, dir);
  1.1707 -    if (!min || !max || !cur)
  1.1708 -        goto nomem;
  1.1709 +	min = create_proc_entry("rxbuf_min", 0644, dir);
  1.1710 +	max = create_proc_entry("rxbuf_max", 0644, dir);
  1.1711 +	cur = create_proc_entry("rxbuf_cur", 0444, dir);
  1.1712 +	if (!min || !max || !cur)
  1.1713 +		goto nomem;
  1.1714  
  1.1715 -    min->read_proc  = xennet_proc_read;
  1.1716 -    min->write_proc = xennet_proc_write;
  1.1717 -    min->data       = (void *)((unsigned long)dev | TARGET_MIN);
  1.1718 +	min->read_proc  = xennet_proc_read;
  1.1719 +	min->write_proc = xennet_proc_write;
  1.1720 +	min->data       = (void *)((unsigned long)dev | TARGET_MIN);
  1.1721  
  1.1722 -    max->read_proc  = xennet_proc_read;
  1.1723 -    max->write_proc = xennet_proc_write;
  1.1724 -    max->data       = (void *)((unsigned long)dev | TARGET_MAX);
  1.1725 +	max->read_proc  = xennet_proc_read;
  1.1726 +	max->write_proc = xennet_proc_write;
  1.1727 +	max->data       = (void *)((unsigned long)dev | TARGET_MAX);
  1.1728  
  1.1729 -    cur->read_proc  = xennet_proc_read;
  1.1730 -    cur->write_proc = xennet_proc_write;
  1.1731 -    cur->data       = (void *)((unsigned long)dev | TARGET_CUR);
  1.1732 +	cur->read_proc  = xennet_proc_read;
  1.1733 +	cur->write_proc = xennet_proc_write;
  1.1734 +	cur->data       = (void *)((unsigned long)dev | TARGET_CUR);
  1.1735  
  1.1736 -    return 0;
  1.1737 +	return 0;
  1.1738  
  1.1739   nomem:
  1.1740 -    xennet_proc_delif(dev);
  1.1741 -    return -ENOMEM;
  1.1742 +	xennet_proc_delif(dev);
  1.1743 +	return -ENOMEM;
  1.1744  }
  1.1745  
  1.1746  static void xennet_proc_delif(struct net_device *dev)
  1.1747  {
  1.1748 -    char name[30];
  1.1749 +	char name[30];
  1.1750  
  1.1751 -    sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
  1.1752 -    remove_proc_entry(name, NULL);
  1.1753 +	sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
  1.1754 +	remove_proc_entry(name, NULL);
  1.1755  
  1.1756 -    sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
  1.1757 -    remove_proc_entry(name, NULL);
  1.1758 +	sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
  1.1759 +	remove_proc_entry(name, NULL);
  1.1760  
  1.1761 -    sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
  1.1762 -    remove_proc_entry(name, NULL);
  1.1763 +	sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
  1.1764 +	remove_proc_entry(name, NULL);
  1.1765  
  1.1766 -    sprintf(name, "xen/net/%s", dev->name);
  1.1767 -    remove_proc_entry(name, NULL);
  1.1768 +	sprintf(name, "xen/net/%s", dev->name);
  1.1769 +	remove_proc_entry(name, NULL);
  1.1770  }
  1.1771  
  1.1772  #endif
  1.1773  
  1.1774  module_init(netif_init);
  1.1775  module_exit(netif_exit);
  1.1776 +
  1.1777 +/*
  1.1778 + * Local variables:
  1.1779 + *  c-file-style: "linux"
  1.1780 + *  indent-tabs-mode: t
  1.1781 + *  c-indent-level: 8
  1.1782 + *  c-basic-offset: 8
  1.1783 + *  tab-width: 8
  1.1784 + * End:
  1.1785 + */
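
A note on the /proc handlers in the last hunk: xennet_proc_addif() registers the same read and write callbacks for rxbuf_min, rxbuf_max and rxbuf_cur and tells them apart by packing a small selector (TARGET_CUR is 2UL, for example) into the two low bits of the net_device pointer. Because the structure is at least 4-byte aligned those bits are zero, so the callbacks can recover the pointer with & ~3UL and the selector with & 3. The standalone sketch below is illustrative only; demo_dev and the TAG_* values are invented stand-ins, with only the TARGET_CUR value taken from the hunk.

#include <stdio.h>
#include <stdint.h>

/* Illustrative selector values; only TARGET_CUR (2UL) is visible in the hunk. */
#define TAG_MIN 0UL
#define TAG_MAX 1UL
#define TAG_CUR 2UL

/* Stand-in for the per-interface state the real callbacks consult. */
struct demo_dev {
	int rx_min, rx_max, rx_cur;
};

/* Pack a 2-bit selector into an aligned pointer, as xennet_proc_addif() does. */
static void *pack(struct demo_dev *dev, unsigned long tag)
{
	return (void *)((uintptr_t)dev | tag);
}

/* Recover both halves, mirroring the & ~3UL and & 3 masking in the callbacks. */
static int read_target(void *data)
{
	struct demo_dev *dev = (struct demo_dev *)((uintptr_t)data & ~3UL);
	unsigned long tag = (uintptr_t)data & 3;

	switch (tag) {
	case TAG_MIN:
		return dev->rx_min;
	case TAG_MAX:
		return dev->rx_max;
	default:
		return dev->rx_cur;
	}
}

int main(void)
{
	struct demo_dev d = { .rx_min = 8, .rx_max = 256, .rx_cur = 64 };

	printf("min=%d max=%d cur=%d\n",
	       read_target(pack(&d, TAG_MIN)),
	       read_target(pack(&d, TAG_MAX)),
	       read_target(pack(&d, TAG_CUR)));
	return 0;
}

The payoff is one read routine and one write routine serving all three proc files, at the cost of masking the data pointer before every dereference.
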