ia64/xen-unstable

changeset 6910:7fbaf67a0af5

Clean up and re-indent netback driver.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Sep 16 13:27:01 2005 +0000 (2005-09-16)
parents 8bb3f2567b8c
children a434b5449d59 ffbc98d735bd
files linux-2.6-xen-sparse/drivers/xen/netback/common.h
      linux-2.6-xen-sparse/drivers/xen/netback/interface.c
      linux-2.6-xen-sparse/drivers/xen/netback/netback.c
      linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
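
The changeset is mechanical: the netback sources move from four-space indentation to Linux kernel coding style (tab indentation, eight-column tabs), the free_netif()/free_netif_callback() names are swapped so that free_netif() becomes the entry point used by netif_put(), long lines and comments are re-wrapped, and an Emacs local-variables block recording the style is appended to the touched files. As a rough sketch only (clamp_credit() below is a hypothetical helper, not code from this patch), the re-indentation amounts to:

/* Before: four-space indentation. */
static unsigned long clamp_credit(unsigned long credit, unsigned long limit)
{
    if (credit > limit)
        return limit;   /* cap at the configured limit */
    return credit;
}

/* After: Linux kernel style, one tab per indent level. */
static unsigned long clamp_credit(unsigned long credit, unsigned long limit)
{
	if (credit > limit)
		return limit;   /* cap at the configured limit */
	return credit;
}
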
line diff
     1.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Fri Sep 16 13:06:49 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Fri Sep 16 13:27:01 2005 +0000
     1.3 @@ -18,17 +18,11 @@
     1.4  #include <asm-xen/xen-public/io/netif.h>
     1.5  #include <asm/io.h>
     1.6  #include <asm/pgalloc.h>
     1.7 -
     1.8 -#ifdef CONFIG_XEN_NETDEV_GRANT
     1.9  #include <asm-xen/xen-public/grant_table.h>
    1.10  #include <asm-xen/gnttab.h>
    1.11  
    1.12  #define GRANT_INVALID_REF (0xFFFF)
    1.13  
    1.14 -#endif
    1.15 -
    1.16 -
    1.17 -
    1.18  #if 0
    1.19  #define ASSERT(_p) \
    1.20      if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
    1.21 @@ -44,74 +38,73 @@
    1.22  #define WPRINTK(fmt, args...) \
    1.23      printk(KERN_WARNING "xen_net: " fmt, ##args)
    1.24  
    1.25 +typedef struct netif_st {
    1.26 +	/* Unique identifier for this interface. */
    1.27 +	domid_t          domid;
    1.28 +	unsigned int     handle;
    1.29  
    1.30 -typedef struct netif_st {
    1.31 -    /* Unique identifier for this interface. */
    1.32 -    domid_t          domid;
    1.33 -    unsigned int     handle;
    1.34 +	u8               fe_dev_addr[6];
    1.35  
    1.36 -    u8               fe_dev_addr[6];
    1.37 -
    1.38 -    /* Physical parameters of the comms window. */
    1.39 -    unsigned long    tx_shmem_frame;
    1.40 +	/* Physical parameters of the comms window. */
    1.41 +	unsigned long    tx_shmem_frame;
    1.42  #ifdef CONFIG_XEN_NETDEV_GRANT
    1.43 -    u16              tx_shmem_handle;
    1.44 -    unsigned long    tx_shmem_vaddr; 
    1.45 -    grant_ref_t      tx_shmem_ref; 
    1.46 +	u16              tx_shmem_handle;
    1.47 +	unsigned long    tx_shmem_vaddr; 
    1.48 +	grant_ref_t      tx_shmem_ref; 
    1.49  #endif
    1.50 -    unsigned long    rx_shmem_frame;
    1.51 +	unsigned long    rx_shmem_frame;
    1.52  #ifdef CONFIG_XEN_NETDEV_GRANT
    1.53 -    u16              rx_shmem_handle;
    1.54 -    unsigned long    rx_shmem_vaddr; 
    1.55 -    grant_ref_t      rx_shmem_ref; 
    1.56 +	u16              rx_shmem_handle;
    1.57 +	unsigned long    rx_shmem_vaddr; 
    1.58 +	grant_ref_t      rx_shmem_ref; 
    1.59  #endif
    1.60 -    unsigned int     evtchn;
    1.61 -    unsigned int     remote_evtchn;
    1.62 +	unsigned int     evtchn;
    1.63 +	unsigned int     remote_evtchn;
    1.64  
    1.65 -    /* The shared rings and indexes. */
    1.66 -    netif_tx_interface_t *tx;
    1.67 -    netif_rx_interface_t *rx;
    1.68 +	/* The shared rings and indexes. */
    1.69 +	netif_tx_interface_t *tx;
    1.70 +	netif_rx_interface_t *rx;
    1.71  
    1.72 -    /* Private indexes into shared ring. */
    1.73 -    NETIF_RING_IDX rx_req_cons;
    1.74 -    NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
    1.75 +	/* Private indexes into shared ring. */
    1.76 +	NETIF_RING_IDX rx_req_cons;
    1.77 +	NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
    1.78  #ifdef CONFIG_XEN_NETDEV_GRANT
    1.79 -    NETIF_RING_IDX rx_resp_prod_copy; /* private version of shared variable */
    1.80 +	NETIF_RING_IDX rx_resp_prod_copy;
    1.81  #endif
    1.82 -    NETIF_RING_IDX tx_req_cons;
    1.83 -    NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
    1.84 +	NETIF_RING_IDX tx_req_cons;
    1.85 +	NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
    1.86  
    1.87 -    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
    1.88 -    unsigned long   credit_bytes;
    1.89 -    unsigned long   credit_usec;
    1.90 -    unsigned long   remaining_credit;
    1.91 -    struct timer_list credit_timeout;
    1.92 +	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
    1.93 +	unsigned long   credit_bytes;
    1.94 +	unsigned long   credit_usec;
    1.95 +	unsigned long   remaining_credit;
    1.96 +	struct timer_list credit_timeout;
    1.97  
    1.98 -    /* Miscellaneous private stuff. */
    1.99 -    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
   1.100 -    int active;
   1.101 -    struct list_head list;  /* scheduling list */
   1.102 -    atomic_t         refcnt;
   1.103 -    struct net_device *dev;
   1.104 -    struct net_device_stats stats;
   1.105 +	/* Miscellaneous private stuff. */
   1.106 +	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
   1.107 +	int active;
   1.108 +	struct list_head list;  /* scheduling list */
   1.109 +	atomic_t         refcnt;
   1.110 +	struct net_device *dev;
   1.111 +	struct net_device_stats stats;
   1.112  
   1.113 -    struct work_struct free_work;
   1.114 +	struct work_struct free_work;
   1.115  } netif_t;
   1.116  
   1.117  void netif_creditlimit(netif_t *netif);
   1.118  int  netif_disconnect(netif_t *netif);
   1.119  
   1.120  netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
   1.121 -void free_netif_callback(netif_t *netif);
   1.122 +void free_netif(netif_t *netif);
   1.123  int netif_map(netif_t *netif, unsigned long tx_ring_ref,
   1.124  	      unsigned long rx_ring_ref, unsigned int evtchn);
   1.125  
   1.126  #define netif_get(_b) (atomic_inc(&(_b)->refcnt))
   1.127 -#define netif_put(_b)                             \
   1.128 -    do {                                          \
   1.129 -        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
   1.130 -            free_netif_callback(_b);              \
   1.131 -    } while (0)
   1.132 +#define netif_put(_b)						\
   1.133 +	do {							\
   1.134 +		if ( atomic_dec_and_test(&(_b)->refcnt) )	\
   1.135 +			free_netif(_b);				\
   1.136 +	} while (0)
   1.137  
   1.138  void netif_xenbus_init(void);
   1.139  
   1.140 @@ -123,3 +116,13 @@ struct net_device_stats *netif_be_get_st
   1.141  irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
   1.142  
   1.143  #endif /* __NETIF__BACKEND__COMMON_H__ */
   1.144 +
   1.145 +/*
   1.146 + * Local variables:
   1.147 + *  c-file-style: "linux"
   1.148 + *  indent-tabs-mode: t
   1.149 + *  c-indent-level: 8
   1.150 + *  c-basic-offset: 8
   1.151 + *  tab-width: 8
   1.152 + * End:
   1.153 + */
     2.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Sep 16 13:06:49 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Sep 16 13:27:01 2005 +0000
     2.3 @@ -11,104 +11,105 @@
     2.4  
     2.5  static void __netif_up(netif_t *netif)
     2.6  {
     2.7 -    struct net_device *dev = netif->dev;
     2.8 -    spin_lock_bh(&dev->xmit_lock);
     2.9 -    netif->active = 1;
    2.10 -    spin_unlock_bh(&dev->xmit_lock);
    2.11 -    (void)bind_evtchn_to_irqhandler(
    2.12 -        netif->evtchn, netif_be_int, 0, dev->name, netif);
    2.13 -    netif_schedule_work(netif);
    2.14 +	struct net_device *dev = netif->dev;
    2.15 +	spin_lock_bh(&dev->xmit_lock);
    2.16 +	netif->active = 1;
    2.17 +	spin_unlock_bh(&dev->xmit_lock);
    2.18 +	(void)bind_evtchn_to_irqhandler(
    2.19 +		netif->evtchn, netif_be_int, 0, dev->name, netif);
    2.20 +	netif_schedule_work(netif);
    2.21  }
    2.22  
    2.23  static void __netif_down(netif_t *netif)
    2.24  {
    2.25 -    struct net_device *dev = netif->dev;
    2.26 -    spin_lock_bh(&dev->xmit_lock);
    2.27 -    netif->active = 0;
    2.28 -    spin_unlock_bh(&dev->xmit_lock);
    2.29 -    unbind_evtchn_from_irqhandler(netif->evtchn, netif);
    2.30 -    netif_deschedule_work(netif);
    2.31 +	struct net_device *dev = netif->dev;
    2.32 +	spin_lock_bh(&dev->xmit_lock);
    2.33 +	netif->active = 0;
    2.34 +	spin_unlock_bh(&dev->xmit_lock);
    2.35 +	unbind_evtchn_from_irqhandler(netif->evtchn, netif);
    2.36 +	netif_deschedule_work(netif);
    2.37  }
    2.38  
    2.39  static int net_open(struct net_device *dev)
    2.40  {
    2.41 -    netif_t *netif = netdev_priv(dev);
    2.42 -    if (netif->status == CONNECTED)
    2.43 -        __netif_up(netif);
    2.44 -    netif_start_queue(dev);
    2.45 -    return 0;
    2.46 +	netif_t *netif = netdev_priv(dev);
    2.47 +	if (netif->status == CONNECTED)
    2.48 +		__netif_up(netif);
    2.49 +	netif_start_queue(dev);
    2.50 +	return 0;
    2.51  }
    2.52  
    2.53  static int net_close(struct net_device *dev)
    2.54  {
    2.55 -    netif_t *netif = netdev_priv(dev);
    2.56 -    netif_stop_queue(dev);
    2.57 -    if (netif->status == CONNECTED)
    2.58 -        __netif_down(netif);
    2.59 -    return 0;
    2.60 +	netif_t *netif = netdev_priv(dev);
    2.61 +	netif_stop_queue(dev);
    2.62 +	if (netif->status == CONNECTED)
    2.63 +		__netif_down(netif);
    2.64 +	return 0;
    2.65  }
    2.66  
    2.67  netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
    2.68  {
    2.69 -    int err = 0, i;
    2.70 -    struct net_device *dev;
    2.71 -    netif_t *netif;
    2.72 -    char name[IFNAMSIZ] = {};
    2.73 +	int err = 0, i;
    2.74 +	struct net_device *dev;
    2.75 +	netif_t *netif;
    2.76 +	char name[IFNAMSIZ] = {};
    2.77  
    2.78 -    snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
    2.79 -    dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
    2.80 -    if (dev == NULL) {
    2.81 -        DPRINTK("Could not create netif: out of memory\n");
    2.82 -        return NULL;
    2.83 -    }
    2.84 +	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
    2.85 +	dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
    2.86 +	if (dev == NULL) {
    2.87 +		DPRINTK("Could not create netif: out of memory\n");
    2.88 +		return NULL;
    2.89 +	}
    2.90  
    2.91 -    netif = netdev_priv(dev);
    2.92 -    memset(netif, 0, sizeof(*netif));
    2.93 -    netif->domid  = domid;
    2.94 -    netif->handle = handle;
    2.95 -    netif->status = DISCONNECTED;
    2.96 -    atomic_set(&netif->refcnt, 0);
    2.97 -    netif->dev = dev;
    2.98 +	netif = netdev_priv(dev);
    2.99 +	memset(netif, 0, sizeof(*netif));
   2.100 +	netif->domid  = domid;
   2.101 +	netif->handle = handle;
   2.102 +	netif->status = DISCONNECTED;
   2.103 +	atomic_set(&netif->refcnt, 0);
   2.104 +	netif->dev = dev;
   2.105  
   2.106 -    netif->credit_bytes = netif->remaining_credit = ~0UL;
   2.107 -    netif->credit_usec  = 0UL;
   2.108 -    init_timer(&netif->credit_timeout);
   2.109 +	netif->credit_bytes = netif->remaining_credit = ~0UL;
   2.110 +	netif->credit_usec  = 0UL;
   2.111 +	init_timer(&netif->credit_timeout);
   2.112  
   2.113 -    dev->hard_start_xmit = netif_be_start_xmit;
   2.114 -    dev->get_stats       = netif_be_get_stats;
   2.115 -    dev->open            = net_open;
   2.116 -    dev->stop            = net_close;
   2.117 -    dev->features        = NETIF_F_NO_CSUM;
   2.118 +	dev->hard_start_xmit = netif_be_start_xmit;
   2.119 +	dev->get_stats       = netif_be_get_stats;
   2.120 +	dev->open            = net_open;
   2.121 +	dev->stop            = net_close;
   2.122 +	dev->features        = NETIF_F_NO_CSUM;
   2.123  
   2.124 -    /* Disable queuing. */
   2.125 -    dev->tx_queue_len = 0;
   2.126 +	/* Disable queuing. */
   2.127 +	dev->tx_queue_len = 0;
   2.128  
   2.129 -    for (i = 0; i < ETH_ALEN; i++)
   2.130 -	if (be_mac[i] != 0)
   2.131 -	    break;
   2.132 -    if (i == ETH_ALEN) {
   2.133 -        /*
   2.134 -         * Initialise a dummy MAC address. We choose the numerically largest
   2.135 -         * non-broadcast address to prevent the address getting stolen by an
   2.136 -         * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
   2.137 -         */ 
   2.138 -        memset(dev->dev_addr, 0xFF, ETH_ALEN);
   2.139 -        dev->dev_addr[0] &= ~0x01;
   2.140 -    } else
   2.141 -        memcpy(dev->dev_addr, be_mac, ETH_ALEN);
   2.142 +	for (i = 0; i < ETH_ALEN; i++)
   2.143 +		if (be_mac[i] != 0)
   2.144 +			break;
   2.145 +	if (i == ETH_ALEN) {
   2.146 +		/*
   2.147 +		 * Initialise a dummy MAC address. We choose the numerically
   2.148 +		 * largest non-broadcast address to prevent the address getting
   2.149 +		 * stolen by an Ethernet bridge for STP purposes.
   2.150 +                 * (FE:FF:FF:FF:FF:FF) 
   2.151 +		 */ 
   2.152 +		memset(dev->dev_addr, 0xFF, ETH_ALEN);
   2.153 +		dev->dev_addr[0] &= ~0x01;
   2.154 +	} else
   2.155 +		memcpy(dev->dev_addr, be_mac, ETH_ALEN);
   2.156  
   2.157 -    rtnl_lock();
   2.158 -    err = register_netdevice(dev);
   2.159 -    rtnl_unlock();
   2.160 -    if (err) {
   2.161 -        DPRINTK("Could not register new net device %s: err=%d\n",
   2.162 -                dev->name, err);
   2.163 -        free_netdev(dev);
   2.164 -        return NULL;
   2.165 -    }
   2.166 +	rtnl_lock();
   2.167 +	err = register_netdevice(dev);
   2.168 +	rtnl_unlock();
   2.169 +	if (err) {
   2.170 +		DPRINTK("Could not register new net device %s: err=%d\n",
   2.171 +			dev->name, err);
   2.172 +		free_netdev(dev);
   2.173 +		return NULL;
   2.174 +	}
   2.175  
   2.176 -    DPRINTK("Successfully created netif\n");
   2.177 -    return netif;
   2.178 +	DPRINTK("Successfully created netif\n");
   2.179 +	return netif;
   2.180  }
   2.181  
   2.182  static int map_frontend_pages(netif_t *netif, unsigned long localaddr,
   2.183 @@ -116,191 +117,204 @@ static int map_frontend_pages(netif_t *n
   2.184                                unsigned long rx_ring_ref)
   2.185  {
   2.186  #ifdef CONFIG_XEN_NETDEV_GRANT
   2.187 -    struct gnttab_map_grant_ref op;
   2.188 +	struct gnttab_map_grant_ref op;
   2.189  
   2.190 -    /* Map: Use the Grant table reference */
   2.191 -    op.host_addr = localaddr;
   2.192 -    op.flags     = GNTMAP_host_map;
   2.193 -    op.ref       = tx_ring_ref;
   2.194 -    op.dom       = netif->domid;
   2.195 +	/* Map: Use the Grant table reference */
   2.196 +	op.host_addr = localaddr;
   2.197 +	op.flags     = GNTMAP_host_map;
   2.198 +	op.ref       = tx_ring_ref;
   2.199 +	op.dom       = netif->domid;
   2.200      
   2.201 -    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   2.202 -    if (op.handle < 0) { 
   2.203 -        DPRINTK(" Grant table operation failure mapping tx_ring_ref!\n");
   2.204 -        return op.handle;
   2.205 -    }
   2.206 +	BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   2.207 +	if (op.handle < 0) { 
   2.208 +		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
   2.209 +		return op.handle;
   2.210 +	}
   2.211  
   2.212 -    netif->tx_shmem_ref    = tx_ring_ref;
   2.213 -    netif->tx_shmem_handle = op.handle;
   2.214 -    netif->tx_shmem_vaddr  = localaddr;
   2.215 +	netif->tx_shmem_ref    = tx_ring_ref;
   2.216 +	netif->tx_shmem_handle = op.handle;
   2.217 +	netif->tx_shmem_vaddr  = localaddr;
   2.218  
   2.219 -    /* Map: Use the Grant table reference */
   2.220 -    op.host_addr = localaddr + PAGE_SIZE;
   2.221 -    op.flags     = GNTMAP_host_map;
   2.222 -    op.ref       = rx_ring_ref;
   2.223 -    op.dom       = netif->domid;
   2.224 +	/* Map: Use the Grant table reference */
   2.225 +	op.host_addr = localaddr + PAGE_SIZE;
   2.226 +	op.flags     = GNTMAP_host_map;
   2.227 +	op.ref       = rx_ring_ref;
   2.228 +	op.dom       = netif->domid;
   2.229  
   2.230 -    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   2.231 -    if (op.handle < 0) { 
   2.232 -        DPRINTK(" Grant table operation failure mapping rx_ring_ref!\n");
   2.233 -        return op.handle;
   2.234 -    }
   2.235 +	BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   2.236 +	if (op.handle < 0) { 
   2.237 +		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
   2.238 +		return op.handle;
   2.239 +	}
   2.240  
   2.241 -    netif->rx_shmem_ref    = rx_ring_ref;
   2.242 -    netif->rx_shmem_handle = op.handle;
   2.243 -    netif->rx_shmem_vaddr  = localaddr + PAGE_SIZE;
   2.244 +	netif->rx_shmem_ref    = rx_ring_ref;
   2.245 +	netif->rx_shmem_handle = op.handle;
   2.246 +	netif->rx_shmem_vaddr  = localaddr + PAGE_SIZE;
   2.247  
   2.248  #else
   2.249 -    pgprot_t      prot = __pgprot(_KERNPG_TABLE);
   2.250 -    int           err;
   2.251 +	pgprot_t prot = __pgprot(_KERNPG_TABLE);
   2.252 +	int      err;
   2.253  
   2.254 -    err = direct_remap_pfn_range(&init_mm, localaddr,
   2.255 -				  tx_ring_ref, PAGE_SIZE,
   2.256 -				  prot, netif->domid); 
   2.257 +	err = direct_remap_pfn_range(
   2.258 +		&init_mm, localaddr,
   2.259 +		tx_ring_ref, PAGE_SIZE,
   2.260 +		prot, netif->domid); 
   2.261      
   2.262 -    err |= direct_remap_pfn_range(&init_mm, localaddr + PAGE_SIZE,
   2.263 -				  rx_ring_ref, PAGE_SIZE,
   2.264 -				  prot, netif->domid);
   2.265 +	err |= direct_remap_pfn_range(
   2.266 +		&init_mm, localaddr + PAGE_SIZE,
   2.267 +		rx_ring_ref, PAGE_SIZE,
   2.268 +		prot, netif->domid);
   2.269  
   2.270 -    if (err)
   2.271 -	return err;
   2.272 +	if (err)
   2.273 +		return err;
   2.274  #endif
   2.275  
   2.276 -    return 0;
   2.277 +	return 0;
   2.278  }
   2.279  
   2.280  static void unmap_frontend_pages(netif_t *netif)
   2.281  {
   2.282  #ifdef CONFIG_XEN_NETDEV_GRANT
   2.283 -    struct gnttab_unmap_grant_ref op;
   2.284 +	struct gnttab_unmap_grant_ref op;
   2.285  
   2.286 -    op.host_addr    = netif->tx_shmem_vaddr;
   2.287 -    op.handle       = netif->tx_shmem_handle;
   2.288 -    op.dev_bus_addr = 0;
   2.289 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   2.290 +	op.host_addr    = netif->tx_shmem_vaddr;
   2.291 +	op.handle       = netif->tx_shmem_handle;
   2.292 +	op.dev_bus_addr = 0;
   2.293 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   2.294  
   2.295 -    op.host_addr    = netif->rx_shmem_vaddr;
   2.296 -    op.handle       = netif->rx_shmem_handle;
   2.297 -    op.dev_bus_addr = 0;
   2.298 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   2.299 +	op.host_addr    = netif->rx_shmem_vaddr;
   2.300 +	op.handle       = netif->rx_shmem_handle;
   2.301 +	op.dev_bus_addr = 0;
   2.302 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   2.303  #endif
   2.304  
   2.305 -    return; 
   2.306 +	return; 
   2.307  }
   2.308  
   2.309  int netif_map(netif_t *netif, unsigned long tx_ring_ref,
   2.310  	      unsigned long rx_ring_ref, unsigned int evtchn)
   2.311  {
   2.312 -    struct vm_struct *vma;
   2.313 -    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
   2.314 -    int err;
   2.315 +	struct vm_struct *vma;
   2.316 +	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
   2.317 +	int err;
   2.318  
   2.319 -    vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP);
   2.320 -    if (vma == NULL)
   2.321 -        return -ENOMEM;
   2.322 +	vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP);
   2.323 +	if (vma == NULL)
   2.324 +		return -ENOMEM;
   2.325  
   2.326 -    err = map_frontend_pages(netif, (unsigned long)vma->addr, tx_ring_ref,
   2.327 -                             rx_ring_ref);
   2.328 -    if (err) {
   2.329 -        vfree(vma->addr);
   2.330 -	return err;
   2.331 -    }
   2.332 +	err = map_frontend_pages(
   2.333 +		netif, (unsigned long)vma->addr, tx_ring_ref, rx_ring_ref);
   2.334 +	if (err) {
   2.335 +		vfree(vma->addr);
   2.336 +		return err;
   2.337 +	}
   2.338  
   2.339 -    op.u.bind_interdomain.dom1 = DOMID_SELF;
   2.340 -    op.u.bind_interdomain.dom2 = netif->domid;
   2.341 -    op.u.bind_interdomain.port1 = 0;
   2.342 -    op.u.bind_interdomain.port2 = evtchn;
   2.343 -    err = HYPERVISOR_event_channel_op(&op);
   2.344 -    if (err) {
   2.345 -	unmap_frontend_pages(netif);
   2.346 -	vfree(vma->addr);
   2.347 -	return err;
   2.348 -    }
   2.349 +	op.u.bind_interdomain.dom1 = DOMID_SELF;
   2.350 +	op.u.bind_interdomain.dom2 = netif->domid;
   2.351 +	op.u.bind_interdomain.port1 = 0;
   2.352 +	op.u.bind_interdomain.port2 = evtchn;
   2.353 +	err = HYPERVISOR_event_channel_op(&op);
   2.354 +	if (err) {
   2.355 +		unmap_frontend_pages(netif);
   2.356 +		vfree(vma->addr);
   2.357 +		return err;
   2.358 +	}
   2.359  
   2.360 -    netif->evtchn = op.u.bind_interdomain.port1;
   2.361 -    netif->remote_evtchn = evtchn;
   2.362 +	netif->evtchn = op.u.bind_interdomain.port1;
   2.363 +	netif->remote_evtchn = evtchn;
   2.364  
   2.365 -    netif->tx = (netif_tx_interface_t *)vma->addr;
   2.366 -    netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
   2.367 -    netif->tx->resp_prod = netif->rx->resp_prod = 0;
   2.368 -    netif_get(netif);
   2.369 -    wmb(); /* Other CPUs see new state before interface is started. */
   2.370 +	netif->tx = (netif_tx_interface_t *)vma->addr;
   2.371 +	netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
   2.372 +	netif->tx->resp_prod = netif->rx->resp_prod = 0;
   2.373 +	netif_get(netif);
   2.374 +	wmb(); /* Other CPUs see new state before interface is started. */
   2.375  
   2.376 -    rtnl_lock();
   2.377 -    netif->status = CONNECTED;
   2.378 -    wmb();
   2.379 -    if (netif_running(netif->dev))
   2.380 -        __netif_up(netif);
   2.381 -    rtnl_unlock();
   2.382 +	rtnl_lock();
   2.383 +	netif->status = CONNECTED;
   2.384 +	wmb();
   2.385 +	if (netif_running(netif->dev))
   2.386 +		__netif_up(netif);
   2.387 +	rtnl_unlock();
   2.388  
   2.389 -    return 0;
   2.390 +	return 0;
   2.391  }
   2.392  
   2.393 -static void free_netif(void *arg)
   2.394 +static void free_netif_callback(void *arg)
   2.395  {
   2.396 -    evtchn_op_t op = { .cmd = EVTCHNOP_close };
   2.397 -    netif_t *netif = (netif_t *)arg;
   2.398 +	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   2.399 +	netif_t *netif = (netif_t *)arg;
   2.400  
   2.401 -    /*
   2.402 -     * These can't be done in netif_disconnect() because at that point there
   2.403 -     * may be outstanding requests in the network stack whose asynchronous
   2.404 -     * responses must still be notified to the remote driver.
   2.405 -     */
   2.406 +	/*
   2.407 +	 * These can't be done in netif_disconnect() because at that point
   2.408 +	 * there may be outstanding requests in the network stack whose
   2.409 +	 * asynchronous responses must still be notified to the remote driver.
   2.410 +	 */
   2.411  
   2.412 -    op.u.close.port = netif->evtchn;
   2.413 -    op.u.close.dom = DOMID_SELF;
   2.414 -    HYPERVISOR_event_channel_op(&op);
   2.415 -    op.u.close.port = netif->remote_evtchn;
   2.416 -    op.u.close.dom = netif->domid;
   2.417 -    HYPERVISOR_event_channel_op(&op);
   2.418 +	op.u.close.port = netif->evtchn;
   2.419 +	op.u.close.dom = DOMID_SELF;
   2.420 +	HYPERVISOR_event_channel_op(&op);
   2.421 +	op.u.close.port = netif->remote_evtchn;
   2.422 +	op.u.close.dom = netif->domid;
   2.423 +	HYPERVISOR_event_channel_op(&op);
   2.424  
   2.425 -    unregister_netdev(netif->dev);
   2.426 +	unregister_netdev(netif->dev);
   2.427  
   2.428 -    if (netif->tx) {
   2.429 -	unmap_frontend_pages(netif);
   2.430 -	vfree(netif->tx); /* Frees netif->rx as well. */
   2.431 -    }
   2.432 +	if (netif->tx) {
   2.433 +		unmap_frontend_pages(netif);
   2.434 +		vfree(netif->tx); /* Frees netif->rx as well. */
   2.435 +	}
   2.436  
   2.437 -    free_netdev(netif->dev);
   2.438 +	free_netdev(netif->dev);
   2.439  }
   2.440  
   2.441 -void free_netif_callback(netif_t *netif)
   2.442 +void free_netif(netif_t *netif)
   2.443  {
   2.444 -    INIT_WORK(&netif->free_work, free_netif, (void *)netif);
   2.445 -    schedule_work(&netif->free_work);
   2.446 +	INIT_WORK(&netif->free_work, free_netif_callback, (void *)netif);
   2.447 +	schedule_work(&netif->free_work);
   2.448  }
   2.449  
   2.450  void netif_creditlimit(netif_t *netif)
   2.451  {
   2.452  #if 0
   2.453 -    /* Set the credit limit (reset remaining credit to new limit). */
   2.454 -    netif->credit_bytes = netif->remaining_credit = creditlimit->credit_bytes;
   2.455 -    netif->credit_usec = creditlimit->period_usec;
   2.456 +	/* Set the credit limit (reset remaining credit to new limit). */
   2.457 +	netif->credit_bytes     = creditlimit->credit_bytes;
   2.458 +	netif->remaining_credit = creditlimit->credit_bytes;
   2.459 +	netif->credit_usec      = creditlimit->period_usec;
   2.460  
   2.461 -    if (netif->status == CONNECTED) {
   2.462 -        /*
   2.463 -         * Schedule work so that any packets waiting under previous credit 
   2.464 -         * limit are dealt with (acts like a replenishment point).
   2.465 -         */
   2.466 -        netif->credit_timeout.expires = jiffies;
   2.467 -        netif_schedule_work(netif);
   2.468 -    }
   2.469 +	if (netif->status == CONNECTED) {
   2.470 +		/*
   2.471 +		 * Schedule work so that any packets waiting under previous
   2.472 +		 * credit limit are dealt with (acts as a replenishment point).
   2.473 +		 */
   2.474 +		netif->credit_timeout.expires = jiffies;
   2.475 +		netif_schedule_work(netif);
   2.476 +	}
   2.477  #endif
   2.478  }
   2.479  
   2.480  int netif_disconnect(netif_t *netif)
   2.481  {
   2.482  
   2.483 -    if (netif->status == CONNECTED) {
   2.484 -        rtnl_lock();
   2.485 -        netif->status = DISCONNECTING;
   2.486 -        wmb();
   2.487 -        if (netif_running(netif->dev))
   2.488 -            __netif_down(netif);
   2.489 -        rtnl_unlock();
   2.490 -        netif_put(netif);
   2.491 -        return 0; /* Caller should not send response message. */
   2.492 -    }
   2.493 +	if (netif->status == CONNECTED) {
   2.494 +		rtnl_lock();
   2.495 +		netif->status = DISCONNECTING;
   2.496 +		wmb();
   2.497 +		if (netif_running(netif->dev))
   2.498 +			__netif_down(netif);
   2.499 +		rtnl_unlock();
   2.500 +		netif_put(netif);
   2.501 +		return 0; /* Caller should not send response message. */
   2.502 +	}
   2.503  
   2.504 -    return 1;
   2.505 +	return 1;
   2.506  }
   2.507 +
   2.508 +/*
   2.509 + * Local variables:
   2.510 + *  c-file-style: "linux"
   2.511 + *  indent-tabs-mode: t
   2.512 + *  c-indent-level: 8
   2.513 + *  c-basic-offset: 8
   2.514 + *  tab-width: 8
   2.515 + * End:
   2.516 + */
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 16 13:06:49 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 16 13:27:01 2005 +0000
     3.3 @@ -57,8 +57,8 @@ static unsigned long mmap_vstart;
     3.4  #define PKT_PROT_LEN 64
     3.5  
     3.6  static struct {
     3.7 -    netif_tx_request_t req;
     3.8 -    netif_t *netif;
     3.9 +	netif_tx_request_t req;
    3.10 +	netif_t *netif;
    3.11  } pending_tx_info[MAX_PENDING_REQS];
    3.12  static u16 pending_ring[MAX_PENDING_REQS];
    3.13  typedef unsigned int PEND_RING_IDX;
    3.14 @@ -91,49 +91,49 @@ static spinlock_t mfn_lock = SPIN_LOCK_U
    3.15  
    3.16  static unsigned long alloc_mfn(void)
    3.17  {
    3.18 -    unsigned long mfn = 0, flags;
    3.19 -    struct xen_memory_reservation reservation = {
    3.20 -        .extent_start = mfn_list,
    3.21 -        .nr_extents   = MAX_MFN_ALLOC,
    3.22 -        .extent_order = 0,
    3.23 -        .domid        = DOMID_SELF
    3.24 -    };
    3.25 -    spin_lock_irqsave(&mfn_lock, flags);
    3.26 -    if ( unlikely(alloc_index == 0) )
    3.27 -        alloc_index = HYPERVISOR_memory_op(
    3.28 -            XENMEM_increase_reservation, &reservation);
    3.29 -    if ( alloc_index != 0 )
    3.30 -        mfn = mfn_list[--alloc_index];
    3.31 -    spin_unlock_irqrestore(&mfn_lock, flags);
    3.32 -    return mfn;
    3.33 +	unsigned long mfn = 0, flags;
    3.34 +	struct xen_memory_reservation reservation = {
    3.35 +		.extent_start = mfn_list,
    3.36 +		.nr_extents   = MAX_MFN_ALLOC,
    3.37 +		.extent_order = 0,
    3.38 +		.domid        = DOMID_SELF
    3.39 +	};
    3.40 +	spin_lock_irqsave(&mfn_lock, flags);
    3.41 +	if ( unlikely(alloc_index == 0) )
    3.42 +		alloc_index = HYPERVISOR_memory_op(
    3.43 +			XENMEM_increase_reservation, &reservation);
    3.44 +	if ( alloc_index != 0 )
    3.45 +		mfn = mfn_list[--alloc_index];
    3.46 +	spin_unlock_irqrestore(&mfn_lock, flags);
    3.47 +	return mfn;
    3.48  }
    3.49  
    3.50  #ifndef CONFIG_XEN_NETDEV_GRANT
    3.51  static void free_mfn(unsigned long mfn)
    3.52  {
    3.53 -    unsigned long flags;
    3.54 -    struct xen_memory_reservation reservation = {
    3.55 -        .extent_start = &mfn,
    3.56 -        .nr_extents   = 1,
    3.57 -        .extent_order = 0,
    3.58 -        .domid        = DOMID_SELF
    3.59 -    };
    3.60 -    spin_lock_irqsave(&mfn_lock, flags);
    3.61 -    if ( alloc_index != MAX_MFN_ALLOC )
    3.62 -        mfn_list[alloc_index++] = mfn;
    3.63 -    else if ( HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation)
    3.64 -              != 1 )
    3.65 -        BUG();
    3.66 -    spin_unlock_irqrestore(&mfn_lock, flags);
    3.67 +	unsigned long flags;
    3.68 +	struct xen_memory_reservation reservation = {
    3.69 +		.extent_start = &mfn,
    3.70 +		.nr_extents   = 1,
    3.71 +		.extent_order = 0,
    3.72 +		.domid        = DOMID_SELF
    3.73 +	};
    3.74 +	spin_lock_irqsave(&mfn_lock, flags);
    3.75 +	if ( alloc_index != MAX_MFN_ALLOC )
    3.76 +		mfn_list[alloc_index++] = mfn;
    3.77 +	else
    3.78 +		BUG_ON(HYPERVISOR_memory_op(XENMEM_decrease_reservation,
    3.79 +					    &reservation) != 1);
    3.80 +	spin_unlock_irqrestore(&mfn_lock, flags);
    3.81  }
    3.82  #endif
    3.83  
    3.84  static inline void maybe_schedule_tx_action(void)
    3.85  {
    3.86 -    smp_mb();
    3.87 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
    3.88 -         !list_empty(&net_schedule_list) )
    3.89 -        tasklet_schedule(&net_tx_tasklet);
    3.90 +	smp_mb();
    3.91 +	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
    3.92 +	    !list_empty(&net_schedule_list))
    3.93 +		tasklet_schedule(&net_tx_tasklet);
    3.94  }
    3.95  
    3.96  /*
    3.97 @@ -142,77 +142,77 @@ static inline void maybe_schedule_tx_act
    3.98   */
    3.99  static inline int is_xen_skb(struct sk_buff *skb)
   3.100  {
   3.101 -    extern kmem_cache_t *skbuff_cachep;
   3.102 -    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
   3.103 -    return (cp == skbuff_cachep);
   3.104 +	extern kmem_cache_t *skbuff_cachep;
   3.105 +	kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
   3.106 +	return (cp == skbuff_cachep);
   3.107  }
   3.108  
   3.109  int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
   3.110  {
   3.111 -    netif_t *netif = netdev_priv(dev);
   3.112 +	netif_t *netif = netdev_priv(dev);
   3.113  
   3.114 -    ASSERT(skb->dev == dev);
   3.115 +	ASSERT(skb->dev == dev);
   3.116  
   3.117 -    /* Drop the packet if the target domain has no receive buffers. */
   3.118 -    if ( !netif->active || 
   3.119 -         (netif->rx_req_cons == netif->rx->req_prod) ||
   3.120 -         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
   3.121 -        goto drop;
   3.122 +	/* Drop the packet if the target domain has no receive buffers. */
   3.123 +	if (!netif->active || 
   3.124 +	    (netif->rx_req_cons == netif->rx->req_prod) ||
   3.125 +	    ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE))
   3.126 +		goto drop;
   3.127  
   3.128 -    /*
   3.129 -     * We do not copy the packet unless:
   3.130 -     *  1. The data is shared; or
   3.131 -     *  2. The data is not allocated from our special cache.
   3.132 -     * NB. We also couldn't cope with fragmented packets, but we won't get
   3.133 -     *     any because we not advertise the NETIF_F_SG feature.
   3.134 -     */
   3.135 -    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
   3.136 -    {
   3.137 -        int hlen = skb->data - skb->head;
   3.138 -        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
   3.139 -        if ( unlikely(nskb == NULL) )
   3.140 -            goto drop;
   3.141 -        skb_reserve(nskb, hlen);
   3.142 -        __skb_put(nskb, skb->len);
   3.143 -        if (skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen))
   3.144 -            BUG();
   3.145 -        nskb->dev = skb->dev;
   3.146 -        nskb->proto_csum_valid = skb->proto_csum_valid;
   3.147 -        dev_kfree_skb(skb);
   3.148 -        skb = nskb;
   3.149 -    }
   3.150 +	/*
   3.151 +	 * We do not copy the packet unless:
   3.152 +	 *  1. The data is shared; or
   3.153 +	 *  2. The data is not allocated from our special cache.
   3.154 +	 * NB. We also couldn't cope with fragmented packets, but we won't get
   3.155 +	 *     any because we not advertise the NETIF_F_SG feature.
   3.156 +	 */
   3.157 +	if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
   3.158 +		int hlen = skb->data - skb->head;
   3.159 +		struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
   3.160 +		if ( unlikely(nskb == NULL) )
   3.161 +			goto drop;
   3.162 +		skb_reserve(nskb, hlen);
   3.163 +		__skb_put(nskb, skb->len);
   3.164 +		BUG_ON(skb_copy_bits(skb, -hlen, nskb->data - hlen,
   3.165 +				     skb->len + hlen));
   3.166 +		nskb->dev = skb->dev;
   3.167 +		nskb->proto_csum_valid = skb->proto_csum_valid;
   3.168 +		dev_kfree_skb(skb);
   3.169 +		skb = nskb;
   3.170 +	}
   3.171  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.172  #ifdef DEBUG_GRANT
   3.173 -    printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d id=%04x gr=%04x\n",
   3.174 -           netif->rx->req_prod,
   3.175 -           netif->rx_req_cons,
   3.176 -           netif->rx->ring[
   3.177 -		   MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
   3.178 -           netif->rx->ring[
   3.179 -		   MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
   3.180 +	printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d "
   3.181 +	       "id=%04x gr=%04x\n",
   3.182 +	       netif->rx->req_prod,
   3.183 +	       netif->rx_req_cons,
   3.184 +	       netif->rx->ring[
   3.185 +		       MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
   3.186 +	       netif->rx->ring[
   3.187 +		       MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
   3.188  #endif
   3.189  #endif
   3.190 -    netif->rx_req_cons++;
   3.191 -    netif_get(netif);
   3.192 +	netif->rx_req_cons++;
   3.193 +	netif_get(netif);
   3.194  
   3.195 -    skb_queue_tail(&rx_queue, skb);
   3.196 -    tasklet_schedule(&net_rx_tasklet);
   3.197 +	skb_queue_tail(&rx_queue, skb);
   3.198 +	tasklet_schedule(&net_rx_tasklet);
   3.199  
   3.200 -    return 0;
   3.201 +	return 0;
   3.202  
   3.203   drop:
   3.204 -    netif->stats.tx_dropped++;
   3.205 -    dev_kfree_skb(skb);
   3.206 -    return 0;
   3.207 +	netif->stats.tx_dropped++;
   3.208 +	dev_kfree_skb(skb);
   3.209 +	return 0;
   3.210  }
   3.211  
   3.212  #if 0
   3.213  static void xen_network_done_notify(void)
   3.214  {
   3.215 -    static struct net_device *eth0_dev = NULL;
   3.216 -    if ( unlikely(eth0_dev == NULL) )
   3.217 -        eth0_dev = __dev_get_by_name("eth0");
   3.218 -    netif_rx_schedule(eth0_dev);
   3.219 +	static struct net_device *eth0_dev = NULL;
   3.220 +	if (unlikely(eth0_dev == NULL))
   3.221 +		eth0_dev = __dev_get_by_name("eth0");
   3.222 +	netif_rx_schedule(eth0_dev);
   3.223  }
   3.224  /* 
   3.225   * Add following to poll() function in NAPI driver (Tigon3 is example):
   3.226 @@ -221,658 +221,654 @@ static void xen_network_done_notify(void
   3.227   */
   3.228  int xen_network_done(void)
   3.229  {
   3.230 -    return skb_queue_empty(&rx_queue);
   3.231 +	return skb_queue_empty(&rx_queue);
   3.232  }
   3.233  #endif
   3.234  
   3.235  static void net_rx_action(unsigned long unused)
   3.236  {
   3.237 -    netif_t *netif = NULL; 
   3.238 -    s8 status;
   3.239 -    u16 size, id, evtchn;
   3.240 -    multicall_entry_t *mcl;
   3.241 -    mmu_update_t *mmu;
   3.242 +	netif_t *netif = NULL; 
   3.243 +	s8 status;
   3.244 +	u16 size, id, evtchn;
   3.245 +	multicall_entry_t *mcl;
   3.246 +	mmu_update_t *mmu;
   3.247  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.248 -    gnttab_transfer_t *gop;
   3.249 +	gnttab_transfer_t *gop;
   3.250  #else
   3.251 -    struct mmuext_op *mmuext;
   3.252 +	struct mmuext_op *mmuext;
   3.253  #endif
   3.254 -    unsigned long vdata, old_mfn, new_mfn;
   3.255 -    struct sk_buff_head rxq;
   3.256 -    struct sk_buff *skb;
   3.257 -    u16 notify_list[NETIF_RX_RING_SIZE];
   3.258 -    int notify_nr = 0;
   3.259 +	unsigned long vdata, old_mfn, new_mfn;
   3.260 +	struct sk_buff_head rxq;
   3.261 +	struct sk_buff *skb;
   3.262 +	u16 notify_list[NETIF_RX_RING_SIZE];
   3.263 +	int notify_nr = 0;
   3.264  
   3.265 -    skb_queue_head_init(&rxq);
   3.266 +	skb_queue_head_init(&rxq);
   3.267  
   3.268 -    mcl = rx_mcl;
   3.269 -    mmu = rx_mmu;
   3.270 +	mcl = rx_mcl;
   3.271 +	mmu = rx_mmu;
   3.272  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.273 -    gop = grant_rx_op;
   3.274 +	gop = grant_rx_op;
   3.275  #else
   3.276 -    mmuext = rx_mmuext;
   3.277 +	mmuext = rx_mmuext;
   3.278  #endif
   3.279  
   3.280 -    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
   3.281 -    {
   3.282 -        netif   = netdev_priv(skb->dev);
   3.283 -        vdata   = (unsigned long)skb->data;
   3.284 -        old_mfn = virt_to_mfn(vdata);
   3.285 +	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
   3.286 +		netif   = netdev_priv(skb->dev);
   3.287 +		vdata   = (unsigned long)skb->data;
   3.288 +		old_mfn = virt_to_mfn(vdata);
   3.289  
   3.290 -        /* Memory squeeze? Back off for an arbitrary while. */
   3.291 -        if ( (new_mfn = alloc_mfn()) == 0 )
   3.292 -        {
   3.293 -            if ( net_ratelimit() )
   3.294 -                WPRINTK("Memory squeeze in netback driver.\n");
   3.295 -            mod_timer(&net_timer, jiffies + HZ);
   3.296 -            skb_queue_head(&rx_queue, skb);
   3.297 -            break;
   3.298 -        }
   3.299 -        /*
   3.300 -         * Set the new P2M table entry before reassigning the old data page.
   3.301 -         * Heed the comment in pgtable-2level.h:pte_page(). :-)
   3.302 -         */
   3.303 -        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
   3.304 +		/* Memory squeeze? Back off for an arbitrary while. */
   3.305 +		if ((new_mfn = alloc_mfn()) == 0) {
   3.306 +			if ( net_ratelimit() )
   3.307 +				WPRINTK("Memory squeeze in netback driver.\n");
   3.308 +			mod_timer(&net_timer, jiffies + HZ);
   3.309 +			skb_queue_head(&rx_queue, skb);
   3.310 +			break;
   3.311 +		}
   3.312 +		/*
   3.313 +		 * Set the new P2M table entry before reassigning the old data
   3.314 +		 * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
   3.315 +		 */
   3.316 +		phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] =
   3.317 +			new_mfn;
   3.318  
   3.319 -        MULTI_update_va_mapping(mcl, vdata,
   3.320 -				pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
   3.321 -        mcl++;
   3.322 +		MULTI_update_va_mapping(mcl, vdata,
   3.323 +					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
   3.324 +		mcl++;
   3.325  
   3.326  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.327 -        gop->mfn = old_mfn;
   3.328 -        gop->domid = netif->domid;
   3.329 -        gop->ref = netif->rx->ring[
   3.330 -        MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
   3.331 -        netif->rx_resp_prod_copy++;
   3.332 -        gop++;
   3.333 +		gop->mfn = old_mfn;
   3.334 +		gop->domid = netif->domid;
   3.335 +		gop->ref = netif->rx->ring[
   3.336 +			MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
   3.337 +		netif->rx_resp_prod_copy++;
   3.338 +		gop++;
   3.339  #else
   3.340 -        mcl->op = __HYPERVISOR_mmuext_op;
   3.341 -        mcl->args[0] = (unsigned long)mmuext;
   3.342 -        mcl->args[1] = 1;
   3.343 -        mcl->args[2] = 0;
   3.344 -        mcl->args[3] = netif->domid;
   3.345 -        mcl++;
   3.346 +		mcl->op = __HYPERVISOR_mmuext_op;
   3.347 +		mcl->args[0] = (unsigned long)mmuext;
   3.348 +		mcl->args[1] = 1;
   3.349 +		mcl->args[2] = 0;
   3.350 +		mcl->args[3] = netif->domid;
   3.351 +		mcl++;
   3.352  
   3.353 -        mmuext->cmd = MMUEXT_REASSIGN_PAGE;
   3.354 -        mmuext->arg1.mfn = old_mfn;
   3.355 -        mmuext++;
   3.356 +		mmuext->cmd = MMUEXT_REASSIGN_PAGE;
   3.357 +		mmuext->arg1.mfn = old_mfn;
   3.358 +		mmuext++;
   3.359  #endif
   3.360 -        mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   3.361 -        mmu->val = __pa(vdata) >> PAGE_SHIFT;  
   3.362 -        mmu++;
   3.363 +		mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
   3.364 +			MMU_MACHPHYS_UPDATE;
   3.365 +		mmu->val = __pa(vdata) >> PAGE_SHIFT;  
   3.366 +		mmu++;
   3.367  
   3.368 -        __skb_queue_tail(&rxq, skb);
   3.369 +		__skb_queue_tail(&rxq, skb);
   3.370  
   3.371  #ifdef DEBUG_GRANT
   3.372 -        dump_packet('a', old_mfn, vdata);
   3.373 +		dump_packet('a', old_mfn, vdata);
   3.374  #endif
   3.375 -        /* Filled the batch queue? */
   3.376 -        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
   3.377 -            break;
   3.378 -    }
   3.379 +		/* Filled the batch queue? */
   3.380 +		if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl))
   3.381 +			break;
   3.382 +	}
   3.383  
   3.384 -    if ( mcl == rx_mcl )
   3.385 -        return;
   3.386 +	if (mcl == rx_mcl)
   3.387 +		return;
   3.388  
   3.389 -    mcl->op = __HYPERVISOR_mmu_update;
   3.390 -    mcl->args[0] = (unsigned long)rx_mmu;
   3.391 -    mcl->args[1] = mmu - rx_mmu;
   3.392 -    mcl->args[2] = 0;
   3.393 -    mcl->args[3] = DOMID_SELF;
   3.394 -    mcl++;
   3.395 +	mcl->op = __HYPERVISOR_mmu_update;
   3.396 +	mcl->args[0] = (unsigned long)rx_mmu;
   3.397 +	mcl->args[1] = mmu - rx_mmu;
   3.398 +	mcl->args[2] = 0;
   3.399 +	mcl->args[3] = DOMID_SELF;
   3.400 +	mcl++;
   3.401  
   3.402  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.403 -    mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   3.404 +	mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   3.405  #else
   3.406 -    mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   3.407 +	mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   3.408  #endif
   3.409 -    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
   3.410 -        BUG();
   3.411 +	BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0);
   3.412  
   3.413 -    mcl = rx_mcl;
   3.414 +	mcl = rx_mcl;
   3.415  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.416 -    if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
   3.417 -                                 gop - grant_rx_op)) { 
   3.418 -        /* 
   3.419 -        ** The other side has given us a bad grant ref, or has no headroom, 
   3.420 -        ** or has gone away. Unfortunately the current grant table code 
   3.421 -        ** doesn't inform us which is the case, so not much we can do. 
   3.422 -        */
   3.423 -        DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) %d "
   3.424 -                "packets.\n", grant_rx_op[0].domid, gop - grant_rx_op); 
   3.425 -    }
   3.426 -    gop = grant_rx_op;
   3.427 +	if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
   3.428 +				     gop - grant_rx_op)) { 
   3.429 +		/*
   3.430 +		 * The other side has given us a bad grant ref, or has no 
   3.431 +		 * headroom, or has gone away. Unfortunately the current grant
   3.432 +		 * table code doesn't inform us which is the case, so not much
   3.433 +		 * we can do. 
   3.434 +		 */
   3.435 +		DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) "
   3.436 +			"%d packets.\n",
   3.437 +			grant_rx_op[0].domid, gop - grant_rx_op); 
   3.438 +	}
   3.439 +	gop = grant_rx_op;
   3.440  #else
   3.441 -    mmuext = rx_mmuext;
   3.442 +	mmuext = rx_mmuext;
   3.443  #endif
   3.444 -    while ( (skb = __skb_dequeue(&rxq)) != NULL )
   3.445 -    {
   3.446 -        netif   = netdev_priv(skb->dev);
   3.447 -        size    = skb->tail - skb->data;
   3.448 +	while ((skb = __skb_dequeue(&rxq)) != NULL) {
   3.449 +		netif   = netdev_priv(skb->dev);
   3.450 +		size    = skb->tail - skb->data;
   3.451  
   3.452 -        /* Rederive the machine addresses. */
   3.453 -        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
   3.454 +		/* Rederive the machine addresses. */
   3.455 +		new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
   3.456  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.457 -        old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
   3.458 +		old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
   3.459  #else
   3.460 -        old_mfn = mmuext[0].arg1.mfn;
   3.461 +		old_mfn = mmuext[0].arg1.mfn;
   3.462  #endif
   3.463 -        atomic_set(&(skb_shinfo(skb)->dataref), 1);
   3.464 -        skb_shinfo(skb)->nr_frags = 0;
   3.465 -        skb_shinfo(skb)->frag_list = NULL;
   3.466 +		atomic_set(&(skb_shinfo(skb)->dataref), 1);
   3.467 +		skb_shinfo(skb)->nr_frags = 0;
   3.468 +		skb_shinfo(skb)->frag_list = NULL;
   3.469  
   3.470 -        netif->stats.tx_bytes += size;
   3.471 -        netif->stats.tx_packets++;
   3.472 +		netif->stats.tx_bytes += size;
   3.473 +		netif->stats.tx_packets++;
   3.474  
   3.475 -        /* The update_va_mapping() must not fail. */
   3.476 -        BUG_ON(mcl[0].result != 0);
   3.477 +		/* The update_va_mapping() must not fail. */
   3.478 +		BUG_ON(mcl[0].result != 0);
   3.479  
   3.480 -        /* Check the reassignment error code. */
   3.481 -        status = NETIF_RSP_OKAY;
   3.482 +		/* Check the reassignment error code. */
   3.483 +		status = NETIF_RSP_OKAY;
   3.484  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.485 -        if(gop->status != 0) { 
   3.486 -            DPRINTK("Bad status %d from grant transfer to DOM%u\n", 
   3.487 -                    gop->status, netif->domid);
   3.488 -            /* XXX SMH: should free 'old_mfn' here */
   3.489 -            status = NETIF_RSP_ERROR; 
   3.490 -        } 
   3.491 +		if(gop->status != 0) { 
   3.492 +			DPRINTK("Bad status %d from grant transfer to DOM%u\n",
   3.493 +				gop->status, netif->domid);
   3.494 +			/* XXX SMH: should free 'old_mfn' here */
   3.495 +			status = NETIF_RSP_ERROR; 
   3.496 +		} 
   3.497  #else
   3.498 -        if ( unlikely(mcl[1].result != 0) )
   3.499 -        {
   3.500 -            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
   3.501 -            free_mfn(old_mfn);
   3.502 -            status = NETIF_RSP_ERROR;
   3.503 -        }
   3.504 +		if (unlikely(mcl[1].result != 0)) {
   3.505 +			DPRINTK("Failed MMU update transferring to DOM%u\n",
   3.506 +				netif->domid);
   3.507 +			free_mfn(old_mfn);
   3.508 +			status = NETIF_RSP_ERROR;
   3.509 +		}
   3.510  #endif
   3.511 -        evtchn = netif->evtchn;
   3.512 -        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
   3.513 -        if ( make_rx_response(netif, id, status,
   3.514 -                              (old_mfn << PAGE_SHIFT) | /* XXX */
   3.515 -                              ((unsigned long)skb->data & ~PAGE_MASK),
   3.516 -                              size, skb->proto_csum_valid) &&
   3.517 -             (rx_notify[evtchn] == 0) )
   3.518 -        {
   3.519 -            rx_notify[evtchn] = 1;
   3.520 -            notify_list[notify_nr++] = evtchn;
   3.521 -        }
   3.522 +		evtchn = netif->evtchn;
   3.523 +		id = netif->rx->ring[
   3.524 +			MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
   3.525 +		if (make_rx_response(netif, id, status,
   3.526 +				     (old_mfn << PAGE_SHIFT) | /* XXX */
   3.527 +				     ((unsigned long)skb->data & ~PAGE_MASK),
   3.528 +				     size, skb->proto_csum_valid) &&
   3.529 +		    (rx_notify[evtchn] == 0)) {
   3.530 +			rx_notify[evtchn] = 1;
   3.531 +			notify_list[notify_nr++] = evtchn;
   3.532 +		}
   3.533  
   3.534 -        netif_put(netif);
   3.535 -        dev_kfree_skb(skb);
   3.536 +		netif_put(netif);
   3.537 +		dev_kfree_skb(skb);
   3.538  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.539 -        mcl++;
   3.540 -        gop++;
   3.541 +		mcl++;
   3.542 +		gop++;
   3.543  #else
   3.544 -        mcl += 2;
   3.545 -        mmuext += 1;
   3.546 +		mcl += 2;
   3.547 +		mmuext += 1;
   3.548  #endif
   3.549 -    }
   3.550 +	}
   3.551  
   3.552 -    while ( notify_nr != 0 )
   3.553 -    {
   3.554 -        evtchn = notify_list[--notify_nr];
   3.555 -        rx_notify[evtchn] = 0;
   3.556 -        notify_via_evtchn(evtchn);
   3.557 -    }
   3.558 +	while (notify_nr != 0) {
   3.559 +		evtchn = notify_list[--notify_nr];
   3.560 +		rx_notify[evtchn] = 0;
   3.561 +		notify_via_evtchn(evtchn);
   3.562 +	}
   3.563  
   3.564 -  out: 
   3.565 -    /* More work to do? */
   3.566 -    if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
   3.567 -        tasklet_schedule(&net_rx_tasklet);
   3.568 +	/* More work to do? */
   3.569 +	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
   3.570 +		tasklet_schedule(&net_rx_tasklet);
   3.571  #if 0
   3.572 -    else
   3.573 -        xen_network_done_notify();
   3.574 +	else
   3.575 +		xen_network_done_notify();
   3.576  #endif
   3.577  }
   3.578  
   3.579  static void net_alarm(unsigned long unused)
   3.580  {
   3.581 -    tasklet_schedule(&net_rx_tasklet);
   3.582 +	tasklet_schedule(&net_rx_tasklet);
   3.583  }
   3.584  
   3.585  struct net_device_stats *netif_be_get_stats(struct net_device *dev)
   3.586  {
   3.587 -    netif_t *netif = netdev_priv(dev);
   3.588 -    return &netif->stats;
   3.589 +	netif_t *netif = netdev_priv(dev);
   3.590 +	return &netif->stats;
   3.591  }
   3.592  
   3.593  static int __on_net_schedule_list(netif_t *netif)
   3.594  {
   3.595 -    return netif->list.next != NULL;
   3.596 +	return netif->list.next != NULL;
   3.597  }
   3.598  
   3.599  static void remove_from_net_schedule_list(netif_t *netif)
   3.600  {
   3.601 -    spin_lock_irq(&net_schedule_list_lock);
   3.602 -    if ( likely(__on_net_schedule_list(netif)) )
   3.603 -    {
   3.604 -        list_del(&netif->list);
   3.605 -        netif->list.next = NULL;
   3.606 -        netif_put(netif);
   3.607 -    }
   3.608 -    spin_unlock_irq(&net_schedule_list_lock);
   3.609 +	spin_lock_irq(&net_schedule_list_lock);
   3.610 +	if (likely(__on_net_schedule_list(netif))) {
   3.611 +		list_del(&netif->list);
   3.612 +		netif->list.next = NULL;
   3.613 +		netif_put(netif);
   3.614 +	}
   3.615 +	spin_unlock_irq(&net_schedule_list_lock);
   3.616  }
   3.617  
   3.618  static void add_to_net_schedule_list_tail(netif_t *netif)
   3.619  {
   3.620 -    if ( __on_net_schedule_list(netif) )
   3.621 -        return;
   3.622 +	if (__on_net_schedule_list(netif))
   3.623 +		return;
   3.624  
   3.625 -    spin_lock_irq(&net_schedule_list_lock);
   3.626 -    if ( !__on_net_schedule_list(netif) && netif->active )
   3.627 -    {
   3.628 -        list_add_tail(&netif->list, &net_schedule_list);
   3.629 -        netif_get(netif);
   3.630 -    }
   3.631 -    spin_unlock_irq(&net_schedule_list_lock);
   3.632 +	spin_lock_irq(&net_schedule_list_lock);
   3.633 +	if (!__on_net_schedule_list(netif) && netif->active) {
   3.634 +		list_add_tail(&netif->list, &net_schedule_list);
   3.635 +		netif_get(netif);
   3.636 +	}
   3.637 +	spin_unlock_irq(&net_schedule_list_lock);
   3.638  }
   3.639  
   3.640  void netif_schedule_work(netif_t *netif)
   3.641  {
   3.642 -    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
   3.643 -         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
   3.644 -    {
   3.645 -        add_to_net_schedule_list_tail(netif);
   3.646 -        maybe_schedule_tx_action();
   3.647 -    }
   3.648 +	if ((netif->tx_req_cons != netif->tx->req_prod) &&
   3.649 +	    ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE)) {
   3.650 +		add_to_net_schedule_list_tail(netif);
   3.651 +		maybe_schedule_tx_action();
   3.652 +	}
   3.653  }
   3.654  
   3.655  void netif_deschedule_work(netif_t *netif)
   3.656  {
   3.657 -    remove_from_net_schedule_list(netif);
   3.658 +	remove_from_net_schedule_list(netif);
   3.659  }
   3.660  
   3.661  
   3.662  static void tx_credit_callback(unsigned long data)
   3.663  {
   3.664 -    netif_t *netif = (netif_t *)data;
   3.665 -    netif->remaining_credit = netif->credit_bytes;
   3.666 -    netif_schedule_work(netif);
   3.667 +	netif_t *netif = (netif_t *)data;
   3.668 +	netif->remaining_credit = netif->credit_bytes;
   3.669 +	netif_schedule_work(netif);
   3.670  }
   3.671  
   3.672  inline static void net_tx_action_dealloc(void)
   3.673  {
   3.674  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.675 -    gnttab_unmap_grant_ref_t *gop;
   3.676 +	gnttab_unmap_grant_ref_t *gop;
   3.677  #else
   3.678 -    multicall_entry_t *mcl;
   3.679 +	multicall_entry_t *mcl;
   3.680  #endif
   3.681 -    u16 pending_idx;
   3.682 -    PEND_RING_IDX dc, dp;
   3.683 -    netif_t *netif;
   3.684 +	u16 pending_idx;
   3.685 +	PEND_RING_IDX dc, dp;
   3.686 +	netif_t *netif;
   3.687  
   3.688 -    dc = dealloc_cons;
   3.689 -    dp = dealloc_prod;
   3.690 +	dc = dealloc_cons;
   3.691 +	dp = dealloc_prod;
   3.692  
   3.693  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.694 -    /*
   3.695 -     * Free up any grants we have finished using
   3.696 -     */
   3.697 -    gop = tx_unmap_ops;
   3.698 -    while ( dc != dp )
   3.699 -    {
   3.700 -        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   3.701 -        gop->host_addr    = MMAP_VADDR(pending_idx);
   3.702 -        gop->dev_bus_addr = 0;
   3.703 -        gop->handle       = grant_tx_ref[pending_idx];
   3.704 -        grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
   3.705 -        gop++;
   3.706 -    }
   3.707 -    BUG_ON(HYPERVISOR_grant_table_op(
   3.708 -               GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
   3.709 +	/*
   3.710 +	 * Free up any grants we have finished using
   3.711 +	 */
   3.712 +	gop = tx_unmap_ops;
   3.713 +	while (dc != dp) {
   3.714 +		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   3.715 +		gop->host_addr    = MMAP_VADDR(pending_idx);
   3.716 +		gop->dev_bus_addr = 0;
   3.717 +		gop->handle       = grant_tx_ref[pending_idx];
   3.718 +		grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
   3.719 +		gop++;
   3.720 +	}
   3.721 +	BUG_ON(HYPERVISOR_grant_table_op(
   3.722 +		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
   3.723  #else
   3.724 -    mcl = tx_mcl;
   3.725 -    while ( dc != dp )
   3.726 -    {
   3.727 -        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   3.728 -	MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
   3.729 -				__pte(0), 0);
   3.730 -        mcl++;     
   3.731 -    }
   3.732 +	mcl = tx_mcl;
   3.733 +	while (dc != dp) {
   3.734 +		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   3.735 +		MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
   3.736 +					__pte(0), 0);
   3.737 +		mcl++;     
   3.738 +	}
   3.739  
   3.740 -    mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   3.741 -    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
   3.742 -        BUG();
   3.743 +	mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   3.744 +	BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
   3.745  
   3.746 -    mcl = tx_mcl;
   3.747 +	mcl = tx_mcl;
   3.748  #endif
   3.749 -    while ( dealloc_cons != dp )
   3.750 -    {
   3.751 +	while (dealloc_cons != dp) {
   3.752  #ifndef CONFIG_XEN_NETDEV_GRANT
   3.753 -        /* The update_va_mapping() must not fail. */
   3.754 -        BUG_ON(mcl[0].result != 0);
   3.755 +		/* The update_va_mapping() must not fail. */
   3.756 +		BUG_ON(mcl[0].result != 0);
   3.757  #endif
   3.758  
   3.759 -        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
   3.760 +		pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
   3.761  
   3.762 -        netif = pending_tx_info[pending_idx].netif;
   3.763 +		netif = pending_tx_info[pending_idx].netif;
   3.764  
   3.765 -        make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
   3.766 -                         NETIF_RSP_OKAY);
   3.767 +		make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
   3.768 +				 NETIF_RSP_OKAY);
   3.769          
   3.770 -        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   3.771 +		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   3.772  
   3.773 -        /*
   3.774 -         * Scheduling checks must happen after the above response is posted.
   3.775 -         * This avoids a possible race with a guest OS on another CPU if that
   3.776 -         * guest is testing against 'resp_prod' when deciding whether to notify
   3.777 -         * us when it queues additional packets.
   3.778 -         */
   3.779 -        mb();
   3.780 -        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
   3.781 -             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
   3.782 -            add_to_net_schedule_list_tail(netif);
   3.783 +		/*
   3.784 +		 * Scheduling checks must happen after the above response is
   3.785 +		 * posted. This avoids a possible race with a guest OS on
   3.786 +		 * another CPU if that guest is testing against 'resp_prod'
   3.787 +		 * when deciding whether to notify us when it queues additional
    3.788 +		 * packets.
   3.789 +		 */
   3.790 +		mb();
   3.791 +		if ((netif->tx_req_cons != netif->tx->req_prod) &&
   3.792 +		    ((netif->tx_req_cons-netif->tx_resp_prod) !=
   3.793 +		     NETIF_TX_RING_SIZE))
   3.794 +			add_to_net_schedule_list_tail(netif);
   3.795          
   3.796 -        netif_put(netif);
   3.797 +		netif_put(netif);
   3.798  
   3.799  #ifndef CONFIG_XEN_NETDEV_GRANT
   3.800 -        mcl++;
   3.801 +		mcl++;
   3.802  #endif
   3.803 -    }
   3.804 -
   3.805 +	}
   3.806  }
   3.807  
   3.808  /* Called after netfront has transmitted */
   3.809  static void net_tx_action(unsigned long unused)
   3.810  {
   3.811 -    struct list_head *ent;
   3.812 -    struct sk_buff *skb;
   3.813 -    netif_t *netif;
   3.814 -    netif_tx_request_t txreq;
   3.815 -    u16 pending_idx;
   3.816 -    NETIF_RING_IDX i;
   3.817 +	struct list_head *ent;
   3.818 +	struct sk_buff *skb;
   3.819 +	netif_t *netif;
   3.820 +	netif_tx_request_t txreq;
   3.821 +	u16 pending_idx;
   3.822 +	NETIF_RING_IDX i;
   3.823  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.824 -    gnttab_map_grant_ref_t *mop;
   3.825 +	gnttab_map_grant_ref_t *mop;
   3.826  #else
   3.827 -    multicall_entry_t *mcl;
   3.828 +	multicall_entry_t *mcl;
   3.829  #endif
   3.830 -    unsigned int data_len;
   3.831 +	unsigned int data_len;
   3.832  
   3.833 -    if ( dealloc_cons != dealloc_prod )
   3.834 -        net_tx_action_dealloc();
   3.835 +	if (dealloc_cons != dealloc_prod)
   3.836 +		net_tx_action_dealloc();
   3.837  
   3.838  #ifdef CONFIG_XEN_NETDEV_GRANT
   3.839 -    mop = tx_map_ops;
   3.840 +	mop = tx_map_ops;
   3.841  #else
   3.842 -    mcl = tx_mcl;
   3.843 +	mcl = tx_mcl;
   3.844  #endif
   3.845 -    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   3.846 -            !list_empty(&net_schedule_list) )
   3.847 -    {
   3.848 -        /* Get a netif from the list with work to do. */
   3.849 -        ent = net_schedule_list.next;
   3.850 -        netif = list_entry(ent, netif_t, list);
   3.851 -        netif_get(netif);
   3.852 -        remove_from_net_schedule_list(netif);
   3.853 +	while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
   3.854 +		!list_empty(&net_schedule_list)) {
   3.855 +		/* Get a netif from the list with work to do. */
   3.856 +		ent = net_schedule_list.next;
   3.857 +		netif = list_entry(ent, netif_t, list);
   3.858 +		netif_get(netif);
   3.859 +		remove_from_net_schedule_list(netif);
   3.860  
   3.861 -        /* Work to do? */
   3.862 -        i = netif->tx_req_cons;
   3.863 -        if ( (i == netif->tx->req_prod) ||
   3.864 -             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
   3.865 -        {
   3.866 -            netif_put(netif);
   3.867 -            continue;
   3.868 -        }
   3.869 +		/* Work to do? */
   3.870 +		i = netif->tx_req_cons;
   3.871 +		if ((i == netif->tx->req_prod) ||
   3.872 +		    ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE)) {
   3.873 +			netif_put(netif);
   3.874 +			continue;
   3.875 +		}
   3.876  
   3.877 -        rmb(); /* Ensure that we see the request before we copy it. */
   3.878 -        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
   3.879 -               sizeof(txreq));
   3.880 -        /* Credit-based scheduling. */
   3.881 -        if ( txreq.size > netif->remaining_credit )
   3.882 -        {
   3.883 -            unsigned long now = jiffies;
   3.884 -            unsigned long next_credit = 
   3.885 -                netif->credit_timeout.expires +
   3.886 -                msecs_to_jiffies(netif->credit_usec / 1000);
   3.887 +		rmb(); /* Ensure that we see the request before we copy it. */
   3.888 +		memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
   3.889 +		       sizeof(txreq));
   3.890 +		/* Credit-based scheduling. */
   3.891 +		if (txreq.size > netif->remaining_credit) {
   3.892 +			unsigned long now = jiffies;
   3.893 +			unsigned long next_credit = 
   3.894 +				netif->credit_timeout.expires +
   3.895 +				msecs_to_jiffies(netif->credit_usec / 1000);
   3.896  
   3.897 -            /* Timer could already be pending in some rare cases. */
   3.898 -            if ( timer_pending(&netif->credit_timeout) )
   3.899 -                break;
   3.900 +			/* Timer could already be pending in rare cases. */
   3.901 +			if (timer_pending(&netif->credit_timeout))
   3.902 +				break;
   3.903  
   3.904 -            /* Already passed the point at which we can replenish credit? */
   3.905 -            if ( time_after_eq(now, next_credit) )
   3.906 -            {
   3.907 -                netif->credit_timeout.expires = now;
   3.908 -                netif->remaining_credit = netif->credit_bytes;
   3.909 -            }
   3.910 +			/* Passed the point where we can replenish credit? */
   3.911 +			if (time_after_eq(now, next_credit)) {
   3.912 +				netif->credit_timeout.expires = now;
   3.913 +				netif->remaining_credit = netif->credit_bytes;
   3.914 +			}
   3.915  
   3.916 -            /* Still too big to send right now? Then set a timer callback. */
   3.917 -            if ( txreq.size > netif->remaining_credit )
   3.918 -            {
   3.919 -                netif->remaining_credit = 0;
   3.920 -                netif->credit_timeout.expires  = next_credit;
   3.921 -                netif->credit_timeout.data     = (unsigned long)netif;
   3.922 -                netif->credit_timeout.function = tx_credit_callback;
   3.923 -                add_timer_on(&netif->credit_timeout, smp_processor_id());
   3.924 -                break;
   3.925 -            }
   3.926 -        }
   3.927 -        netif->remaining_credit -= txreq.size;
   3.928 +			/* Still too big to send right now? Set a callback. */
   3.929 +			if (txreq.size > netif->remaining_credit) {
   3.930 +				netif->remaining_credit = 0;
   3.931 +				netif->credit_timeout.expires  = 
   3.932 +					next_credit;
   3.933 +				netif->credit_timeout.data     =
   3.934 +					(unsigned long)netif;
   3.935 +				netif->credit_timeout.function =
   3.936 +					tx_credit_callback;
   3.937 +				add_timer_on(&netif->credit_timeout,
   3.938 +					     smp_processor_id());
   3.939 +				break;
   3.940 +			}
   3.941 +		}
   3.942 +		netif->remaining_credit -= txreq.size;
   3.943  
   3.944 -        /*
   3.945 -         * Why the barrier? It ensures that the frontend sees updated req_cons
   3.946 -         * before we check for more work to schedule.
   3.947 -         */
   3.948 -        netif->tx->req_cons = ++netif->tx_req_cons;
   3.949 -        mb();
   3.950 +		/*
   3.951 +		 * Why the barrier? It ensures that the frontend sees updated
   3.952 +		 * req_cons before we check for more work to schedule.
   3.953 +		 */
   3.954 +		netif->tx->req_cons = ++netif->tx_req_cons;
   3.955 +		mb();
   3.956  
   3.957 -        netif_schedule_work(netif);
   3.958 +		netif_schedule_work(netif);
   3.959  
   3.960 -        if ( unlikely(txreq.size < ETH_HLEN) || 
   3.961 -             unlikely(txreq.size > ETH_FRAME_LEN) )
   3.962 -        {
   3.963 -            DPRINTK("Bad packet size: %d\n", txreq.size);
   3.964 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   3.965 -            netif_put(netif);
   3.966 -            continue; 
   3.967 -        }
   3.968 +		if (unlikely(txreq.size < ETH_HLEN) || 
   3.969 +		    unlikely(txreq.size > ETH_FRAME_LEN)) {
   3.970 +			DPRINTK("Bad packet size: %d\n", txreq.size);
   3.971 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   3.972 +			netif_put(netif);
   3.973 +			continue; 
   3.974 +		}
   3.975 +
   3.976 +		/* No crossing a page as the payload mustn't fragment. */
   3.977 +		if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >=
   3.978 +			     PAGE_SIZE)) {
   3.979 +			DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
   3.980 +				txreq.addr, txreq.size, 
   3.981 +				(txreq.addr &~PAGE_MASK) + txreq.size);
   3.982 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   3.983 +			netif_put(netif);
   3.984 +			continue;
   3.985 +		}
   3.986  
   3.987 -        /* No crossing a page boundary as the payload mustn't fragment. */
   3.988 -        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 
   3.989 -        {
   3.990 -            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
   3.991 -                    txreq.addr, txreq.size, 
   3.992 -                    (txreq.addr &~PAGE_MASK) + txreq.size);
   3.993 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   3.994 -            netif_put(netif);
   3.995 -            continue;
   3.996 -        }
   3.997 +		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   3.998 +
   3.999 +		data_len = (txreq.size > PKT_PROT_LEN) ?
  3.1000 +			PKT_PROT_LEN : txreq.size;
  3.1001  
  3.1002 -        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
  3.1003 -
  3.1004 -        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
  3.1005 +		skb = alloc_skb(data_len+16, GFP_ATOMIC);
  3.1006 +		if (unlikely(skb == NULL)) {
  3.1007 +			DPRINTK("Can't allocate a skb in start_xmit.\n");
  3.1008 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  3.1009 +			netif_put(netif);
  3.1010 +			break;
  3.1011 +		}
  3.1012  
  3.1013 -        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
  3.1014 -        {
  3.1015 -            DPRINTK("Can't allocate a skb in start_xmit.\n");
  3.1016 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  3.1017 -            netif_put(netif);
  3.1018 -            break;
  3.1019 -        }
  3.1020 -
  3.1021 -        /* Packets passed to netif_rx() must have some headroom. */
  3.1022 -        skb_reserve(skb, 16);
  3.1023 +		/* Packets passed to netif_rx() must have some headroom. */
  3.1024 +		skb_reserve(skb, 16);
  3.1025  #ifdef CONFIG_XEN_NETDEV_GRANT
  3.1026 -        mop->host_addr = MMAP_VADDR(pending_idx);
  3.1027 -        mop->dom       = netif->domid;
  3.1028 -        mop->ref       = txreq.addr >> PAGE_SHIFT;
  3.1029 -        mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
  3.1030 -        mop++;
  3.1031 +		mop->host_addr = MMAP_VADDR(pending_idx);
  3.1032 +		mop->dom       = netif->domid;
  3.1033 +		mop->ref       = txreq.addr >> PAGE_SHIFT;
  3.1034 +		mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
  3.1035 +		mop++;
  3.1036  #else
  3.1037 -	MULTI_update_va_mapping_otherdomain(
  3.1038 -	    mcl, MMAP_VADDR(pending_idx),
  3.1039 -	    pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
  3.1040 -	    0, netif->domid);
  3.1041 +		MULTI_update_va_mapping_otherdomain(
  3.1042 +			mcl, MMAP_VADDR(pending_idx),
  3.1043 +			pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
  3.1044 +			0, netif->domid);
  3.1045  
  3.1046 -        mcl++;
  3.1047 +		mcl++;
  3.1048  #endif
  3.1049  
  3.1050 -        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
  3.1051 -        pending_tx_info[pending_idx].netif = netif;
  3.1052 -        *((u16 *)skb->data) = pending_idx;
  3.1053 +		memcpy(&pending_tx_info[pending_idx].req,
  3.1054 +		       &txreq, sizeof(txreq));
  3.1055 +		pending_tx_info[pending_idx].netif = netif;
  3.1056 +		*((u16 *)skb->data) = pending_idx;
  3.1057  
  3.1058 -        __skb_queue_tail(&tx_queue, skb);
  3.1059 +		__skb_queue_tail(&tx_queue, skb);
  3.1060  
  3.1061 -        pending_cons++;
  3.1062 +		pending_cons++;
  3.1063  
  3.1064  #ifdef CONFIG_XEN_NETDEV_GRANT
  3.1065 -        if ( (mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops) )
  3.1066 -            break;
  3.1067 +		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
  3.1068 +			break;
  3.1069  #else
  3.1070 -        /* Filled the batch queue? */
  3.1071 -        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
  3.1072 -            break;
  3.1073 +		/* Filled the batch queue? */
  3.1074 +		if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl))
  3.1075 +			break;
  3.1076  #endif
  3.1077 -    }
  3.1078 +	}
  3.1079  
  3.1080  #ifdef CONFIG_XEN_NETDEV_GRANT
  3.1081 -    if ( mop == tx_map_ops )
  3.1082 -        return;
  3.1083 +	if (mop == tx_map_ops)
  3.1084 +		return;
  3.1085  
  3.1086 -    BUG_ON(HYPERVISOR_grant_table_op(
  3.1087 -        GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
  3.1088 +	BUG_ON(HYPERVISOR_grant_table_op(
  3.1089 +		GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
  3.1090  
  3.1091 -    mop = tx_map_ops;
  3.1092 +	mop = tx_map_ops;
  3.1093  #else
  3.1094 -    if ( mcl == tx_mcl )
  3.1095 -        return;
  3.1096 +	if (mcl == tx_mcl)
  3.1097 +		return;
  3.1098  
  3.1099 -    BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
  3.1100 +	BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
  3.1101  
  3.1102 -    mcl = tx_mcl;
  3.1103 +	mcl = tx_mcl;
  3.1104  #endif
  3.1105 -    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
  3.1106 -    {
  3.1107 -        pending_idx = *((u16 *)skb->data);
  3.1108 -        netif       = pending_tx_info[pending_idx].netif;
  3.1109 -        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
  3.1110 +	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
  3.1111 +		pending_idx = *((u16 *)skb->data);
  3.1112 +		netif       = pending_tx_info[pending_idx].netif;
  3.1113 +		memcpy(&txreq, &pending_tx_info[pending_idx].req,
  3.1114 +		       sizeof(txreq));
  3.1115  
  3.1116 -        /* Check the remap error code. */
  3.1117 +		/* Check the remap error code. */
  3.1118  #ifdef CONFIG_XEN_NETDEV_GRANT
  3.1119 -        if ( unlikely(mop->handle < 0) )
  3.1120 -        {
  3.1121 -            printk(KERN_ALERT "#### netback grant fails\n");
  3.1122 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  3.1123 -            netif_put(netif);
  3.1124 -            kfree_skb(skb);
  3.1125 -            mop++;
  3.1126 -            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
  3.1127 -            continue;
  3.1128 -        }
  3.1129 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
  3.1130 -                             FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
  3.1131 -        grant_tx_ref[pending_idx] = mop->handle;
  3.1132 +		if (unlikely(mop->handle < 0)) {
  3.1133 +			printk(KERN_ALERT "#### netback grant fails\n");
  3.1134 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  3.1135 +			netif_put(netif);
  3.1136 +			kfree_skb(skb);
  3.1137 +			mop++;
  3.1138 +			pending_ring[MASK_PEND_IDX(pending_prod++)] =
  3.1139 +				pending_idx;
  3.1140 +			continue;
  3.1141 +		}
  3.1142 +		phys_to_machine_mapping[
  3.1143 +			__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
  3.1144 +			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
  3.1145 +		grant_tx_ref[pending_idx] = mop->handle;
  3.1146  #else
  3.1147 -        if ( unlikely(mcl[0].result != 0) )
  3.1148 -        {
  3.1149 -            DPRINTK("Bad page frame\n");
  3.1150 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  3.1151 -            netif_put(netif);
  3.1152 -            kfree_skb(skb);
  3.1153 -            mcl++;
  3.1154 -            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
  3.1155 -            continue;
  3.1156 -        }
  3.1157 +		if (unlikely(mcl[0].result != 0)) {
  3.1158 +			DPRINTK("Bad page frame\n");
  3.1159 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  3.1160 +			netif_put(netif);
  3.1161 +			kfree_skb(skb);
  3.1162 +			mcl++;
  3.1163 +			pending_ring[MASK_PEND_IDX(pending_prod++)] =
  3.1164 +				pending_idx;
  3.1165 +			continue;
  3.1166 +		}
  3.1167  
  3.1168 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
  3.1169 -            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
  3.1170 +		phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >>
  3.1171 +				       PAGE_SHIFT] =
  3.1172 +			FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
  3.1173  #endif
  3.1174  
  3.1175 -        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
  3.1176 +		data_len = (txreq.size > PKT_PROT_LEN) ?
  3.1177 +			PKT_PROT_LEN : txreq.size;
  3.1178  
  3.1179 -        __skb_put(skb, data_len);
  3.1180 -        memcpy(skb->data, 
  3.1181 -               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
  3.1182 -               data_len);
  3.1183 -        if ( data_len < txreq.size )
  3.1184 -        {
  3.1185 -            /* Append the packet payload as a fragment. */
  3.1186 -            skb_shinfo(skb)->frags[0].page        = 
  3.1187 -                virt_to_page(MMAP_VADDR(pending_idx));
  3.1188 -            skb_shinfo(skb)->frags[0].size        = txreq.size - data_len;
  3.1189 -            skb_shinfo(skb)->frags[0].page_offset = 
  3.1190 -                (txreq.addr + data_len) & ~PAGE_MASK;
  3.1191 -            skb_shinfo(skb)->nr_frags = 1;
  3.1192 -        }
  3.1193 -        else
  3.1194 -        {
  3.1195 -            /* Schedule a response immediately. */
  3.1196 -            netif_idx_release(pending_idx);
  3.1197 -        }
  3.1198 +		__skb_put(skb, data_len);
  3.1199 +		memcpy(skb->data, 
  3.1200 +		       (void *)(MMAP_VADDR(pending_idx)|
  3.1201 +				(txreq.addr&~PAGE_MASK)),
  3.1202 +		       data_len);
  3.1203 +		if (data_len < txreq.size) {
  3.1204 +			/* Append the packet payload as a fragment. */
  3.1205 +			skb_shinfo(skb)->frags[0].page        = 
  3.1206 +				virt_to_page(MMAP_VADDR(pending_idx));
  3.1207 +			skb_shinfo(skb)->frags[0].size        =
  3.1208 +				txreq.size - data_len;
  3.1209 +			skb_shinfo(skb)->frags[0].page_offset = 
  3.1210 +				(txreq.addr + data_len) & ~PAGE_MASK;
  3.1211 +			skb_shinfo(skb)->nr_frags = 1;
  3.1212 +		} else {
  3.1213 +			/* Schedule a response immediately. */
  3.1214 +			netif_idx_release(pending_idx);
  3.1215 +		}
  3.1216  
  3.1217 -        skb->data_len  = txreq.size - data_len;
  3.1218 -        skb->len      += skb->data_len;
  3.1219 +		skb->data_len  = txreq.size - data_len;
  3.1220 +		skb->len      += skb->data_len;
  3.1221  
  3.1222 -        skb->dev      = netif->dev;
  3.1223 -        skb->protocol = eth_type_trans(skb, skb->dev);
  3.1224 +		skb->dev      = netif->dev;
  3.1225 +		skb->protocol = eth_type_trans(skb, skb->dev);
  3.1226  
  3.1227 -        /* No checking needed on localhost, but remember the field is blank. */
  3.1228 -        skb->ip_summed        = CHECKSUM_UNNECESSARY;
  3.1229 -        skb->proto_csum_valid = 1;
  3.1230 -        skb->proto_csum_blank = txreq.csum_blank;
  3.1231 +		/*
   3.1232 +		 * No checking needed on localhost, but remember the field is
   3.1233 +		 * blank.
   3.1234 +		 */
  3.1235 +		skb->ip_summed        = CHECKSUM_UNNECESSARY;
  3.1236 +		skb->proto_csum_valid = 1;
  3.1237 +		skb->proto_csum_blank = txreq.csum_blank;
  3.1238  
  3.1239 -        netif->stats.rx_bytes += txreq.size;
  3.1240 -        netif->stats.rx_packets++;
  3.1241 +		netif->stats.rx_bytes += txreq.size;
  3.1242 +		netif->stats.rx_packets++;
  3.1243  
  3.1244 -        netif_rx(skb);
  3.1245 -        netif->dev->last_rx = jiffies;
  3.1246 +		netif_rx(skb);
  3.1247 +		netif->dev->last_rx = jiffies;
  3.1248  
  3.1249  #ifdef CONFIG_XEN_NETDEV_GRANT
  3.1250 -        mop++;
  3.1251 +		mop++;
  3.1252  #else
  3.1253 -        mcl++;
  3.1254 +		mcl++;
  3.1255  #endif
  3.1256 -    }
  3.1257 +	}
  3.1258  }
  3.1259  
  3.1260  static void netif_idx_release(u16 pending_idx)
  3.1261  {
  3.1262 -    static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
  3.1263 -    unsigned long flags;
  3.1264 +	static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
  3.1265 +	unsigned long flags;
  3.1266  
  3.1267 -    spin_lock_irqsave(&_lock, flags);
  3.1268 -    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
  3.1269 -    spin_unlock_irqrestore(&_lock, flags);
  3.1270 +	spin_lock_irqsave(&_lock, flags);
  3.1271 +	dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
  3.1272 +	spin_unlock_irqrestore(&_lock, flags);
  3.1273  
  3.1274 -    tasklet_schedule(&net_tx_tasklet);
  3.1275 +	tasklet_schedule(&net_tx_tasklet);
  3.1276  }
  3.1277  
  3.1278  static void netif_page_release(struct page *page)
  3.1279  {
  3.1280 -    u16 pending_idx = page - virt_to_page(mmap_vstart);
  3.1281 +	u16 pending_idx = page - virt_to_page(mmap_vstart);
  3.1282  
  3.1283 -    /* Ready for next use. */
  3.1284 -    set_page_count(page, 1);
  3.1285 +	/* Ready for next use. */
  3.1286 +	set_page_count(page, 1);
  3.1287  
  3.1288 -    netif_idx_release(pending_idx);
  3.1289 +	netif_idx_release(pending_idx);
  3.1290  }
  3.1291  
  3.1292  irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
  3.1293  {
  3.1294 -    netif_t *netif = dev_id;
  3.1295 -    if ( tx_work_exists(netif) )
  3.1296 -    {
  3.1297 -        add_to_net_schedule_list_tail(netif);
  3.1298 -        maybe_schedule_tx_action();
  3.1299 -    }
  3.1300 -    return IRQ_HANDLED;
  3.1301 +	netif_t *netif = dev_id;
  3.1302 +	if (tx_work_exists(netif)) {
  3.1303 +		add_to_net_schedule_list_tail(netif);
  3.1304 +		maybe_schedule_tx_action();
  3.1305 +	}
  3.1306 +	return IRQ_HANDLED;
  3.1307  }
  3.1308  
  3.1309  static void make_tx_response(netif_t *netif, 
  3.1310                               u16      id,
  3.1311                               s8       st)
  3.1312  {
  3.1313 -    NETIF_RING_IDX i = netif->tx_resp_prod;
  3.1314 -    netif_tx_response_t *resp;
  3.1315 +	NETIF_RING_IDX i = netif->tx_resp_prod;
  3.1316 +	netif_tx_response_t *resp;
  3.1317  
  3.1318 -    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
  3.1319 -    resp->id     = id;
  3.1320 -    resp->status = st;
  3.1321 -    wmb();
  3.1322 -    netif->tx->resp_prod = netif->tx_resp_prod = ++i;
  3.1323 +	resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
  3.1324 +	resp->id     = id;
  3.1325 +	resp->status = st;
  3.1326 +	wmb();
  3.1327 +	netif->tx->resp_prod = netif->tx_resp_prod = ++i;
  3.1328  
  3.1329 -    mb(); /* Update producer before checking event threshold. */
  3.1330 -    if ( i == netif->tx->event )
  3.1331 -        notify_via_evtchn(netif->evtchn);
  3.1332 +	mb(); /* Update producer before checking event threshold. */
  3.1333 +	if (i == netif->tx->event)
  3.1334 +		notify_via_evtchn(netif->evtchn);
  3.1335  }
  3.1336  
  3.1337  static int make_rx_response(netif_t *netif, 
  3.1338 @@ -882,110 +878,120 @@ static int make_rx_response(netif_t *net
  3.1339                              u16      size,
  3.1340                              u16      csum_valid)
  3.1341  {
  3.1342 -    NETIF_RING_IDX i = netif->rx_resp_prod;
  3.1343 -    netif_rx_response_t *resp;
  3.1344 +	NETIF_RING_IDX i = netif->rx_resp_prod;
  3.1345 +	netif_rx_response_t *resp;
  3.1346  
  3.1347 -    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
  3.1348 -    resp->addr       = addr;
  3.1349 -    resp->csum_valid = csum_valid;
  3.1350 -    resp->id         = id;
  3.1351 -    resp->status     = (s16)size;
  3.1352 -    if ( st < 0 )
  3.1353 -        resp->status = (s16)st;
  3.1354 -    wmb();
  3.1355 -    netif->rx->resp_prod = netif->rx_resp_prod = ++i;
  3.1356 +	resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
  3.1357 +	resp->addr       = addr;
  3.1358 +	resp->csum_valid = csum_valid;
  3.1359 +	resp->id         = id;
  3.1360 +	resp->status     = (s16)size;
  3.1361 +	if (st < 0)
  3.1362 +		resp->status = (s16)st;
  3.1363 +	wmb();
  3.1364 +	netif->rx->resp_prod = netif->rx_resp_prod = ++i;
  3.1365  
  3.1366 -    mb(); /* Update producer before checking event threshold. */
  3.1367 -    return (i == netif->rx->event);
  3.1368 +	mb(); /* Update producer before checking event threshold. */
  3.1369 +	return (i == netif->rx->event);
  3.1370  }
  3.1371  
  3.1372  static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
  3.1373  {
  3.1374 -    struct list_head *ent;
  3.1375 -    netif_t *netif;
  3.1376 -    int i = 0;
  3.1377 +	struct list_head *ent;
  3.1378 +	netif_t *netif;
  3.1379 +	int i = 0;
  3.1380  
  3.1381 -    printk(KERN_ALERT "netif_schedule_list:\n");
  3.1382 -    spin_lock_irq(&net_schedule_list_lock);
  3.1383 +	printk(KERN_ALERT "netif_schedule_list:\n");
  3.1384 +	spin_lock_irq(&net_schedule_list_lock);
  3.1385  
  3.1386 -    list_for_each ( ent, &net_schedule_list )
  3.1387 -    {
  3.1388 -        netif = list_entry(ent, netif_t, list);
  3.1389 -        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
  3.1390 -               i, netif->rx_req_cons, netif->rx_resp_prod);               
  3.1391 -        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
  3.1392 -               netif->tx_req_cons, netif->tx_resp_prod);
  3.1393 -        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
  3.1394 -               netif->rx->req_prod, netif->rx->resp_prod);
  3.1395 -        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
  3.1396 -               netif->rx->event, netif->tx->req_prod);
  3.1397 -        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
  3.1398 -               netif->tx->resp_prod, netif->tx->event);
  3.1399 -        i++;
  3.1400 -    }
  3.1401 +	list_for_each (ent, &net_schedule_list) {
  3.1402 +		netif = list_entry(ent, netif_t, list);
  3.1403 +		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
  3.1404 +		       "rx_resp_prod=%08x\n",
  3.1405 +		       i, netif->rx_req_cons, netif->rx_resp_prod);
  3.1406 +		printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
  3.1407 +		       netif->tx_req_cons, netif->tx_resp_prod);
  3.1408 +		printk(KERN_ALERT "   shared(rx_req_prod=%08x "
  3.1409 +		       "rx_resp_prod=%08x\n",
  3.1410 +		       netif->rx->req_prod, netif->rx->resp_prod);
  3.1411 +		printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
  3.1412 +		       netif->rx->event, netif->tx->req_prod);
  3.1413 +		printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
  3.1414 +		       netif->tx->resp_prod, netif->tx->event);
  3.1415 +		i++;
  3.1416 +	}
  3.1417  
  3.1418 -    spin_unlock_irq(&net_schedule_list_lock);
  3.1419 -    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
  3.1420 +	spin_unlock_irq(&net_schedule_list_lock);
  3.1421 +	printk(KERN_ALERT " ** End of netif_schedule_list **\n");
  3.1422  
  3.1423 -    return IRQ_HANDLED;
  3.1424 +	return IRQ_HANDLED;
  3.1425  }
  3.1426  
  3.1427  static int __init netback_init(void)
  3.1428  {
  3.1429 -    int i;
  3.1430 -    struct page *page;
  3.1431 +	int i;
  3.1432 +	struct page *page;
  3.1433  
  3.1434 -    if ( !(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
  3.1435 -         !(xen_start_info->flags & SIF_INITDOMAIN) )
  3.1436 -        return 0;
  3.1437 +	if (!(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
  3.1438 +	    !(xen_start_info->flags & SIF_INITDOMAIN))
  3.1439 +		return 0;
  3.1440  
  3.1441 -    IPRINTK("Initialising Xen netif backend.\n");
  3.1442 +	IPRINTK("Initialising Xen netif backend.\n");
  3.1443  #ifdef CONFIG_XEN_NETDEV_GRANT
  3.1444 -    IPRINTK("Using grant tables.\n");
  3.1445 +	IPRINTK("Using grant tables.\n");
  3.1446  #endif
  3.1447  
  3.1448 -    /* We can increase reservation by this much in net_rx_action(). */
  3.1449 -    balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
  3.1450 +	/* We can increase reservation by this much in net_rx_action(). */
  3.1451 +	balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
  3.1452  
  3.1453 -    skb_queue_head_init(&rx_queue);
  3.1454 -    skb_queue_head_init(&tx_queue);
  3.1455 +	skb_queue_head_init(&rx_queue);
  3.1456 +	skb_queue_head_init(&tx_queue);
  3.1457  
  3.1458 -    init_timer(&net_timer);
  3.1459 -    net_timer.data = 0;
  3.1460 -    net_timer.function = net_alarm;
  3.1461 +	init_timer(&net_timer);
  3.1462 +	net_timer.data = 0;
  3.1463 +	net_timer.function = net_alarm;
  3.1464      
  3.1465 -    page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
  3.1466 -    BUG_ON(page == NULL);
  3.1467 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
  3.1468 +	page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
  3.1469 +	BUG_ON(page == NULL);
  3.1470 +	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
  3.1471  
  3.1472 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
  3.1473 -    {
  3.1474 -        page = virt_to_page(MMAP_VADDR(i));
  3.1475 -        set_page_count(page, 1);
  3.1476 -        SetPageForeign(page, netif_page_release);
  3.1477 -    }
  3.1478 +	for (i = 0; i < MAX_PENDING_REQS; i++) {
  3.1479 +		page = virt_to_page(MMAP_VADDR(i));
  3.1480 +		set_page_count(page, 1);
  3.1481 +		SetPageForeign(page, netif_page_release);
  3.1482 +	}
  3.1483 +
  3.1484 +	pending_cons = 0;
  3.1485 +	pending_prod = MAX_PENDING_REQS;
  3.1486 +	for (i = 0; i < MAX_PENDING_REQS; i++)
  3.1487 +		pending_ring[i] = i;
  3.1488  
  3.1489 -    pending_cons = 0;
  3.1490 -    pending_prod = MAX_PENDING_REQS;
  3.1491 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
  3.1492 -        pending_ring[i] = i;
  3.1493 +	spin_lock_init(&net_schedule_list_lock);
  3.1494 +	INIT_LIST_HEAD(&net_schedule_list);
  3.1495  
  3.1496 -    spin_lock_init(&net_schedule_list_lock);
  3.1497 -    INIT_LIST_HEAD(&net_schedule_list);
  3.1498 +	netif_xenbus_init();
  3.1499  
  3.1500 -    netif_xenbus_init();
  3.1501 +	(void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
  3.1502 +			  netif_be_dbg, SA_SHIRQ, 
  3.1503 +			  "net-be-dbg", &netif_be_dbg);
  3.1504  
  3.1505 -    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
  3.1506 -                      netif_be_dbg, SA_SHIRQ, 
  3.1507 -                      "net-be-dbg", &netif_be_dbg);
  3.1508 -
  3.1509 -    return 0;
  3.1510 +	return 0;
  3.1511  }
  3.1512  
  3.1513  static void netback_cleanup(void)
  3.1514  {
  3.1515 -    BUG();
  3.1516 +	BUG();
  3.1517  }
  3.1518  
  3.1519  module_init(netback_init);
  3.1520  module_exit(netback_cleanup);
  3.1521 +
  3.1522 +/*
  3.1523 + * Local variables:
  3.1524 + *  c-file-style: "linux"
  3.1525 + *  indent-tabs-mode: t
  3.1526 + *  c-indent-level: 8
  3.1527 + *  c-basic-offset: 8
  3.1528 + *  tab-width: 8
  3.1529 + * End:
  3.1530 + */
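
The credit-scheduler hunk re-indented above (the txreq.size > netif->remaining_credit path in net_tx_action()) is easier to follow in isolation. Below is a minimal userspace sketch of the same replenish-or-defer idea; struct credit_state, may_send() and the fake tick argument are names invented for the sketch, and where the real driver defers by arming credit_timeout, this version simply reports that the packet must wait.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative per-interface credit state; these names are invented for
 * the sketch and do not exist in the driver.
 */
struct credit_state {
	unsigned long credit_bytes;	/* bytes allowed per period       */
	unsigned long period;		/* replenish interval, fake ticks */
	unsigned long remaining_credit;	/* bytes left in this period      */
	unsigned long last_replenish;	/* tick of the last replenish     */
};

/*
 * Decide whether a packet of 'size' bytes may be sent at tick 'now',
 * mirroring the replenish-or-defer shape of the hunk above.
 */
static bool may_send(struct credit_state *cs, unsigned long size,
		     unsigned long now)
{
	if (size > cs->remaining_credit) {
		/* Replenish once a full period has elapsed. */
		if (now - cs->last_replenish >= cs->period) {
			cs->last_replenish = now;
			cs->remaining_credit = cs->credit_bytes;
		}
		/* Still too big? The packet has to wait. */
		if (size > cs->remaining_credit)
			return false;
	}
	cs->remaining_credit -= size;
	return true;
}

int main(void)
{
	struct credit_state cs = { 2000, 10, 2000, 0 };
	unsigned long t;

	for (t = 0; t < 30; t += 5)
		printf("t=%2lu send 1500: %s\n", t,
		       may_send(&cs, 1500, t) ? "ok" : "deferred");
	return 0;
}

With a 2000-byte budget replenished every 10 ticks, the 1500-byte sends alternate between succeeding and being deferred, which is the behaviour the driver achieves with its per-interface timer.
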
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Fri Sep 16 13:06:49 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Fri Sep 16 13:27:01 2005 +0000
     4.3 @@ -294,3 +294,13 @@ void netif_xenbus_init(void)
     4.4  {
     4.5  	xenbus_register_backend(&netback);
     4.6  }
     4.7 +
     4.8 +/*
     4.9 + * Local variables:
    4.10 + *  c-file-style: "linux"
    4.11 + *  indent-tabs-mode: t
    4.12 + *  c-indent-level: 8
    4.13 + *  c-basic-offset: 8
    4.14 + *  tab-width: 8
    4.15 + * End:
    4.16 + */
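
One ring idiom recurs throughout the netback.c hunks above: pending_prod/pending_cons and dealloc_prod/dealloc_cons are free-running counters that are only reduced modulo a power-of-two ring size at the point of indexing (MASK_PEND_IDX), so producer minus consumer gives the occupancy directly, as in NR_PENDING_REQS. A self-contained sketch of that pattern follows; RING_SIZE, ring_put() and ring_get() are names made up for the example, not driver symbols.

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8				/* must be a power of two */
#define MASK(idx) ((idx) & (RING_SIZE - 1))

static unsigned short ring[RING_SIZE];
static unsigned int prod, cons;			/* free-running counters */

/* Occupancy is producer minus consumer, valid even after wraparound. */
static unsigned int ring_count(void)
{
	return prod - cons;
}

static void ring_put(unsigned short v)
{
	assert(ring_count() < RING_SIZE);	/* ring must not overflow */
	ring[MASK(prod++)] = v;
}

static unsigned short ring_get(void)
{
	assert(ring_count() > 0);		/* ring must not underflow */
	return ring[MASK(cons++)];
}

int main(void)
{
	unsigned short i;
	unsigned int v;

	/* Push enough entries that the raw indices wrap past RING_SIZE. */
	for (i = 0; i < 20; i++) {
		ring_put(i);
		v = ring_get();
		printf("got %u, occupancy now %u\n", v, ring_count());
	}
	return 0;
}

Because only the masked value ever touches the array, the counters never need to be wrapped explicitly, which is why the driver can compare the raw values (dealloc_cons != dealloc_prod, NR_PENDING_REQS < MAX_PENDING_REQS) directly.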