ia64/xen-unstable

changeset 6914:ffbc98d735bd

merge?
author cl349@firebug.cl.cam.ac.uk
date Fri Sep 16 18:07:50 2005 +0000 (2005-09-16)
parents 7cccdb49af75 7fbaf67a0af5
children a4cf3e17bb25
files	linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
	linux-2.6-xen-sparse/arch/xen/i386/pci/i386.c
	linux-2.6-xen-sparse/arch/xen/kernel/devmem.c
	linux-2.6-xen-sparse/drivers/xen/netback/common.h
	linux-2.6-xen-sparse/drivers/xen/netback/interface.c
	linux-2.6-xen-sparse/drivers/xen/netback/netback.c
	linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c
	linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
	linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
	linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h
	linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
	tools/python/xen/xend/XendDomain.py
	tools/python/xen/xend/XendDomainInfo.py
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Sep 16 18:06:42 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Sep 16 18:07:50 2005 +0000
     1.3 @@ -45,12 +45,12 @@ static int direct_remap_area_pte_fn(pte_
     1.4  	return 0;
     1.5  }
     1.6  
     1.7 -int direct_remap_pfn_range(struct mm_struct *mm,
     1.8 -			    unsigned long address, 
     1.9 -			    unsigned long mfn,
    1.10 -			    unsigned long size, 
    1.11 -			    pgprot_t prot,
    1.12 -			    domid_t  domid)
    1.13 +static int __direct_remap_pfn_range(struct mm_struct *mm,
    1.14 +				    unsigned long address, 
    1.15 +				    unsigned long mfn,
    1.16 +				    unsigned long size, 
    1.17 +				    pgprot_t prot,
    1.18 +				    domid_t  domid)
    1.19  {
    1.20  	int i;
    1.21  	unsigned long start_address;
    1.22 @@ -98,6 +98,20 @@ int direct_remap_pfn_range(struct mm_str
    1.23  	return 0;
    1.24  }
    1.25  
    1.26 +int direct_remap_pfn_range(struct vm_area_struct *vma,
    1.27 +			   unsigned long address, 
    1.28 +			   unsigned long mfn,
    1.29 +			   unsigned long size, 
    1.30 +			   pgprot_t prot,
    1.31 +			   domid_t  domid)
    1.32 +{
    1.33 +	/* Same as remap_pfn_range(). */
    1.34 +	vma->vm_flags |= VM_IO | VM_RESERVED;
    1.35 +
    1.36 +	return __direct_remap_pfn_range(
    1.37 +		vma->vm_mm, address, mfn, size, prot, domid);
    1.38 +}
    1.39 +
    1.40  EXPORT_SYMBOL(direct_remap_pfn_range);
    1.41  
    1.42  
    1.43 @@ -221,8 +235,9 @@ void __iomem * __ioremap(unsigned long p
    1.44  #ifdef __x86_64__
    1.45  	flags |= _PAGE_USER;
    1.46  #endif
    1.47 -	if (direct_remap_pfn_range(&init_mm, (unsigned long) addr, phys_addr>>PAGE_SHIFT,
    1.48 -				    size, __pgprot(flags), domid)) {
    1.49 +	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
    1.50 +				     phys_addr>>PAGE_SHIFT,
    1.51 +				     size, __pgprot(flags), domid)) {
    1.52  		vunmap((void __force *) addr);
    1.53  		return NULL;
    1.54  	}
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/pci/i386.c	Fri Sep 16 18:06:42 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/pci/i386.c	Fri Sep 16 18:07:50 2005 +0000
     2.3 @@ -295,7 +295,7 @@ int pci_mmap_page_range(struct pci_dev *
     2.4  	/* Write-combine setting is ignored, it is changed via the mtrr
     2.5  	 * interfaces on this platform.
     2.6  	 */
     2.7 -	if (direct_remap_pfn_range(vma->vm_mm, vma->vm_start, vma->vm_pgoff,
     2.8 +	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
     2.9  				   vma->vm_end - vma->vm_start,
    2.10  				   vma->vm_page_prot, DOMID_IO))
    2.11  		return -EAGAIN;
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Fri Sep 16 18:06:42 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Fri Sep 16 18:07:50 2005 +0000
     3.3 @@ -90,22 +90,10 @@ out:
     3.4  
     3.5  static int mmap_mem(struct file * file, struct vm_area_struct * vma)
     3.6  {
     3.7 -	int uncached;
     3.8 -
     3.9 -	uncached = uncached_access(file);
    3.10 -	if (uncached)
    3.11 +	if (uncached_access(file))
    3.12  		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    3.13  
    3.14 -	/* Don't try to swap out physical pages.. */
    3.15 -	vma->vm_flags |= VM_RESERVED;
    3.16 -
    3.17 -	/*
    3.18 -	 * Don't dump addresses that are not real memory to a core file.
    3.19 -	 */
    3.20 -	if (uncached)
    3.21 -		vma->vm_flags |= VM_IO;
    3.22 -
    3.23 -	if (direct_remap_pfn_range(vma->vm_mm, vma->vm_start, vma->vm_pgoff,
    3.24 +	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
    3.25  				   vma->vm_end - vma->vm_start,
    3.26  				   vma->vm_page_prot, DOMID_IO))
    3.27  		return -EAGAIN;
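
The ioremap.c, pci/i386.c and devmem.c hunks above show both sides of the interface change: direct_remap_pfn_range() now takes the vm_area_struct itself and sets VM_IO | VM_RESERVED internally, while the old mm-based path survives only as the static __direct_remap_pfn_range() used by __ioremap(). A minimal sketch of a caller written against the new prototype is shown below; only the direct_remap_pfn_range() call and the DOMID_IO usage are taken from the hunks above, the handler name and surrounding wiring are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Hypothetical mmap handler following the pattern of pci_mmap_page_range()
 * and mmap_mem() above.  vma->vm_pgoff carries the machine frame number and
 * DOMID_IO marks the mapping as I/O memory rather than another guest's RAM.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* VM_IO | VM_RESERVED are now set inside direct_remap_pfn_range(),
	 * so the caller no longer touches vma->vm_flags itself. */
	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				   vma->vm_end - vma->vm_start,
				   vma->vm_page_prot, DOMID_IO))
		return -EAGAIN;

	return 0;
}
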
     4.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Fri Sep 16 18:06:42 2005 +0000
     4.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Fri Sep 16 18:07:50 2005 +0000
     4.3 @@ -18,17 +18,11 @@
     4.4  #include <asm-xen/xen-public/io/netif.h>
     4.5  #include <asm/io.h>
     4.6  #include <asm/pgalloc.h>
     4.7 -
     4.8 -#ifdef CONFIG_XEN_NETDEV_GRANT
     4.9  #include <asm-xen/xen-public/grant_table.h>
    4.10  #include <asm-xen/gnttab.h>
    4.11  
    4.12  #define GRANT_INVALID_REF (0xFFFF)
    4.13  
    4.14 -#endif
    4.15 -
    4.16 -
    4.17 -
    4.18  #if 0
    4.19  #define ASSERT(_p) \
    4.20      if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
    4.21 @@ -44,74 +38,73 @@
    4.22  #define WPRINTK(fmt, args...) \
    4.23      printk(KERN_WARNING "xen_net: " fmt, ##args)
    4.24  
    4.25 -
    4.26  typedef struct netif_st {
    4.27 -    /* Unique identifier for this interface. */
    4.28 -    domid_t          domid;
    4.29 -    unsigned int     handle;
    4.30 -
    4.31 -    u8               fe_dev_addr[6];
    4.32 -
    4.33 -    /* Physical parameters of the comms window. */
    4.34 -    unsigned long    tx_shmem_frame;
    4.35 -#ifdef CONFIG_XEN_NETDEV_GRANT
    4.36 -    u16              tx_shmem_handle;
    4.37 -    unsigned long    tx_shmem_vaddr; 
    4.38 -    grant_ref_t      tx_shmem_ref; 
    4.39 -#endif
    4.40 -    unsigned long    rx_shmem_frame;
    4.41 -#ifdef CONFIG_XEN_NETDEV_GRANT
    4.42 -    u16              rx_shmem_handle;
    4.43 -    unsigned long    rx_shmem_vaddr; 
    4.44 -    grant_ref_t      rx_shmem_ref; 
    4.45 -#endif
    4.46 -    unsigned int     evtchn;
    4.47 -    unsigned int     remote_evtchn;
    4.48 +	/* Unique identifier for this interface. */
    4.49 +	domid_t          domid;
    4.50 +	unsigned int     handle;
    4.51  
    4.52 -    /* The shared rings and indexes. */
    4.53 -    netif_tx_interface_t *tx;
    4.54 -    netif_rx_interface_t *rx;
    4.55 -
    4.56 -    /* Private indexes into shared ring. */
    4.57 -    NETIF_RING_IDX rx_req_cons;
    4.58 -    NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
    4.59 -#ifdef CONFIG_XEN_NETDEV_GRANT
    4.60 -    NETIF_RING_IDX rx_resp_prod_copy; /* private version of shared variable */
    4.61 -#endif
    4.62 -    NETIF_RING_IDX tx_req_cons;
    4.63 -    NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
    4.64 +	u8               fe_dev_addr[6];
    4.65  
    4.66 -    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
    4.67 -    unsigned long   credit_bytes;
    4.68 -    unsigned long   credit_usec;
    4.69 -    unsigned long   remaining_credit;
    4.70 -    struct timer_list credit_timeout;
    4.71 +	/* Physical parameters of the comms window. */
    4.72 +	unsigned long    tx_shmem_frame;
    4.73 +#ifdef CONFIG_XEN_NETDEV_GRANT
    4.74 +	u16              tx_shmem_handle;
    4.75 +	unsigned long    tx_shmem_vaddr; 
    4.76 +	grant_ref_t      tx_shmem_ref; 
    4.77 +#endif
    4.78 +	unsigned long    rx_shmem_frame;
    4.79 +#ifdef CONFIG_XEN_NETDEV_GRANT
    4.80 +	u16              rx_shmem_handle;
    4.81 +	unsigned long    rx_shmem_vaddr; 
    4.82 +	grant_ref_t      rx_shmem_ref; 
    4.83 +#endif
    4.84 +	unsigned int     evtchn;
    4.85 +	unsigned int     remote_evtchn;
    4.86  
    4.87 -    /* Miscellaneous private stuff. */
    4.88 -    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
    4.89 -    int active;
    4.90 -    struct list_head list;  /* scheduling list */
    4.91 -    atomic_t         refcnt;
    4.92 -    struct net_device *dev;
    4.93 -    struct net_device_stats stats;
    4.94 +	/* The shared rings and indexes. */
    4.95 +	netif_tx_interface_t *tx;
    4.96 +	netif_rx_interface_t *rx;
    4.97  
    4.98 -    struct work_struct free_work;
    4.99 +	/* Private indexes into shared ring. */
   4.100 +	NETIF_RING_IDX rx_req_cons;
   4.101 +	NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
   4.102 +#ifdef CONFIG_XEN_NETDEV_GRANT
   4.103 +	NETIF_RING_IDX rx_resp_prod_copy;
   4.104 +#endif
   4.105 +	NETIF_RING_IDX tx_req_cons;
   4.106 +	NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
   4.107 +
   4.108 +	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
   4.109 +	unsigned long   credit_bytes;
   4.110 +	unsigned long   credit_usec;
   4.111 +	unsigned long   remaining_credit;
   4.112 +	struct timer_list credit_timeout;
   4.113 +
   4.114 +	/* Miscellaneous private stuff. */
   4.115 +	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
   4.116 +	int active;
   4.117 +	struct list_head list;  /* scheduling list */
   4.118 +	atomic_t         refcnt;
   4.119 +	struct net_device *dev;
   4.120 +	struct net_device_stats stats;
   4.121 +
   4.122 +	struct work_struct free_work;
   4.123  } netif_t;
   4.124  
   4.125  void netif_creditlimit(netif_t *netif);
   4.126  int  netif_disconnect(netif_t *netif);
   4.127  
   4.128  netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
   4.129 -void free_netif_callback(netif_t *netif);
   4.130 +void free_netif(netif_t *netif);
   4.131  int netif_map(netif_t *netif, unsigned long tx_ring_ref,
   4.132  	      unsigned long rx_ring_ref, unsigned int evtchn);
   4.133  
   4.134  #define netif_get(_b) (atomic_inc(&(_b)->refcnt))
   4.135 -#define netif_put(_b)                             \
   4.136 -    do {                                          \
   4.137 -        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
   4.138 -            free_netif_callback(_b);              \
   4.139 -    } while (0)
   4.140 +#define netif_put(_b)						\
   4.141 +	do {							\
   4.142 +		if ( atomic_dec_and_test(&(_b)->refcnt) )	\
   4.143 +			free_netif(_b);				\
   4.144 +	} while (0)
   4.145  
   4.146  void netif_xenbus_init(void);
   4.147  
   4.148 @@ -123,3 +116,13 @@ struct net_device_stats *netif_be_get_st
   4.149  irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
   4.150  
   4.151  #endif /* __NETIF__BACKEND__COMMON_H__ */
   4.152 +
   4.153 +/*
   4.154 + * Local variables:
   4.155 + *  c-file-style: "linux"
   4.156 + *  indent-tabs-mode: t
   4.157 + *  c-indent-level: 8
   4.158 + *  c-basic-offset: 8
   4.159 + *  tab-width: 8
   4.160 + * End:
   4.161 + */
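
The refcounting interface declared above (netif_get()/netif_put(), with the teardown entry point renamed to free_netif()) is used as sketched below. This is an illustration of the discipline implied by the header, not code from the changeset: a reference pins the interface while work against it is in flight, and the final netif_put() invokes free_netif(), which defers the real teardown to a workqueue (see interface.c below).

/* Illustrative only: how netback brackets in-flight work on a netif_t. */
static void example_start_request(netif_t *netif)
{
	netif_get(netif);	/* pin the interface while the request is in flight */
	/* ... queue the packet or grant operation here ... */
}

static void example_finish_request(netif_t *netif)
{
	/* ... push the response onto the shared ring ... */
	netif_put(netif);	/* last reference dropped => free_netif(netif) runs */
}
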
     5.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Sep 16 18:06:42 2005 +0000
     5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c	Fri Sep 16 18:07:50 2005 +0000
     5.3 @@ -11,104 +11,105 @@
     5.4  
     5.5  static void __netif_up(netif_t *netif)
     5.6  {
     5.7 -    struct net_device *dev = netif->dev;
     5.8 -    spin_lock_bh(&dev->xmit_lock);
     5.9 -    netif->active = 1;
    5.10 -    spin_unlock_bh(&dev->xmit_lock);
    5.11 -    (void)bind_evtchn_to_irqhandler(
    5.12 -        netif->evtchn, netif_be_int, 0, dev->name, netif);
    5.13 -    netif_schedule_work(netif);
    5.14 +	struct net_device *dev = netif->dev;
    5.15 +	spin_lock_bh(&dev->xmit_lock);
    5.16 +	netif->active = 1;
    5.17 +	spin_unlock_bh(&dev->xmit_lock);
    5.18 +	(void)bind_evtchn_to_irqhandler(
    5.19 +		netif->evtchn, netif_be_int, 0, dev->name, netif);
    5.20 +	netif_schedule_work(netif);
    5.21  }
    5.22  
    5.23  static void __netif_down(netif_t *netif)
    5.24  {
    5.25 -    struct net_device *dev = netif->dev;
    5.26 -    spin_lock_bh(&dev->xmit_lock);
    5.27 -    netif->active = 0;
    5.28 -    spin_unlock_bh(&dev->xmit_lock);
    5.29 -    unbind_evtchn_from_irqhandler(netif->evtchn, netif);
    5.30 -    netif_deschedule_work(netif);
    5.31 +	struct net_device *dev = netif->dev;
    5.32 +	spin_lock_bh(&dev->xmit_lock);
    5.33 +	netif->active = 0;
    5.34 +	spin_unlock_bh(&dev->xmit_lock);
    5.35 +	unbind_evtchn_from_irqhandler(netif->evtchn, netif);
    5.36 +	netif_deschedule_work(netif);
    5.37  }
    5.38  
    5.39  static int net_open(struct net_device *dev)
    5.40  {
    5.41 -    netif_t *netif = netdev_priv(dev);
    5.42 -    if (netif->status == CONNECTED)
    5.43 -        __netif_up(netif);
    5.44 -    netif_start_queue(dev);
    5.45 -    return 0;
    5.46 +	netif_t *netif = netdev_priv(dev);
    5.47 +	if (netif->status == CONNECTED)
    5.48 +		__netif_up(netif);
    5.49 +	netif_start_queue(dev);
    5.50 +	return 0;
    5.51  }
    5.52  
    5.53  static int net_close(struct net_device *dev)
    5.54  {
    5.55 -    netif_t *netif = netdev_priv(dev);
    5.56 -    netif_stop_queue(dev);
    5.57 -    if (netif->status == CONNECTED)
    5.58 -        __netif_down(netif);
    5.59 -    return 0;
    5.60 +	netif_t *netif = netdev_priv(dev);
    5.61 +	netif_stop_queue(dev);
    5.62 +	if (netif->status == CONNECTED)
    5.63 +		__netif_down(netif);
    5.64 +	return 0;
    5.65  }
    5.66  
    5.67  netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
    5.68  {
    5.69 -    int err = 0, i;
    5.70 -    struct net_device *dev;
    5.71 -    netif_t *netif;
    5.72 -    char name[IFNAMSIZ] = {};
    5.73 -
    5.74 -    snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
    5.75 -    dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
    5.76 -    if (dev == NULL) {
    5.77 -        DPRINTK("Could not create netif: out of memory\n");
    5.78 -        return NULL;
    5.79 -    }
    5.80 -
    5.81 -    netif = netdev_priv(dev);
    5.82 -    memset(netif, 0, sizeof(*netif));
    5.83 -    netif->domid  = domid;
    5.84 -    netif->handle = handle;
    5.85 -    netif->status = DISCONNECTED;
    5.86 -    atomic_set(&netif->refcnt, 0);
    5.87 -    netif->dev = dev;
    5.88 -
    5.89 -    netif->credit_bytes = netif->remaining_credit = ~0UL;
    5.90 -    netif->credit_usec  = 0UL;
    5.91 -    init_timer(&netif->credit_timeout);
    5.92 +	int err = 0, i;
    5.93 +	struct net_device *dev;
    5.94 +	netif_t *netif;
    5.95 +	char name[IFNAMSIZ] = {};
    5.96  
    5.97 -    dev->hard_start_xmit = netif_be_start_xmit;
    5.98 -    dev->get_stats       = netif_be_get_stats;
    5.99 -    dev->open            = net_open;
   5.100 -    dev->stop            = net_close;
   5.101 -    dev->features        = NETIF_F_NO_CSUM;
   5.102 -
   5.103 -    /* Disable queuing. */
   5.104 -    dev->tx_queue_len = 0;
   5.105 +	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
   5.106 +	dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
   5.107 +	if (dev == NULL) {
   5.108 +		DPRINTK("Could not create netif: out of memory\n");
   5.109 +		return NULL;
   5.110 +	}
   5.111  
   5.112 -    for (i = 0; i < ETH_ALEN; i++)
   5.113 -	if (be_mac[i] != 0)
   5.114 -	    break;
   5.115 -    if (i == ETH_ALEN) {
   5.116 -        /*
   5.117 -         * Initialise a dummy MAC address. We choose the numerically largest
   5.118 -         * non-broadcast address to prevent the address getting stolen by an
   5.119 -         * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
   5.120 -         */ 
   5.121 -        memset(dev->dev_addr, 0xFF, ETH_ALEN);
   5.122 -        dev->dev_addr[0] &= ~0x01;
   5.123 -    } else
   5.124 -        memcpy(dev->dev_addr, be_mac, ETH_ALEN);
   5.125 +	netif = netdev_priv(dev);
   5.126 +	memset(netif, 0, sizeof(*netif));
   5.127 +	netif->domid  = domid;
   5.128 +	netif->handle = handle;
   5.129 +	netif->status = DISCONNECTED;
   5.130 +	atomic_set(&netif->refcnt, 0);
   5.131 +	netif->dev = dev;
   5.132  
   5.133 -    rtnl_lock();
   5.134 -    err = register_netdevice(dev);
   5.135 -    rtnl_unlock();
   5.136 -    if (err) {
   5.137 -        DPRINTK("Could not register new net device %s: err=%d\n",
   5.138 -                dev->name, err);
   5.139 -        free_netdev(dev);
   5.140 -        return NULL;
   5.141 -    }
   5.142 +	netif->credit_bytes = netif->remaining_credit = ~0UL;
   5.143 +	netif->credit_usec  = 0UL;
   5.144 +	init_timer(&netif->credit_timeout);
   5.145  
   5.146 -    DPRINTK("Successfully created netif\n");
   5.147 -    return netif;
   5.148 +	dev->hard_start_xmit = netif_be_start_xmit;
   5.149 +	dev->get_stats       = netif_be_get_stats;
   5.150 +	dev->open            = net_open;
   5.151 +	dev->stop            = net_close;
   5.152 +	dev->features        = NETIF_F_NO_CSUM;
   5.153 +
   5.154 +	/* Disable queuing. */
   5.155 +	dev->tx_queue_len = 0;
   5.156 +
   5.157 +	for (i = 0; i < ETH_ALEN; i++)
   5.158 +		if (be_mac[i] != 0)
   5.159 +			break;
   5.160 +	if (i == ETH_ALEN) {
   5.161 +		/*
   5.162 +		 * Initialise a dummy MAC address. We choose the numerically
   5.163 +		 * largest non-broadcast address to prevent the address getting
   5.164 +		 * stolen by an Ethernet bridge for STP purposes.
   5.165 +                 * (FE:FF:FF:FF:FF:FF) 
   5.166 +		 */ 
   5.167 +		memset(dev->dev_addr, 0xFF, ETH_ALEN);
   5.168 +		dev->dev_addr[0] &= ~0x01;
   5.169 +	} else
   5.170 +		memcpy(dev->dev_addr, be_mac, ETH_ALEN);
   5.171 +
   5.172 +	rtnl_lock();
   5.173 +	err = register_netdevice(dev);
   5.174 +	rtnl_unlock();
   5.175 +	if (err) {
   5.176 +		DPRINTK("Could not register new net device %s: err=%d\n",
   5.177 +			dev->name, err);
   5.178 +		free_netdev(dev);
   5.179 +		return NULL;
   5.180 +	}
   5.181 +
   5.182 +	DPRINTK("Successfully created netif\n");
   5.183 +	return netif;
   5.184  }
   5.185  
   5.186  static int map_frontend_pages(netif_t *netif, unsigned long localaddr,
   5.187 @@ -116,191 +117,204 @@ static int map_frontend_pages(netif_t *n
   5.188                                unsigned long rx_ring_ref)
   5.189  {
   5.190  #ifdef CONFIG_XEN_NETDEV_GRANT
   5.191 -    struct gnttab_map_grant_ref op;
   5.192 -
   5.193 -    /* Map: Use the Grant table reference */
   5.194 -    op.host_addr = localaddr;
   5.195 -    op.flags     = GNTMAP_host_map;
   5.196 -    op.ref       = tx_ring_ref;
   5.197 -    op.dom       = netif->domid;
   5.198 -    
   5.199 -    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   5.200 -    if (op.handle < 0) { 
   5.201 -        DPRINTK(" Grant table operation failure mapping tx_ring_ref!\n");
   5.202 -        return op.handle;
   5.203 -    }
   5.204 +	struct gnttab_map_grant_ref op;
   5.205  
   5.206 -    netif->tx_shmem_ref    = tx_ring_ref;
   5.207 -    netif->tx_shmem_handle = op.handle;
   5.208 -    netif->tx_shmem_vaddr  = localaddr;
   5.209 +	/* Map: Use the Grant table reference */
   5.210 +	op.host_addr = localaddr;
   5.211 +	op.flags     = GNTMAP_host_map;
   5.212 +	op.ref       = tx_ring_ref;
   5.213 +	op.dom       = netif->domid;
   5.214 +    
   5.215 +	BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   5.216 +	if (op.handle < 0) { 
   5.217 +		DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
   5.218 +		return op.handle;
   5.219 +	}
   5.220  
   5.221 -    /* Map: Use the Grant table reference */
   5.222 -    op.host_addr = localaddr + PAGE_SIZE;
   5.223 -    op.flags     = GNTMAP_host_map;
   5.224 -    op.ref       = rx_ring_ref;
   5.225 -    op.dom       = netif->domid;
   5.226 +	netif->tx_shmem_ref    = tx_ring_ref;
   5.227 +	netif->tx_shmem_handle = op.handle;
   5.228 +	netif->tx_shmem_vaddr  = localaddr;
   5.229  
   5.230 -    BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   5.231 -    if (op.handle < 0) { 
   5.232 -        DPRINTK(" Grant table operation failure mapping rx_ring_ref!\n");
   5.233 -        return op.handle;
   5.234 -    }
   5.235 +	/* Map: Use the Grant table reference */
   5.236 +	op.host_addr = localaddr + PAGE_SIZE;
   5.237 +	op.flags     = GNTMAP_host_map;
   5.238 +	op.ref       = rx_ring_ref;
   5.239 +	op.dom       = netif->domid;
   5.240  
   5.241 -    netif->rx_shmem_ref    = rx_ring_ref;
   5.242 -    netif->rx_shmem_handle = op.handle;
   5.243 -    netif->rx_shmem_vaddr  = localaddr + PAGE_SIZE;
   5.244 +	BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) );
   5.245 +	if (op.handle < 0) { 
   5.246 +		DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
   5.247 +		return op.handle;
   5.248 +	}
   5.249 +
   5.250 +	netif->rx_shmem_ref    = rx_ring_ref;
   5.251 +	netif->rx_shmem_handle = op.handle;
   5.252 +	netif->rx_shmem_vaddr  = localaddr + PAGE_SIZE;
   5.253  
   5.254  #else
   5.255 -    pgprot_t      prot = __pgprot(_KERNPG_TABLE);
   5.256 -    int           err;
   5.257 +	pgprot_t prot = __pgprot(_KERNPG_TABLE);
   5.258 +	int      err;
   5.259  
   5.260 -    err = direct_remap_pfn_range(&init_mm, localaddr,
   5.261 -				  tx_ring_ref, PAGE_SIZE,
   5.262 -				  prot, netif->domid); 
   5.263 +	err = direct_remap_pfn_range(
   5.264 +		&init_mm, localaddr,
   5.265 +		tx_ring_ref, PAGE_SIZE,
   5.266 +		prot, netif->domid); 
   5.267      
   5.268 -    err |= direct_remap_pfn_range(&init_mm, localaddr + PAGE_SIZE,
   5.269 -				  rx_ring_ref, PAGE_SIZE,
   5.270 -				  prot, netif->domid);
   5.271 +	err |= direct_remap_pfn_range(
   5.272 +		&init_mm, localaddr + PAGE_SIZE,
   5.273 +		rx_ring_ref, PAGE_SIZE,
   5.274 +		prot, netif->domid);
   5.275  
   5.276 -    if (err)
   5.277 -	return err;
   5.278 +	if (err)
   5.279 +		return err;
   5.280  #endif
   5.281  
   5.282 -    return 0;
   5.283 +	return 0;
   5.284  }
   5.285  
   5.286  static void unmap_frontend_pages(netif_t *netif)
   5.287  {
   5.288  #ifdef CONFIG_XEN_NETDEV_GRANT
   5.289 -    struct gnttab_unmap_grant_ref op;
   5.290 +	struct gnttab_unmap_grant_ref op;
   5.291  
   5.292 -    op.host_addr    = netif->tx_shmem_vaddr;
   5.293 -    op.handle       = netif->tx_shmem_handle;
   5.294 -    op.dev_bus_addr = 0;
   5.295 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   5.296 +	op.host_addr    = netif->tx_shmem_vaddr;
   5.297 +	op.handle       = netif->tx_shmem_handle;
   5.298 +	op.dev_bus_addr = 0;
   5.299 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   5.300  
   5.301 -    op.host_addr    = netif->rx_shmem_vaddr;
   5.302 -    op.handle       = netif->rx_shmem_handle;
   5.303 -    op.dev_bus_addr = 0;
   5.304 -    BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   5.305 +	op.host_addr    = netif->rx_shmem_vaddr;
   5.306 +	op.handle       = netif->rx_shmem_handle;
   5.307 +	op.dev_bus_addr = 0;
   5.308 +	BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
   5.309  #endif
   5.310  
   5.311 -    return; 
   5.312 +	return; 
   5.313  }
   5.314  
   5.315  int netif_map(netif_t *netif, unsigned long tx_ring_ref,
   5.316  	      unsigned long rx_ring_ref, unsigned int evtchn)
   5.317  {
   5.318 -    struct vm_struct *vma;
   5.319 -    evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
   5.320 -    int err;
   5.321 -
   5.322 -    vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP);
   5.323 -    if (vma == NULL)
   5.324 -        return -ENOMEM;
   5.325 -
   5.326 -    err = map_frontend_pages(netif, (unsigned long)vma->addr, tx_ring_ref,
   5.327 -                             rx_ring_ref);
   5.328 -    if (err) {
   5.329 -        vfree(vma->addr);
   5.330 -	return err;
   5.331 -    }
   5.332 +	struct vm_struct *vma;
   5.333 +	evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain };
   5.334 +	int err;
   5.335  
   5.336 -    op.u.bind_interdomain.dom1 = DOMID_SELF;
   5.337 -    op.u.bind_interdomain.dom2 = netif->domid;
   5.338 -    op.u.bind_interdomain.port1 = 0;
   5.339 -    op.u.bind_interdomain.port2 = evtchn;
   5.340 -    err = HYPERVISOR_event_channel_op(&op);
   5.341 -    if (err) {
   5.342 -	unmap_frontend_pages(netif);
   5.343 -	vfree(vma->addr);
   5.344 -	return err;
   5.345 -    }
   5.346 -
   5.347 -    netif->evtchn = op.u.bind_interdomain.port1;
   5.348 -    netif->remote_evtchn = evtchn;
   5.349 +	vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP);
   5.350 +	if (vma == NULL)
   5.351 +		return -ENOMEM;
   5.352  
   5.353 -    netif->tx = (netif_tx_interface_t *)vma->addr;
   5.354 -    netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
   5.355 -    netif->tx->resp_prod = netif->rx->resp_prod = 0;
   5.356 -    netif_get(netif);
   5.357 -    wmb(); /* Other CPUs see new state before interface is started. */
   5.358 +	err = map_frontend_pages(
   5.359 +		netif, (unsigned long)vma->addr, tx_ring_ref, rx_ring_ref);
   5.360 +	if (err) {
   5.361 +		vfree(vma->addr);
   5.362 +		return err;
   5.363 +	}
   5.364  
   5.365 -    rtnl_lock();
   5.366 -    netif->status = CONNECTED;
   5.367 -    wmb();
   5.368 -    if (netif_running(netif->dev))
   5.369 -        __netif_up(netif);
   5.370 -    rtnl_unlock();
   5.371 +	op.u.bind_interdomain.dom1 = DOMID_SELF;
   5.372 +	op.u.bind_interdomain.dom2 = netif->domid;
   5.373 +	op.u.bind_interdomain.port1 = 0;
   5.374 +	op.u.bind_interdomain.port2 = evtchn;
   5.375 +	err = HYPERVISOR_event_channel_op(&op);
   5.376 +	if (err) {
   5.377 +		unmap_frontend_pages(netif);
   5.378 +		vfree(vma->addr);
   5.379 +		return err;
   5.380 +	}
   5.381  
   5.382 -    return 0;
   5.383 +	netif->evtchn = op.u.bind_interdomain.port1;
   5.384 +	netif->remote_evtchn = evtchn;
   5.385 +
   5.386 +	netif->tx = (netif_tx_interface_t *)vma->addr;
   5.387 +	netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
   5.388 +	netif->tx->resp_prod = netif->rx->resp_prod = 0;
   5.389 +	netif_get(netif);
   5.390 +	wmb(); /* Other CPUs see new state before interface is started. */
   5.391 +
   5.392 +	rtnl_lock();
   5.393 +	netif->status = CONNECTED;
   5.394 +	wmb();
   5.395 +	if (netif_running(netif->dev))
   5.396 +		__netif_up(netif);
   5.397 +	rtnl_unlock();
   5.398 +
   5.399 +	return 0;
   5.400  }
   5.401  
   5.402 -static void free_netif(void *arg)
   5.403 +static void free_netif_callback(void *arg)
   5.404  {
   5.405 -    evtchn_op_t op = { .cmd = EVTCHNOP_close };
   5.406 -    netif_t *netif = (netif_t *)arg;
   5.407 -
   5.408 -    /*
   5.409 -     * These can't be done in netif_disconnect() because at that point there
   5.410 -     * may be outstanding requests in the network stack whose asynchronous
   5.411 -     * responses must still be notified to the remote driver.
   5.412 -     */
   5.413 +	evtchn_op_t op = { .cmd = EVTCHNOP_close };
   5.414 +	netif_t *netif = (netif_t *)arg;
   5.415  
   5.416 -    op.u.close.port = netif->evtchn;
   5.417 -    op.u.close.dom = DOMID_SELF;
   5.418 -    HYPERVISOR_event_channel_op(&op);
   5.419 -    op.u.close.port = netif->remote_evtchn;
   5.420 -    op.u.close.dom = netif->domid;
   5.421 -    HYPERVISOR_event_channel_op(&op);
   5.422 +	/*
   5.423 +	 * These can't be done in netif_disconnect() because at that point
   5.424 +	 * there may be outstanding requests in the network stack whose
   5.425 +	 * asynchronous responses must still be notified to the remote driver.
   5.426 +	 */
   5.427  
   5.428 -    unregister_netdev(netif->dev);
   5.429 +	op.u.close.port = netif->evtchn;
   5.430 +	op.u.close.dom = DOMID_SELF;
   5.431 +	HYPERVISOR_event_channel_op(&op);
   5.432 +	op.u.close.port = netif->remote_evtchn;
   5.433 +	op.u.close.dom = netif->domid;
   5.434 +	HYPERVISOR_event_channel_op(&op);
   5.435  
   5.436 -    if (netif->tx) {
   5.437 -	unmap_frontend_pages(netif);
   5.438 -	vfree(netif->tx); /* Frees netif->rx as well. */
   5.439 -    }
   5.440 +	unregister_netdev(netif->dev);
   5.441  
   5.442 -    free_netdev(netif->dev);
   5.443 +	if (netif->tx) {
   5.444 +		unmap_frontend_pages(netif);
   5.445 +		vfree(netif->tx); /* Frees netif->rx as well. */
   5.446 +	}
   5.447 +
   5.448 +	free_netdev(netif->dev);
   5.449  }
   5.450  
   5.451 -void free_netif_callback(netif_t *netif)
   5.452 +void free_netif(netif_t *netif)
   5.453  {
   5.454 -    INIT_WORK(&netif->free_work, free_netif, (void *)netif);
   5.455 -    schedule_work(&netif->free_work);
   5.456 +	INIT_WORK(&netif->free_work, free_netif_callback, (void *)netif);
   5.457 +	schedule_work(&netif->free_work);
   5.458  }
   5.459  
   5.460  void netif_creditlimit(netif_t *netif)
   5.461  {
   5.462  #if 0
   5.463 -    /* Set the credit limit (reset remaining credit to new limit). */
   5.464 -    netif->credit_bytes = netif->remaining_credit = creditlimit->credit_bytes;
   5.465 -    netif->credit_usec = creditlimit->period_usec;
   5.466 +	/* Set the credit limit (reset remaining credit to new limit). */
   5.467 +	netif->credit_bytes     = creditlimit->credit_bytes;
   5.468 +	netif->remaining_credit = creditlimit->credit_bytes;
   5.469 +	netif->credit_usec      = creditlimit->period_usec;
   5.470  
   5.471 -    if (netif->status == CONNECTED) {
   5.472 -        /*
   5.473 -         * Schedule work so that any packets waiting under previous credit 
   5.474 -         * limit are dealt with (acts like a replenishment point).
   5.475 -         */
   5.476 -        netif->credit_timeout.expires = jiffies;
   5.477 -        netif_schedule_work(netif);
   5.478 -    }
   5.479 +	if (netif->status == CONNECTED) {
   5.480 +		/*
   5.481 +		 * Schedule work so that any packets waiting under previous
   5.482 +		 * credit limit are dealt with (acts as a replenishment point).
   5.483 +		 */
   5.484 +		netif->credit_timeout.expires = jiffies;
   5.485 +		netif_schedule_work(netif);
   5.486 +	}
   5.487  #endif
   5.488  }
   5.489  
   5.490  int netif_disconnect(netif_t *netif)
   5.491  {
   5.492  
   5.493 -    if (netif->status == CONNECTED) {
   5.494 -        rtnl_lock();
   5.495 -        netif->status = DISCONNECTING;
   5.496 -        wmb();
   5.497 -        if (netif_running(netif->dev))
   5.498 -            __netif_down(netif);
   5.499 -        rtnl_unlock();
   5.500 -        netif_put(netif);
   5.501 -        return 0; /* Caller should not send response message. */
   5.502 -    }
   5.503 +	if (netif->status == CONNECTED) {
   5.504 +		rtnl_lock();
   5.505 +		netif->status = DISCONNECTING;
   5.506 +		wmb();
   5.507 +		if (netif_running(netif->dev))
   5.508 +			__netif_down(netif);
   5.509 +		rtnl_unlock();
   5.510 +		netif_put(netif);
   5.511 +		return 0; /* Caller should not send response message. */
   5.512 +	}
   5.513  
   5.514 -    return 1;
   5.515 +	return 1;
   5.516  }
   5.517 +
   5.518 +/*
   5.519 + * Local variables:
   5.520 + *  c-file-style: "linux"
   5.521 + *  indent-tabs-mode: t
   5.522 + *  c-indent-level: 8
   5.523 + *  c-basic-offset: 8
   5.524 + *  tab-width: 8
   5.525 + * End:
   5.526 + */
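
Beyond the reindentation to Linux style, the functional change in this file is the name swap between free_netif() and free_netif_callback(): the refcount side now calls free_netif(), which defers the heavyweight teardown (event-channel close, unregister_netdev(), unmap and vfree()) to a work item. A generic sketch of that deferral idiom, using the same pre-2.6.20 three-argument INIT_WORK() as this tree, is shown below; the structure and function names are hypothetical.

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct example_obj {
	atomic_t refcnt;
	struct work_struct free_work;
};

/* Runs later in process context, where sleeping teardown work is allowed. */
static void example_free_callback(void *arg)
{
	struct example_obj *obj = arg;
	/* ... close channels, unregister devices, unmap shared pages ... */
	kfree(obj);
}

/* May be called from atomic context; it only schedules the real teardown. */
static void example_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt)) {
		INIT_WORK(&obj->free_work, example_free_callback, obj);
		schedule_work(&obj->free_work);
	}
}
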
     6.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 16 18:06:42 2005 +0000
     6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c	Fri Sep 16 18:07:50 2005 +0000
     6.3 @@ -57,8 +57,8 @@ static unsigned long mmap_vstart;
     6.4  #define PKT_PROT_LEN 64
     6.5  
     6.6  static struct {
     6.7 -    netif_tx_request_t req;
     6.8 -    netif_t *netif;
     6.9 +	netif_tx_request_t req;
    6.10 +	netif_t *netif;
    6.11  } pending_tx_info[MAX_PENDING_REQS];
    6.12  static u16 pending_ring[MAX_PENDING_REQS];
    6.13  typedef unsigned int PEND_RING_IDX;
    6.14 @@ -91,49 +91,49 @@ static spinlock_t mfn_lock = SPIN_LOCK_U
    6.15  
    6.16  static unsigned long alloc_mfn(void)
    6.17  {
    6.18 -    unsigned long mfn = 0, flags;
    6.19 -    struct xen_memory_reservation reservation = {
    6.20 -        .extent_start = mfn_list,
    6.21 -        .nr_extents   = MAX_MFN_ALLOC,
    6.22 -        .extent_order = 0,
    6.23 -        .domid        = DOMID_SELF
    6.24 -    };
    6.25 -    spin_lock_irqsave(&mfn_lock, flags);
    6.26 -    if ( unlikely(alloc_index == 0) )
    6.27 -        alloc_index = HYPERVISOR_memory_op(
    6.28 -            XENMEM_increase_reservation, &reservation);
    6.29 -    if ( alloc_index != 0 )
    6.30 -        mfn = mfn_list[--alloc_index];
    6.31 -    spin_unlock_irqrestore(&mfn_lock, flags);
    6.32 -    return mfn;
    6.33 +	unsigned long mfn = 0, flags;
    6.34 +	struct xen_memory_reservation reservation = {
    6.35 +		.extent_start = mfn_list,
    6.36 +		.nr_extents   = MAX_MFN_ALLOC,
    6.37 +		.extent_order = 0,
    6.38 +		.domid        = DOMID_SELF
    6.39 +	};
    6.40 +	spin_lock_irqsave(&mfn_lock, flags);
    6.41 +	if ( unlikely(alloc_index == 0) )
    6.42 +		alloc_index = HYPERVISOR_memory_op(
    6.43 +			XENMEM_increase_reservation, &reservation);
    6.44 +	if ( alloc_index != 0 )
    6.45 +		mfn = mfn_list[--alloc_index];
    6.46 +	spin_unlock_irqrestore(&mfn_lock, flags);
    6.47 +	return mfn;
    6.48  }
    6.49  
    6.50  #ifndef CONFIG_XEN_NETDEV_GRANT
    6.51  static void free_mfn(unsigned long mfn)
    6.52  {
    6.53 -    unsigned long flags;
    6.54 -    struct xen_memory_reservation reservation = {
    6.55 -        .extent_start = &mfn,
    6.56 -        .nr_extents   = 1,
    6.57 -        .extent_order = 0,
    6.58 -        .domid        = DOMID_SELF
    6.59 -    };
    6.60 -    spin_lock_irqsave(&mfn_lock, flags);
    6.61 -    if ( alloc_index != MAX_MFN_ALLOC )
    6.62 -        mfn_list[alloc_index++] = mfn;
    6.63 -    else if ( HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation)
    6.64 -              != 1 )
    6.65 -        BUG();
    6.66 -    spin_unlock_irqrestore(&mfn_lock, flags);
    6.67 +	unsigned long flags;
    6.68 +	struct xen_memory_reservation reservation = {
    6.69 +		.extent_start = &mfn,
    6.70 +		.nr_extents   = 1,
    6.71 +		.extent_order = 0,
    6.72 +		.domid        = DOMID_SELF
    6.73 +	};
    6.74 +	spin_lock_irqsave(&mfn_lock, flags);
    6.75 +	if ( alloc_index != MAX_MFN_ALLOC )
    6.76 +		mfn_list[alloc_index++] = mfn;
    6.77 +	else
    6.78 +		BUG_ON(HYPERVISOR_memory_op(XENMEM_decrease_reservation,
    6.79 +					    &reservation) != 1);
    6.80 +	spin_unlock_irqrestore(&mfn_lock, flags);
    6.81  }
    6.82  #endif
    6.83  
    6.84  static inline void maybe_schedule_tx_action(void)
    6.85  {
    6.86 -    smp_mb();
    6.87 -    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
    6.88 -         !list_empty(&net_schedule_list) )
    6.89 -        tasklet_schedule(&net_tx_tasklet);
    6.90 +	smp_mb();
    6.91 +	if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
    6.92 +	    !list_empty(&net_schedule_list))
    6.93 +		tasklet_schedule(&net_tx_tasklet);
    6.94  }
    6.95  
    6.96  /*
    6.97 @@ -142,77 +142,77 @@ static inline void maybe_schedule_tx_act
    6.98   */
    6.99  static inline int is_xen_skb(struct sk_buff *skb)
   6.100  {
   6.101 -    extern kmem_cache_t *skbuff_cachep;
   6.102 -    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
   6.103 -    return (cp == skbuff_cachep);
   6.104 +	extern kmem_cache_t *skbuff_cachep;
   6.105 +	kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
   6.106 +	return (cp == skbuff_cachep);
   6.107  }
   6.108  
   6.109  int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
   6.110  {
   6.111 -    netif_t *netif = netdev_priv(dev);
   6.112 -
   6.113 -    ASSERT(skb->dev == dev);
   6.114 -
   6.115 -    /* Drop the packet if the target domain has no receive buffers. */
   6.116 -    if ( !netif->active || 
   6.117 -         (netif->rx_req_cons == netif->rx->req_prod) ||
   6.118 -         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
   6.119 -        goto drop;
   6.120 +	netif_t *netif = netdev_priv(dev);
   6.121  
   6.122 -    /*
   6.123 -     * We do not copy the packet unless:
   6.124 -     *  1. The data is shared; or
   6.125 -     *  2. The data is not allocated from our special cache.
   6.126 -     * NB. We also couldn't cope with fragmented packets, but we won't get
   6.127 -     *     any because we not advertise the NETIF_F_SG feature.
   6.128 -     */
   6.129 -    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
   6.130 -    {
   6.131 -        int hlen = skb->data - skb->head;
   6.132 -        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
   6.133 -        if ( unlikely(nskb == NULL) )
   6.134 -            goto drop;
   6.135 -        skb_reserve(nskb, hlen);
   6.136 -        __skb_put(nskb, skb->len);
   6.137 -        if (skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen))
   6.138 -            BUG();
   6.139 -        nskb->dev = skb->dev;
   6.140 -        nskb->proto_csum_valid = skb->proto_csum_valid;
   6.141 -        dev_kfree_skb(skb);
   6.142 -        skb = nskb;
   6.143 -    }
   6.144 +	ASSERT(skb->dev == dev);
   6.145 +
   6.146 +	/* Drop the packet if the target domain has no receive buffers. */
   6.147 +	if (!netif->active || 
   6.148 +	    (netif->rx_req_cons == netif->rx->req_prod) ||
   6.149 +	    ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE))
   6.150 +		goto drop;
   6.151 +
   6.152 +	/*
   6.153 +	 * We do not copy the packet unless:
   6.154 +	 *  1. The data is shared; or
   6.155 +	 *  2. The data is not allocated from our special cache.
   6.156 +	 * NB. We also couldn't cope with fragmented packets, but we won't get
   6.157 +	 *     any because we not advertise the NETIF_F_SG feature.
   6.158 +	 */
   6.159 +	if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) {
   6.160 +		int hlen = skb->data - skb->head;
   6.161 +		struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
   6.162 +		if ( unlikely(nskb == NULL) )
   6.163 +			goto drop;
   6.164 +		skb_reserve(nskb, hlen);
   6.165 +		__skb_put(nskb, skb->len);
   6.166 +		BUG_ON(skb_copy_bits(skb, -hlen, nskb->data - hlen,
   6.167 +				     skb->len + hlen));
   6.168 +		nskb->dev = skb->dev;
   6.169 +		nskb->proto_csum_valid = skb->proto_csum_valid;
   6.170 +		dev_kfree_skb(skb);
   6.171 +		skb = nskb;
   6.172 +	}
   6.173  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.174  #ifdef DEBUG_GRANT
   6.175 -    printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d id=%04x gr=%04x\n",
   6.176 -           netif->rx->req_prod,
   6.177 -           netif->rx_req_cons,
   6.178 -           netif->rx->ring[
   6.179 -		   MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
   6.180 -           netif->rx->ring[
   6.181 -		   MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
   6.182 +	printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d "
   6.183 +	       "id=%04x gr=%04x\n",
   6.184 +	       netif->rx->req_prod,
   6.185 +	       netif->rx_req_cons,
   6.186 +	       netif->rx->ring[
   6.187 +		       MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
   6.188 +	       netif->rx->ring[
   6.189 +		       MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
   6.190  #endif
   6.191  #endif
   6.192 -    netif->rx_req_cons++;
   6.193 -    netif_get(netif);
   6.194 +	netif->rx_req_cons++;
   6.195 +	netif_get(netif);
   6.196  
   6.197 -    skb_queue_tail(&rx_queue, skb);
   6.198 -    tasklet_schedule(&net_rx_tasklet);
   6.199 +	skb_queue_tail(&rx_queue, skb);
   6.200 +	tasklet_schedule(&net_rx_tasklet);
   6.201  
   6.202 -    return 0;
   6.203 +	return 0;
   6.204  
   6.205   drop:
   6.206 -    netif->stats.tx_dropped++;
   6.207 -    dev_kfree_skb(skb);
   6.208 -    return 0;
   6.209 +	netif->stats.tx_dropped++;
   6.210 +	dev_kfree_skb(skb);
   6.211 +	return 0;
   6.212  }
   6.213  
   6.214  #if 0
   6.215  static void xen_network_done_notify(void)
   6.216  {
   6.217 -    static struct net_device *eth0_dev = NULL;
   6.218 -    if ( unlikely(eth0_dev == NULL) )
   6.219 -        eth0_dev = __dev_get_by_name("eth0");
   6.220 -    netif_rx_schedule(eth0_dev);
   6.221 +	static struct net_device *eth0_dev = NULL;
   6.222 +	if (unlikely(eth0_dev == NULL))
   6.223 +		eth0_dev = __dev_get_by_name("eth0");
   6.224 +	netif_rx_schedule(eth0_dev);
   6.225  }
   6.226  /* 
   6.227   * Add following to poll() function in NAPI driver (Tigon3 is example):
   6.228 @@ -221,658 +221,654 @@ static void xen_network_done_notify(void
   6.229   */
   6.230  int xen_network_done(void)
   6.231  {
   6.232 -    return skb_queue_empty(&rx_queue);
   6.233 +	return skb_queue_empty(&rx_queue);
   6.234  }
   6.235  #endif
   6.236  
   6.237  static void net_rx_action(unsigned long unused)
   6.238  {
   6.239 -    netif_t *netif = NULL; 
   6.240 -    s8 status;
   6.241 -    u16 size, id, evtchn;
   6.242 -    multicall_entry_t *mcl;
   6.243 -    mmu_update_t *mmu;
   6.244 +	netif_t *netif = NULL; 
   6.245 +	s8 status;
   6.246 +	u16 size, id, evtchn;
   6.247 +	multicall_entry_t *mcl;
   6.248 +	mmu_update_t *mmu;
   6.249  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.250 -    gnttab_transfer_t *gop;
   6.251 +	gnttab_transfer_t *gop;
   6.252  #else
   6.253 -    struct mmuext_op *mmuext;
   6.254 +	struct mmuext_op *mmuext;
   6.255  #endif
   6.256 -    unsigned long vdata, old_mfn, new_mfn;
   6.257 -    struct sk_buff_head rxq;
   6.258 -    struct sk_buff *skb;
   6.259 -    u16 notify_list[NETIF_RX_RING_SIZE];
   6.260 -    int notify_nr = 0;
   6.261 +	unsigned long vdata, old_mfn, new_mfn;
   6.262 +	struct sk_buff_head rxq;
   6.263 +	struct sk_buff *skb;
   6.264 +	u16 notify_list[NETIF_RX_RING_SIZE];
   6.265 +	int notify_nr = 0;
   6.266  
   6.267 -    skb_queue_head_init(&rxq);
   6.268 +	skb_queue_head_init(&rxq);
   6.269  
   6.270 -    mcl = rx_mcl;
   6.271 -    mmu = rx_mmu;
   6.272 +	mcl = rx_mcl;
   6.273 +	mmu = rx_mmu;
   6.274  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.275 -    gop = grant_rx_op;
   6.276 +	gop = grant_rx_op;
   6.277  #else
   6.278 -    mmuext = rx_mmuext;
   6.279 +	mmuext = rx_mmuext;
   6.280  #endif
   6.281  
   6.282 -    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
   6.283 -    {
   6.284 -        netif   = netdev_priv(skb->dev);
   6.285 -        vdata   = (unsigned long)skb->data;
   6.286 -        old_mfn = virt_to_mfn(vdata);
   6.287 -
   6.288 -        /* Memory squeeze? Back off for an arbitrary while. */
   6.289 -        if ( (new_mfn = alloc_mfn()) == 0 )
   6.290 -        {
   6.291 -            if ( net_ratelimit() )
   6.292 -                WPRINTK("Memory squeeze in netback driver.\n");
   6.293 -            mod_timer(&net_timer, jiffies + HZ);
   6.294 -            skb_queue_head(&rx_queue, skb);
   6.295 -            break;
   6.296 -        }
   6.297 -        /*
   6.298 -         * Set the new P2M table entry before reassigning the old data page.
   6.299 -         * Heed the comment in pgtable-2level.h:pte_page(). :-)
   6.300 -         */
   6.301 -        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
   6.302 -
   6.303 -        MULTI_update_va_mapping(mcl, vdata,
   6.304 -				pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
   6.305 -        mcl++;
   6.306 +	while ((skb = skb_dequeue(&rx_queue)) != NULL) {
   6.307 +		netif   = netdev_priv(skb->dev);
   6.308 +		vdata   = (unsigned long)skb->data;
   6.309 +		old_mfn = virt_to_mfn(vdata);
   6.310  
   6.311 -#ifdef CONFIG_XEN_NETDEV_GRANT
   6.312 -        gop->mfn = old_mfn;
   6.313 -        gop->domid = netif->domid;
   6.314 -        gop->ref = netif->rx->ring[
   6.315 -        MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
   6.316 -        netif->rx_resp_prod_copy++;
   6.317 -        gop++;
   6.318 -#else
   6.319 -        mcl->op = __HYPERVISOR_mmuext_op;
   6.320 -        mcl->args[0] = (unsigned long)mmuext;
   6.321 -        mcl->args[1] = 1;
   6.322 -        mcl->args[2] = 0;
   6.323 -        mcl->args[3] = netif->domid;
   6.324 -        mcl++;
   6.325 +		/* Memory squeeze? Back off for an arbitrary while. */
   6.326 +		if ((new_mfn = alloc_mfn()) == 0) {
   6.327 +			if ( net_ratelimit() )
   6.328 +				WPRINTK("Memory squeeze in netback driver.\n");
   6.329 +			mod_timer(&net_timer, jiffies + HZ);
   6.330 +			skb_queue_head(&rx_queue, skb);
   6.331 +			break;
   6.332 +		}
   6.333 +		/*
   6.334 +		 * Set the new P2M table entry before reassigning the old data
   6.335 +		 * page. Heed the comment in pgtable-2level.h:pte_page(). :-)
   6.336 +		 */
   6.337 +		phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] =
   6.338 +			new_mfn;
   6.339  
   6.340 -        mmuext->cmd = MMUEXT_REASSIGN_PAGE;
   6.341 -        mmuext->arg1.mfn = old_mfn;
   6.342 -        mmuext++;
   6.343 -#endif
   6.344 -        mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   6.345 -        mmu->val = __pa(vdata) >> PAGE_SHIFT;  
   6.346 -        mmu++;
   6.347 -
   6.348 -        __skb_queue_tail(&rxq, skb);
   6.349 -
   6.350 -#ifdef DEBUG_GRANT
   6.351 -        dump_packet('a', old_mfn, vdata);
   6.352 -#endif
   6.353 -        /* Filled the batch queue? */
   6.354 -        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
   6.355 -            break;
   6.356 -    }
   6.357 -
   6.358 -    if ( mcl == rx_mcl )
   6.359 -        return;
   6.360 -
   6.361 -    mcl->op = __HYPERVISOR_mmu_update;
   6.362 -    mcl->args[0] = (unsigned long)rx_mmu;
   6.363 -    mcl->args[1] = mmu - rx_mmu;
   6.364 -    mcl->args[2] = 0;
   6.365 -    mcl->args[3] = DOMID_SELF;
   6.366 -    mcl++;
   6.367 +		MULTI_update_va_mapping(mcl, vdata,
   6.368 +					pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
   6.369 +		mcl++;
   6.370  
   6.371  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.372 -    mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   6.373 +		gop->mfn = old_mfn;
   6.374 +		gop->domid = netif->domid;
   6.375 +		gop->ref = netif->rx->ring[
   6.376 +			MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
   6.377 +		netif->rx_resp_prod_copy++;
   6.378 +		gop++;
   6.379  #else
   6.380 -    mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   6.381 -#endif
   6.382 -    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
   6.383 -        BUG();
   6.384 +		mcl->op = __HYPERVISOR_mmuext_op;
   6.385 +		mcl->args[0] = (unsigned long)mmuext;
   6.386 +		mcl->args[1] = 1;
   6.387 +		mcl->args[2] = 0;
   6.388 +		mcl->args[3] = netif->domid;
   6.389 +		mcl++;
   6.390  
   6.391 -    mcl = rx_mcl;
   6.392 -#ifdef CONFIG_XEN_NETDEV_GRANT
   6.393 -    if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
   6.394 -                                 gop - grant_rx_op)) { 
   6.395 -        /* 
   6.396 -        ** The other side has given us a bad grant ref, or has no headroom, 
   6.397 -        ** or has gone away. Unfortunately the current grant table code 
   6.398 -        ** doesn't inform us which is the case, so not much we can do. 
   6.399 -        */
   6.400 -        DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) %d "
   6.401 -                "packets.\n", grant_rx_op[0].domid, gop - grant_rx_op); 
   6.402 -    }
   6.403 -    gop = grant_rx_op;
   6.404 -#else
   6.405 -    mmuext = rx_mmuext;
   6.406 +		mmuext->cmd = MMUEXT_REASSIGN_PAGE;
   6.407 +		mmuext->arg1.mfn = old_mfn;
   6.408 +		mmuext++;
   6.409  #endif
   6.410 -    while ( (skb = __skb_dequeue(&rxq)) != NULL )
   6.411 -    {
   6.412 -        netif   = netdev_priv(skb->dev);
   6.413 -        size    = skb->tail - skb->data;
   6.414 +		mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
   6.415 +			MMU_MACHPHYS_UPDATE;
   6.416 +		mmu->val = __pa(vdata) >> PAGE_SHIFT;  
   6.417 +		mmu++;
   6.418  
   6.419 -        /* Rederive the machine addresses. */
   6.420 -        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
   6.421 -#ifdef CONFIG_XEN_NETDEV_GRANT
   6.422 -        old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
   6.423 -#else
   6.424 -        old_mfn = mmuext[0].arg1.mfn;
   6.425 +		__skb_queue_tail(&rxq, skb);
   6.426 +
   6.427 +#ifdef DEBUG_GRANT
   6.428 +		dump_packet('a', old_mfn, vdata);
   6.429  #endif
   6.430 -        atomic_set(&(skb_shinfo(skb)->dataref), 1);
   6.431 -        skb_shinfo(skb)->nr_frags = 0;
   6.432 -        skb_shinfo(skb)->frag_list = NULL;
   6.433 +		/* Filled the batch queue? */
   6.434 +		if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl))
   6.435 +			break;
   6.436 +	}
   6.437  
   6.438 -        netif->stats.tx_bytes += size;
   6.439 -        netif->stats.tx_packets++;
   6.440 +	if (mcl == rx_mcl)
   6.441 +		return;
   6.442  
   6.443 -        /* The update_va_mapping() must not fail. */
   6.444 -        BUG_ON(mcl[0].result != 0);
   6.445 +	mcl->op = __HYPERVISOR_mmu_update;
   6.446 +	mcl->args[0] = (unsigned long)rx_mmu;
   6.447 +	mcl->args[1] = mmu - rx_mmu;
   6.448 +	mcl->args[2] = 0;
   6.449 +	mcl->args[3] = DOMID_SELF;
   6.450 +	mcl++;
   6.451  
   6.452 -        /* Check the reassignment error code. */
   6.453 -        status = NETIF_RSP_OKAY;
   6.454  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.455 -        if(gop->status != 0) { 
   6.456 -            DPRINTK("Bad status %d from grant transfer to DOM%u\n", 
   6.457 -                    gop->status, netif->domid);
   6.458 -            /* XXX SMH: should free 'old_mfn' here */
   6.459 -            status = NETIF_RSP_ERROR; 
   6.460 -        } 
   6.461 +	mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   6.462  #else
   6.463 -        if ( unlikely(mcl[1].result != 0) )
   6.464 -        {
   6.465 -            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
   6.466 -            free_mfn(old_mfn);
   6.467 -            status = NETIF_RSP_ERROR;
   6.468 -        }
   6.469 +	mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   6.470  #endif
   6.471 -        evtchn = netif->evtchn;
   6.472 -        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
   6.473 -        if ( make_rx_response(netif, id, status,
   6.474 -                              (old_mfn << PAGE_SHIFT) | /* XXX */
   6.475 -                              ((unsigned long)skb->data & ~PAGE_MASK),
   6.476 -                              size, skb->proto_csum_valid) &&
   6.477 -             (rx_notify[evtchn] == 0) )
   6.478 -        {
   6.479 -            rx_notify[evtchn] = 1;
   6.480 -            notify_list[notify_nr++] = evtchn;
   6.481 -        }
   6.482 +	BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0);
   6.483  
   6.484 -        netif_put(netif);
   6.485 -        dev_kfree_skb(skb);
   6.486 +	mcl = rx_mcl;
   6.487  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.488 -        mcl++;
   6.489 -        gop++;
   6.490 +	if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 
   6.491 +				     gop - grant_rx_op)) { 
   6.492 +		/*
   6.493 +		 * The other side has given us a bad grant ref, or has no 
   6.494 +		 * headroom, or has gone away. Unfortunately the current grant
   6.495 +		 * table code doesn't inform us which is the case, so not much
   6.496 +		 * we can do. 
   6.497 +		 */
   6.498 +		DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) "
   6.499 +			"%d packets.\n",
   6.500 +			grant_rx_op[0].domid, gop - grant_rx_op); 
   6.501 +	}
   6.502 +	gop = grant_rx_op;
   6.503  #else
   6.504 -        mcl += 2;
   6.505 -        mmuext += 1;
   6.506 +	mmuext = rx_mmuext;
   6.507  #endif
   6.508 -    }
   6.509 +	while ((skb = __skb_dequeue(&rxq)) != NULL) {
   6.510 +		netif   = netdev_priv(skb->dev);
   6.511 +		size    = skb->tail - skb->data;
   6.512  
   6.513 -    while ( notify_nr != 0 )
   6.514 -    {
   6.515 -        evtchn = notify_list[--notify_nr];
   6.516 -        rx_notify[evtchn] = 0;
   6.517 -        notify_via_evtchn(evtchn);
   6.518 -    }
   6.519 +		/* Rederive the machine addresses. */
   6.520 +		new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
   6.521 +#ifdef CONFIG_XEN_NETDEV_GRANT
   6.522 +		old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
   6.523 +#else
   6.524 +		old_mfn = mmuext[0].arg1.mfn;
   6.525 +#endif
   6.526 +		atomic_set(&(skb_shinfo(skb)->dataref), 1);
   6.527 +		skb_shinfo(skb)->nr_frags = 0;
   6.528 +		skb_shinfo(skb)->frag_list = NULL;
   6.529  
   6.530 -  out: 
   6.531 -    /* More work to do? */
   6.532 -    if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
   6.533 -        tasklet_schedule(&net_rx_tasklet);
   6.534 +		netif->stats.tx_bytes += size;
   6.535 +		netif->stats.tx_packets++;
   6.536 +
   6.537 +		/* The update_va_mapping() must not fail. */
   6.538 +		BUG_ON(mcl[0].result != 0);
   6.539 +
   6.540 +		/* Check the reassignment error code. */
   6.541 +		status = NETIF_RSP_OKAY;
   6.542 +#ifdef CONFIG_XEN_NETDEV_GRANT
   6.543 +		if(gop->status != 0) { 
   6.544 +			DPRINTK("Bad status %d from grant transfer to DOM%u\n",
   6.545 +				gop->status, netif->domid);
   6.546 +			/* XXX SMH: should free 'old_mfn' here */
   6.547 +			status = NETIF_RSP_ERROR; 
   6.548 +		} 
   6.549 +#else
   6.550 +		if (unlikely(mcl[1].result != 0)) {
   6.551 +			DPRINTK("Failed MMU update transferring to DOM%u\n",
   6.552 +				netif->domid);
   6.553 +			free_mfn(old_mfn);
   6.554 +			status = NETIF_RSP_ERROR;
   6.555 +		}
   6.556 +#endif
   6.557 +		evtchn = netif->evtchn;
   6.558 +		id = netif->rx->ring[
   6.559 +			MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
   6.560 +		if (make_rx_response(netif, id, status,
   6.561 +				     (old_mfn << PAGE_SHIFT) | /* XXX */
   6.562 +				     ((unsigned long)skb->data & ~PAGE_MASK),
   6.563 +				     size, skb->proto_csum_valid) &&
   6.564 +		    (rx_notify[evtchn] == 0)) {
   6.565 +			rx_notify[evtchn] = 1;
   6.566 +			notify_list[notify_nr++] = evtchn;
   6.567 +		}
   6.568 +
   6.569 +		netif_put(netif);
   6.570 +		dev_kfree_skb(skb);
   6.571 +#ifdef CONFIG_XEN_NETDEV_GRANT
   6.572 +		mcl++;
   6.573 +		gop++;
   6.574 +#else
   6.575 +		mcl += 2;
   6.576 +		mmuext += 1;
   6.577 +#endif
   6.578 +	}
   6.579 +
   6.580 +	while (notify_nr != 0) {
   6.581 +		evtchn = notify_list[--notify_nr];
   6.582 +		rx_notify[evtchn] = 0;
   6.583 +		notify_via_evtchn(evtchn);
   6.584 +	}
   6.585 +
   6.586 +	/* More work to do? */
   6.587 +	if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
   6.588 +		tasklet_schedule(&net_rx_tasklet);
   6.589  #if 0
   6.590 -    else
   6.591 -        xen_network_done_notify();
   6.592 +	else
   6.593 +		xen_network_done_notify();
   6.594  #endif
   6.595  }
   6.596  
   6.597  static void net_alarm(unsigned long unused)
   6.598  {
   6.599 -    tasklet_schedule(&net_rx_tasklet);
   6.600 +	tasklet_schedule(&net_rx_tasklet);
   6.601  }
   6.602  
   6.603  struct net_device_stats *netif_be_get_stats(struct net_device *dev)
   6.604  {
   6.605 -    netif_t *netif = netdev_priv(dev);
   6.606 -    return &netif->stats;
   6.607 +	netif_t *netif = netdev_priv(dev);
   6.608 +	return &netif->stats;
   6.609  }
   6.610  
   6.611  static int __on_net_schedule_list(netif_t *netif)
   6.612  {
   6.613 -    return netif->list.next != NULL;
   6.614 +	return netif->list.next != NULL;
   6.615  }
   6.616  
   6.617  static void remove_from_net_schedule_list(netif_t *netif)
   6.618  {
   6.619 -    spin_lock_irq(&net_schedule_list_lock);
   6.620 -    if ( likely(__on_net_schedule_list(netif)) )
   6.621 -    {
   6.622 -        list_del(&netif->list);
   6.623 -        netif->list.next = NULL;
   6.624 -        netif_put(netif);
   6.625 -    }
   6.626 -    spin_unlock_irq(&net_schedule_list_lock);
   6.627 +	spin_lock_irq(&net_schedule_list_lock);
   6.628 +	if (likely(__on_net_schedule_list(netif))) {
   6.629 +		list_del(&netif->list);
   6.630 +		netif->list.next = NULL;
   6.631 +		netif_put(netif);
   6.632 +	}
   6.633 +	spin_unlock_irq(&net_schedule_list_lock);
   6.634  }
   6.635  
   6.636  static void add_to_net_schedule_list_tail(netif_t *netif)
   6.637  {
   6.638 -    if ( __on_net_schedule_list(netif) )
   6.639 -        return;
   6.640 +	if (__on_net_schedule_list(netif))
   6.641 +		return;
   6.642  
   6.643 -    spin_lock_irq(&net_schedule_list_lock);
   6.644 -    if ( !__on_net_schedule_list(netif) && netif->active )
   6.645 -    {
   6.646 -        list_add_tail(&netif->list, &net_schedule_list);
   6.647 -        netif_get(netif);
   6.648 -    }
   6.649 -    spin_unlock_irq(&net_schedule_list_lock);
   6.650 +	spin_lock_irq(&net_schedule_list_lock);
   6.651 +	if (!__on_net_schedule_list(netif) && netif->active) {
   6.652 +		list_add_tail(&netif->list, &net_schedule_list);
   6.653 +		netif_get(netif);
   6.654 +	}
   6.655 +	spin_unlock_irq(&net_schedule_list_lock);
   6.656  }
   6.657  
   6.658  void netif_schedule_work(netif_t *netif)
   6.659  {
   6.660 -    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
   6.661 -         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
   6.662 -    {
   6.663 -        add_to_net_schedule_list_tail(netif);
   6.664 -        maybe_schedule_tx_action();
   6.665 -    }
   6.666 +	if ((netif->tx_req_cons != netif->tx->req_prod) &&
   6.667 +	    ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE)) {
   6.668 +		add_to_net_schedule_list_tail(netif);
   6.669 +		maybe_schedule_tx_action();
   6.670 +	}
   6.671  }
   6.672  
   6.673  void netif_deschedule_work(netif_t *netif)
   6.674  {
   6.675 -    remove_from_net_schedule_list(netif);
   6.676 +	remove_from_net_schedule_list(netif);
   6.677  }
   6.678  
   6.679  
   6.680  static void tx_credit_callback(unsigned long data)
   6.681  {
   6.682 -    netif_t *netif = (netif_t *)data;
   6.683 -    netif->remaining_credit = netif->credit_bytes;
   6.684 -    netif_schedule_work(netif);
   6.685 +	netif_t *netif = (netif_t *)data;
   6.686 +	netif->remaining_credit = netif->credit_bytes;
   6.687 +	netif_schedule_work(netif);
   6.688  }
   6.689  
   6.690  inline static void net_tx_action_dealloc(void)
   6.691  {
   6.692  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.693 -    gnttab_unmap_grant_ref_t *gop;
   6.694 +	gnttab_unmap_grant_ref_t *gop;
   6.695  #else
   6.696 -    multicall_entry_t *mcl;
   6.697 +	multicall_entry_t *mcl;
   6.698  #endif
   6.699 -    u16 pending_idx;
   6.700 -    PEND_RING_IDX dc, dp;
   6.701 -    netif_t *netif;
   6.702 +	u16 pending_idx;
   6.703 +	PEND_RING_IDX dc, dp;
   6.704 +	netif_t *netif;
   6.705  
   6.706 -    dc = dealloc_cons;
   6.707 -    dp = dealloc_prod;
   6.708 +	dc = dealloc_cons;
   6.709 +	dp = dealloc_prod;
   6.710  
   6.711  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.712 -    /*
   6.713 -     * Free up any grants we have finished using
   6.714 -     */
   6.715 -    gop = tx_unmap_ops;
   6.716 -    while ( dc != dp )
   6.717 -    {
   6.718 -        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   6.719 -        gop->host_addr    = MMAP_VADDR(pending_idx);
   6.720 -        gop->dev_bus_addr = 0;
   6.721 -        gop->handle       = grant_tx_ref[pending_idx];
   6.722 -        grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
   6.723 -        gop++;
   6.724 -    }
   6.725 -    BUG_ON(HYPERVISOR_grant_table_op(
   6.726 -               GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
   6.727 +	/*
   6.728 +	 * Free up any grants we have finished using
   6.729 +	 */
   6.730 +	gop = tx_unmap_ops;
   6.731 +	while (dc != dp) {
   6.732 +		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   6.733 +		gop->host_addr    = MMAP_VADDR(pending_idx);
   6.734 +		gop->dev_bus_addr = 0;
   6.735 +		gop->handle       = grant_tx_ref[pending_idx];
   6.736 +		grant_tx_ref[pending_idx] = GRANT_INVALID_REF;
   6.737 +		gop++;
   6.738 +	}
   6.739 +	BUG_ON(HYPERVISOR_grant_table_op(
   6.740 +		GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
   6.741  #else
   6.742 -    mcl = tx_mcl;
   6.743 -    while ( dc != dp )
   6.744 -    {
   6.745 -        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   6.746 -	MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
   6.747 -				__pte(0), 0);
   6.748 -        mcl++;     
   6.749 -    }
   6.750 +	mcl = tx_mcl;
   6.751 +	while (dc != dp) {
   6.752 +		pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
   6.753 +		MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
   6.754 +					__pte(0), 0);
   6.755 +		mcl++;     
   6.756 +	}
   6.757  
   6.758 -    mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   6.759 -    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
   6.760 -        BUG();
   6.761 +	mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   6.762 +	BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
   6.763  
   6.764 -    mcl = tx_mcl;
   6.765 +	mcl = tx_mcl;
   6.766  #endif
   6.767 -    while ( dealloc_cons != dp )
   6.768 -    {
   6.769 +	while (dealloc_cons != dp) {
   6.770  #ifndef CONFIG_XEN_NETDEV_GRANT
   6.771 -        /* The update_va_mapping() must not fail. */
   6.772 -        BUG_ON(mcl[0].result != 0);
   6.773 +		/* The update_va_mapping() must not fail. */
   6.774 +		BUG_ON(mcl[0].result != 0);
   6.775  #endif
   6.776  
   6.777 -        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
   6.778 -
   6.779 -        netif = pending_tx_info[pending_idx].netif;
   6.780 -
   6.781 -        make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
   6.782 -                         NETIF_RSP_OKAY);
   6.783 -        
   6.784 -        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   6.785 +		pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
   6.786  
   6.787 -        /*
   6.788 -         * Scheduling checks must happen after the above response is posted.
   6.789 -         * This avoids a possible race with a guest OS on another CPU if that
   6.790 -         * guest is testing against 'resp_prod' when deciding whether to notify
   6.791 -         * us when it queues additional packets.
   6.792 -         */
   6.793 -        mb();
   6.794 -        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
   6.795 -             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
   6.796 -            add_to_net_schedule_list_tail(netif);
   6.797 +		netif = pending_tx_info[pending_idx].netif;
   6.798 +
   6.799 +		make_tx_response(netif, pending_tx_info[pending_idx].req.id, 
   6.800 +				 NETIF_RSP_OKAY);
   6.801          
   6.802 -        netif_put(netif);
   6.803 +		pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
   6.804 +
   6.805 +		/*
   6.806 +		 * Scheduling checks must happen after the above response is
   6.807 +		 * posted. This avoids a possible race with a guest OS on
   6.808 +		 * another CPU if that guest is testing against 'resp_prod'
   6.809 +		 * when deciding whether to notify us when it queues additional
   6.810 +		 * packets.
   6.811 +		 */
   6.812 +		mb();
   6.813 +		if ((netif->tx_req_cons != netif->tx->req_prod) &&
   6.814 +		    ((netif->tx_req_cons-netif->tx_resp_prod) !=
   6.815 +		     NETIF_TX_RING_SIZE))
   6.816 +			add_to_net_schedule_list_tail(netif);
   6.817 +        
   6.818 +		netif_put(netif);
   6.819  
   6.820  #ifndef CONFIG_XEN_NETDEV_GRANT
   6.821 -        mcl++;
   6.822 +		mcl++;
   6.823  #endif
   6.824 -    }
   6.825 -
   6.826 +	}
   6.827  }
   6.828  
   6.829  /* Called after netfront has transmitted */
   6.830  static void net_tx_action(unsigned long unused)
   6.831  {
   6.832 -    struct list_head *ent;
   6.833 -    struct sk_buff *skb;
   6.834 -    netif_t *netif;
   6.835 -    netif_tx_request_t txreq;
   6.836 -    u16 pending_idx;
   6.837 -    NETIF_RING_IDX i;
   6.838 +	struct list_head *ent;
   6.839 +	struct sk_buff *skb;
   6.840 +	netif_t *netif;
   6.841 +	netif_tx_request_t txreq;
   6.842 +	u16 pending_idx;
   6.843 +	NETIF_RING_IDX i;
   6.844  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.845 -    gnttab_map_grant_ref_t *mop;
   6.846 +	gnttab_map_grant_ref_t *mop;
   6.847  #else
   6.848 -    multicall_entry_t *mcl;
   6.849 +	multicall_entry_t *mcl;
   6.850  #endif
   6.851 -    unsigned int data_len;
   6.852 +	unsigned int data_len;
   6.853  
   6.854 -    if ( dealloc_cons != dealloc_prod )
   6.855 -        net_tx_action_dealloc();
   6.856 +	if (dealloc_cons != dealloc_prod)
   6.857 +		net_tx_action_dealloc();
   6.858  
   6.859  #ifdef CONFIG_XEN_NETDEV_GRANT
   6.860 -    mop = tx_map_ops;
   6.861 +	mop = tx_map_ops;
   6.862  #else
   6.863 -    mcl = tx_mcl;
   6.864 +	mcl = tx_mcl;
   6.865  #endif
   6.866 -    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
   6.867 -            !list_empty(&net_schedule_list) )
   6.868 -    {
   6.869 -        /* Get a netif from the list with work to do. */
   6.870 -        ent = net_schedule_list.next;
   6.871 -        netif = list_entry(ent, netif_t, list);
   6.872 -        netif_get(netif);
   6.873 -        remove_from_net_schedule_list(netif);
   6.874 -
   6.875 -        /* Work to do? */
   6.876 -        i = netif->tx_req_cons;
   6.877 -        if ( (i == netif->tx->req_prod) ||
   6.878 -             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
   6.879 -        {
   6.880 -            netif_put(netif);
   6.881 -            continue;
   6.882 -        }
   6.883 -
   6.884 -        rmb(); /* Ensure that we see the request before we copy it. */
   6.885 -        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
   6.886 -               sizeof(txreq));
   6.887 -        /* Credit-based scheduling. */
   6.888 -        if ( txreq.size > netif->remaining_credit )
   6.889 -        {
   6.890 -            unsigned long now = jiffies;
   6.891 -            unsigned long next_credit = 
   6.892 -                netif->credit_timeout.expires +
   6.893 -                msecs_to_jiffies(netif->credit_usec / 1000);
   6.894 -
   6.895 -            /* Timer could already be pending in some rare cases. */
   6.896 -            if ( timer_pending(&netif->credit_timeout) )
   6.897 -                break;
   6.898 -
   6.899 -            /* Already passed the point at which we can replenish credit? */
   6.900 -            if ( time_after_eq(now, next_credit) )
   6.901 -            {
   6.902 -                netif->credit_timeout.expires = now;
   6.903 -                netif->remaining_credit = netif->credit_bytes;
   6.904 -            }
   6.905 +	while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
   6.906 +		!list_empty(&net_schedule_list)) {
   6.907 +		/* Get a netif from the list with work to do. */
   6.908 +		ent = net_schedule_list.next;
   6.909 +		netif = list_entry(ent, netif_t, list);
   6.910 +		netif_get(netif);
   6.911 +		remove_from_net_schedule_list(netif);
   6.912  
   6.913 -            /* Still too big to send right now? Then set a timer callback. */
   6.914 -            if ( txreq.size > netif->remaining_credit )
   6.915 -            {
   6.916 -                netif->remaining_credit = 0;
   6.917 -                netif->credit_timeout.expires  = next_credit;
   6.918 -                netif->credit_timeout.data     = (unsigned long)netif;
   6.919 -                netif->credit_timeout.function = tx_credit_callback;
   6.920 -                add_timer_on(&netif->credit_timeout, smp_processor_id());
   6.921 -                break;
   6.922 -            }
   6.923 -        }
   6.924 -        netif->remaining_credit -= txreq.size;
   6.925 -
   6.926 -        /*
   6.927 -         * Why the barrier? It ensures that the frontend sees updated req_cons
   6.928 -         * before we check for more work to schedule.
   6.929 -         */
   6.930 -        netif->tx->req_cons = ++netif->tx_req_cons;
   6.931 -        mb();
   6.932 -
   6.933 -        netif_schedule_work(netif);
   6.934 -
   6.935 -        if ( unlikely(txreq.size < ETH_HLEN) || 
   6.936 -             unlikely(txreq.size > ETH_FRAME_LEN) )
   6.937 -        {
   6.938 -            DPRINTK("Bad packet size: %d\n", txreq.size);
   6.939 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   6.940 -            netif_put(netif);
   6.941 -            continue; 
   6.942 -        }
   6.943 +		/* Work to do? */
   6.944 +		i = netif->tx_req_cons;
   6.945 +		if ((i == netif->tx->req_prod) ||
   6.946 +		    ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE)) {
   6.947 +			netif_put(netif);
   6.948 +			continue;
   6.949 +		}
   6.950  
   6.951 -        /* No crossing a page boundary as the payload mustn't fragment. */
   6.952 -        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 
   6.953 -        {
   6.954 -            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
   6.955 -                    txreq.addr, txreq.size, 
   6.956 -                    (txreq.addr &~PAGE_MASK) + txreq.size);
   6.957 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   6.958 -            netif_put(netif);
   6.959 -            continue;
   6.960 -        }
   6.961 -
   6.962 -        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
   6.963 -
   6.964 -        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
   6.965 +		rmb(); /* Ensure that we see the request before we copy it. */
   6.966 +		memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
   6.967 +		       sizeof(txreq));
   6.968 +		/* Credit-based scheduling. */
   6.969 +		if (txreq.size > netif->remaining_credit) {
   6.970 +			unsigned long now = jiffies;
   6.971 +			unsigned long next_credit = 
   6.972 +				netif->credit_timeout.expires +
   6.973 +				msecs_to_jiffies(netif->credit_usec / 1000);
   6.974  
   6.975 -        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
   6.976 -        {
   6.977 -            DPRINTK("Can't allocate a skb in start_xmit.\n");
   6.978 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
   6.979 -            netif_put(netif);
   6.980 -            break;
   6.981 -        }
   6.982 +			/* Timer could already be pending in rare cases. */
   6.983 +			if (timer_pending(&netif->credit_timeout))
   6.984 +				break;
   6.985  
   6.986 -        /* Packets passed to netif_rx() must have some headroom. */
   6.987 -        skb_reserve(skb, 16);
   6.988 +			/* Passed the point where we can replenish credit? */
   6.989 +			if (time_after_eq(now, next_credit)) {
   6.990 +				netif->credit_timeout.expires = now;
   6.991 +				netif->remaining_credit = netif->credit_bytes;
   6.992 +			}
   6.993 +
   6.994 +			/* Still too big to send right now? Set a callback. */
   6.995 +			if (txreq.size > netif->remaining_credit) {
   6.996 +				netif->remaining_credit = 0;
   6.997 +				netif->credit_timeout.expires  = 
   6.998 +					next_credit;
   6.999 +				netif->credit_timeout.data     =
  6.1000 +					(unsigned long)netif;
  6.1001 +				netif->credit_timeout.function =
  6.1002 +					tx_credit_callback;
  6.1003 +				add_timer_on(&netif->credit_timeout,
  6.1004 +					     smp_processor_id());
  6.1005 +				break;
  6.1006 +			}
  6.1007 +		}
  6.1008 +		netif->remaining_credit -= txreq.size;
  6.1009 +
  6.1010 +		/*
  6.1011 +		 * Why the barrier? It ensures that the frontend sees updated
  6.1012 +		 * req_cons before we check for more work to schedule.
  6.1013 +		 */
  6.1014 +		netif->tx->req_cons = ++netif->tx_req_cons;
  6.1015 +		mb();
  6.1016 +
  6.1017 +		netif_schedule_work(netif);
  6.1018 +
  6.1019 +		if (unlikely(txreq.size < ETH_HLEN) || 
  6.1020 +		    unlikely(txreq.size > ETH_FRAME_LEN)) {
  6.1021 +			DPRINTK("Bad packet size: %d\n", txreq.size);
  6.1022 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  6.1023 +			netif_put(netif);
  6.1024 +			continue; 
  6.1025 +		}
  6.1026 +
  6.1027 +		/* No crossing a page as the payload mustn't fragment. */
  6.1028 +		if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >=
  6.1029 +			     PAGE_SIZE)) {
  6.1030 +			DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 
  6.1031 +				txreq.addr, txreq.size, 
  6.1032 +				(txreq.addr &~PAGE_MASK) + txreq.size);
  6.1033 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  6.1034 +			netif_put(netif);
  6.1035 +			continue;
  6.1036 +		}
  6.1037 +
  6.1038 +		pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
  6.1039 +
  6.1040 +		data_len = (txreq.size > PKT_PROT_LEN) ?
  6.1041 +			PKT_PROT_LEN : txreq.size;
  6.1042 +
  6.1043 +		skb = alloc_skb(data_len+16, GFP_ATOMIC);
  6.1044 +		if (unlikely(skb == NULL)) {
  6.1045 +			DPRINTK("Can't allocate a skb in start_xmit.\n");
  6.1046 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  6.1047 +			netif_put(netif);
  6.1048 +			break;
  6.1049 +		}
  6.1050 +
  6.1051 +		/* Packets passed to netif_rx() must have some headroom. */
  6.1052 +		skb_reserve(skb, 16);
  6.1053  #ifdef CONFIG_XEN_NETDEV_GRANT
  6.1054 -        mop->host_addr = MMAP_VADDR(pending_idx);
  6.1055 -        mop->dom       = netif->domid;
  6.1056 -        mop->ref       = txreq.addr >> PAGE_SHIFT;
  6.1057 -        mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
  6.1058 -        mop++;
  6.1059 +		mop->host_addr = MMAP_VADDR(pending_idx);
  6.1060 +		mop->dom       = netif->domid;
  6.1061 +		mop->ref       = txreq.addr >> PAGE_SHIFT;
  6.1062 +		mop->flags     = GNTMAP_host_map | GNTMAP_readonly;
  6.1063 +		mop++;
  6.1064  #else
  6.1065 -	MULTI_update_va_mapping_otherdomain(
  6.1066 -	    mcl, MMAP_VADDR(pending_idx),
  6.1067 -	    pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
  6.1068 -	    0, netif->domid);
  6.1069 +		MULTI_update_va_mapping_otherdomain(
  6.1070 +			mcl, MMAP_VADDR(pending_idx),
  6.1071 +			pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
  6.1072 +			0, netif->domid);
  6.1073  
  6.1074 -        mcl++;
  6.1075 +		mcl++;
  6.1076  #endif
  6.1077  
  6.1078 -        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
  6.1079 -        pending_tx_info[pending_idx].netif = netif;
  6.1080 -        *((u16 *)skb->data) = pending_idx;
  6.1081 -
  6.1082 -        __skb_queue_tail(&tx_queue, skb);
  6.1083 -
  6.1084 -        pending_cons++;
  6.1085 +		memcpy(&pending_tx_info[pending_idx].req,
  6.1086 +		       &txreq, sizeof(txreq));
  6.1087 +		pending_tx_info[pending_idx].netif = netif;
  6.1088 +		*((u16 *)skb->data) = pending_idx;
  6.1089  
  6.1090 -#ifdef CONFIG_XEN_NETDEV_GRANT
  6.1091 -        if ( (mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops) )
  6.1092 -            break;
  6.1093 -#else
  6.1094 -        /* Filled the batch queue? */
  6.1095 -        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
  6.1096 -            break;
  6.1097 -#endif
  6.1098 -    }
  6.1099 +		__skb_queue_tail(&tx_queue, skb);
  6.1100 +
  6.1101 +		pending_cons++;
  6.1102  
  6.1103  #ifdef CONFIG_XEN_NETDEV_GRANT
  6.1104 -    if ( mop == tx_map_ops )
  6.1105 -        return;
  6.1106 -
  6.1107 -    BUG_ON(HYPERVISOR_grant_table_op(
  6.1108 -        GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
  6.1109 -
  6.1110 -    mop = tx_map_ops;
  6.1111 +		if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
  6.1112 +			break;
  6.1113  #else
  6.1114 -    if ( mcl == tx_mcl )
  6.1115 -        return;
  6.1116 +		/* Filled the batch queue? */
  6.1117 +		if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl))
  6.1118 +			break;
  6.1119 +#endif
  6.1120 +	}
  6.1121  
  6.1122 -    BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
  6.1123 -
  6.1124 -    mcl = tx_mcl;
  6.1125 -#endif
  6.1126 -    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
  6.1127 -    {
  6.1128 -        pending_idx = *((u16 *)skb->data);
  6.1129 -        netif       = pending_tx_info[pending_idx].netif;
  6.1130 -        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
  6.1131 -
  6.1132 -        /* Check the remap error code. */
  6.1133  #ifdef CONFIG_XEN_NETDEV_GRANT
  6.1134 -        if ( unlikely(mop->handle < 0) )
  6.1135 -        {
  6.1136 -            printk(KERN_ALERT "#### netback grant fails\n");
  6.1137 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  6.1138 -            netif_put(netif);
  6.1139 -            kfree_skb(skb);
  6.1140 -            mop++;
  6.1141 -            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
  6.1142 -            continue;
  6.1143 -        }
  6.1144 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
  6.1145 -                             FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
  6.1146 -        grant_tx_ref[pending_idx] = mop->handle;
  6.1147 +	if (mop == tx_map_ops)
  6.1148 +		return;
  6.1149 +
  6.1150 +	BUG_ON(HYPERVISOR_grant_table_op(
  6.1151 +		GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
  6.1152 +
  6.1153 +	mop = tx_map_ops;
  6.1154  #else
  6.1155 -        if ( unlikely(mcl[0].result != 0) )
  6.1156 -        {
  6.1157 -            DPRINTK("Bad page frame\n");
  6.1158 -            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  6.1159 -            netif_put(netif);
  6.1160 -            kfree_skb(skb);
  6.1161 -            mcl++;
  6.1162 -            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
  6.1163 -            continue;
  6.1164 -        }
  6.1165 +	if (mcl == tx_mcl)
  6.1166 +		return;
  6.1167  
  6.1168 -        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
  6.1169 -            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
  6.1170 +	BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
  6.1171 +
  6.1172 +	mcl = tx_mcl;
  6.1173 +#endif
  6.1174 +	while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
  6.1175 +		pending_idx = *((u16 *)skb->data);
  6.1176 +		netif       = pending_tx_info[pending_idx].netif;
  6.1177 +		memcpy(&txreq, &pending_tx_info[pending_idx].req,
  6.1178 +		       sizeof(txreq));
  6.1179 +
  6.1180 +		/* Check the remap error code. */
  6.1181 +#ifdef CONFIG_XEN_NETDEV_GRANT
  6.1182 +		if (unlikely(mop->handle < 0)) {
  6.1183 +			printk(KERN_ALERT "#### netback grant fails\n");
  6.1184 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  6.1185 +			netif_put(netif);
  6.1186 +			kfree_skb(skb);
  6.1187 +			mop++;
  6.1188 +			pending_ring[MASK_PEND_IDX(pending_prod++)] =
  6.1189 +				pending_idx;
  6.1190 +			continue;
  6.1191 +		}
  6.1192 +		phys_to_machine_mapping[
  6.1193 +			__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
  6.1194 +			FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
  6.1195 +		grant_tx_ref[pending_idx] = mop->handle;
  6.1196 +#else
  6.1197 +		if (unlikely(mcl[0].result != 0)) {
  6.1198 +			DPRINTK("Bad page frame\n");
  6.1199 +			make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
  6.1200 +			netif_put(netif);
  6.1201 +			kfree_skb(skb);
  6.1202 +			mcl++;
  6.1203 +			pending_ring[MASK_PEND_IDX(pending_prod++)] =
  6.1204 +				pending_idx;
  6.1205 +			continue;
  6.1206 +		}
  6.1207 +
  6.1208 +		phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >>
  6.1209 +				       PAGE_SHIFT] =
  6.1210 +			FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
  6.1211  #endif
  6.1212  
  6.1213 -        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
  6.1214 +		data_len = (txreq.size > PKT_PROT_LEN) ?
  6.1215 +			PKT_PROT_LEN : txreq.size;
  6.1216  
  6.1217 -        __skb_put(skb, data_len);
  6.1218 -        memcpy(skb->data, 
  6.1219 -               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
  6.1220 -               data_len);
  6.1221 -        if ( data_len < txreq.size )
  6.1222 -        {
  6.1223 -            /* Append the packet payload as a fragment. */
  6.1224 -            skb_shinfo(skb)->frags[0].page        = 
  6.1225 -                virt_to_page(MMAP_VADDR(pending_idx));
  6.1226 -            skb_shinfo(skb)->frags[0].size        = txreq.size - data_len;
  6.1227 -            skb_shinfo(skb)->frags[0].page_offset = 
  6.1228 -                (txreq.addr + data_len) & ~PAGE_MASK;
  6.1229 -            skb_shinfo(skb)->nr_frags = 1;
  6.1230 -        }
  6.1231 -        else
  6.1232 -        {
  6.1233 -            /* Schedule a response immediately. */
  6.1234 -            netif_idx_release(pending_idx);
  6.1235 -        }
  6.1236 +		__skb_put(skb, data_len);
  6.1237 +		memcpy(skb->data, 
  6.1238 +		       (void *)(MMAP_VADDR(pending_idx)|
  6.1239 +				(txreq.addr&~PAGE_MASK)),
  6.1240 +		       data_len);
  6.1241 +		if (data_len < txreq.size) {
  6.1242 +			/* Append the packet payload as a fragment. */
  6.1243 +			skb_shinfo(skb)->frags[0].page        = 
  6.1244 +				virt_to_page(MMAP_VADDR(pending_idx));
  6.1245 +			skb_shinfo(skb)->frags[0].size        =
  6.1246 +				txreq.size - data_len;
  6.1247 +			skb_shinfo(skb)->frags[0].page_offset = 
  6.1248 +				(txreq.addr + data_len) & ~PAGE_MASK;
  6.1249 +			skb_shinfo(skb)->nr_frags = 1;
  6.1250 +		} else {
  6.1251 +			/* Schedule a response immediately. */
  6.1252 +			netif_idx_release(pending_idx);
  6.1253 +		}
  6.1254  
  6.1255 -        skb->data_len  = txreq.size - data_len;
  6.1256 -        skb->len      += skb->data_len;
  6.1257 -
  6.1258 -        skb->dev      = netif->dev;
  6.1259 -        skb->protocol = eth_type_trans(skb, skb->dev);
  6.1260 +		skb->data_len  = txreq.size - data_len;
  6.1261 +		skb->len      += skb->data_len;
  6.1262  
  6.1263 -        /* No checking needed on localhost, but remember the field is blank. */
  6.1264 -        skb->ip_summed        = CHECKSUM_UNNECESSARY;
  6.1265 -        skb->proto_csum_valid = 1;
  6.1266 -        skb->proto_csum_blank = txreq.csum_blank;
  6.1267 +		skb->dev      = netif->dev;
  6.1268 +		skb->protocol = eth_type_trans(skb, skb->dev);
  6.1269  
  6.1270 -        netif->stats.rx_bytes += txreq.size;
  6.1271 -        netif->stats.rx_packets++;
  6.1272 +		/*
  6.1273 +		 * No checking needed on localhost, but remember the field is
  6.1274 +		 * blank.
  6.1275 +		 */
  6.1276 +		skb->ip_summed        = CHECKSUM_UNNECESSARY;
  6.1277 +		skb->proto_csum_valid = 1;
  6.1278 +		skb->proto_csum_blank = txreq.csum_blank;
  6.1279  
  6.1280 -        netif_rx(skb);
  6.1281 -        netif->dev->last_rx = jiffies;
  6.1282 +		netif->stats.rx_bytes += txreq.size;
  6.1283 +		netif->stats.rx_packets++;
  6.1284 +
  6.1285 +		netif_rx(skb);
  6.1286 +		netif->dev->last_rx = jiffies;
  6.1287  
  6.1288  #ifdef CONFIG_XEN_NETDEV_GRANT
  6.1289 -        mop++;
  6.1290 +		mop++;
  6.1291  #else
  6.1292 -        mcl++;
  6.1293 +		mcl++;
  6.1294  #endif
  6.1295 -    }
  6.1296 +	}
  6.1297  }
  6.1298  
  6.1299  static void netif_idx_release(u16 pending_idx)
  6.1300  {
  6.1301 -    static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
  6.1302 -    unsigned long flags;
  6.1303 +	static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
  6.1304 +	unsigned long flags;
  6.1305  
  6.1306 -    spin_lock_irqsave(&_lock, flags);
  6.1307 -    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
  6.1308 -    spin_unlock_irqrestore(&_lock, flags);
  6.1309 +	spin_lock_irqsave(&_lock, flags);
  6.1310 +	dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
  6.1311 +	spin_unlock_irqrestore(&_lock, flags);
  6.1312  
  6.1313 -    tasklet_schedule(&net_tx_tasklet);
  6.1314 +	tasklet_schedule(&net_tx_tasklet);
  6.1315  }
  6.1316  
  6.1317  static void netif_page_release(struct page *page)
  6.1318  {
  6.1319 -    u16 pending_idx = page - virt_to_page(mmap_vstart);
  6.1320 +	u16 pending_idx = page - virt_to_page(mmap_vstart);
  6.1321  
  6.1322 -    /* Ready for next use. */
  6.1323 -    set_page_count(page, 1);
  6.1324 +	/* Ready for next use. */
  6.1325 +	set_page_count(page, 1);
  6.1326  
  6.1327 -    netif_idx_release(pending_idx);
  6.1328 +	netif_idx_release(pending_idx);
  6.1329  }
  6.1330  
  6.1331  irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
  6.1332  {
  6.1333 -    netif_t *netif = dev_id;
  6.1334 -    if ( tx_work_exists(netif) )
  6.1335 -    {
  6.1336 -        add_to_net_schedule_list_tail(netif);
  6.1337 -        maybe_schedule_tx_action();
  6.1338 -    }
  6.1339 -    return IRQ_HANDLED;
  6.1340 +	netif_t *netif = dev_id;
  6.1341 +	if (tx_work_exists(netif)) {
  6.1342 +		add_to_net_schedule_list_tail(netif);
  6.1343 +		maybe_schedule_tx_action();
  6.1344 +	}
  6.1345 +	return IRQ_HANDLED;
  6.1346  }
  6.1347  
  6.1348  static void make_tx_response(netif_t *netif, 
  6.1349                               u16      id,
  6.1350                               s8       st)
  6.1351  {
  6.1352 -    NETIF_RING_IDX i = netif->tx_resp_prod;
  6.1353 -    netif_tx_response_t *resp;
  6.1354 +	NETIF_RING_IDX i = netif->tx_resp_prod;
  6.1355 +	netif_tx_response_t *resp;
  6.1356  
  6.1357 -    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
  6.1358 -    resp->id     = id;
  6.1359 -    resp->status = st;
  6.1360 -    wmb();
  6.1361 -    netif->tx->resp_prod = netif->tx_resp_prod = ++i;
  6.1362 +	resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
  6.1363 +	resp->id     = id;
  6.1364 +	resp->status = st;
  6.1365 +	wmb();
  6.1366 +	netif->tx->resp_prod = netif->tx_resp_prod = ++i;
  6.1367  
  6.1368 -    mb(); /* Update producer before checking event threshold. */
  6.1369 -    if ( i == netif->tx->event )
  6.1370 -        notify_via_evtchn(netif->evtchn);
  6.1371 +	mb(); /* Update producer before checking event threshold. */
  6.1372 +	if (i == netif->tx->event)
  6.1373 +		notify_via_evtchn(netif->evtchn);
  6.1374  }
  6.1375  
  6.1376  static int make_rx_response(netif_t *netif, 
  6.1377 @@ -882,110 +878,120 @@ static int make_rx_response(netif_t *net
  6.1378                              u16      size,
  6.1379                              u16      csum_valid)
  6.1380  {
  6.1381 -    NETIF_RING_IDX i = netif->rx_resp_prod;
  6.1382 -    netif_rx_response_t *resp;
  6.1383 +	NETIF_RING_IDX i = netif->rx_resp_prod;
  6.1384 +	netif_rx_response_t *resp;
  6.1385  
  6.1386 -    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
  6.1387 -    resp->addr       = addr;
  6.1388 -    resp->csum_valid = csum_valid;
  6.1389 -    resp->id         = id;
  6.1390 -    resp->status     = (s16)size;
  6.1391 -    if ( st < 0 )
  6.1392 -        resp->status = (s16)st;
  6.1393 -    wmb();
  6.1394 -    netif->rx->resp_prod = netif->rx_resp_prod = ++i;
  6.1395 +	resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
  6.1396 +	resp->addr       = addr;
  6.1397 +	resp->csum_valid = csum_valid;
  6.1398 +	resp->id         = id;
  6.1399 +	resp->status     = (s16)size;
  6.1400 +	if (st < 0)
  6.1401 +		resp->status = (s16)st;
  6.1402 +	wmb();
  6.1403 +	netif->rx->resp_prod = netif->rx_resp_prod = ++i;
  6.1404  
  6.1405 -    mb(); /* Update producer before checking event threshold. */
  6.1406 -    return (i == netif->rx->event);
  6.1407 +	mb(); /* Update producer before checking event threshold. */
  6.1408 +	return (i == netif->rx->event);
  6.1409  }
  6.1410  
  6.1411  static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
  6.1412  {
  6.1413 -    struct list_head *ent;
  6.1414 -    netif_t *netif;
  6.1415 -    int i = 0;
  6.1416 -
  6.1417 -    printk(KERN_ALERT "netif_schedule_list:\n");
  6.1418 -    spin_lock_irq(&net_schedule_list_lock);
  6.1419 +	struct list_head *ent;
  6.1420 +	netif_t *netif;
  6.1421 +	int i = 0;
  6.1422  
  6.1423 -    list_for_each ( ent, &net_schedule_list )
  6.1424 -    {
  6.1425 -        netif = list_entry(ent, netif_t, list);
  6.1426 -        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
  6.1427 -               i, netif->rx_req_cons, netif->rx_resp_prod);               
  6.1428 -        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
  6.1429 -               netif->tx_req_cons, netif->tx_resp_prod);
  6.1430 -        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
  6.1431 -               netif->rx->req_prod, netif->rx->resp_prod);
  6.1432 -        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
  6.1433 -               netif->rx->event, netif->tx->req_prod);
  6.1434 -        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
  6.1435 -               netif->tx->resp_prod, netif->tx->event);
  6.1436 -        i++;
  6.1437 -    }
  6.1438 +	printk(KERN_ALERT "netif_schedule_list:\n");
  6.1439 +	spin_lock_irq(&net_schedule_list_lock);
  6.1440  
  6.1441 -    spin_unlock_irq(&net_schedule_list_lock);
  6.1442 -    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
  6.1443 +	list_for_each (ent, &net_schedule_list) {
  6.1444 +		netif = list_entry(ent, netif_t, list);
  6.1445 +		printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
  6.1446 +		       "rx_resp_prod=%08x\n",
  6.1447 +		       i, netif->rx_req_cons, netif->rx_resp_prod);
  6.1448 +		printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
  6.1449 +		       netif->tx_req_cons, netif->tx_resp_prod);
  6.1450 +		printk(KERN_ALERT "   shared(rx_req_prod=%08x "
  6.1451 +		       "rx_resp_prod=%08x\n",
  6.1452 +		       netif->rx->req_prod, netif->rx->resp_prod);
  6.1453 +		printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
  6.1454 +		       netif->rx->event, netif->tx->req_prod);
  6.1455 +		printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
  6.1456 +		       netif->tx->resp_prod, netif->tx->event);
  6.1457 +		i++;
  6.1458 +	}
  6.1459  
  6.1460 -    return IRQ_HANDLED;
  6.1461 +	spin_unlock_irq(&net_schedule_list_lock);
  6.1462 +	printk(KERN_ALERT " ** End of netif_schedule_list **\n");
  6.1463 +
  6.1464 +	return IRQ_HANDLED;
  6.1465  }
  6.1466  
  6.1467  static int __init netback_init(void)
  6.1468  {
  6.1469 -    int i;
  6.1470 -    struct page *page;
  6.1471 +	int i;
  6.1472 +	struct page *page;
  6.1473  
  6.1474 -    if ( !(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
  6.1475 -         !(xen_start_info->flags & SIF_INITDOMAIN) )
  6.1476 -        return 0;
  6.1477 +	if (!(xen_start_info->flags & SIF_NET_BE_DOMAIN) &&
  6.1478 +	    !(xen_start_info->flags & SIF_INITDOMAIN))
  6.1479 +		return 0;
  6.1480  
  6.1481 -    IPRINTK("Initialising Xen netif backend.\n");
  6.1482 +	IPRINTK("Initialising Xen netif backend.\n");
  6.1483  #ifdef CONFIG_XEN_NETDEV_GRANT
  6.1484 -    IPRINTK("Using grant tables.\n");
  6.1485 +	IPRINTK("Using grant tables.\n");
  6.1486  #endif
  6.1487  
  6.1488 -    /* We can increase reservation by this much in net_rx_action(). */
  6.1489 -    balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
  6.1490 -
  6.1491 -    skb_queue_head_init(&rx_queue);
  6.1492 -    skb_queue_head_init(&tx_queue);
  6.1493 -
  6.1494 -    init_timer(&net_timer);
  6.1495 -    net_timer.data = 0;
  6.1496 -    net_timer.function = net_alarm;
  6.1497 -    
  6.1498 -    page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
  6.1499 -    BUG_ON(page == NULL);
  6.1500 -    mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
  6.1501 +	/* We can increase reservation by this much in net_rx_action(). */
  6.1502 +	balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
  6.1503  
  6.1504 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
  6.1505 -    {
  6.1506 -        page = virt_to_page(MMAP_VADDR(i));
  6.1507 -        set_page_count(page, 1);
  6.1508 -        SetPageForeign(page, netif_page_release);
  6.1509 -    }
  6.1510 +	skb_queue_head_init(&rx_queue);
  6.1511 +	skb_queue_head_init(&tx_queue);
  6.1512  
  6.1513 -    pending_cons = 0;
  6.1514 -    pending_prod = MAX_PENDING_REQS;
  6.1515 -    for ( i = 0; i < MAX_PENDING_REQS; i++ )
  6.1516 -        pending_ring[i] = i;
  6.1517 +	init_timer(&net_timer);
  6.1518 +	net_timer.data = 0;
  6.1519 +	net_timer.function = net_alarm;
  6.1520 +    
  6.1521 +	page = balloon_alloc_empty_page_range(MAX_PENDING_REQS);
  6.1522 +	BUG_ON(page == NULL);
  6.1523 +	mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
  6.1524  
  6.1525 -    spin_lock_init(&net_schedule_list_lock);
  6.1526 -    INIT_LIST_HEAD(&net_schedule_list);
  6.1527 -
  6.1528 -    netif_xenbus_init();
  6.1529 +	for (i = 0; i < MAX_PENDING_REQS; i++) {
  6.1530 +		page = virt_to_page(MMAP_VADDR(i));
  6.1531 +		set_page_count(page, 1);
  6.1532 +		SetPageForeign(page, netif_page_release);
  6.1533 +	}
  6.1534  
  6.1535 -    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
  6.1536 -                      netif_be_dbg, SA_SHIRQ, 
  6.1537 -                      "net-be-dbg", &netif_be_dbg);
  6.1538 +	pending_cons = 0;
  6.1539 +	pending_prod = MAX_PENDING_REQS;
  6.1540 +	for (i = 0; i < MAX_PENDING_REQS; i++)
  6.1541 +		pending_ring[i] = i;
  6.1542  
  6.1543 -    return 0;
  6.1544 +	spin_lock_init(&net_schedule_list_lock);
  6.1545 +	INIT_LIST_HEAD(&net_schedule_list);
  6.1546 +
  6.1547 +	netif_xenbus_init();
  6.1548 +
  6.1549 +	(void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
  6.1550 +			  netif_be_dbg, SA_SHIRQ, 
  6.1551 +			  "net-be-dbg", &netif_be_dbg);
  6.1552 +
  6.1553 +	return 0;
  6.1554  }
  6.1555  
  6.1556  static void netback_cleanup(void)
  6.1557  {
  6.1558 -    BUG();
  6.1559 +	BUG();
  6.1560  }
  6.1561  
  6.1562  module_init(netback_init);
  6.1563  module_exit(netback_cleanup);
  6.1564 +
  6.1565 +/*
  6.1566 + * Local variables:
  6.1567 + *  c-file-style: "linux"
  6.1568 + *  indent-tabs-mode: t
  6.1569 + *  c-indent-level: 8
  6.1570 + *  c-basic-offset: 8
  6.1571 + *  tab-width: 8
  6.1572 + * End:
  6.1573 + */
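
The reindented netback code above leans on a convention that is easy to miss: pending_prod/pending_cons, dealloc_prod/dealloc_cons and the shared-ring indices are free-running counters, reduced modulo the (power-of-two) ring size only at array-access time via MASK_PEND_IDX()/MASK_NETIF_*_IDX(), so a difference such as dealloc_prod - dealloc_cons counts the entries currently queued even after the counters wrap. A minimal standalone sketch of that pattern, using hypothetical names (RING_SIZE, ring_put, ring_get) rather than the driver's own:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 256			/* must be a power of two */
#define MASK_IDX(i) ((i) & (RING_SIZE - 1))

static unsigned short ring[RING_SIZE];
static unsigned int prod, cons;		/* free-running; never masked directly */

static void ring_put(unsigned short v)
{
	assert((prod - cons) < RING_SIZE);	/* space available */
	ring[MASK_IDX(prod++)] = v;
}

static int ring_get(unsigned short *v)
{
	if (cons == prod)
		return 0;			/* ring is empty */
	*v = ring[MASK_IDX(cons++)];
	return 1;
}

int main(void)
{
	unsigned short v;

	ring_put(7);
	ring_put(42);
	while (ring_get(&v))
		printf("%u\n", v);
	return 0;
}

The same unsigned wrap-around arithmetic is what lets the scheduling checks compare (netif->tx_req_cons - netif->tx_resp_prod) against NETIF_TX_RING_SIZE without normalising either index first.
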
     7.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Fri Sep 16 18:06:42 2005 +0000
     7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c	Fri Sep 16 18:07:50 2005 +0000
     7.3 @@ -294,3 +294,13 @@ void netif_xenbus_init(void)
     7.4  {
     7.5  	xenbus_register_backend(&netback);
     7.6  }
     7.7 +
     7.8 +/*
     7.9 + * Local variables:
    7.10 + *  c-file-style: "linux"
    7.11 + *  indent-tabs-mode: t
    7.12 + *  c-indent-level: 8
    7.13 + *  c-basic-offset: 8
    7.14 + *  tab-width: 8
    7.15 + * End:
    7.16 + */
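
Also preserved verbatim by the netback reformatting above: make_tx_response() and make_rx_response() request an event-channel notification only when the new resp_prod lands exactly on the 'event' threshold published by the frontend, with wmb() ordering the response payload before the index update and mb() ordering the index update before the threshold read. A simplified single-producer sketch of that handshake; shared_ring and push_response() are illustrative names, not the real shared-ring interface, and __sync_synchronize() stands in for the kernel barriers:

#include <stdio.h>

struct shared_ring {
	volatile unsigned int resp_prod;	/* written by the backend */
	volatile unsigned int event;		/* frontend: "notify me when
						 * resp_prod reaches this" */
};

/* Publish one response; returns non-zero if a notification is due. */
static int push_response(struct shared_ring *sh, unsigned int *resp_prod)
{
	/* ... the response slot for *resp_prod would be filled in here ... */
	__sync_synchronize();	/* stand-in for wmb(): payload before index */
	sh->resp_prod = ++(*resp_prod);
	__sync_synchronize();	/* stand-in for mb(): index before threshold */
	return *resp_prod == sh->event;
}

int main(void)
{
	struct shared_ring sh = { .resp_prod = 0, .event = 1 };
	unsigned int resp_prod = 0;

	if (push_response(&sh, &resp_prod))
		printf("would notify the frontend via its event channel\n");
	return 0;
}

net_rx_action() above additionally batches the resulting notifications through rx_notify[]/notify_list[], so each event channel is kicked at most once per pass.
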
     8.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 16 18:06:42 2005 +0000
     8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Sep 16 18:07:50 2005 +0000
     8.3 @@ -54,44 +54,11 @@
     8.4  #include <asm-xen/balloon.h>
     8.5  #include <asm/page.h>
     8.6  #include <asm/uaccess.h>
     8.7 -
     8.8 -#ifdef CONFIG_XEN_NETDEV_GRANT
     8.9  #include <asm-xen/xen-public/grant_table.h>
    8.10  #include <asm-xen/gnttab.h>
    8.11  
    8.12 -static grant_ref_t gref_tx_head;
    8.13 -static grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 
    8.14 -
    8.15 -static grant_ref_t gref_rx_head;
    8.16 -static grant_ref_t grant_rx_ref[NETIF_RX_RING_SIZE + 1];
    8.17 -
    8.18  #define GRANT_INVALID_REF	(0xFFFF)
    8.19  
    8.20 -#ifdef GRANT_DEBUG
    8.21 -static void
    8.22 -dump_packet(int tag, void *addr, u32 ap)
    8.23 -{
    8.24 -    unsigned char *p = (unsigned char *)ap;
    8.25 -    int i;
    8.26 -    
    8.27 -    printk(KERN_ALERT "#### rx_poll   %c %08x ", tag & 0xff, addr);
    8.28 -    for (i = 0; i < 20; i++) {
    8.29 -        printk("%02x", p[i]);
    8.30 -    }
    8.31 -    printk("\n");
    8.32 -}
    8.33 -
    8.34 -#define GDPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
    8.35 -                           __FILE__ , __LINE__ , ## _a )
    8.36 -#else 
    8.37 -#define dump_packet(x,y,z)  ((void)0)  
    8.38 -#define GDPRINTK(_f, _a...) ((void)0)
    8.39 -#endif
    8.40 -
    8.41 -#endif
    8.42 -
    8.43 -
    8.44 -
    8.45  #ifndef __GFP_NOWARN
    8.46  #define __GFP_NOWARN 0
    8.47  #endif
    8.48 @@ -124,7 +91,6 @@ dump_packet(int tag, void *addr, u32 ap)
    8.49  #define NETIF_STATE_DISCONNECTED 0
    8.50  #define NETIF_STATE_CONNECTED    1
    8.51  
    8.52 -
    8.53  static unsigned int netif_state = NETIF_STATE_DISCONNECTED;
    8.54  
    8.55  static void network_tx_buf_gc(struct net_device *dev);
    8.56 @@ -147,45 +113,50 @@ static void xennet_proc_delif(struct net
    8.57  #define netfront_info net_private
    8.58  struct net_private
    8.59  {
    8.60 -    struct list_head list;
    8.61 -    struct net_device *netdev;
    8.62 +	struct list_head list;
    8.63 +	struct net_device *netdev;
    8.64  
    8.65 -    struct net_device_stats stats;
    8.66 -    NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
    8.67 -    unsigned int tx_full;
    8.68 +	struct net_device_stats stats;
    8.69 +	NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
    8.70 +	unsigned int tx_full;
    8.71      
    8.72 -    netif_tx_interface_t *tx;
    8.73 -    netif_rx_interface_t *rx;
    8.74 +	netif_tx_interface_t *tx;
    8.75 +	netif_rx_interface_t *rx;
    8.76  
    8.77 -    spinlock_t   tx_lock;
    8.78 -    spinlock_t   rx_lock;
    8.79 +	spinlock_t   tx_lock;
    8.80 +	spinlock_t   rx_lock;
    8.81  
    8.82 -    unsigned int handle;
    8.83 -    unsigned int evtchn;
    8.84 +	unsigned int handle;
    8.85 +	unsigned int evtchn;
    8.86  
    8.87 -    /* What is the status of our connection to the remote backend? */
    8.88 +	/* What is the status of our connection to the remote backend? */
    8.89  #define BEST_CLOSED       0
    8.90  #define BEST_DISCONNECTED 1
    8.91  #define BEST_CONNECTED    2
    8.92 -    unsigned int backend_state;
    8.93 +	unsigned int backend_state;
    8.94  
    8.95 -    /* Is this interface open or closed (down or up)? */
    8.96 +	/* Is this interface open or closed (down or up)? */
    8.97  #define UST_CLOSED        0
    8.98  #define UST_OPEN          1
    8.99 -    unsigned int user_state;
   8.100 +	unsigned int user_state;
   8.101  
   8.102 -    /* Receive-ring batched refills. */
   8.103 +	/* Receive-ring batched refills. */
   8.104  #define RX_MIN_TARGET 8
   8.105  #define RX_MAX_TARGET NETIF_RX_RING_SIZE
   8.106 -    int rx_min_target, rx_max_target, rx_target;
   8.107 -    struct sk_buff_head rx_batch;
   8.108 +	int rx_min_target, rx_max_target, rx_target;
   8.109 +	struct sk_buff_head rx_batch;
   8.110  
   8.111 -    /*
   8.112 -     * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
   8.113 -     * array is an index into a chain of free entries.
   8.114 -     */
   8.115 -    struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
   8.116 -    struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
   8.117 +	/*
   8.118 +	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
   8.119 +	 * array is an index into a chain of free entries.
   8.120 +	 */
   8.121 +	struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
   8.122 +	struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
   8.123 +
   8.124 +	grant_ref_t gref_tx_head;
   8.125 +	grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 
   8.126 +	grant_ref_t gref_rx_head;
   8.127 +	grant_ref_t grant_rx_ref[NETIF_RX_RING_SIZE + 1];
   8.128  
   8.129  	struct xenbus_device *xbdev;
   8.130  	char *backend;
   8.131 @@ -197,32 +168,32 @@ struct net_private
   8.132  };
   8.133  
   8.134  /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */
   8.135 -#define ADD_ID_TO_FREELIST(_list, _id)             \
   8.136 -    (_list)[(_id)] = (_list)[0];                   \
   8.137 -    (_list)[0]     = (void *)(unsigned long)(_id);
   8.138 -#define GET_ID_FROM_FREELIST(_list)                \
   8.139 - ({ unsigned long _id = (unsigned long)(_list)[0]; \
   8.140 -    (_list)[0]  = (_list)[_id];                    \
   8.141 -    (unsigned short)_id; })
   8.142 +#define ADD_ID_TO_FREELIST(_list, _id)			\
   8.143 +	(_list)[(_id)] = (_list)[0];			\
   8.144 +	(_list)[0]     = (void *)(unsigned long)(_id);
   8.145 +#define GET_ID_FROM_FREELIST(_list)				\
   8.146 +	({ unsigned long _id = (unsigned long)(_list)[0];	\
   8.147 +	   (_list)[0]  = (_list)[_id];				\
   8.148 +	   (unsigned short)_id; })
   8.149  
   8.150  #ifdef DEBUG
   8.151  static char *be_state_name[] = {
   8.152 -    [BEST_CLOSED]       = "closed",
   8.153 -    [BEST_DISCONNECTED] = "disconnected",
   8.154 -    [BEST_CONNECTED]    = "connected",
   8.155 +	[BEST_CLOSED]       = "closed",
   8.156 +	[BEST_DISCONNECTED] = "disconnected",
   8.157 +	[BEST_CONNECTED]    = "connected",
   8.158  };
   8.159  #endif
   8.160  
   8.161  #ifdef DEBUG
   8.162  #define DPRINTK(fmt, args...) \
   8.163 -    printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
   8.164 +	printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
   8.165  #else
   8.166  #define DPRINTK(fmt, args...) ((void)0)
   8.167  #endif
   8.168  #define IPRINTK(fmt, args...) \
   8.169 -    printk(KERN_INFO "xen_net: " fmt, ##args)
   8.170 +	printk(KERN_INFO "xen_net: " fmt, ##args)
   8.171  #define WPRINTK(fmt, args...) \
   8.172 -    printk(KERN_WARNING "xen_net: " fmt, ##args)
   8.173 +	printk(KERN_WARNING "xen_net: " fmt, ##args)
   8.174  
   8.175  /** Send a packet on a net device to encourage switches to learn the
   8.176   * MAC. We send a fake ARP request.
   8.177 @@ -232,625 +203,627 @@ static char *be_state_name[] = {
   8.178   */
   8.179  static int send_fake_arp(struct net_device *dev)
   8.180  {
   8.181 -    struct sk_buff *skb;
   8.182 -    u32             src_ip, dst_ip;
   8.183 -
   8.184 -    dst_ip = INADDR_BROADCAST;
   8.185 -    src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
   8.186 +	struct sk_buff *skb;
   8.187 +	u32             src_ip, dst_ip;
   8.188  
   8.189 -    /* No IP? Then nothing to do. */
   8.190 -    if (src_ip == 0)
   8.191 -        return 0;
   8.192 +	dst_ip = INADDR_BROADCAST;
   8.193 +	src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
   8.194  
   8.195 -    skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
   8.196 -                     dst_ip, dev, src_ip,
   8.197 -                     /*dst_hw*/ NULL, /*src_hw*/ NULL, 
   8.198 -                     /*target_hw*/ dev->dev_addr);
   8.199 -    if (skb == NULL)
   8.200 -        return -ENOMEM;
   8.201 +	/* No IP? Then nothing to do. */
   8.202 +	if (src_ip == 0)
   8.203 +		return 0;
   8.204  
   8.205 -    return dev_queue_xmit(skb);
   8.206 +	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
   8.207 +			 dst_ip, dev, src_ip,
   8.208 +			 /*dst_hw*/ NULL, /*src_hw*/ NULL, 
   8.209 +			 /*target_hw*/ dev->dev_addr);
   8.210 +	if (skb == NULL)
   8.211 +		return -ENOMEM;
   8.212 +
   8.213 +	return dev_queue_xmit(skb);
   8.214  }
   8.215  
   8.216  static int network_open(struct net_device *dev)
   8.217  {
   8.218 -    struct net_private *np = netdev_priv(dev);
   8.219 -
   8.220 -    memset(&np->stats, 0, sizeof(np->stats));
   8.221 -
   8.222 -    np->user_state = UST_OPEN;
   8.223 +	struct net_private *np = netdev_priv(dev);
   8.224  
   8.225 -    network_alloc_rx_buffers(dev);
   8.226 -    np->rx->event = np->rx_resp_cons + 1;
   8.227 +	memset(&np->stats, 0, sizeof(np->stats));
   8.228  
   8.229 -    netif_start_queue(dev);
   8.230 +	np->user_state = UST_OPEN;
   8.231  
   8.232 -    return 0;
   8.233 +	network_alloc_rx_buffers(dev);
   8.234 +	np->rx->event = np->rx_resp_cons + 1;
   8.235 +
   8.236 +	netif_start_queue(dev);
   8.237 +
   8.238 +	return 0;
   8.239  }
   8.240  
   8.241  static void network_tx_buf_gc(struct net_device *dev)
   8.242  {
   8.243 -    NETIF_RING_IDX i, prod;
   8.244 -    unsigned short id;
   8.245 -    struct net_private *np = netdev_priv(dev);
   8.246 -    struct sk_buff *skb;
   8.247 +	NETIF_RING_IDX i, prod;
   8.248 +	unsigned short id;
   8.249 +	struct net_private *np = netdev_priv(dev);
   8.250 +	struct sk_buff *skb;
   8.251  
   8.252 -    if (np->backend_state != BEST_CONNECTED)
   8.253 -        return;
   8.254 +	if (np->backend_state != BEST_CONNECTED)
   8.255 +		return;
   8.256  
   8.257 -    do {
   8.258 -        prod = np->tx->resp_prod;
   8.259 -        rmb(); /* Ensure we see responses up to 'rp'. */
   8.260 +	do {
   8.261 +		prod = np->tx->resp_prod;
   8.262 +		rmb(); /* Ensure we see responses up to 'rp'. */
   8.263  
   8.264 -        for (i = np->tx_resp_cons; i != prod; i++) {
   8.265 -            id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
   8.266 -            skb = np->tx_skbs[id];
   8.267 +		for (i = np->tx_resp_cons; i != prod; i++) {
   8.268 +			id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
   8.269 +			skb = np->tx_skbs[id];
   8.270  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.271 -            if (unlikely(gnttab_query_foreign_access(grant_tx_ref[id]) != 0)) {
   8.272 -                /* other domain is still using this grant - shouldn't happen
   8.273 -                   but if it does, we'll try to reclaim the grant later */
   8.274 -                printk(KERN_ALERT "network_tx_buf_gc: warning -- grant "
   8.275 -                       "still in use by backend domain.\n");
   8.276 -                goto out; 
   8.277 -            }
   8.278 -            gnttab_end_foreign_access_ref(grant_tx_ref[id], GNTMAP_readonly);
   8.279 -            gnttab_release_grant_reference(&gref_tx_head, grant_tx_ref[id]);
   8.280 -            grant_tx_ref[id] = GRANT_INVALID_REF;
   8.281 +			if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) {
   8.282 +				printk(KERN_ALERT "network_tx_buf_gc: warning "
   8.283 +				       "-- grant still in use by backend "
   8.284 +				       "domain.\n");
   8.285 +				goto out; 
   8.286 +			}
   8.287 +			gnttab_end_foreign_access_ref(
   8.288 +				np->grant_tx_ref[id], GNTMAP_readonly);
   8.289 +			gnttab_release_grant_reference(
   8.290 +				&np->gref_tx_head, np->grant_tx_ref[id]);
   8.291 +			np->grant_tx_ref[id] = GRANT_INVALID_REF;
   8.292  #endif
   8.293 -            ADD_ID_TO_FREELIST(np->tx_skbs, id);
   8.294 -            dev_kfree_skb_irq(skb);
   8.295 -        }
   8.296 -        
   8.297 -        np->tx_resp_cons = prod;
   8.298 +			ADD_ID_TO_FREELIST(np->tx_skbs, id);
   8.299 +			dev_kfree_skb_irq(skb);
   8.300 +		}
   8.301          
   8.302 -        /*
   8.303 -         * Set a new event, then check for race with update of tx_cons. Note
   8.304 -         * that it is essential to schedule a callback, no matter how few
   8.305 -         * buffers are pending. Even if there is space in the transmit ring,
   8.306 -         * higher layers may be blocked because too much data is outstanding:
   8.307 -         * in such cases notification from Xen is likely to be the only kick
   8.308 -         * that we'll get.
   8.309 -         */
   8.310 -        np->tx->event = 
   8.311 -            prod + ((np->tx->req_prod - prod) >> 1) + 1;
   8.312 -        mb();
   8.313 -    } while (prod != np->tx->resp_prod);
   8.314 +		np->tx_resp_cons = prod;
   8.315 +        
   8.316 +		/*
   8.317 +		 * Set a new event, then check for race with update of tx_cons.
   8.318 +		 * Note that it is essential to schedule a callback, no matter
   8.319 +		 * how few buffers are pending. Even if there is space in the
   8.320 +		 * transmit ring, higher layers may be blocked because too much
   8.321 +		 * data is outstanding: in such cases notification from Xen is
   8.322 +		 * likely to be the only kick that we'll get.
   8.323 +		 */
   8.324 +		np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
   8.325 +		mb();
   8.326 +	} while (prod != np->tx->resp_prod);
   8.327  
   8.328  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.329 -  out: 
   8.330 + out: 
   8.331  #endif
   8.332  
   8.333 -    if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
   8.334 -        np->tx_full = 0;
   8.335 -        if (np->user_state == UST_OPEN)
   8.336 -            netif_wake_queue(dev);
   8.337 -    }
   8.338 +	if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
   8.339 +		np->tx_full = 0;
   8.340 +		if (np->user_state == UST_OPEN)
   8.341 +			netif_wake_queue(dev);
   8.342 +	}
   8.343  }
   8.344  
   8.345  
   8.346  static void network_alloc_rx_buffers(struct net_device *dev)
   8.347  {
   8.348 -    unsigned short id;
   8.349 -    struct net_private *np = netdev_priv(dev);
   8.350 -    struct sk_buff *skb;
   8.351 -    int i, batch_target;
   8.352 -    NETIF_RING_IDX req_prod = np->rx->req_prod;
   8.353 -    struct xen_memory_reservation reservation;
   8.354 +	unsigned short id;
   8.355 +	struct net_private *np = netdev_priv(dev);
   8.356 +	struct sk_buff *skb;
   8.357 +	int i, batch_target;
   8.358 +	NETIF_RING_IDX req_prod = np->rx->req_prod;
   8.359 +	struct xen_memory_reservation reservation;
   8.360  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.361 -    grant_ref_t ref;
   8.362 +	grant_ref_t ref;
   8.363  #endif
   8.364  
   8.365 -    if (unlikely(np->backend_state != BEST_CONNECTED))
   8.366 -        return;
   8.367 -
   8.368 -    /*
   8.369 -     * Allocate skbuffs greedily, even though we batch updates to the
   8.370 -     * receive ring. This creates a less bursty demand on the memory allocator,
   8.371 -     * so should reduce the chance of failed allocation requests both for
   8.372 -     * ourself and for other kernel subsystems.
   8.373 -     */
   8.374 -    batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
   8.375 -    for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
   8.376 -        if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
   8.377 -            break;
   8.378 -        __skb_queue_tail(&np->rx_batch, skb);
   8.379 -    }
   8.380 -
   8.381 -    /* Is the batch large enough to be worthwhile? */
   8.382 -    if (i < (np->rx_target/2))
   8.383 -        return;
   8.384 +	if (unlikely(np->backend_state != BEST_CONNECTED))
   8.385 +		return;
   8.386  
   8.387 -    for (i = 0; ; i++) {
   8.388 -        if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
   8.389 -            break;
   8.390 +	/*
   8.391 +	 * Allocate skbuffs greedily, even though we batch updates to the
   8.392 +	 * receive ring. This creates a less bursty demand on the memory
   8.393 +	 * allocator, so should reduce the chance of failed allocation requests
   8.394 +	 * both for ourselves and for other kernel subsystems.
   8.395 +	 */
   8.396 +	batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
   8.397 +	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
   8.398 +		skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
   8.399 +		if (skb == NULL)
   8.400 +			break;
   8.401 +		__skb_queue_tail(&np->rx_batch, skb);
   8.402 +	}
   8.403  
   8.404 -        skb->dev = dev;
   8.405 +	/* Is the batch large enough to be worthwhile? */
   8.406 +	if (i < (np->rx_target/2))
   8.407 +		return;
   8.408  
   8.409 -        id = GET_ID_FROM_FREELIST(np->rx_skbs);
   8.410 +	for (i = 0; ; i++) {
   8.411 +		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
   8.412 +			break;
   8.413  
   8.414 -        np->rx_skbs[id] = skb;
   8.415 +		skb->dev = dev;
   8.416 +
   8.417 +		id = GET_ID_FROM_FREELIST(np->rx_skbs);
   8.418 +
   8.419 +		np->rx_skbs[id] = skb;
   8.420          
   8.421 -        np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
   8.422 +		np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
   8.423  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.424 -	ref = gnttab_claim_grant_reference(&gref_rx_head);
   8.425 -        if (unlikely((signed short)ref < 0)) {
   8.426 -            printk(KERN_ALERT "#### netfront can't claim rx reference\n");
   8.427 -            BUG();
   8.428 -        }
   8.429 -        grant_rx_ref[id] = ref;
   8.430 -        gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
   8.431 -        np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
   8.432 +		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
   8.433 +		BUG_ON((signed short)ref < 0);
   8.434 +		np->grant_rx_ref[id] = ref;
   8.435 +		gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
   8.436 +		np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
   8.437  #endif
   8.438 -        rx_pfn_array[i] = virt_to_mfn(skb->head);
   8.439 -
   8.440 -	/* Remove this page from pseudo phys map before passing back to Xen. */
   8.441 -	phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
   8.442 -	    = INVALID_P2M_ENTRY;
   8.443 -
   8.444 -	MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
   8.445 -				__pte(0), 0);
   8.446 -    }
   8.447 -
   8.448 -    /* After all PTEs have been zapped we blow away stale TLB entries. */
   8.449 -    rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   8.450 -
   8.451 -    /* Give away a batch of pages. */
   8.452 -    rx_mcl[i].op = __HYPERVISOR_memory_op;
   8.453 -    rx_mcl[i].args[0] = XENMEM_decrease_reservation;
   8.454 -    rx_mcl[i].args[1] = (unsigned long)&reservation;
   8.455 +		rx_pfn_array[i] = virt_to_mfn(skb->head);
   8.456  
   8.457 -    reservation.extent_start = rx_pfn_array;
   8.458 -    reservation.nr_extents   = i;
   8.459 -    reservation.extent_order = 0;
   8.460 -    reservation.address_bits = 0;
   8.461 -    reservation.domid        = DOMID_SELF;
   8.462 -
   8.463 -    /* Tell the ballon driver what is going on. */
   8.464 -    balloon_update_driver_allowance(i);
   8.465 +		/* Remove this page from map before passing back to Xen. */
   8.466 +		phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 
   8.467 +			= INVALID_P2M_ENTRY;
   8.468  
   8.469 -    /* Zap PTEs and give away pages in one big multicall. */
   8.470 -    (void)HYPERVISOR_multicall(rx_mcl, i+1);
   8.471 -
   8.472 -    /* Check return status of HYPERVISOR_memory_op(). */
   8.473 -    if (unlikely(rx_mcl[i].result != i))
   8.474 -        panic("Unable to reduce memory reservation\n");
   8.475 +		MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
   8.476 +					__pte(0), 0);
   8.477 +	}
   8.478  
   8.479 -    /* Above is a suitable barrier to ensure backend will see requests. */
   8.480 -    np->rx->req_prod = req_prod + i;
   8.481 +	/* After all PTEs have been zapped we blow away stale TLB entries. */
   8.482 +	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
   8.483  
   8.484 -    /* Adjust our floating fill target if we risked running out of buffers. */
   8.485 -    if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   8.486 -         ((np->rx_target *= 2) > np->rx_max_target))
   8.487 -        np->rx_target = np->rx_max_target;
   8.488 +	/* Give away a batch of pages. */
   8.489 +	rx_mcl[i].op = __HYPERVISOR_memory_op;
   8.490 +	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
   8.491 +	rx_mcl[i].args[1] = (unsigned long)&reservation;
   8.492 +
   8.493 +	reservation.extent_start = rx_pfn_array;
   8.494 +	reservation.nr_extents   = i;
   8.495 +	reservation.extent_order = 0;
   8.496 +	reservation.address_bits = 0;
   8.497 +	reservation.domid        = DOMID_SELF;
   8.498 +
    8.499 +	/* Tell the balloon driver what is going on. */
   8.500 +	balloon_update_driver_allowance(i);
   8.501 +
   8.502 +	/* Zap PTEs and give away pages in one big multicall. */
   8.503 +	(void)HYPERVISOR_multicall(rx_mcl, i+1);
   8.504 +
   8.505 +	/* Check return status of HYPERVISOR_memory_op(). */
   8.506 +	if (unlikely(rx_mcl[i].result != i))
   8.507 +		panic("Unable to reduce memory reservation\n");
   8.508 +
   8.509 +	/* Above is a suitable barrier to ensure backend will see requests. */
   8.510 +	np->rx->req_prod = req_prod + i;
   8.511 +
   8.512 +	/* Adjust our fill target if we risked running out of buffers. */
   8.513 +	if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
   8.514 +	    ((np->rx_target *= 2) > np->rx_max_target))
   8.515 +		np->rx_target = np->rx_max_target;
   8.516  }
   8.517  
   8.518  
   8.519  static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
   8.520  {
   8.521 -    unsigned short id;
   8.522 -    struct net_private *np = netdev_priv(dev);
   8.523 -    netif_tx_request_t *tx;
   8.524 -    NETIF_RING_IDX i;
   8.525 +	unsigned short id;
   8.526 +	struct net_private *np = netdev_priv(dev);
   8.527 +	netif_tx_request_t *tx;
   8.528 +	NETIF_RING_IDX i;
   8.529  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.530 -    grant_ref_t ref;
   8.531 -    unsigned long mfn;
   8.532 +	grant_ref_t ref;
   8.533 +	unsigned long mfn;
   8.534  #endif
   8.535  
   8.536 -    if (unlikely(np->tx_full)) {
   8.537 -        printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
   8.538 -        netif_stop_queue(dev);
   8.539 -        goto drop;
   8.540 -    }
   8.541 +	if (unlikely(np->tx_full)) {
   8.542 +		printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
   8.543 +		       dev->name);
   8.544 +		netif_stop_queue(dev);
   8.545 +		goto drop;
   8.546 +	}
   8.547  
   8.548 -    if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   8.549 -                  PAGE_SIZE)) {
   8.550 -        struct sk_buff *nskb;
   8.551 -        if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   8.552 -            goto drop;
   8.553 -        skb_put(nskb, skb->len);
   8.554 -        memcpy(nskb->data, skb->data, skb->len);
   8.555 -        nskb->dev = skb->dev;
   8.556 -        dev_kfree_skb(skb);
   8.557 -        skb = nskb;
   8.558 -    }
   8.559 +	if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
   8.560 +		     PAGE_SIZE)) {
   8.561 +		struct sk_buff *nskb;
   8.562 +		if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
   8.563 +			goto drop;
   8.564 +		skb_put(nskb, skb->len);
   8.565 +		memcpy(nskb->data, skb->data, skb->len);
   8.566 +		nskb->dev = skb->dev;
   8.567 +		dev_kfree_skb(skb);
   8.568 +		skb = nskb;
   8.569 +	}
   8.570      
   8.571 -    spin_lock_irq(&np->tx_lock);
   8.572 -
   8.573 -    if (np->backend_state != BEST_CONNECTED) {
   8.574 -        spin_unlock_irq(&np->tx_lock);
   8.575 -        goto drop;
   8.576 -    }
   8.577 +	spin_lock_irq(&np->tx_lock);
   8.578  
   8.579 -    i = np->tx->req_prod;
   8.580 +	if (np->backend_state != BEST_CONNECTED) {
   8.581 +		spin_unlock_irq(&np->tx_lock);
   8.582 +		goto drop;
   8.583 +	}
   8.584  
   8.585 -    id = GET_ID_FROM_FREELIST(np->tx_skbs);
   8.586 -    np->tx_skbs[id] = skb;
   8.587 +	i = np->tx->req_prod;
   8.588  
   8.589 -    tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
   8.590 +	id = GET_ID_FROM_FREELIST(np->tx_skbs);
   8.591 +	np->tx_skbs[id] = skb;
   8.592  
   8.593 -    tx->id   = id;
   8.594 +	tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
   8.595 +
   8.596 +	tx->id   = id;
   8.597  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.598 -    ref = gnttab_claim_grant_reference(&gref_tx_head);
   8.599 -    if (unlikely((signed short)ref < 0)) {
   8.600 -        printk(KERN_ALERT "#### netfront can't claim tx grant reference\n");
   8.601 -        BUG();
   8.602 -    }
   8.603 -    mfn = virt_to_mfn(skb->data);
   8.604 -    gnttab_grant_foreign_access_ref(ref, np->backend_id, mfn, GNTMAP_readonly);
   8.605 -    tx->addr = ref << PAGE_SHIFT;
   8.606 -    grant_tx_ref[id] = ref;
   8.607 +	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
   8.608 +	BUG_ON((signed short)ref < 0);
   8.609 +	mfn = virt_to_mfn(skb->data);
   8.610 +	gnttab_grant_foreign_access_ref(
   8.611 +		ref, np->backend_id, mfn, GNTMAP_readonly);
   8.612 +	tx->addr = ref << PAGE_SHIFT;
   8.613 +	np->grant_tx_ref[id] = ref;
   8.614  #else
   8.615 -    tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
   8.616 +	tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
   8.617  #endif
   8.618 -    tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
   8.619 -    tx->size = skb->len;
   8.620 -    tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
   8.621 +	tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
   8.622 +	tx->size = skb->len;
   8.623 +	tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
   8.624  
   8.625 -    wmb(); /* Ensure that backend will see the request. */
   8.626 -    np->tx->req_prod = i + 1;
   8.627 -
   8.628 -    network_tx_buf_gc(dev);
   8.629 -
   8.630 -    if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
   8.631 -        np->tx_full = 1;
   8.632 -        netif_stop_queue(dev);
   8.633 -    }
   8.634 +	wmb(); /* Ensure that backend will see the request. */
   8.635 +	np->tx->req_prod = i + 1;
   8.636  
   8.637 -    spin_unlock_irq(&np->tx_lock);
   8.638 -
   8.639 -    np->stats.tx_bytes += skb->len;
   8.640 -    np->stats.tx_packets++;
   8.641 +	network_tx_buf_gc(dev);
   8.642  
   8.643 -    /* Only notify Xen if we really have to. */
   8.644 -    mb();
   8.645 -    if (np->tx->TX_TEST_IDX == i)
   8.646 -        notify_via_evtchn(np->evtchn);
   8.647 +	if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
   8.648 +		np->tx_full = 1;
   8.649 +		netif_stop_queue(dev);
   8.650 +	}
   8.651  
   8.652 -    return 0;
   8.653 +	spin_unlock_irq(&np->tx_lock);
   8.654 +
   8.655 +	np->stats.tx_bytes += skb->len;
   8.656 +	np->stats.tx_packets++;
   8.657 +
   8.658 +	/* Only notify Xen if we really have to. */
   8.659 +	mb();
   8.660 +	if (np->tx->TX_TEST_IDX == i)
   8.661 +		notify_via_evtchn(np->evtchn);
   8.662 +
   8.663 +	return 0;
   8.664  
   8.665   drop:
   8.666 -    np->stats.tx_dropped++;
   8.667 -    dev_kfree_skb(skb);
   8.668 -    return 0;
   8.669 +	np->stats.tx_dropped++;
   8.670 +	dev_kfree_skb(skb);
   8.671 +	return 0;
   8.672  }
   8.673  
   8.674  static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
   8.675  {
   8.676 -    struct net_device *dev = dev_id;
   8.677 -    struct net_private *np = netdev_priv(dev);
   8.678 -    unsigned long flags;
   8.679 +	struct net_device *dev = dev_id;
   8.680 +	struct net_private *np = netdev_priv(dev);
   8.681 +	unsigned long flags;
   8.682  
   8.683 -    spin_lock_irqsave(&np->tx_lock, flags);
   8.684 -    network_tx_buf_gc(dev);
   8.685 -    spin_unlock_irqrestore(&np->tx_lock, flags);
   8.686 +	spin_lock_irqsave(&np->tx_lock, flags);
   8.687 +	network_tx_buf_gc(dev);
   8.688 +	spin_unlock_irqrestore(&np->tx_lock, flags);
   8.689  
   8.690 -    if((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
   8.691 -        netif_rx_schedule(dev);
   8.692 +	if ((np->rx_resp_cons != np->rx->resp_prod) &&
   8.693 +	    (np->user_state == UST_OPEN))
   8.694 +		netif_rx_schedule(dev);
   8.695  
   8.696 -    return IRQ_HANDLED;
   8.697 +	return IRQ_HANDLED;
   8.698  }
   8.699  
   8.700  
   8.701  static int netif_poll(struct net_device *dev, int *pbudget)
   8.702  {
   8.703 -    struct net_private *np = netdev_priv(dev);
   8.704 -    struct sk_buff *skb, *nskb;
   8.705 -    netif_rx_response_t *rx;
   8.706 -    NETIF_RING_IDX i, rp;
   8.707 -    mmu_update_t *mmu = rx_mmu;
   8.708 -    multicall_entry_t *mcl = rx_mcl;
   8.709 -    int work_done, budget, more_to_do = 1;
   8.710 -    struct sk_buff_head rxq;
   8.711 -    unsigned long flags;
   8.712 +	struct net_private *np = netdev_priv(dev);
   8.713 +	struct sk_buff *skb, *nskb;
   8.714 +	netif_rx_response_t *rx;
   8.715 +	NETIF_RING_IDX i, rp;
   8.716 +	mmu_update_t *mmu = rx_mmu;
   8.717 +	multicall_entry_t *mcl = rx_mcl;
   8.718 +	int work_done, budget, more_to_do = 1;
   8.719 +	struct sk_buff_head rxq;
   8.720 +	unsigned long flags;
   8.721  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.722 -    unsigned long mfn;
   8.723 -    grant_ref_t ref;
   8.724 +	unsigned long mfn;
   8.725 +	grant_ref_t ref;
   8.726  #endif
   8.727  
   8.728 -    spin_lock(&np->rx_lock);
   8.729 -
   8.730 -    if (np->backend_state != BEST_CONNECTED) {
   8.731 -        spin_unlock(&np->rx_lock);
   8.732 -        return 0;
   8.733 -    }
   8.734 -
   8.735 -    skb_queue_head_init(&rxq);
   8.736 -
   8.737 -    if ((budget = *pbudget) > dev->quota)
   8.738 -        budget = dev->quota;
   8.739 -    rp = np->rx->resp_prod;
   8.740 -    rmb(); /* Ensure we see queued responses up to 'rp'. */
   8.741 +	spin_lock(&np->rx_lock);
   8.742  
   8.743 -    for (i = np->rx_resp_cons, work_done = 0; 
   8.744 -		    (i != rp) && (work_done < budget);
   8.745 -		    i++, work_done++) {
   8.746 -        rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
   8.747 -        /*
   8.748 -         * An error here is very odd. Usually indicates a backend bug,
   8.749 -         * low-memory condition, or that we didn't have reservation headroom.
   8.750 -         */
   8.751 -        if (unlikely(rx->status <= 0)) {
   8.752 -            if (net_ratelimit())
   8.753 -                printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
   8.754 -            np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
   8.755 -            wmb();
   8.756 -            np->rx->req_prod++;
   8.757 -            work_done--;
   8.758 -            continue;
   8.759 -        }
   8.760 +	if (np->backend_state != BEST_CONNECTED) {
   8.761 +		spin_unlock(&np->rx_lock);
   8.762 +		return 0;
   8.763 +	}
   8.764 +
   8.765 +	skb_queue_head_init(&rxq);
   8.766 +
   8.767 +	if ((budget = *pbudget) > dev->quota)
   8.768 +		budget = dev->quota;
   8.769 +	rp = np->rx->resp_prod;
   8.770 +	rmb(); /* Ensure we see queued responses up to 'rp'. */
   8.771 +
   8.772 +	for (i = np->rx_resp_cons, work_done = 0; 
   8.773 +	     (i != rp) && (work_done < budget);
   8.774 +	     i++, work_done++) {
   8.775 +		rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
   8.776 +		/*
   8.777 +		 * An error here is very odd. Usually indicates a backend bug,
   8.778 +		 * low-mem condition, or we didn't have reservation headroom.
   8.779 +		 */
   8.780 +		if (unlikely(rx->status <= 0)) {
   8.781 +			if (net_ratelimit())
   8.782 +				printk(KERN_WARNING "Bad rx buffer "
   8.783 +				       "(memory squeeze?).\n");
   8.784 +			np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
   8.785 +				req.id = rx->id;
   8.786 +			wmb();
   8.787 +			np->rx->req_prod++;
   8.788 +			work_done--;
   8.789 +			continue;
   8.790 +		}
   8.791  
   8.792  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.793 -        ref = grant_rx_ref[rx->id]; 
   8.794 +		ref = np->grant_rx_ref[rx->id]; 
   8.795  
   8.796 -        if(ref == GRANT_INVALID_REF) { 
   8.797 -            printk(KERN_WARNING "Bad rx grant reference %d from dom %d.\n",
   8.798 -                   ref, np->backend_id);
   8.799 -            np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
   8.800 -            wmb();
   8.801 -            np->rx->req_prod++;
   8.802 -            work_done--;
   8.803 -            continue;
   8.804 -        }
    8.805 +		if (ref == GRANT_INVALID_REF) {
   8.806 +			printk(KERN_WARNING "Bad rx grant reference %d "
   8.807 +			       "from dom %d.\n",
   8.808 +			       ref, np->backend_id);
   8.809 +			np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
   8.810 +				req.id = rx->id;
   8.811 +			wmb();
   8.812 +			np->rx->req_prod++;
   8.813 +			work_done--;
   8.814 +			continue;
   8.815 +		}
   8.816  
   8.817 -        grant_rx_ref[rx->id] = GRANT_INVALID_REF;
   8.818 -        mfn = gnttab_end_foreign_transfer_ref(ref);
   8.819 -        gnttab_release_grant_reference(&gref_rx_head, ref);
   8.820 +		np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
   8.821 +		mfn = gnttab_end_foreign_transfer_ref(ref);
   8.822 +		gnttab_release_grant_reference(&np->gref_rx_head, ref);
   8.823  #endif
   8.824  
   8.825 -        skb = np->rx_skbs[rx->id];
   8.826 -        ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
   8.827 -
   8.828 -        /* NB. We handle skb overflow later. */
   8.829 -#ifdef CONFIG_XEN_NETDEV_GRANT
   8.830 -        skb->data = skb->head + rx->addr;
   8.831 -#else
   8.832 -        skb->data = skb->head + (rx->addr & ~PAGE_MASK);
   8.833 -#endif
   8.834 -        skb->len  = rx->status;
   8.835 -        skb->tail = skb->data + skb->len;
   8.836 -
   8.837 -        if ( rx->csum_valid )
   8.838 -            skb->ip_summed = CHECKSUM_UNNECESSARY;
   8.839 +		skb = np->rx_skbs[rx->id];
   8.840 +		ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
   8.841  
   8.842 -        np->stats.rx_packets++;
   8.843 -        np->stats.rx_bytes += rx->status;
   8.844 -
   8.845 -        /* Remap the page. */
   8.846 +		/* NB. We handle skb overflow later. */
   8.847  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.848 -        mmu->ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   8.849 +		skb->data = skb->head + rx->addr;
   8.850  #else
   8.851 -        mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
   8.852 +		skb->data = skb->head + (rx->addr & ~PAGE_MASK);
   8.853  #endif
   8.854 -        mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
   8.855 -        mmu++;
   8.856 +		skb->len  = rx->status;
   8.857 +		skb->tail = skb->data + skb->len;
   8.858 +
    8.859 +		if (rx->csum_valid)
   8.860 +			skb->ip_summed = CHECKSUM_UNNECESSARY;
   8.861 +
   8.862 +		np->stats.rx_packets++;
   8.863 +		np->stats.rx_bytes += rx->status;
   8.864 +
   8.865 +		/* Remap the page. */
   8.866  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.867 -	MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   8.868 -				pfn_pte_ma(mfn, PAGE_KERNEL), 0);
   8.869 +		mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
   8.870  #else
   8.871 -	MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   8.872 -				pfn_pte_ma(rx->addr >> PAGE_SHIFT, 
   8.873 -                                           PAGE_KERNEL), 0);
   8.874 +		mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
   8.875  #endif
   8.876 -        mcl++;
   8.877 +		mmu->val  = __pa(skb->head) >> PAGE_SHIFT;
   8.878 +		mmu++;
   8.879 +#ifdef CONFIG_XEN_NETDEV_GRANT
   8.880 +		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   8.881 +					pfn_pte_ma(mfn, PAGE_KERNEL), 0);
   8.882 +#else
   8.883 +		MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
   8.884 +					pfn_pte_ma(rx->addr >> PAGE_SHIFT, 
   8.885 +						   PAGE_KERNEL), 0);
   8.886 +#endif
   8.887 +		mcl++;
   8.888  
   8.889  #ifdef CONFIG_XEN_NETDEV_GRANT
   8.890 -        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
   8.891 -        GDPRINTK("#### rx_poll     enqueue vdata=%p mfn=%lu ref=%x\n",
   8.892 -                skb->data, mfn, ref);
   8.893 +		phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
   8.894  #else
   8.895 -        phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
   8.896 -            rx->addr >> PAGE_SHIFT;
   8.897 +		phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 
   8.898 +			rx->addr >> PAGE_SHIFT;
   8.899  #endif 
   8.900  
   8.901  
   8.902 -        __skb_queue_tail(&rxq, skb);
   8.903 -    }
   8.904 -
   8.905 -
   8.906 -    /* Some pages are no longer absent... */
   8.907 -    balloon_update_driver_allowance(-work_done);
   8.908 -
   8.909 -    /* Do all the remapping work, and M->P updates, in one big hypercall. */
   8.910 -    if (likely((mcl - rx_mcl) != 0)) {
   8.911 -        mcl->op = __HYPERVISOR_mmu_update;
   8.912 -        mcl->args[0] = (unsigned long)rx_mmu;
   8.913 -        mcl->args[1] = mmu - rx_mmu;
   8.914 -        mcl->args[2] = 0;
   8.915 -        mcl->args[3] = DOMID_SELF;
   8.916 -        mcl++;
   8.917 -        (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
   8.918 -    }
   8.919 -
   8.920 -    while ((skb = __skb_dequeue(&rxq)) != NULL) {
   8.921 -#ifdef CONFIG_XEN_NETDEV_GRANT
   8.922 -        GDPRINTK("#### rx_poll     dequeue vdata=%p mfn=%lu\n",
   8.923 -                skb->data, virt_to_mfn(skb->data));
   8.924 -        dump_packet('d', skb->data, (unsigned long)skb->data);
   8.925 -#endif
   8.926 -        /*
   8.927 -         * Enough room in skbuff for the data we were passed? Also, Linux 
   8.928 -         * expects at least 16 bytes headroom in each receive buffer.
   8.929 -         */
   8.930 -        if (unlikely(skb->tail > skb->end) || 
   8.931 -			unlikely((skb->data - skb->head) < 16)) {
   8.932 -            nskb = NULL;
   8.933 -
   8.934 -
   8.935 -            /* Only copy the packet if it fits in the current MTU. */
   8.936 -            if (skb->len <= (dev->mtu + ETH_HLEN)) {
   8.937 -                if ((skb->tail > skb->end) && net_ratelimit())
   8.938 -                    printk(KERN_INFO "Received packet needs %zd bytes more "
   8.939 -                           "headroom.\n", skb->tail - skb->end);
   8.940 -
   8.941 -                if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
   8.942 -                    skb_reserve(nskb, 2);
   8.943 -                    skb_put(nskb, skb->len);
   8.944 -                    memcpy(nskb->data, skb->data, skb->len);
   8.945 -                    nskb->dev = skb->dev;
   8.946 -                }
   8.947 -            }
   8.948 -            else if (net_ratelimit())
   8.949 -                printk(KERN_INFO "Received packet too big for MTU "
   8.950 -                       "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
   8.951 +		__skb_queue_tail(&rxq, skb);
   8.952 +	}
   8.953  
   8.954 -            /* Reinitialise and then destroy the old skbuff. */
   8.955 -            skb->len  = 0;
   8.956 -            skb->tail = skb->data;
   8.957 -            init_skb_shinfo(skb);
   8.958 -            dev_kfree_skb(skb);
   8.959 -
   8.960 -            /* Switch old for new, if we copied the buffer. */
   8.961 -            if ((skb = nskb) == NULL)
   8.962 -                continue;
   8.963 -        }
   8.964 -        
   8.965 -        /* Set the shared-info area, which is hidden behind the real data. */
   8.966 -        init_skb_shinfo(skb);
   8.967 -        /* Ethernet-specific work. Delayed to here as it peeks the header. */
   8.968 -        skb->protocol = eth_type_trans(skb, dev);
   8.969 -
   8.970 -        /* Pass it up. */
   8.971 -        netif_receive_skb(skb);
   8.972 -        dev->last_rx = jiffies;
   8.973 -    }
   8.974 -
   8.975 -    np->rx_resp_cons = i;
   8.976 +	/* Some pages are no longer absent... */
   8.977 +	balloon_update_driver_allowance(-work_done);
   8.978  
   8.979 -    /* If we get a callback with very few responses, reduce fill target. */
   8.980 -    /* NB. Note exponential increase, linear decrease. */
   8.981 -    if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
   8.982 -         (--np->rx_target < np->rx_min_target))
   8.983 -        np->rx_target = np->rx_min_target;
   8.984 -
   8.985 -    network_alloc_rx_buffers(dev);
   8.986 -
   8.987 -    *pbudget   -= work_done;
   8.988 -    dev->quota -= work_done;
   8.989 -
   8.990 -    if (work_done < budget) {
   8.991 -        local_irq_save(flags);
   8.992 +	/* Do all the remapping work, and M2P updates, in one big hypercall. */
   8.993 +	if (likely((mcl - rx_mcl) != 0)) {
   8.994 +		mcl->op = __HYPERVISOR_mmu_update;
   8.995 +		mcl->args[0] = (unsigned long)rx_mmu;
   8.996 +		mcl->args[1] = mmu - rx_mmu;
   8.997 +		mcl->args[2] = 0;
   8.998 +		mcl->args[3] = DOMID_SELF;
   8.999 +		mcl++;
  8.1000 +		(void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
  8.1001 +	}
  8.1002  
  8.1003 -        np->rx->event = i + 1;
  8.1004 +	while ((skb = __skb_dequeue(&rxq)) != NULL) {
  8.1005 +		/*
  8.1006 +		 * Enough room in skbuff for the data we were passed? Also,
  8.1007 +		 * Linux expects at least 16 bytes headroom in each rx buffer.
  8.1008 +		 */
  8.1009 +		if (unlikely(skb->tail > skb->end) || 
  8.1010 +		    unlikely((skb->data - skb->head) < 16)) {
  8.1011 +			nskb = NULL;
  8.1012 +
  8.1013 +			/* Only copy the packet if it fits in the MTU. */
  8.1014 +			if (skb->len <= (dev->mtu + ETH_HLEN)) {
  8.1015 +				if ((skb->tail > skb->end) && net_ratelimit())
  8.1016 +					printk(KERN_INFO "Received packet "
  8.1017 +					       "needs %zd bytes more "
  8.1018 +					       "headroom.\n",
  8.1019 +					       skb->tail - skb->end);
  8.1020 +
  8.1021 +				nskb = alloc_xen_skb(skb->len + 2);
  8.1022 +				if (nskb != NULL) {
  8.1023 +					skb_reserve(nskb, 2);
  8.1024 +					skb_put(nskb, skb->len);
  8.1025 +					memcpy(nskb->data,
  8.1026 +					       skb->data,
  8.1027 +					       skb->len);
  8.1028 +					nskb->dev = skb->dev;
  8.1029 +				}
  8.1030 +			}
  8.1031 +			else if (net_ratelimit())
  8.1032 +				printk(KERN_INFO "Received packet too big for "
  8.1033 +				       "MTU (%d > %d)\n",
  8.1034 +				       skb->len - ETH_HLEN, dev->mtu);
  8.1035 +
  8.1036 +			/* Reinitialise and then destroy the old skbuff. */
  8.1037 +			skb->len  = 0;
  8.1038 +			skb->tail = skb->data;
  8.1039 +			init_skb_shinfo(skb);
  8.1040 +			dev_kfree_skb(skb);
  8.1041 +
  8.1042 +			/* Switch old for new, if we copied the buffer. */
  8.1043 +			if ((skb = nskb) == NULL)
  8.1044 +				continue;
  8.1045 +		}
  8.1046 +        
  8.1047 +		/* Set the shinfo area, which is hidden behind the data. */
  8.1048 +		init_skb_shinfo(skb);
  8.1049 +		/* Ethernet work: Delayed to here as it peeks the header. */
  8.1050 +		skb->protocol = eth_type_trans(skb, dev);
  8.1051 +
  8.1052 +		/* Pass it up. */
  8.1053 +		netif_receive_skb(skb);
  8.1054 +		dev->last_rx = jiffies;
  8.1055 +	}
  8.1056 +
  8.1057 +	np->rx_resp_cons = i;
  8.1058 +
  8.1059 +	/* If we get a callback with very few responses, reduce fill target. */
  8.1060 +	/* NB. Note exponential increase, linear decrease. */
  8.1061 +	if (((np->rx->req_prod - np->rx->resp_prod) >
  8.1062 +	     ((3*np->rx_target) / 4)) &&
  8.1063 +	    (--np->rx_target < np->rx_min_target))
  8.1064 +		np->rx_target = np->rx_min_target;
  8.1065 +
  8.1066 +	network_alloc_rx_buffers(dev);
  8.1067 +
  8.1068 +	*pbudget   -= work_done;
  8.1069 +	dev->quota -= work_done;
  8.1070 +
  8.1071 +	if (work_done < budget) {
  8.1072 +		local_irq_save(flags);
  8.1073 +
  8.1074 +		np->rx->event = i + 1;
  8.1075      
  8.1076 -        /* Deal with hypervisor racing our resetting of rx_event. */
  8.1077 -        mb();
  8.1078 -        if (np->rx->resp_prod == i) {
  8.1079 -            __netif_rx_complete(dev);
  8.1080 -            more_to_do = 0;
  8.1081 -        }
  8.1082 +		/* Deal with hypervisor racing our resetting of rx_event. */
  8.1083 +		mb();
  8.1084 +		if (np->rx->resp_prod == i) {
  8.1085 +			__netif_rx_complete(dev);
  8.1086 +			more_to_do = 0;
  8.1087 +		}
  8.1088  
  8.1089 -        local_irq_restore(flags);
  8.1090 -    }
  8.1091 +		local_irq_restore(flags);
  8.1092 +	}
  8.1093  
  8.1094 -    spin_unlock(&np->rx_lock);
  8.1095 +	spin_unlock(&np->rx_lock);
  8.1096  
  8.1097 -    return more_to_do;
  8.1098 +	return more_to_do;
  8.1099  }
  8.1100  
  8.1101  
  8.1102  static int network_close(struct net_device *dev)
  8.1103  {
  8.1104 -    struct net_private *np = netdev_priv(dev);
  8.1105 -    np->user_state = UST_CLOSED;
  8.1106 -    netif_stop_queue(np->netdev);
  8.1107 -    return 0;
  8.1108 +	struct net_private *np = netdev_priv(dev);
  8.1109 +	np->user_state = UST_CLOSED;
  8.1110 +	netif_stop_queue(np->netdev);
  8.1111 +	return 0;
  8.1112  }
  8.1113  
  8.1114  
  8.1115  static struct net_device_stats *network_get_stats(struct net_device *dev)
  8.1116  {
  8.1117 -    struct net_private *np = netdev_priv(dev);
  8.1118 -    return &np->stats;
  8.1119 +	struct net_private *np = netdev_priv(dev);
  8.1120 +	return &np->stats;
  8.1121  }
  8.1122  
  8.1123  static void network_connect(struct net_device *dev)
  8.1124  {
  8.1125 -    struct net_private *np;
  8.1126 -    int i, requeue_idx;
  8.1127 -    netif_tx_request_t *tx;
  8.1128 -
  8.1129 -    np = netdev_priv(dev);
  8.1130 -    spin_lock_irq(&np->tx_lock);
  8.1131 -    spin_lock(&np->rx_lock);
  8.1132 -
  8.1133 -    /* Recovery procedure: */
  8.1134 -
  8.1135 -    /* Step 1: Reinitialise variables. */
  8.1136 -    np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
  8.1137 -    np->rx->event = np->tx->event = 1;
  8.1138 +	struct net_private *np;
  8.1139 +	int i, requeue_idx;
  8.1140 +	netif_tx_request_t *tx;
  8.1141  
  8.1142 -    /* Step 2: Rebuild the RX and TX ring contents.
  8.1143 -     * NB. We could just free the queued TX packets now but we hope
  8.1144 -     * that sending them out might do some good.  We have to rebuild
  8.1145 -     * the RX ring because some of our pages are currently flipped out
  8.1146 -     * so we can't just free the RX skbs.
  8.1147 -     * NB2. Freelist index entries are always going to be less than
  8.1148 -     *  __PAGE_OFFSET, whereas pointers to skbs will always be equal or
  8.1149 -     * greater than __PAGE_OFFSET: we use this property to distinguish
  8.1150 -     * them.
  8.1151 -     */
  8.1152 +	np = netdev_priv(dev);
  8.1153 +	spin_lock_irq(&np->tx_lock);
  8.1154 +	spin_lock(&np->rx_lock);
  8.1155  
  8.1156 -    /* Rebuild the TX buffer freelist and the TX ring itself.
  8.1157 -     * NB. This reorders packets.  We could keep more private state
  8.1158 -     * to avoid this but maybe it doesn't matter so much given the
  8.1159 -     * interface has been down.
  8.1160 -     */
  8.1161 -    for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
  8.1162 -        if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
  8.1163 -            struct sk_buff *skb = np->tx_skbs[i];
  8.1164 +	/* Recovery procedure: */
  8.1165  
  8.1166 -            tx = &np->tx->ring[requeue_idx++].req;
  8.1167 +	/* Step 1: Reinitialise variables. */
  8.1168 +	np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
  8.1169 +	np->rx->event = np->tx->event = 1;
  8.1170  
  8.1171 -            tx->id   = i;
  8.1172 +	/* Step 2: Rebuild the RX and TX ring contents.
  8.1173 +	 * NB. We could just free the queued TX packets now but we hope
  8.1174 +	 * that sending them out might do some good.  We have to rebuild
  8.1175 +	 * the RX ring because some of our pages are currently flipped out
  8.1176 +	 * so we can't just free the RX skbs.
  8.1177 +	 * NB2. Freelist index entries are always going to be less than
   8.1178 +	 * __PAGE_OFFSET, whereas pointers to skbs will always be equal to or
  8.1179 +	 * greater than __PAGE_OFFSET: we use this property to distinguish
  8.1180 +	 * them.
  8.1181 +	 */
  8.1182 +
  8.1183 +	/* Rebuild the TX buffer freelist and the TX ring itself.
  8.1184 +	 * NB. This reorders packets.  We could keep more private state
  8.1185 +	 * to avoid this but maybe it doesn't matter so much given the
  8.1186 +	 * interface has been down.
  8.1187 +	 */
  8.1188 +	for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
  8.1189 +		if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
  8.1190 +			struct sk_buff *skb = np->tx_skbs[i];
  8.1191 +
  8.1192 +			tx = &np->tx->ring[requeue_idx++].req;
  8.1193 +
  8.1194 +			tx->id   = i;
  8.1195  #ifdef CONFIG_XEN_NETDEV_GRANT
  8.1196 -            gnttab_grant_foreign_access_ref(grant_tx_ref[i], np->backend_id, 
  8.1197 -                                            virt_to_mfn(np->tx_skbs[i]->data),
  8.1198 -                                            GNTMAP_readonly); 
  8.1199 -            tx->addr = grant_tx_ref[i] << PAGE_SHIFT; 
  8.1200 +			gnttab_grant_foreign_access_ref(
  8.1201 +				np->grant_tx_ref[i], np->backend_id, 
  8.1202 +				virt_to_mfn(np->tx_skbs[i]->data),
  8.1203 +				GNTMAP_readonly); 
  8.1204 +			tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT; 
  8.1205  #else
  8.1206 -            tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
  8.1207 -#endif
  8.1208 -            tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
  8.1209 -            tx->size = skb->len;
  8.1210 -
  8.1211 -            np->stats.tx_bytes += skb->len;
  8.1212 -            np->stats.tx_packets++;
  8.1213 -        }
  8.1214 -    }
  8.1215 -    wmb();
  8.1216 -    np->tx->req_prod = requeue_idx;
  8.1217 -
  8.1218 -    /* Rebuild the RX buffer freelist and the RX ring itself. */
  8.1219 -    for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
  8.1220 -        if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
  8.1221 -#ifdef CONFIG_XEN_NETDEV_GRANT 
  8.1222 -            /* Reinstate the grant ref so backend can transfer mfn to us. */
  8.1223 -            gnttab_grant_foreign_transfer_ref(grant_rx_ref[i], np->backend_id);
  8.1224 -            np->rx->ring[requeue_idx].req.gref = grant_rx_ref[i];
  8.1225 +			tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
  8.1226  #endif
  8.1227 -            np->rx->ring[requeue_idx].req.id   = i;
  8.1228 -            requeue_idx++; 
  8.1229 -        }
  8.1230 -    }
  8.1231 -
  8.1232 -    wmb();                
  8.1233 -    np->rx->req_prod = requeue_idx;
  8.1234 +			tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
  8.1235 +			tx->size = skb->len;
  8.1236  
  8.1237 -    /* Step 3: All public and private state should now be sane.  Get
  8.1238 -     * ready to start sending and receiving packets and give the driver
  8.1239 -     * domain a kick because we've probably just requeued some
  8.1240 -     * packets.
  8.1241 -     */
  8.1242 -    np->backend_state = BEST_CONNECTED;
  8.1243 -    wmb();
  8.1244 -    notify_via_evtchn(np->evtchn);  
  8.1245 -    network_tx_buf_gc(dev);
  8.1246 +			np->stats.tx_bytes += skb->len;
  8.1247 +			np->stats.tx_packets++;
  8.1248 +		}
  8.1249 +	}
  8.1250 +	wmb();
  8.1251 +	np->tx->req_prod = requeue_idx;
  8.1252  
  8.1253 -    if (np->user_state == UST_OPEN)
  8.1254 -        netif_start_queue(dev);
  8.1255 +	/* Rebuild the RX buffer freelist and the RX ring itself. */
  8.1256 +	for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
  8.1257 +		if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
  8.1258 +#ifdef CONFIG_XEN_NETDEV_GRANT 
  8.1259 +			gnttab_grant_foreign_transfer_ref(
  8.1260 +				np->grant_rx_ref[i], np->backend_id);
  8.1261 +			np->rx->ring[requeue_idx].req.gref =
  8.1262 +				np->grant_rx_ref[i];
  8.1263 +#endif
  8.1264 +			np->rx->ring[requeue_idx].req.id = i;
  8.1265 +			requeue_idx++; 
  8.1266 +		}
  8.1267 +	}
  8.1268  
  8.1269 -    spin_unlock(&np->rx_lock);
  8.1270 -    spin_unlock_irq(&np->tx_lock);
  8.1271 +	wmb();                
  8.1272 +	np->rx->req_prod = requeue_idx;
  8.1273 +
  8.1274 +	/* Step 3: All public and private state should now be sane.  Get
  8.1275 +	 * ready to start sending and receiving packets and give the driver
  8.1276 +	 * domain a kick because we've probably just requeued some
  8.1277 +	 * packets.
  8.1278 +	 */
  8.1279 +	np->backend_state = BEST_CONNECTED;
  8.1280 +	wmb();
  8.1281 +	notify_via_evtchn(np->evtchn);  
  8.1282 +	network_tx_buf_gc(dev);
  8.1283 +
  8.1284 +	if (np->user_state == UST_OPEN)
  8.1285 +		netif_start_queue(dev);
  8.1286 +
  8.1287 +	spin_unlock(&np->rx_lock);
  8.1288 +	spin_unlock_irq(&np->tx_lock);
  8.1289  }
  8.1290  
  8.1291  static void show_device(struct net_private *np)
  8.1292 @@ -887,6 +860,15 @@ connect_device(struct net_private *np, u
  8.1293  	show_device(np);
  8.1294  }
  8.1295  
  8.1296 +static void netif_uninit(struct net_device *dev)
  8.1297 +{
  8.1298 +#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1299 +	struct net_private *np = netdev_priv(dev);
  8.1300 +	gnttab_free_grant_references(np->gref_tx_head);
  8.1301 +	gnttab_free_grant_references(np->gref_rx_head);
  8.1302 +#endif
  8.1303 +}
  8.1304 +
  8.1305  static struct ethtool_ops network_ethtool_ops =
  8.1306  {
  8.1307  	.get_tx_csum = ethtool_op_get_tx_csum,
  8.1308 @@ -901,84 +883,107 @@ static struct ethtool_ops network_ethtoo
  8.1309  static int create_netdev(int handle, struct xenbus_device *dev,
  8.1310  			 struct net_device **val)
  8.1311  {
  8.1312 -    int i, err = 0;
  8.1313 -    struct net_device *netdev = NULL;
  8.1314 -    struct net_private *np = NULL;
  8.1315 -
  8.1316 -    if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
  8.1317 -        printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
  8.1318 -        err = -ENOMEM;
  8.1319 -        goto exit;
  8.1320 -    }
  8.1321 -
  8.1322 -    np                = netdev_priv(netdev);
  8.1323 -    np->backend_state = BEST_CLOSED;
  8.1324 -    np->user_state    = UST_CLOSED;
  8.1325 -    np->handle        = handle;
  8.1326 -    np->xbdev         = dev;
  8.1327 -    
  8.1328 -    spin_lock_init(&np->tx_lock);
  8.1329 -    spin_lock_init(&np->rx_lock);
  8.1330 -
  8.1331 -    skb_queue_head_init(&np->rx_batch);
  8.1332 -    np->rx_target     = RX_MIN_TARGET;
  8.1333 -    np->rx_min_target = RX_MIN_TARGET;
  8.1334 -    np->rx_max_target = RX_MAX_TARGET;
  8.1335 -
  8.1336 -    /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
  8.1337 -    for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
  8.1338 -        np->tx_skbs[i] = (void *)((unsigned long) i+1);
  8.1339 -#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1340 -        grant_tx_ref[i] = GRANT_INVALID_REF;
  8.1341 -#endif
  8.1342 -    }
  8.1343 +	int i, err = 0;
  8.1344 +	struct net_device *netdev = NULL;
  8.1345 +	struct net_private *np = NULL;
  8.1346  
  8.1347 -    for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
  8.1348 -        np->rx_skbs[i] = (void *)((unsigned long) i+1);
  8.1349 -#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1350 -        grant_rx_ref[i] = GRANT_INVALID_REF;
  8.1351 -#endif
  8.1352 -    }
  8.1353 -
  8.1354 -    netdev->open            = network_open;
  8.1355 -    netdev->hard_start_xmit = network_start_xmit;
  8.1356 -    netdev->stop            = network_close;
  8.1357 -    netdev->get_stats       = network_get_stats;
  8.1358 -    netdev->poll            = netif_poll;
  8.1359 -    netdev->weight          = 64;
  8.1360 -    netdev->features        = NETIF_F_IP_CSUM;
  8.1361 -
  8.1362 -    SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
  8.1363 +	if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
  8.1364 +		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
  8.1365 +		       __FUNCTION__);
  8.1366 +		err = -ENOMEM;
  8.1367 +		goto exit;
  8.1368 +	}
  8.1369  
  8.1370 -    if ((err = register_netdev(netdev)) != 0) {
  8.1371 -        printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
  8.1372 -        goto exit;
  8.1373 -    }
  8.1374 +	np                = netdev_priv(netdev);
  8.1375 +	np->backend_state = BEST_CLOSED;
  8.1376 +	np->user_state    = UST_CLOSED;
  8.1377 +	np->handle        = handle;
  8.1378 +	np->xbdev         = dev;
  8.1379 +    
  8.1380 +	spin_lock_init(&np->tx_lock);
  8.1381 +	spin_lock_init(&np->rx_lock);
  8.1382  
  8.1383 -    if ((err = xennet_proc_addif(netdev)) != 0) {
  8.1384 -        unregister_netdev(netdev);
  8.1385 -        goto exit;
  8.1386 -    }
  8.1387 +	skb_queue_head_init(&np->rx_batch);
  8.1388 +	np->rx_target     = RX_MIN_TARGET;
  8.1389 +	np->rx_min_target = RX_MIN_TARGET;
  8.1390 +	np->rx_max_target = RX_MAX_TARGET;
  8.1391  
  8.1392 -    np->netdev = netdev;
  8.1393 +	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
  8.1394 +	for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
  8.1395 +		np->tx_skbs[i] = (void *)((unsigned long) i+1);
  8.1396 +#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1397 +		np->grant_tx_ref[i] = GRANT_INVALID_REF;
  8.1398 +#endif
  8.1399 +	}
  8.1400  
  8.1401 -  exit:
  8.1402 -    if ((err != 0) && (netdev != NULL))
  8.1403 -        kfree(netdev);
  8.1404 -    else if (val != NULL)
  8.1405 -        *val = netdev;
  8.1406 -    return err;
  8.1407 +	for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
  8.1408 +		np->rx_skbs[i] = (void *)((unsigned long) i+1);
  8.1409 +#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1410 +		np->grant_rx_ref[i] = GRANT_INVALID_REF;
  8.1411 +#endif
  8.1412 +	}
  8.1413 +
  8.1414 +#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1415 +	/* A grant for every tx ring slot */
  8.1416 +	if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
  8.1417 +					  &np->gref_tx_head) < 0) {
  8.1418 +		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
  8.1419 +		goto exit;
  8.1420 +	}
  8.1421 +	/* A grant for every rx ring slot */
  8.1422 +	if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
  8.1423 +					  &np->gref_rx_head) < 0) {
  8.1424 +		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
  8.1425 +		gnttab_free_grant_references(np->gref_tx_head);
  8.1426 +		goto exit;
  8.1427 +	}
  8.1428 +#endif
  8.1429 +
  8.1430 +	netdev->open            = network_open;
  8.1431 +	netdev->hard_start_xmit = network_start_xmit;
  8.1432 +	netdev->stop            = network_close;
  8.1433 +	netdev->get_stats       = network_get_stats;
  8.1434 +	netdev->poll            = netif_poll;
  8.1435 +	netdev->uninit          = netif_uninit;
  8.1436 +	netdev->weight          = 64;
  8.1437 +	netdev->features        = NETIF_F_IP_CSUM;
  8.1438 +
  8.1439 +	SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
  8.1440 +
  8.1441 +	if ((err = register_netdev(netdev)) != 0) {
  8.1442 +		printk(KERN_WARNING "%s> register_netdev err=%d\n",
  8.1443 +		       __FUNCTION__, err);
  8.1444 +		goto exit_free_grefs;
  8.1445 +	}
  8.1446 +
  8.1447 +	if ((err = xennet_proc_addif(netdev)) != 0) {
  8.1448 +		unregister_netdev(netdev);
  8.1449 +		goto exit_free_grefs;
  8.1450 +	}
  8.1451 +
  8.1452 +	np->netdev = netdev;
  8.1453 +
  8.1454 + exit:
  8.1455 +	if ((err != 0) && (netdev != NULL))
  8.1456 +		kfree(netdev);
  8.1457 +	else if (val != NULL)
  8.1458 +		*val = netdev;
  8.1459 +	return err;
  8.1460 +
  8.1461 + exit_free_grefs:
  8.1462 +#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1463 +	gnttab_free_grant_references(np->gref_tx_head);
  8.1464 +	gnttab_free_grant_references(np->gref_rx_head);
  8.1465 +#endif
  8.1466 +	goto exit;
  8.1467  }
  8.1468  
  8.1469  static int destroy_netdev(struct net_device *netdev)
  8.1470  {
  8.1471 -
  8.1472  #ifdef CONFIG_PROC_FS
  8.1473  	xennet_proc_delif(netdev);
  8.1474  #endif
  8.1475 -
  8.1476          unregister_netdev(netdev);
  8.1477 -
  8.1478  	return 0;
  8.1479  }
  8.1480  
  8.1481 @@ -989,20 +994,20 @@ static int destroy_netdev(struct net_dev
  8.1482  static int 
  8.1483  inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
  8.1484  {
  8.1485 -    struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
  8.1486 -    struct net_device *dev = ifa->ifa_dev->dev;
  8.1487 +	struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr; 
  8.1488 +	struct net_device *dev = ifa->ifa_dev->dev;
  8.1489  
  8.1490 -    /* UP event and is it one of our devices? */
  8.1491 -    if (event == NETDEV_UP && dev->open == network_open)
  8.1492 -        (void)send_fake_arp(dev);
  8.1493 +	/* UP event and is it one of our devices? */
  8.1494 +	if (event == NETDEV_UP && dev->open == network_open)
  8.1495 +		(void)send_fake_arp(dev);
  8.1496          
  8.1497 -    return NOTIFY_DONE;
  8.1498 +	return NOTIFY_DONE;
  8.1499  }
  8.1500  
  8.1501  static struct notifier_block notifier_inetdev = {
  8.1502 -    .notifier_call  = inetdev_notify,
  8.1503 -    .next           = NULL,
  8.1504 -    .priority       = 0
  8.1505 +	.notifier_call  = inetdev_notify,
  8.1506 +	.next           = NULL,
  8.1507 +	.priority       = 0
  8.1508  };
  8.1509  
  8.1510  static struct xenbus_device_id netfront_ids[] = {
  8.1511 @@ -1341,72 +1346,50 @@ static void __init init_net_xenbus(void)
  8.1512  
  8.1513  static int wait_for_netif(void)
  8.1514  {
  8.1515 -    int err = 0;
  8.1516 -    int i;
  8.1517 +	int err = 0;
  8.1518 +	int i;
  8.1519  
  8.1520 -    /*
  8.1521 -     * We should figure out how many and which devices we need to
  8.1522 -     * proceed and only wait for those.  For now, continue once the
  8.1523 -     * first device is around.
  8.1524 -     */
  8.1525 -    for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ )
  8.1526 -    {
  8.1527 -        set_current_state(TASK_INTERRUPTIBLE);
  8.1528 -        schedule_timeout(1);
  8.1529 -    }
  8.1530 +	/*
  8.1531 +	 * We should figure out how many and which devices we need to
  8.1532 +	 * proceed and only wait for those.  For now, continue once the
  8.1533 +	 * first device is around.
  8.1534 +	 */
   8.1535 +	for (i = 0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++)
  8.1536 +	{
  8.1537 +		set_current_state(TASK_INTERRUPTIBLE);
  8.1538 +		schedule_timeout(1);
  8.1539 +	}
  8.1540  
  8.1541 -    if (netif_state != NETIF_STATE_CONNECTED) {
  8.1542 -        WPRINTK("Timeout connecting to device!\n");
  8.1543 -        err = -ENOSYS;
  8.1544 -    }
  8.1545 -    return err;
  8.1546 +	if (netif_state != NETIF_STATE_CONNECTED) {
  8.1547 +		WPRINTK("Timeout connecting to device!\n");
  8.1548 +		err = -ENOSYS;
  8.1549 +	}
  8.1550 +	return err;
  8.1551  }
  8.1552  
  8.1553  static int __init netif_init(void)
  8.1554  {
  8.1555 -    int err = 0;
  8.1556 -
  8.1557 -    if (xen_start_info->flags & SIF_INITDOMAIN)
  8.1558 -        return 0;
  8.1559 -
  8.1560 -    if ((err = xennet_proc_init()) != 0)
  8.1561 -        return err;
  8.1562 -
  8.1563 -    IPRINTK("Initialising virtual ethernet driver.\n");
  8.1564 -
  8.1565 -#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1566 -    IPRINTK("Using grant tables.\n"); 
  8.1567 +	int err = 0;
  8.1568  
  8.1569 -    /* A grant for every tx ring slot */
  8.1570 -    if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
  8.1571 -                                      &gref_tx_head) < 0) {
  8.1572 -        printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
  8.1573 -        return 1;
  8.1574 -    }
  8.1575 -    /* A grant for every rx ring slot */
  8.1576 -    if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
  8.1577 -                                      &gref_rx_head) < 0) {
  8.1578 -        printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
  8.1579 -        return 1;
  8.1580 -    }
  8.1581 -#endif
  8.1582 +	if (xen_start_info->flags & SIF_INITDOMAIN)
  8.1583 +		return 0;
  8.1584  
  8.1585 -
  8.1586 -    (void)register_inetaddr_notifier(&notifier_inetdev);
  8.1587 +	if ((err = xennet_proc_init()) != 0)
  8.1588 +		return err;
  8.1589  
  8.1590 -    init_net_xenbus();
  8.1591 +	IPRINTK("Initialising virtual ethernet driver.\n");
  8.1592  
  8.1593 -    wait_for_netif();
  8.1594 +	(void)register_inetaddr_notifier(&notifier_inetdev);
  8.1595  
  8.1596 -    return err;
  8.1597 +	init_net_xenbus();
  8.1598 +
  8.1599 +	wait_for_netif();
  8.1600 +
  8.1601 +	return err;
  8.1602  }
  8.1603  
  8.1604  static void netif_exit(void)
  8.1605  {
  8.1606 -#ifdef CONFIG_XEN_NETDEV_GRANT
  8.1607 -    gnttab_free_grant_references(gref_tx_head);
  8.1608 -    gnttab_free_grant_references(gref_rx_head);
  8.1609 -#endif
  8.1610  }
  8.1611  
  8.1612  #ifdef CONFIG_PROC_FS
  8.1613 @@ -1416,147 +1399,159 @@ static void netif_exit(void)
  8.1614  #define TARGET_CUR 2UL
  8.1615  
  8.1616  static int xennet_proc_read(
  8.1617 -    char *page, char **start, off_t off, int count, int *eof, void *data)
  8.1618 +	char *page, char **start, off_t off, int count, int *eof, void *data)
  8.1619  {
  8.1620 -    struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
  8.1621 -    struct net_private *np = netdev_priv(dev);
  8.1622 -    int len = 0, which_target = (long)data & 3;
  8.1623 +	struct net_device *dev =
  8.1624 +		(struct net_device *)((unsigned long)data & ~3UL);
  8.1625 +	struct net_private *np = netdev_priv(dev);
  8.1626 +	int len = 0, which_target = (long)data & 3;
  8.1627      
  8.1628 -    switch (which_target)
  8.1629 -    {
  8.1630 -    case TARGET_MIN:
  8.1631 -        len = sprintf(page, "%d\n", np->rx_min_target);
  8.1632 -        break;
  8.1633 -    case TARGET_MAX:
  8.1634 -        len = sprintf(page, "%d\n", np->rx_max_target);
  8.1635 -        break;
  8.1636 -    case TARGET_CUR:
  8.1637 -        len = sprintf(page, "%d\n", np->rx_target);
  8.1638 -        break;
  8.1639 -    }
  8.1640 +	switch (which_target)
  8.1641 +	{
  8.1642 +	case TARGET_MIN:
  8.1643 +		len = sprintf(page, "%d\n", np->rx_min_target);
  8.1644 +		break;
  8.1645 +	case TARGET_MAX:
  8.1646 +		len = sprintf(page, "%d\n", np->rx_max_target);
  8.1647 +		break;
  8.1648 +	case TARGET_CUR:
  8.1649 +		len = sprintf(page, "%d\n", np->rx_target);
  8.1650 +		break;
  8.1651 +	}
  8.1652  
  8.1653 -    *eof = 1;
  8.1654 -    return len;
  8.1655 +	*eof = 1;
  8.1656 +	return len;
  8.1657  }
  8.1658  
  8.1659  static int xennet_proc_write(
  8.1660 -    struct file *file, const char __user *buffer,
  8.1661 -    unsigned long count, void *data)
  8.1662 +	struct file *file, const char __user *buffer,
  8.1663 +	unsigned long count, void *data)
  8.1664  {
  8.1665 -    struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL);
  8.1666 -    struct net_private *np = netdev_priv(dev);
  8.1667 -    int which_target = (long)data & 3;
  8.1668 -    char string[64];
  8.1669 -    long target;
  8.1670 -
  8.1671 -    if (!capable(CAP_SYS_ADMIN))
  8.1672 -        return -EPERM;
  8.1673 -
  8.1674 -    if (count <= 1)
  8.1675 -        return -EBADMSG; /* runt */
  8.1676 -    if (count > sizeof(string))
  8.1677 -        return -EFBIG;   /* too long */
  8.1678 -
  8.1679 -    if (copy_from_user(string, buffer, count))
  8.1680 -        return -EFAULT;
  8.1681 -    string[sizeof(string)-1] = '\0';
  8.1682 -
  8.1683 -    target = simple_strtol(string, NULL, 10);
  8.1684 -    if (target < RX_MIN_TARGET)
  8.1685 -        target = RX_MIN_TARGET;
  8.1686 -    if (target > RX_MAX_TARGET)
  8.1687 -        target = RX_MAX_TARGET;
  8.1688 +	struct net_device *dev =
  8.1689 +		(struct net_device *)((unsigned long)data & ~3UL);
  8.1690 +	struct net_private *np = netdev_priv(dev);
  8.1691 +	int which_target = (long)data & 3;
  8.1692 +	char string[64];
  8.1693 +	long target;
  8.1694  
  8.1695 -    spin_lock(&np->rx_lock);
  8.1696 +	if (!capable(CAP_SYS_ADMIN))
  8.1697 +		return -EPERM;
  8.1698  
  8.1699 -    switch (which_target)
  8.1700 -    {
  8.1701 -    case TARGET_MIN:
  8.1702 -        if (target > np->rx_max_target)
  8.1703 -            np->rx_max_target = target;
  8.1704 -        np->rx_min_target = target;
  8.1705 -        if (target > np->rx_target)
  8.1706 -            np->rx_target = target;
  8.1707 -        break;
  8.1708 -    case TARGET_MAX:
  8.1709 -        if (target < np->rx_min_target)
  8.1710 -            np->rx_min_target = target;
  8.1711 -        np->rx_max_target = target;
  8.1712 -        if (target < np->rx_target)
  8.1713 -            np->rx_target = target;
  8.1714 -        break;
  8.1715 -    case TARGET_CUR:
  8.1716 -        break;
  8.1717 -    }
  8.1718 +	if (count <= 1)
  8.1719 +		return -EBADMSG; /* runt */
  8.1720 +	if (count > sizeof(string))
  8.1721 +		return -EFBIG;   /* too long */
  8.1722  
  8.1723 -    network_alloc_rx_buffers(dev);
  8.1724 +	if (copy_from_user(string, buffer, count))
  8.1725 +		return -EFAULT;
  8.1726 +	string[sizeof(string)-1] = '\0';
  8.1727  
  8.1728 -    spin_unlock(&np->rx_lock);
  8.1729 +	target = simple_strtol(string, NULL, 10);
  8.1730 +	if (target < RX_MIN_TARGET)
  8.1731 +		target = RX_MIN_TARGET;
  8.1732 +	if (target > RX_MAX_TARGET)
  8.1733 +		target = RX_MAX_TARGET;
  8.1734  
  8.1735 -    return count;
  8.1736 +	spin_lock(&np->rx_lock);
  8.1737 +
  8.1738 +	switch (which_target)
  8.1739 +	{
  8.1740 +	case TARGET_MIN:
  8.1741 +		if (target > np->rx_max_target)
  8.1742 +			np->rx_max_target = target;
  8.1743 +		np->rx_min_target = target;
  8.1744 +		if (target > np->rx_target)
  8.1745 +			np->rx_target = target;
  8.1746 +		break;
  8.1747 +	case TARGET_MAX:
  8.1748 +		if (target < np->rx_min_target)
  8.1749 +			np->rx_min_target = target;
  8.1750 +		np->rx_max_target = target;
  8.1751 +		if (target < np->rx_target)
  8.1752 +			np->rx_target = target;
  8.1753 +		break;
  8.1754 +	case TARGET_CUR:
  8.1755 +		break;
  8.1756 +	}
  8.1757 +
  8.1758 +	network_alloc_rx_buffers(dev);
  8.1759 +
  8.1760 +	spin_unlock(&np->rx_lock);
  8.1761 +
  8.1762 +	return count;
  8.1763  }
  8.1764  
  8.1765  static int xennet_proc_init(void)
  8.1766  {
  8.1767 -    if (proc_mkdir("xen/net", NULL) == NULL)
  8.1768 -        return -ENOMEM;
  8.1769 -    return 0;
  8.1770 +	if (proc_mkdir("xen/net", NULL) == NULL)
  8.1771 +		return -ENOMEM;
  8.1772 +	return 0;
  8.1773  }
  8.1774  
  8.1775  static int xennet_proc_addif(struct net_device *dev)
  8.1776  {
  8.1777 -    struct proc_dir_entry *dir, *min, *max, *cur;
  8.1778 -    char name[30];
  8.1779 -
  8.1780 -    sprintf(name, "xen/net/%s", dev->name);
  8.1781 -
  8.1782 -    dir = proc_mkdir(name, NULL);
  8.1783 -    if (!dir)
  8.1784 -        goto nomem;
  8.1785 +	struct proc_dir_entry *dir, *min, *max, *cur;
  8.1786 +	char name[30];
  8.1787  
  8.1788 -    min = create_proc_entry("rxbuf_min", 0644, dir);
  8.1789 -    max = create_proc_entry("rxbuf_max", 0644, dir);
  8.1790 -    cur = create_proc_entry("rxbuf_cur", 0444, dir);
  8.1791 -    if (!min || !max || !cur)
  8.1792 -        goto nomem;
  8.1793 +	sprintf(name, "xen/net/%s", dev->name);
  8.1794  
  8.1795 -    min->read_proc  = xennet_proc_read;
  8.1796 -    min->write_proc = xennet_proc_write;
  8.1797 -    min->data       = (void *)((unsigned long)dev | TARGET_MIN);
  8.1798 +	dir = proc_mkdir(name, NULL);
  8.1799 +	if (!dir)
  8.1800 +		goto nomem;
  8.1801  
  8.1802 -    max->read_proc  = xennet_proc_read;
  8.1803 -    max->write_proc = xennet_proc_write;
  8.1804 -    max->data       = (void *)((unsigned long)dev | TARGET_MAX);
  8.1805 +	min = create_proc_entry("rxbuf_min", 0644, dir);
  8.1806 +	max = create_proc_entry("rxbuf_max", 0644, dir);
  8.1807 +	cur = create_proc_entry("rxbuf_cur", 0444, dir);
  8.1808 +	if (!min || !max || !cur)
  8.1809 +		goto nomem;
  8.1810  
  8.1811 -    cur->read_proc  = xennet_proc_read;
  8.1812 -    cur->write_proc = xennet_proc_write;
  8.1813 -    cur->data       = (void *)((unsigned long)dev | TARGET_CUR);
  8.1814 +	min->read_proc  = xennet_proc_read;
  8.1815 +	min->write_proc = xennet_proc_write;
  8.1816 +	min->data       = (void *)((unsigned long)dev | TARGET_MIN);
  8.1817  
  8.1818 -    return 0;
  8.1819 +	max->read_proc  = xennet_proc_read;
  8.1820 +	max->write_proc = xennet_proc_write;
  8.1821 +	max->data       = (void *)((unsigned long)dev | TARGET_MAX);
  8.1822 +
  8.1823 +	cur->read_proc  = xennet_proc_read;
  8.1824 +	cur->write_proc = xennet_proc_write;
  8.1825 +	cur->data       = (void *)((unsigned long)dev | TARGET_CUR);
  8.1826 +
  8.1827 +	return 0;
  8.1828  
  8.1829   nomem:
  8.1830 -    xennet_proc_delif(dev);
  8.1831 -    return -ENOMEM;
  8.1832 +	xennet_proc_delif(dev);
  8.1833 +	return -ENOMEM;
  8.1834  }
  8.1835  
  8.1836  static void xennet_proc_delif(struct net_device *dev)
  8.1837  {
  8.1838 -    char name[30];
  8.1839 -
  8.1840 -    sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
  8.1841 -    remove_proc_entry(name, NULL);
  8.1842 +	char name[30];
  8.1843  
  8.1844 -    sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
  8.1845 -    remove_proc_entry(name, NULL);
  8.1846 +	sprintf(name, "xen/net/%s/rxbuf_min", dev->name);
  8.1847 +	remove_proc_entry(name, NULL);
  8.1848  
  8.1849 -    sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
  8.1850 -    remove_proc_entry(name, NULL);
  8.1851 +	sprintf(name, "xen/net/%s/rxbuf_max", dev->name);
  8.1852 +	remove_proc_entry(name, NULL);
  8.1853  
  8.1854 -    sprintf(name, "xen/net/%s", dev->name);
  8.1855 -    remove_proc_entry(name, NULL);
  8.1856 +	sprintf(name, "xen/net/%s/rxbuf_cur", dev->name);
  8.1857 +	remove_proc_entry(name, NULL);
  8.1858 +
  8.1859 +	sprintf(name, "xen/net/%s", dev->name);
  8.1860 +	remove_proc_entry(name, NULL);
  8.1861  }
  8.1862  
  8.1863  #endif
  8.1864  
  8.1865  module_init(netif_init);
  8.1866  module_exit(netif_exit);
  8.1867 +
  8.1868 +/*
  8.1869 + * Local variables:
  8.1870 + *  c-file-style: "linux"
  8.1871 + *  indent-tabs-mode: t
  8.1872 + *  c-indent-level: 8
  8.1873 + *  c-basic-offset: 8
  8.1874 + *  tab-width: 8
  8.1875 + * End:
  8.1876 + */
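
For readers following the netfront hunks above: the pattern they converge on is one pre-allocated grant-reference pool per ring, owned by the per-device private structure, instead of the old file-scope gref_tx_head/gref_rx_head. A minimal sketch of that lifecycle, reusing only the gnttab_* calls visible in the diff and assuming the headers netfront.c already includes; foofront_info, the function names and the -ENOSPC choice are illustrative, not taken from the patch:

/*
 * Sketch only: per-device grant-reference pools, mirroring the calls used
 * in the netfront diff above.  Names outside the gnttab_* API are made up.
 */
struct foofront_info {
	grant_ref_t gref_tx_head;	/* pool for tx ring slots */
	grant_ref_t gref_rx_head;	/* pool for rx ring slots */
};

static int foofront_alloc_grefs(struct foofront_info *info)
{
	/* Reserve one reference per ring slot up front, at device creation. */
	if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
					  &info->gref_tx_head) < 0)
		return -ENOSPC;
	if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
					  &info->gref_rx_head) < 0) {
		gnttab_free_grant_references(info->gref_tx_head);
		return -ENOSPC;
	}
	return 0;
}

static void foofront_free_grefs(struct foofront_info *info)
{
	/* Mirror of netif_uninit(): return both pools when the device goes. */
	gnttab_free_grant_references(info->gref_tx_head);
	gnttab_free_grant_references(info->gref_rx_head);
}

Per-packet claims then come out of these pools via gnttab_claim_grant_reference(&info->gref_tx_head) and go back via gnttab_release_grant_reference(), exactly as network_start_xmit() and netif_poll() do above.
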
     9.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Fri Sep 16 18:06:42 2005 +0000
     9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c	Fri Sep 16 18:07:50 2005 +0000
     9.3 @@ -130,12 +130,12 @@ static int privcmd_ioctl(struct inode *i
     9.4                  if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
     9.5                      return -EINVAL;
     9.6  
     9.7 -                if ( (rc = direct_remap_pfn_range(vma->vm_mm, 
     9.8 -                                                   msg[j].va&PAGE_MASK, 
     9.9 -                                                   msg[j].mfn, 
    9.10 -                                                   msg[j].npages<<PAGE_SHIFT, 
    9.11 -                                                   vma->vm_page_prot,
    9.12 -                                                   mmapcmd.dom)) < 0 )
    9.13 +                if ( (rc = direct_remap_pfn_range(vma,
    9.14 +                                                  msg[j].va&PAGE_MASK, 
    9.15 +                                                  msg[j].mfn, 
    9.16 +                                                  msg[j].npages<<PAGE_SHIFT, 
    9.17 +                                                  vma->vm_page_prot,
    9.18 +                                                  mmapcmd.dom)) < 0 )
    9.19                      return rc;
    9.20              }
    9.21          }
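
The privcmd hunk shows the calling-convention change: direct_remap_pfn_range() now takes the vm_area_struct itself rather than its vm_mm. A hedged sketch of what a typical driver mmap handler looks like against the new prototype; my_drv_mmap, the use of vm_pgoff as the frame number, and the DOMID_IO choice are illustrative, not part of the patch:

/* Sketch only: a driver mmap handler using the new vma-based prototype. */
static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* The old code passed vma->vm_mm; the whole vma is now handed over. */
	return direct_remap_pfn_range(vma,
				      vma->vm_start,
				      vma->vm_pgoff,	/* frame number, by convention of this example */
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot,
				      DOMID_IO);
}
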
    10.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Fri Sep 16 18:06:42 2005 +0000
    10.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Fri Sep 16 18:07:50 2005 +0000
    10.3 @@ -460,7 +460,7 @@ void make_pages_writable(void *va, unsig
    10.4  #define kern_addr_valid(addr)	(1)
    10.5  #endif /* !CONFIG_DISCONTIGMEM */
    10.6  
    10.7 -int direct_remap_pfn_range(struct mm_struct *mm,
    10.8 +int direct_remap_pfn_range(struct vm_area_struct *vma,
    10.9                              unsigned long address, 
   10.10                              unsigned long mfn,
   10.11                              unsigned long size, 
   10.12 @@ -474,10 +474,10 @@ int touch_pte_range(struct mm_struct *mm
   10.13                      unsigned long size);
   10.14  
   10.15  #define io_remap_page_range(vma,from,phys,size,prot) \
   10.16 -direct_remap_pfn_range(vma->vm_mm,from,phys>>PAGE_SHIFT,size,prot,DOMID_IO)
   10.17 +direct_remap_pfn_range(vma,from,(phys)>>PAGE_SHIFT,size,prot,DOMID_IO)
   10.18  
   10.19  #define io_remap_pfn_range(vma,from,pfn,size,prot) \
   10.20 -direct_remap_pfn_range(vma->vm_mm,from,pfn,size,prot,DOMID_IO)
   10.21 +direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
   10.22  
   10.23  #define MK_IOSPACE_PFN(space, pfn)	(pfn)
   10.24  #define GET_IOSPACE(pfn)		0
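
The extra parentheses around the physical-address argument in the two io_remap_* macros above are a macro-hygiene fix. A standalone illustration of the precedence hazard they close; the macro and identifier names here are made up:

/* Old style: macro argument not parenthesised. */
#define PFN_OF_BAD(phys)   (phys >> PAGE_SHIFT)
/* New style: macro argument parenthesised. */
#define PFN_OF_GOOD(phys)  ((phys) >> PAGE_SHIFT)

/*
 * With an argument built from an operator of lower precedence than '>>',
 * such as a bitwise OR, the two forms diverge:
 *
 *   PFN_OF_BAD(base | off)  expands to  base | (off >> PAGE_SHIFT)    -- wrong
 *   PFN_OF_GOOD(base | off) expands to  (base | off) >> PAGE_SHIFT    -- intended
 */
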
    11.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	Fri Sep 16 18:06:42 2005 +0000
    11.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h	Fri Sep 16 18:07:50 2005 +0000
    11.3 @@ -526,7 +526,7 @@ extern int kern_addr_valid(unsigned long
    11.4  
    11.5  #define DOMID_LOCAL (0xFFFFU)
    11.6  
    11.7 -int direct_remap_pfn_range(struct mm_struct *mm,
    11.8 +int direct_remap_pfn_range(struct vm_area_struct *vma,
    11.9                              unsigned long address,
   11.10                              unsigned long mfn,
   11.11                              unsigned long size,
   11.12 @@ -542,10 +542,10 @@ int touch_pte_range(struct mm_struct *mm
   11.13                      unsigned long size);
   11.14  
   11.15  #define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
   11.16 -		direct_remap_pfn_range((vma)->vm_mm,vaddr,paddr>>PAGE_SHIFT,size,prot,DOMID_IO)
   11.17 +		direct_remap_pfn_range(vma,vaddr,(paddr)>>PAGE_SHIFT,size,prot,DOMID_IO)
   11.18  
   11.19  #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
   11.20 -		direct_remap_pfn_range((vma)->vm_mm,vaddr,pfn,size,prot,DOMID_IO)
   11.21 +		direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
   11.22  
   11.23  #define MK_IOSPACE_PFN(space, pfn)	(pfn)
   11.24  #define GET_IOSPACE(pfn)		0