ia64/xen-unstable
changeset 6914:ffbc98d735bd
merge?
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Sep 16 18:06:42 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c	Fri Sep 16 18:07:50 2005 +0000
@@ -45,12 +45,12 @@ static int direct_remap_area_pte_fn(pte_
 	return 0;
 }
 
-int direct_remap_pfn_range(struct mm_struct *mm,
-                           unsigned long address,
-                           unsigned long mfn,
-                           unsigned long size,
-                           pgprot_t prot,
-                           domid_t domid)
+static int __direct_remap_pfn_range(struct mm_struct *mm,
+                                    unsigned long address,
+                                    unsigned long mfn,
+                                    unsigned long size,
+                                    pgprot_t prot,
+                                    domid_t domid)
 {
 	int i;
 	unsigned long start_address;
@@ -98,6 +98,20 @@ int direct_remap_pfn_range(struct mm_str
 	return 0;
 }
 
+int direct_remap_pfn_range(struct vm_area_struct *vma,
+                           unsigned long address,
+                           unsigned long mfn,
+                           unsigned long size,
+                           pgprot_t prot,
+                           domid_t domid)
+{
+	/* Same as remap_pfn_range(). */
+	vma->vm_flags |= VM_IO | VM_RESERVED;
+
+	return __direct_remap_pfn_range(
+		vma->vm_mm, address, mfn, size, prot, domid);
+}
+
 EXPORT_SYMBOL(direct_remap_pfn_range);
 
 
@@ -221,8 +235,9 @@ void __iomem * __ioremap(unsigned long p
 #ifdef __x86_64__
 	flags |= _PAGE_USER;
 #endif
-	if (direct_remap_pfn_range(&init_mm, (unsigned long) addr, phys_addr>>PAGE_SHIFT,
-	                           size, __pgprot(flags), domid)) {
+	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
+	                             phys_addr>>PAGE_SHIFT,
+	                             size, __pgprot(flags), domid)) {
 		vunmap((void __force *) addr);
 		return NULL;
 	}
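The hunk above turns the old exported routine into an internal helper, __direct_remap_pfn_range(), which still takes a struct mm_struct, and adds a new exported direct_remap_pfn_range() that takes the struct vm_area_struct instead, sets VM_IO | VM_RESERVED on it (as remap_pfn_range() does) and forwards vma->vm_mm to the helper. A minimal sketch of what a caller looks like after this change, modelled on the pci/i386.c and devmem.c hunks below; the handler name is illustrative and not part of this changeset:

/* Sketch only: example_mmap is an illustrative name, not changeset code. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* vm_pgoff carries the frame to map; the callee now sets
	 * VM_IO | VM_RESERVED on the vma itself. */
	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
	                           vma->vm_end - vma->vm_start,
	                           vma->vm_page_prot, DOMID_IO))
		return -EAGAIN;
	return 0;
}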
--- a/linux-2.6-xen-sparse/arch/xen/i386/pci/i386.c	Fri Sep 16 18:06:42 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/i386/pci/i386.c	Fri Sep 16 18:07:50 2005 +0000
@@ -295,7 +295,7 @@ int pci_mmap_page_range(struct pci_dev *
 	/* Write-combine setting is ignored, it is changed via the mtrr
 	 * interfaces on this platform.
 	 */
-	if (direct_remap_pfn_range(vma->vm_mm, vma->vm_start, vma->vm_pgoff,
+	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 				   vma->vm_end - vma->vm_start,
 				   vma->vm_page_prot, DOMID_IO))
 		return -EAGAIN;
--- a/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Fri Sep 16 18:06:42 2005 +0000
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/devmem.c	Fri Sep 16 18:07:50 2005 +0000
@@ -90,22 +90,10 @@ out:
 
 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
-	int uncached;
-
-	uncached = uncached_access(file);
-	if (uncached)
+	if (uncached_access(file))
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	/* Don't try to swap out physical pages.. */
-	vma->vm_flags |= VM_RESERVED;
-
-	/*
-	 * Don't dump addresses that are not real memory to a core file.
-	 */
-	if (uncached)
-		vma->vm_flags |= VM_IO;
-
-	if (direct_remap_pfn_range(vma->vm_mm, vma->vm_start, vma->vm_pgoff,
+	if (direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 				   vma->vm_end - vma->vm_start,
 				   vma->vm_page_prot, DOMID_IO))
 		return -EAGAIN;
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Fri Sep 16 18:06:42 2005 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h	Fri Sep 16 18:07:50 2005 +0000
@@ -18,17 +18,11 @@
 #include <asm-xen/xen-public/io/netif.h>
 #include <asm/io.h>
 #include <asm/pgalloc.h>
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
 #include <asm-xen/xen-public/grant_table.h>
 #include <asm-xen/gnttab.h>
 
 #define GRANT_INVALID_REF (0xFFFF)
 
-#endif
-
-
-
 #if 0
 #define ASSERT(_p) \
   if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
@@ -44,74 +38,73 @@
 #define WPRINTK(fmt, args...) \
 	printk(KERN_WARNING "xen_net: " fmt, ##args)
 
+typedef struct netif_st {
+	/* Unique identifier for this interface. */
+	domid_t domid;
+	unsigned int handle;
 
-typedef struct netif_st {
-    /* Unique identifier for this interface. */
-    domid_t domid;
-    unsigned int handle;
+	u8 fe_dev_addr[6];
 
-    u8 fe_dev_addr[6];
-
-    /* Physical parameters of the comms window. */
-    unsigned long tx_shmem_frame;
+	/* Physical parameters of the comms window. */
+	unsigned long tx_shmem_frame;
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    u16 tx_shmem_handle;
-    unsigned long tx_shmem_vaddr;
-    grant_ref_t tx_shmem_ref;
+	u16 tx_shmem_handle;
+	unsigned long tx_shmem_vaddr;
+	grant_ref_t tx_shmem_ref;
 #endif
-    unsigned long rx_shmem_frame;
+	unsigned long rx_shmem_frame;
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    u16 rx_shmem_handle;
-    unsigned long rx_shmem_vaddr;
-    grant_ref_t rx_shmem_ref;
+	u16 rx_shmem_handle;
+	unsigned long rx_shmem_vaddr;
+	grant_ref_t rx_shmem_ref;
 #endif
-    unsigned int evtchn;
-    unsigned int remote_evtchn;
+	unsigned int evtchn;
+	unsigned int remote_evtchn;
 
-    /* The shared rings and indexes. */
-    netif_tx_interface_t *tx;
-    netif_rx_interface_t *rx;
+	/* The shared rings and indexes. */
+	netif_tx_interface_t *tx;
+	netif_rx_interface_t *rx;
 
-    /* Private indexes into shared ring. */
-    NETIF_RING_IDX rx_req_cons;
-    NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
+	/* Private indexes into shared ring. */
+	NETIF_RING_IDX rx_req_cons;
+	NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
 #ifdef CONFIG_XEN_NETDEV_GRANT
-    NETIF_RING_IDX rx_resp_prod_copy; /* private version of shared variable */
+	NETIF_RING_IDX rx_resp_prod_copy;
 #endif
-    NETIF_RING_IDX tx_req_cons;
-    NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
+	NETIF_RING_IDX tx_req_cons;
+	NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
 
-    /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
-    unsigned long credit_bytes;
-    unsigned long credit_usec;
-    unsigned long remaining_credit;
-    struct timer_list credit_timeout;
+	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+	unsigned long credit_bytes;
+	unsigned long credit_usec;
+	unsigned long remaining_credit;
+	struct timer_list credit_timeout;
 
-    /* Miscellaneous private stuff. */
-    enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
-    int active;
-    struct list_head list; /* scheduling list */
-    atomic_t refcnt;
-    struct net_device *dev;
-    struct net_device_stats stats;
+	/* Miscellaneous private stuff. */
+	enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+	int active;
+	struct list_head list; /* scheduling list */
+	atomic_t refcnt;
+	struct net_device *dev;
+	struct net_device_stats stats;
 
-    struct work_struct free_work;
+	struct work_struct free_work;
 } netif_t;
 
 void netif_creditlimit(netif_t *netif);
 int netif_disconnect(netif_t *netif);
 
 netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]);
-void free_netif_callback(netif_t *netif);
+void free_netif(netif_t *netif);
 int netif_map(netif_t *netif, unsigned long tx_ring_ref,
 	      unsigned long rx_ring_ref, unsigned int evtchn);
 
 #define netif_get(_b) (atomic_inc(&(_b)->refcnt))
-#define netif_put(_b) \
-    do { \
-        if ( atomic_dec_and_test(&(_b)->refcnt) ) \
-            free_netif_callback(_b); \
-    } while (0)
+#define netif_put(_b) \
+	do { \
+		if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+			free_netif(_b); \
+	} while (0)
 
 void netif_xenbus_init(void);
 
@@ -123,3 +116,13 @@ struct net_device_stats *netif_be_get_st
 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
 
 #endif /* __NETIF__BACKEND__COMMON_H__ */
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
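The reference-counting convention implied by the macros above is unchanged; only the function invoked on the final put is renamed from free_netif_callback() to free_netif(), which (per the interface.c hunk below) defers the actual teardown to a workqueue. A hedged sketch of the intended get/put balance, with an illustrative helper name that is not part of this changeset:

/* Sketch only: example_use is an illustrative name. */
static void example_use(netif_t *netif)
{
	netif_get(netif);	/* atomic_inc of netif->refcnt */
	/* ... use netif while holding the reference ... */
	netif_put(netif);	/* whichever put drops the last ref calls free_netif() */
}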
5.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Fri Sep 16 18:06:42 2005 +0000 5.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c Fri Sep 16 18:07:50 2005 +0000 5.3 @@ -11,104 +11,105 @@ 5.4 5.5 static void __netif_up(netif_t *netif) 5.6 { 5.7 - struct net_device *dev = netif->dev; 5.8 - spin_lock_bh(&dev->xmit_lock); 5.9 - netif->active = 1; 5.10 - spin_unlock_bh(&dev->xmit_lock); 5.11 - (void)bind_evtchn_to_irqhandler( 5.12 - netif->evtchn, netif_be_int, 0, dev->name, netif); 5.13 - netif_schedule_work(netif); 5.14 + struct net_device *dev = netif->dev; 5.15 + spin_lock_bh(&dev->xmit_lock); 5.16 + netif->active = 1; 5.17 + spin_unlock_bh(&dev->xmit_lock); 5.18 + (void)bind_evtchn_to_irqhandler( 5.19 + netif->evtchn, netif_be_int, 0, dev->name, netif); 5.20 + netif_schedule_work(netif); 5.21 } 5.22 5.23 static void __netif_down(netif_t *netif) 5.24 { 5.25 - struct net_device *dev = netif->dev; 5.26 - spin_lock_bh(&dev->xmit_lock); 5.27 - netif->active = 0; 5.28 - spin_unlock_bh(&dev->xmit_lock); 5.29 - unbind_evtchn_from_irqhandler(netif->evtchn, netif); 5.30 - netif_deschedule_work(netif); 5.31 + struct net_device *dev = netif->dev; 5.32 + spin_lock_bh(&dev->xmit_lock); 5.33 + netif->active = 0; 5.34 + spin_unlock_bh(&dev->xmit_lock); 5.35 + unbind_evtchn_from_irqhandler(netif->evtchn, netif); 5.36 + netif_deschedule_work(netif); 5.37 } 5.38 5.39 static int net_open(struct net_device *dev) 5.40 { 5.41 - netif_t *netif = netdev_priv(dev); 5.42 - if (netif->status == CONNECTED) 5.43 - __netif_up(netif); 5.44 - netif_start_queue(dev); 5.45 - return 0; 5.46 + netif_t *netif = netdev_priv(dev); 5.47 + if (netif->status == CONNECTED) 5.48 + __netif_up(netif); 5.49 + netif_start_queue(dev); 5.50 + return 0; 5.51 } 5.52 5.53 static int net_close(struct net_device *dev) 5.54 { 5.55 - netif_t *netif = netdev_priv(dev); 5.56 - netif_stop_queue(dev); 5.57 - if (netif->status == CONNECTED) 5.58 - __netif_down(netif); 5.59 - return 0; 5.60 + netif_t *netif = netdev_priv(dev); 5.61 + netif_stop_queue(dev); 5.62 + if (netif->status == CONNECTED) 5.63 + __netif_down(netif); 5.64 + return 0; 5.65 } 5.66 5.67 netif_t *alloc_netif(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN]) 5.68 { 5.69 - int err = 0, i; 5.70 - struct net_device *dev; 5.71 - netif_t *netif; 5.72 - char name[IFNAMSIZ] = {}; 5.73 + int err = 0, i; 5.74 + struct net_device *dev; 5.75 + netif_t *netif; 5.76 + char name[IFNAMSIZ] = {}; 5.77 5.78 - snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); 5.79 - dev = alloc_netdev(sizeof(netif_t), name, ether_setup); 5.80 - if (dev == NULL) { 5.81 - DPRINTK("Could not create netif: out of memory\n"); 5.82 - return NULL; 5.83 - } 5.84 + snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); 5.85 + dev = alloc_netdev(sizeof(netif_t), name, ether_setup); 5.86 + if (dev == NULL) { 5.87 + DPRINTK("Could not create netif: out of memory\n"); 5.88 + return NULL; 5.89 + } 5.90 5.91 - netif = netdev_priv(dev); 5.92 - memset(netif, 0, sizeof(*netif)); 5.93 - netif->domid = domid; 5.94 - netif->handle = handle; 5.95 - netif->status = DISCONNECTED; 5.96 - atomic_set(&netif->refcnt, 0); 5.97 - netif->dev = dev; 5.98 + netif = netdev_priv(dev); 5.99 + memset(netif, 0, sizeof(*netif)); 5.100 + netif->domid = domid; 5.101 + netif->handle = handle; 5.102 + netif->status = DISCONNECTED; 5.103 + atomic_set(&netif->refcnt, 0); 5.104 + netif->dev = dev; 5.105 5.106 - netif->credit_bytes = netif->remaining_credit = ~0UL; 5.107 - netif->credit_usec = 0UL; 5.108 - 
init_timer(&netif->credit_timeout); 5.109 + netif->credit_bytes = netif->remaining_credit = ~0UL; 5.110 + netif->credit_usec = 0UL; 5.111 + init_timer(&netif->credit_timeout); 5.112 5.113 - dev->hard_start_xmit = netif_be_start_xmit; 5.114 - dev->get_stats = netif_be_get_stats; 5.115 - dev->open = net_open; 5.116 - dev->stop = net_close; 5.117 - dev->features = NETIF_F_NO_CSUM; 5.118 + dev->hard_start_xmit = netif_be_start_xmit; 5.119 + dev->get_stats = netif_be_get_stats; 5.120 + dev->open = net_open; 5.121 + dev->stop = net_close; 5.122 + dev->features = NETIF_F_NO_CSUM; 5.123 5.124 - /* Disable queuing. */ 5.125 - dev->tx_queue_len = 0; 5.126 + /* Disable queuing. */ 5.127 + dev->tx_queue_len = 0; 5.128 5.129 - for (i = 0; i < ETH_ALEN; i++) 5.130 - if (be_mac[i] != 0) 5.131 - break; 5.132 - if (i == ETH_ALEN) { 5.133 - /* 5.134 - * Initialise a dummy MAC address. We choose the numerically largest 5.135 - * non-broadcast address to prevent the address getting stolen by an 5.136 - * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF) 5.137 - */ 5.138 - memset(dev->dev_addr, 0xFF, ETH_ALEN); 5.139 - dev->dev_addr[0] &= ~0x01; 5.140 - } else 5.141 - memcpy(dev->dev_addr, be_mac, ETH_ALEN); 5.142 + for (i = 0; i < ETH_ALEN; i++) 5.143 + if (be_mac[i] != 0) 5.144 + break; 5.145 + if (i == ETH_ALEN) { 5.146 + /* 5.147 + * Initialise a dummy MAC address. We choose the numerically 5.148 + * largest non-broadcast address to prevent the address getting 5.149 + * stolen by an Ethernet bridge for STP purposes. 5.150 + * (FE:FF:FF:FF:FF:FF) 5.151 + */ 5.152 + memset(dev->dev_addr, 0xFF, ETH_ALEN); 5.153 + dev->dev_addr[0] &= ~0x01; 5.154 + } else 5.155 + memcpy(dev->dev_addr, be_mac, ETH_ALEN); 5.156 5.157 - rtnl_lock(); 5.158 - err = register_netdevice(dev); 5.159 - rtnl_unlock(); 5.160 - if (err) { 5.161 - DPRINTK("Could not register new net device %s: err=%d\n", 5.162 - dev->name, err); 5.163 - free_netdev(dev); 5.164 - return NULL; 5.165 - } 5.166 + rtnl_lock(); 5.167 + err = register_netdevice(dev); 5.168 + rtnl_unlock(); 5.169 + if (err) { 5.170 + DPRINTK("Could not register new net device %s: err=%d\n", 5.171 + dev->name, err); 5.172 + free_netdev(dev); 5.173 + return NULL; 5.174 + } 5.175 5.176 - DPRINTK("Successfully created netif\n"); 5.177 - return netif; 5.178 + DPRINTK("Successfully created netif\n"); 5.179 + return netif; 5.180 } 5.181 5.182 static int map_frontend_pages(netif_t *netif, unsigned long localaddr, 5.183 @@ -116,191 +117,204 @@ static int map_frontend_pages(netif_t *n 5.184 unsigned long rx_ring_ref) 5.185 { 5.186 #ifdef CONFIG_XEN_NETDEV_GRANT 5.187 - struct gnttab_map_grant_ref op; 5.188 + struct gnttab_map_grant_ref op; 5.189 5.190 - /* Map: Use the Grant table reference */ 5.191 - op.host_addr = localaddr; 5.192 - op.flags = GNTMAP_host_map; 5.193 - op.ref = tx_ring_ref; 5.194 - op.dom = netif->domid; 5.195 + /* Map: Use the Grant table reference */ 5.196 + op.host_addr = localaddr; 5.197 + op.flags = GNTMAP_host_map; 5.198 + op.ref = tx_ring_ref; 5.199 + op.dom = netif->domid; 5.200 5.201 - BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ); 5.202 - if (op.handle < 0) { 5.203 - DPRINTK(" Grant table operation failure mapping tx_ring_ref!\n"); 5.204 - return op.handle; 5.205 - } 5.206 + BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ); 5.207 + if (op.handle < 0) { 5.208 + DPRINTK(" Gnttab failure mapping tx_ring_ref!\n"); 5.209 + return op.handle; 5.210 + } 5.211 5.212 - netif->tx_shmem_ref = tx_ring_ref; 5.213 - 
netif->tx_shmem_handle = op.handle; 5.214 - netif->tx_shmem_vaddr = localaddr; 5.215 + netif->tx_shmem_ref = tx_ring_ref; 5.216 + netif->tx_shmem_handle = op.handle; 5.217 + netif->tx_shmem_vaddr = localaddr; 5.218 5.219 - /* Map: Use the Grant table reference */ 5.220 - op.host_addr = localaddr + PAGE_SIZE; 5.221 - op.flags = GNTMAP_host_map; 5.222 - op.ref = rx_ring_ref; 5.223 - op.dom = netif->domid; 5.224 + /* Map: Use the Grant table reference */ 5.225 + op.host_addr = localaddr + PAGE_SIZE; 5.226 + op.flags = GNTMAP_host_map; 5.227 + op.ref = rx_ring_ref; 5.228 + op.dom = netif->domid; 5.229 5.230 - BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ); 5.231 - if (op.handle < 0) { 5.232 - DPRINTK(" Grant table operation failure mapping rx_ring_ref!\n"); 5.233 - return op.handle; 5.234 - } 5.235 + BUG_ON( HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) ); 5.236 + if (op.handle < 0) { 5.237 + DPRINTK(" Gnttab failure mapping rx_ring_ref!\n"); 5.238 + return op.handle; 5.239 + } 5.240 5.241 - netif->rx_shmem_ref = rx_ring_ref; 5.242 - netif->rx_shmem_handle = op.handle; 5.243 - netif->rx_shmem_vaddr = localaddr + PAGE_SIZE; 5.244 + netif->rx_shmem_ref = rx_ring_ref; 5.245 + netif->rx_shmem_handle = op.handle; 5.246 + netif->rx_shmem_vaddr = localaddr + PAGE_SIZE; 5.247 5.248 #else 5.249 - pgprot_t prot = __pgprot(_KERNPG_TABLE); 5.250 - int err; 5.251 + pgprot_t prot = __pgprot(_KERNPG_TABLE); 5.252 + int err; 5.253 5.254 - err = direct_remap_pfn_range(&init_mm, localaddr, 5.255 - tx_ring_ref, PAGE_SIZE, 5.256 - prot, netif->domid); 5.257 + err = direct_remap_pfn_range( 5.258 + &init_mm, localaddr, 5.259 + tx_ring_ref, PAGE_SIZE, 5.260 + prot, netif->domid); 5.261 5.262 - err |= direct_remap_pfn_range(&init_mm, localaddr + PAGE_SIZE, 5.263 - rx_ring_ref, PAGE_SIZE, 5.264 - prot, netif->domid); 5.265 + err |= direct_remap_pfn_range( 5.266 + &init_mm, localaddr + PAGE_SIZE, 5.267 + rx_ring_ref, PAGE_SIZE, 5.268 + prot, netif->domid); 5.269 5.270 - if (err) 5.271 - return err; 5.272 + if (err) 5.273 + return err; 5.274 #endif 5.275 5.276 - return 0; 5.277 + return 0; 5.278 } 5.279 5.280 static void unmap_frontend_pages(netif_t *netif) 5.281 { 5.282 #ifdef CONFIG_XEN_NETDEV_GRANT 5.283 - struct gnttab_unmap_grant_ref op; 5.284 + struct gnttab_unmap_grant_ref op; 5.285 5.286 - op.host_addr = netif->tx_shmem_vaddr; 5.287 - op.handle = netif->tx_shmem_handle; 5.288 - op.dev_bus_addr = 0; 5.289 - BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 5.290 + op.host_addr = netif->tx_shmem_vaddr; 5.291 + op.handle = netif->tx_shmem_handle; 5.292 + op.dev_bus_addr = 0; 5.293 + BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 5.294 5.295 - op.host_addr = netif->rx_shmem_vaddr; 5.296 - op.handle = netif->rx_shmem_handle; 5.297 - op.dev_bus_addr = 0; 5.298 - BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 5.299 + op.host_addr = netif->rx_shmem_vaddr; 5.300 + op.handle = netif->rx_shmem_handle; 5.301 + op.dev_bus_addr = 0; 5.302 + BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)); 5.303 #endif 5.304 5.305 - return; 5.306 + return; 5.307 } 5.308 5.309 int netif_map(netif_t *netif, unsigned long tx_ring_ref, 5.310 unsigned long rx_ring_ref, unsigned int evtchn) 5.311 { 5.312 - struct vm_struct *vma; 5.313 - evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain }; 5.314 - int err; 5.315 + struct vm_struct *vma; 5.316 + evtchn_op_t op = { .cmd = EVTCHNOP_bind_interdomain }; 5.317 + int err; 5.318 5.319 - vma = 
get_vm_area(2*PAGE_SIZE, VM_IOREMAP); 5.320 - if (vma == NULL) 5.321 - return -ENOMEM; 5.322 + vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP); 5.323 + if (vma == NULL) 5.324 + return -ENOMEM; 5.325 5.326 - err = map_frontend_pages(netif, (unsigned long)vma->addr, tx_ring_ref, 5.327 - rx_ring_ref); 5.328 - if (err) { 5.329 - vfree(vma->addr); 5.330 - return err; 5.331 - } 5.332 + err = map_frontend_pages( 5.333 + netif, (unsigned long)vma->addr, tx_ring_ref, rx_ring_ref); 5.334 + if (err) { 5.335 + vfree(vma->addr); 5.336 + return err; 5.337 + } 5.338 5.339 - op.u.bind_interdomain.dom1 = DOMID_SELF; 5.340 - op.u.bind_interdomain.dom2 = netif->domid; 5.341 - op.u.bind_interdomain.port1 = 0; 5.342 - op.u.bind_interdomain.port2 = evtchn; 5.343 - err = HYPERVISOR_event_channel_op(&op); 5.344 - if (err) { 5.345 - unmap_frontend_pages(netif); 5.346 - vfree(vma->addr); 5.347 - return err; 5.348 - } 5.349 + op.u.bind_interdomain.dom1 = DOMID_SELF; 5.350 + op.u.bind_interdomain.dom2 = netif->domid; 5.351 + op.u.bind_interdomain.port1 = 0; 5.352 + op.u.bind_interdomain.port2 = evtchn; 5.353 + err = HYPERVISOR_event_channel_op(&op); 5.354 + if (err) { 5.355 + unmap_frontend_pages(netif); 5.356 + vfree(vma->addr); 5.357 + return err; 5.358 + } 5.359 5.360 - netif->evtchn = op.u.bind_interdomain.port1; 5.361 - netif->remote_evtchn = evtchn; 5.362 + netif->evtchn = op.u.bind_interdomain.port1; 5.363 + netif->remote_evtchn = evtchn; 5.364 5.365 - netif->tx = (netif_tx_interface_t *)vma->addr; 5.366 - netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE); 5.367 - netif->tx->resp_prod = netif->rx->resp_prod = 0; 5.368 - netif_get(netif); 5.369 - wmb(); /* Other CPUs see new state before interface is started. */ 5.370 + netif->tx = (netif_tx_interface_t *)vma->addr; 5.371 + netif->rx = (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE); 5.372 + netif->tx->resp_prod = netif->rx->resp_prod = 0; 5.373 + netif_get(netif); 5.374 + wmb(); /* Other CPUs see new state before interface is started. */ 5.375 5.376 - rtnl_lock(); 5.377 - netif->status = CONNECTED; 5.378 - wmb(); 5.379 - if (netif_running(netif->dev)) 5.380 - __netif_up(netif); 5.381 - rtnl_unlock(); 5.382 + rtnl_lock(); 5.383 + netif->status = CONNECTED; 5.384 + wmb(); 5.385 + if (netif_running(netif->dev)) 5.386 + __netif_up(netif); 5.387 + rtnl_unlock(); 5.388 5.389 - return 0; 5.390 + return 0; 5.391 } 5.392 5.393 -static void free_netif(void *arg) 5.394 +static void free_netif_callback(void *arg) 5.395 { 5.396 - evtchn_op_t op = { .cmd = EVTCHNOP_close }; 5.397 - netif_t *netif = (netif_t *)arg; 5.398 + evtchn_op_t op = { .cmd = EVTCHNOP_close }; 5.399 + netif_t *netif = (netif_t *)arg; 5.400 5.401 - /* 5.402 - * These can't be done in netif_disconnect() because at that point there 5.403 - * may be outstanding requests in the network stack whose asynchronous 5.404 - * responses must still be notified to the remote driver. 5.405 - */ 5.406 + /* 5.407 + * These can't be done in netif_disconnect() because at that point 5.408 + * there may be outstanding requests in the network stack whose 5.409 + * asynchronous responses must still be notified to the remote driver. 
5.410 + */ 5.411 5.412 - op.u.close.port = netif->evtchn; 5.413 - op.u.close.dom = DOMID_SELF; 5.414 - HYPERVISOR_event_channel_op(&op); 5.415 - op.u.close.port = netif->remote_evtchn; 5.416 - op.u.close.dom = netif->domid; 5.417 - HYPERVISOR_event_channel_op(&op); 5.418 + op.u.close.port = netif->evtchn; 5.419 + op.u.close.dom = DOMID_SELF; 5.420 + HYPERVISOR_event_channel_op(&op); 5.421 + op.u.close.port = netif->remote_evtchn; 5.422 + op.u.close.dom = netif->domid; 5.423 + HYPERVISOR_event_channel_op(&op); 5.424 5.425 - unregister_netdev(netif->dev); 5.426 + unregister_netdev(netif->dev); 5.427 5.428 - if (netif->tx) { 5.429 - unmap_frontend_pages(netif); 5.430 - vfree(netif->tx); /* Frees netif->rx as well. */ 5.431 - } 5.432 + if (netif->tx) { 5.433 + unmap_frontend_pages(netif); 5.434 + vfree(netif->tx); /* Frees netif->rx as well. */ 5.435 + } 5.436 5.437 - free_netdev(netif->dev); 5.438 + free_netdev(netif->dev); 5.439 } 5.440 5.441 -void free_netif_callback(netif_t *netif) 5.442 +void free_netif(netif_t *netif) 5.443 { 5.444 - INIT_WORK(&netif->free_work, free_netif, (void *)netif); 5.445 - schedule_work(&netif->free_work); 5.446 + INIT_WORK(&netif->free_work, free_netif_callback, (void *)netif); 5.447 + schedule_work(&netif->free_work); 5.448 } 5.449 5.450 void netif_creditlimit(netif_t *netif) 5.451 { 5.452 #if 0 5.453 - /* Set the credit limit (reset remaining credit to new limit). */ 5.454 - netif->credit_bytes = netif->remaining_credit = creditlimit->credit_bytes; 5.455 - netif->credit_usec = creditlimit->period_usec; 5.456 + /* Set the credit limit (reset remaining credit to new limit). */ 5.457 + netif->credit_bytes = creditlimit->credit_bytes; 5.458 + netif->remaining_credit = creditlimit->credit_bytes; 5.459 + netif->credit_usec = creditlimit->period_usec; 5.460 5.461 - if (netif->status == CONNECTED) { 5.462 - /* 5.463 - * Schedule work so that any packets waiting under previous credit 5.464 - * limit are dealt with (acts like a replenishment point). 5.465 - */ 5.466 - netif->credit_timeout.expires = jiffies; 5.467 - netif_schedule_work(netif); 5.468 - } 5.469 + if (netif->status == CONNECTED) { 5.470 + /* 5.471 + * Schedule work so that any packets waiting under previous 5.472 + * credit limit are dealt with (acts as a replenishment point). 5.473 + */ 5.474 + netif->credit_timeout.expires = jiffies; 5.475 + netif_schedule_work(netif); 5.476 + } 5.477 #endif 5.478 } 5.479 5.480 int netif_disconnect(netif_t *netif) 5.481 { 5.482 5.483 - if (netif->status == CONNECTED) { 5.484 - rtnl_lock(); 5.485 - netif->status = DISCONNECTING; 5.486 - wmb(); 5.487 - if (netif_running(netif->dev)) 5.488 - __netif_down(netif); 5.489 - rtnl_unlock(); 5.490 - netif_put(netif); 5.491 - return 0; /* Caller should not send response message. */ 5.492 - } 5.493 + if (netif->status == CONNECTED) { 5.494 + rtnl_lock(); 5.495 + netif->status = DISCONNECTING; 5.496 + wmb(); 5.497 + if (netif_running(netif->dev)) 5.498 + __netif_down(netif); 5.499 + rtnl_unlock(); 5.500 + netif_put(netif); 5.501 + return 0; /* Caller should not send response message. */ 5.502 + } 5.503 5.504 - return 1; 5.505 + return 1; 5.506 } 5.507 + 5.508 +/* 5.509 + * Local variables: 5.510 + * c-file-style: "linux" 5.511 + * indent-tabs-mode: t 5.512 + * c-indent-level: 8 5.513 + * c-basic-offset: 8 5.514 + * tab-width: 8 5.515 + * End: 5.516 + */
6.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Fri Sep 16 18:06:42 2005 +0000 6.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Fri Sep 16 18:07:50 2005 +0000 6.3 @@ -57,8 +57,8 @@ static unsigned long mmap_vstart; 6.4 #define PKT_PROT_LEN 64 6.5 6.6 static struct { 6.7 - netif_tx_request_t req; 6.8 - netif_t *netif; 6.9 + netif_tx_request_t req; 6.10 + netif_t *netif; 6.11 } pending_tx_info[MAX_PENDING_REQS]; 6.12 static u16 pending_ring[MAX_PENDING_REQS]; 6.13 typedef unsigned int PEND_RING_IDX; 6.14 @@ -91,49 +91,49 @@ static spinlock_t mfn_lock = SPIN_LOCK_U 6.15 6.16 static unsigned long alloc_mfn(void) 6.17 { 6.18 - unsigned long mfn = 0, flags; 6.19 - struct xen_memory_reservation reservation = { 6.20 - .extent_start = mfn_list, 6.21 - .nr_extents = MAX_MFN_ALLOC, 6.22 - .extent_order = 0, 6.23 - .domid = DOMID_SELF 6.24 - }; 6.25 - spin_lock_irqsave(&mfn_lock, flags); 6.26 - if ( unlikely(alloc_index == 0) ) 6.27 - alloc_index = HYPERVISOR_memory_op( 6.28 - XENMEM_increase_reservation, &reservation); 6.29 - if ( alloc_index != 0 ) 6.30 - mfn = mfn_list[--alloc_index]; 6.31 - spin_unlock_irqrestore(&mfn_lock, flags); 6.32 - return mfn; 6.33 + unsigned long mfn = 0, flags; 6.34 + struct xen_memory_reservation reservation = { 6.35 + .extent_start = mfn_list, 6.36 + .nr_extents = MAX_MFN_ALLOC, 6.37 + .extent_order = 0, 6.38 + .domid = DOMID_SELF 6.39 + }; 6.40 + spin_lock_irqsave(&mfn_lock, flags); 6.41 + if ( unlikely(alloc_index == 0) ) 6.42 + alloc_index = HYPERVISOR_memory_op( 6.43 + XENMEM_increase_reservation, &reservation); 6.44 + if ( alloc_index != 0 ) 6.45 + mfn = mfn_list[--alloc_index]; 6.46 + spin_unlock_irqrestore(&mfn_lock, flags); 6.47 + return mfn; 6.48 } 6.49 6.50 #ifndef CONFIG_XEN_NETDEV_GRANT 6.51 static void free_mfn(unsigned long mfn) 6.52 { 6.53 - unsigned long flags; 6.54 - struct xen_memory_reservation reservation = { 6.55 - .extent_start = &mfn, 6.56 - .nr_extents = 1, 6.57 - .extent_order = 0, 6.58 - .domid = DOMID_SELF 6.59 - }; 6.60 - spin_lock_irqsave(&mfn_lock, flags); 6.61 - if ( alloc_index != MAX_MFN_ALLOC ) 6.62 - mfn_list[alloc_index++] = mfn; 6.63 - else if ( HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) 6.64 - != 1 ) 6.65 - BUG(); 6.66 - spin_unlock_irqrestore(&mfn_lock, flags); 6.67 + unsigned long flags; 6.68 + struct xen_memory_reservation reservation = { 6.69 + .extent_start = &mfn, 6.70 + .nr_extents = 1, 6.71 + .extent_order = 0, 6.72 + .domid = DOMID_SELF 6.73 + }; 6.74 + spin_lock_irqsave(&mfn_lock, flags); 6.75 + if ( alloc_index != MAX_MFN_ALLOC ) 6.76 + mfn_list[alloc_index++] = mfn; 6.77 + else 6.78 + BUG_ON(HYPERVISOR_memory_op(XENMEM_decrease_reservation, 6.79 + &reservation) != 1); 6.80 + spin_unlock_irqrestore(&mfn_lock, flags); 6.81 } 6.82 #endif 6.83 6.84 static inline void maybe_schedule_tx_action(void) 6.85 { 6.86 - smp_mb(); 6.87 - if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) && 6.88 - !list_empty(&net_schedule_list) ) 6.89 - tasklet_schedule(&net_tx_tasklet); 6.90 + smp_mb(); 6.91 + if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) && 6.92 + !list_empty(&net_schedule_list)) 6.93 + tasklet_schedule(&net_tx_tasklet); 6.94 } 6.95 6.96 /* 6.97 @@ -142,77 +142,77 @@ static inline void maybe_schedule_tx_act 6.98 */ 6.99 static inline int is_xen_skb(struct sk_buff *skb) 6.100 { 6.101 - extern kmem_cache_t *skbuff_cachep; 6.102 - kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next; 6.103 - return (cp == skbuff_cachep); 6.104 + extern kmem_cache_t *skbuff_cachep; 6.105 + 
kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next; 6.106 + return (cp == skbuff_cachep); 6.107 } 6.108 6.109 int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev) 6.110 { 6.111 - netif_t *netif = netdev_priv(dev); 6.112 + netif_t *netif = netdev_priv(dev); 6.113 6.114 - ASSERT(skb->dev == dev); 6.115 + ASSERT(skb->dev == dev); 6.116 6.117 - /* Drop the packet if the target domain has no receive buffers. */ 6.118 - if ( !netif->active || 6.119 - (netif->rx_req_cons == netif->rx->req_prod) || 6.120 - ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) ) 6.121 - goto drop; 6.122 + /* Drop the packet if the target domain has no receive buffers. */ 6.123 + if (!netif->active || 6.124 + (netif->rx_req_cons == netif->rx->req_prod) || 6.125 + ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE)) 6.126 + goto drop; 6.127 6.128 - /* 6.129 - * We do not copy the packet unless: 6.130 - * 1. The data is shared; or 6.131 - * 2. The data is not allocated from our special cache. 6.132 - * NB. We also couldn't cope with fragmented packets, but we won't get 6.133 - * any because we not advertise the NETIF_F_SG feature. 6.134 - */ 6.135 - if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) ) 6.136 - { 6.137 - int hlen = skb->data - skb->head; 6.138 - struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len); 6.139 - if ( unlikely(nskb == NULL) ) 6.140 - goto drop; 6.141 - skb_reserve(nskb, hlen); 6.142 - __skb_put(nskb, skb->len); 6.143 - if (skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen)) 6.144 - BUG(); 6.145 - nskb->dev = skb->dev; 6.146 - nskb->proto_csum_valid = skb->proto_csum_valid; 6.147 - dev_kfree_skb(skb); 6.148 - skb = nskb; 6.149 - } 6.150 + /* 6.151 + * We do not copy the packet unless: 6.152 + * 1. The data is shared; or 6.153 + * 2. The data is not allocated from our special cache. 6.154 + * NB. We also couldn't cope with fragmented packets, but we won't get 6.155 + * any because we not advertise the NETIF_F_SG feature. 
6.156 + */ 6.157 + if (skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb)) { 6.158 + int hlen = skb->data - skb->head; 6.159 + struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len); 6.160 + if ( unlikely(nskb == NULL) ) 6.161 + goto drop; 6.162 + skb_reserve(nskb, hlen); 6.163 + __skb_put(nskb, skb->len); 6.164 + BUG_ON(skb_copy_bits(skb, -hlen, nskb->data - hlen, 6.165 + skb->len + hlen)); 6.166 + nskb->dev = skb->dev; 6.167 + nskb->proto_csum_valid = skb->proto_csum_valid; 6.168 + dev_kfree_skb(skb); 6.169 + skb = nskb; 6.170 + } 6.171 #ifdef CONFIG_XEN_NETDEV_GRANT 6.172 #ifdef DEBUG_GRANT 6.173 - printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d id=%04x gr=%04x\n", 6.174 - netif->rx->req_prod, 6.175 - netif->rx_req_cons, 6.176 - netif->rx->ring[ 6.177 - MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id, 6.178 - netif->rx->ring[ 6.179 - MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref); 6.180 + printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d " 6.181 + "id=%04x gr=%04x\n", 6.182 + netif->rx->req_prod, 6.183 + netif->rx_req_cons, 6.184 + netif->rx->ring[ 6.185 + MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id, 6.186 + netif->rx->ring[ 6.187 + MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref); 6.188 #endif 6.189 #endif 6.190 - netif->rx_req_cons++; 6.191 - netif_get(netif); 6.192 + netif->rx_req_cons++; 6.193 + netif_get(netif); 6.194 6.195 - skb_queue_tail(&rx_queue, skb); 6.196 - tasklet_schedule(&net_rx_tasklet); 6.197 + skb_queue_tail(&rx_queue, skb); 6.198 + tasklet_schedule(&net_rx_tasklet); 6.199 6.200 - return 0; 6.201 + return 0; 6.202 6.203 drop: 6.204 - netif->stats.tx_dropped++; 6.205 - dev_kfree_skb(skb); 6.206 - return 0; 6.207 + netif->stats.tx_dropped++; 6.208 + dev_kfree_skb(skb); 6.209 + return 0; 6.210 } 6.211 6.212 #if 0 6.213 static void xen_network_done_notify(void) 6.214 { 6.215 - static struct net_device *eth0_dev = NULL; 6.216 - if ( unlikely(eth0_dev == NULL) ) 6.217 - eth0_dev = __dev_get_by_name("eth0"); 6.218 - netif_rx_schedule(eth0_dev); 6.219 + static struct net_device *eth0_dev = NULL; 6.220 + if (unlikely(eth0_dev == NULL)) 6.221 + eth0_dev = __dev_get_by_name("eth0"); 6.222 + netif_rx_schedule(eth0_dev); 6.223 } 6.224 /* 6.225 * Add following to poll() function in NAPI driver (Tigon3 is example): 6.226 @@ -221,658 +221,654 @@ static void xen_network_done_notify(void 6.227 */ 6.228 int xen_network_done(void) 6.229 { 6.230 - return skb_queue_empty(&rx_queue); 6.231 + return skb_queue_empty(&rx_queue); 6.232 } 6.233 #endif 6.234 6.235 static void net_rx_action(unsigned long unused) 6.236 { 6.237 - netif_t *netif = NULL; 6.238 - s8 status; 6.239 - u16 size, id, evtchn; 6.240 - multicall_entry_t *mcl; 6.241 - mmu_update_t *mmu; 6.242 + netif_t *netif = NULL; 6.243 + s8 status; 6.244 + u16 size, id, evtchn; 6.245 + multicall_entry_t *mcl; 6.246 + mmu_update_t *mmu; 6.247 #ifdef CONFIG_XEN_NETDEV_GRANT 6.248 - gnttab_transfer_t *gop; 6.249 + gnttab_transfer_t *gop; 6.250 #else 6.251 - struct mmuext_op *mmuext; 6.252 + struct mmuext_op *mmuext; 6.253 #endif 6.254 - unsigned long vdata, old_mfn, new_mfn; 6.255 - struct sk_buff_head rxq; 6.256 - struct sk_buff *skb; 6.257 - u16 notify_list[NETIF_RX_RING_SIZE]; 6.258 - int notify_nr = 0; 6.259 + unsigned long vdata, old_mfn, new_mfn; 6.260 + struct sk_buff_head rxq; 6.261 + struct sk_buff *skb; 6.262 + u16 notify_list[NETIF_RX_RING_SIZE]; 6.263 + int notify_nr = 0; 6.264 6.265 - skb_queue_head_init(&rxq); 6.266 + skb_queue_head_init(&rxq); 6.267 6.268 - mcl = rx_mcl; 6.269 - mmu = rx_mmu; 6.270 + mcl = 
rx_mcl; 6.271 + mmu = rx_mmu; 6.272 #ifdef CONFIG_XEN_NETDEV_GRANT 6.273 - gop = grant_rx_op; 6.274 + gop = grant_rx_op; 6.275 #else 6.276 - mmuext = rx_mmuext; 6.277 + mmuext = rx_mmuext; 6.278 #endif 6.279 6.280 - while ( (skb = skb_dequeue(&rx_queue)) != NULL ) 6.281 - { 6.282 - netif = netdev_priv(skb->dev); 6.283 - vdata = (unsigned long)skb->data; 6.284 - old_mfn = virt_to_mfn(vdata); 6.285 + while ((skb = skb_dequeue(&rx_queue)) != NULL) { 6.286 + netif = netdev_priv(skb->dev); 6.287 + vdata = (unsigned long)skb->data; 6.288 + old_mfn = virt_to_mfn(vdata); 6.289 6.290 - /* Memory squeeze? Back off for an arbitrary while. */ 6.291 - if ( (new_mfn = alloc_mfn()) == 0 ) 6.292 - { 6.293 - if ( net_ratelimit() ) 6.294 - WPRINTK("Memory squeeze in netback driver.\n"); 6.295 - mod_timer(&net_timer, jiffies + HZ); 6.296 - skb_queue_head(&rx_queue, skb); 6.297 - break; 6.298 - } 6.299 - /* 6.300 - * Set the new P2M table entry before reassigning the old data page. 6.301 - * Heed the comment in pgtable-2level.h:pte_page(). :-) 6.302 - */ 6.303 - phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn; 6.304 + /* Memory squeeze? Back off for an arbitrary while. */ 6.305 + if ((new_mfn = alloc_mfn()) == 0) { 6.306 + if ( net_ratelimit() ) 6.307 + WPRINTK("Memory squeeze in netback driver.\n"); 6.308 + mod_timer(&net_timer, jiffies + HZ); 6.309 + skb_queue_head(&rx_queue, skb); 6.310 + break; 6.311 + } 6.312 + /* 6.313 + * Set the new P2M table entry before reassigning the old data 6.314 + * page. Heed the comment in pgtable-2level.h:pte_page(). :-) 6.315 + */ 6.316 + phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = 6.317 + new_mfn; 6.318 6.319 - MULTI_update_va_mapping(mcl, vdata, 6.320 - pfn_pte_ma(new_mfn, PAGE_KERNEL), 0); 6.321 - mcl++; 6.322 + MULTI_update_va_mapping(mcl, vdata, 6.323 + pfn_pte_ma(new_mfn, PAGE_KERNEL), 0); 6.324 + mcl++; 6.325 6.326 #ifdef CONFIG_XEN_NETDEV_GRANT 6.327 - gop->mfn = old_mfn; 6.328 - gop->domid = netif->domid; 6.329 - gop->ref = netif->rx->ring[ 6.330 - MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref; 6.331 - netif->rx_resp_prod_copy++; 6.332 - gop++; 6.333 + gop->mfn = old_mfn; 6.334 + gop->domid = netif->domid; 6.335 + gop->ref = netif->rx->ring[ 6.336 + MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref; 6.337 + netif->rx_resp_prod_copy++; 6.338 + gop++; 6.339 #else 6.340 - mcl->op = __HYPERVISOR_mmuext_op; 6.341 - mcl->args[0] = (unsigned long)mmuext; 6.342 - mcl->args[1] = 1; 6.343 - mcl->args[2] = 0; 6.344 - mcl->args[3] = netif->domid; 6.345 - mcl++; 6.346 + mcl->op = __HYPERVISOR_mmuext_op; 6.347 + mcl->args[0] = (unsigned long)mmuext; 6.348 + mcl->args[1] = 1; 6.349 + mcl->args[2] = 0; 6.350 + mcl->args[3] = netif->domid; 6.351 + mcl++; 6.352 6.353 - mmuext->cmd = MMUEXT_REASSIGN_PAGE; 6.354 - mmuext->arg1.mfn = old_mfn; 6.355 - mmuext++; 6.356 + mmuext->cmd = MMUEXT_REASSIGN_PAGE; 6.357 + mmuext->arg1.mfn = old_mfn; 6.358 + mmuext++; 6.359 #endif 6.360 - mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; 6.361 - mmu->val = __pa(vdata) >> PAGE_SHIFT; 6.362 - mmu++; 6.363 + mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) | 6.364 + MMU_MACHPHYS_UPDATE; 6.365 + mmu->val = __pa(vdata) >> PAGE_SHIFT; 6.366 + mmu++; 6.367 6.368 - __skb_queue_tail(&rxq, skb); 6.369 + __skb_queue_tail(&rxq, skb); 6.370 6.371 #ifdef DEBUG_GRANT 6.372 - dump_packet('a', old_mfn, vdata); 6.373 + dump_packet('a', old_mfn, vdata); 6.374 #endif 6.375 - /* Filled the batch queue? 
*/ 6.376 - if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) ) 6.377 - break; 6.378 - } 6.379 + /* Filled the batch queue? */ 6.380 + if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl)) 6.381 + break; 6.382 + } 6.383 6.384 - if ( mcl == rx_mcl ) 6.385 - return; 6.386 + if (mcl == rx_mcl) 6.387 + return; 6.388 6.389 - mcl->op = __HYPERVISOR_mmu_update; 6.390 - mcl->args[0] = (unsigned long)rx_mmu; 6.391 - mcl->args[1] = mmu - rx_mmu; 6.392 - mcl->args[2] = 0; 6.393 - mcl->args[3] = DOMID_SELF; 6.394 - mcl++; 6.395 + mcl->op = __HYPERVISOR_mmu_update; 6.396 + mcl->args[0] = (unsigned long)rx_mmu; 6.397 + mcl->args[1] = mmu - rx_mmu; 6.398 + mcl->args[2] = 0; 6.399 + mcl->args[3] = DOMID_SELF; 6.400 + mcl++; 6.401 6.402 #ifdef CONFIG_XEN_NETDEV_GRANT 6.403 - mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 6.404 + mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 6.405 #else 6.406 - mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 6.407 + mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 6.408 #endif 6.409 - if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) ) 6.410 - BUG(); 6.411 + BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0); 6.412 6.413 - mcl = rx_mcl; 6.414 + mcl = rx_mcl; 6.415 #ifdef CONFIG_XEN_NETDEV_GRANT 6.416 - if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 6.417 - gop - grant_rx_op)) { 6.418 - /* 6.419 - ** The other side has given us a bad grant ref, or has no headroom, 6.420 - ** or has gone away. Unfortunately the current grant table code 6.421 - ** doesn't inform us which is the case, so not much we can do. 6.422 - */ 6.423 - DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) %d " 6.424 - "packets.\n", grant_rx_op[0].domid, gop - grant_rx_op); 6.425 - } 6.426 - gop = grant_rx_op; 6.427 + if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op, 6.428 + gop - grant_rx_op)) { 6.429 + /* 6.430 + * The other side has given us a bad grant ref, or has no 6.431 + * headroom, or has gone away. Unfortunately the current grant 6.432 + * table code doesn't inform us which is the case, so not much 6.433 + * we can do. 6.434 + */ 6.435 + DPRINTK("net_rx: transfer to DOM%u failed; dropping (up to) " 6.436 + "%d packets.\n", 6.437 + grant_rx_op[0].domid, gop - grant_rx_op); 6.438 + } 6.439 + gop = grant_rx_op; 6.440 #else 6.441 - mmuext = rx_mmuext; 6.442 + mmuext = rx_mmuext; 6.443 #endif 6.444 - while ( (skb = __skb_dequeue(&rxq)) != NULL ) 6.445 - { 6.446 - netif = netdev_priv(skb->dev); 6.447 - size = skb->tail - skb->data; 6.448 + while ((skb = __skb_dequeue(&rxq)) != NULL) { 6.449 + netif = netdev_priv(skb->dev); 6.450 + size = skb->tail - skb->data; 6.451 6.452 - /* Rederive the machine addresses. */ 6.453 - new_mfn = mcl[0].args[1] >> PAGE_SHIFT; 6.454 + /* Rederive the machine addresses. */ 6.455 + new_mfn = mcl[0].args[1] >> PAGE_SHIFT; 6.456 #ifdef CONFIG_XEN_NETDEV_GRANT 6.457 - old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */ 6.458 + old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! 
*/ 6.459 #else 6.460 - old_mfn = mmuext[0].arg1.mfn; 6.461 + old_mfn = mmuext[0].arg1.mfn; 6.462 #endif 6.463 - atomic_set(&(skb_shinfo(skb)->dataref), 1); 6.464 - skb_shinfo(skb)->nr_frags = 0; 6.465 - skb_shinfo(skb)->frag_list = NULL; 6.466 + atomic_set(&(skb_shinfo(skb)->dataref), 1); 6.467 + skb_shinfo(skb)->nr_frags = 0; 6.468 + skb_shinfo(skb)->frag_list = NULL; 6.469 6.470 - netif->stats.tx_bytes += size; 6.471 - netif->stats.tx_packets++; 6.472 + netif->stats.tx_bytes += size; 6.473 + netif->stats.tx_packets++; 6.474 6.475 - /* The update_va_mapping() must not fail. */ 6.476 - BUG_ON(mcl[0].result != 0); 6.477 + /* The update_va_mapping() must not fail. */ 6.478 + BUG_ON(mcl[0].result != 0); 6.479 6.480 - /* Check the reassignment error code. */ 6.481 - status = NETIF_RSP_OKAY; 6.482 + /* Check the reassignment error code. */ 6.483 + status = NETIF_RSP_OKAY; 6.484 #ifdef CONFIG_XEN_NETDEV_GRANT 6.485 - if(gop->status != 0) { 6.486 - DPRINTK("Bad status %d from grant transfer to DOM%u\n", 6.487 - gop->status, netif->domid); 6.488 - /* XXX SMH: should free 'old_mfn' here */ 6.489 - status = NETIF_RSP_ERROR; 6.490 - } 6.491 + if(gop->status != 0) { 6.492 + DPRINTK("Bad status %d from grant transfer to DOM%u\n", 6.493 + gop->status, netif->domid); 6.494 + /* XXX SMH: should free 'old_mfn' here */ 6.495 + status = NETIF_RSP_ERROR; 6.496 + } 6.497 #else 6.498 - if ( unlikely(mcl[1].result != 0) ) 6.499 - { 6.500 - DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid); 6.501 - free_mfn(old_mfn); 6.502 - status = NETIF_RSP_ERROR; 6.503 - } 6.504 + if (unlikely(mcl[1].result != 0)) { 6.505 + DPRINTK("Failed MMU update transferring to DOM%u\n", 6.506 + netif->domid); 6.507 + free_mfn(old_mfn); 6.508 + status = NETIF_RSP_ERROR; 6.509 + } 6.510 #endif 6.511 - evtchn = netif->evtchn; 6.512 - id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id; 6.513 - if ( make_rx_response(netif, id, status, 6.514 - (old_mfn << PAGE_SHIFT) | /* XXX */ 6.515 - ((unsigned long)skb->data & ~PAGE_MASK), 6.516 - size, skb->proto_csum_valid) && 6.517 - (rx_notify[evtchn] == 0) ) 6.518 - { 6.519 - rx_notify[evtchn] = 1; 6.520 - notify_list[notify_nr++] = evtchn; 6.521 - } 6.522 + evtchn = netif->evtchn; 6.523 + id = netif->rx->ring[ 6.524 + MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id; 6.525 + if (make_rx_response(netif, id, status, 6.526 + (old_mfn << PAGE_SHIFT) | /* XXX */ 6.527 + ((unsigned long)skb->data & ~PAGE_MASK), 6.528 + size, skb->proto_csum_valid) && 6.529 + (rx_notify[evtchn] == 0)) { 6.530 + rx_notify[evtchn] = 1; 6.531 + notify_list[notify_nr++] = evtchn; 6.532 + } 6.533 6.534 - netif_put(netif); 6.535 - dev_kfree_skb(skb); 6.536 + netif_put(netif); 6.537 + dev_kfree_skb(skb); 6.538 #ifdef CONFIG_XEN_NETDEV_GRANT 6.539 - mcl++; 6.540 - gop++; 6.541 + mcl++; 6.542 + gop++; 6.543 #else 6.544 - mcl += 2; 6.545 - mmuext += 1; 6.546 + mcl += 2; 6.547 + mmuext += 1; 6.548 #endif 6.549 - } 6.550 + } 6.551 6.552 - while ( notify_nr != 0 ) 6.553 - { 6.554 - evtchn = notify_list[--notify_nr]; 6.555 - rx_notify[evtchn] = 0; 6.556 - notify_via_evtchn(evtchn); 6.557 - } 6.558 + while (notify_nr != 0) { 6.559 + evtchn = notify_list[--notify_nr]; 6.560 + rx_notify[evtchn] = 0; 6.561 + notify_via_evtchn(evtchn); 6.562 + } 6.563 6.564 - out: 6.565 - /* More work to do? */ 6.566 - if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) ) 6.567 - tasklet_schedule(&net_rx_tasklet); 6.568 + /* More work to do? 
*/ 6.569 + if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer)) 6.570 + tasklet_schedule(&net_rx_tasklet); 6.571 #if 0 6.572 - else 6.573 - xen_network_done_notify(); 6.574 + else 6.575 + xen_network_done_notify(); 6.576 #endif 6.577 } 6.578 6.579 static void net_alarm(unsigned long unused) 6.580 { 6.581 - tasklet_schedule(&net_rx_tasklet); 6.582 + tasklet_schedule(&net_rx_tasklet); 6.583 } 6.584 6.585 struct net_device_stats *netif_be_get_stats(struct net_device *dev) 6.586 { 6.587 - netif_t *netif = netdev_priv(dev); 6.588 - return &netif->stats; 6.589 + netif_t *netif = netdev_priv(dev); 6.590 + return &netif->stats; 6.591 } 6.592 6.593 static int __on_net_schedule_list(netif_t *netif) 6.594 { 6.595 - return netif->list.next != NULL; 6.596 + return netif->list.next != NULL; 6.597 } 6.598 6.599 static void remove_from_net_schedule_list(netif_t *netif) 6.600 { 6.601 - spin_lock_irq(&net_schedule_list_lock); 6.602 - if ( likely(__on_net_schedule_list(netif)) ) 6.603 - { 6.604 - list_del(&netif->list); 6.605 - netif->list.next = NULL; 6.606 - netif_put(netif); 6.607 - } 6.608 - spin_unlock_irq(&net_schedule_list_lock); 6.609 + spin_lock_irq(&net_schedule_list_lock); 6.610 + if (likely(__on_net_schedule_list(netif))) { 6.611 + list_del(&netif->list); 6.612 + netif->list.next = NULL; 6.613 + netif_put(netif); 6.614 + } 6.615 + spin_unlock_irq(&net_schedule_list_lock); 6.616 } 6.617 6.618 static void add_to_net_schedule_list_tail(netif_t *netif) 6.619 { 6.620 - if ( __on_net_schedule_list(netif) ) 6.621 - return; 6.622 + if (__on_net_schedule_list(netif)) 6.623 + return; 6.624 6.625 - spin_lock_irq(&net_schedule_list_lock); 6.626 - if ( !__on_net_schedule_list(netif) && netif->active ) 6.627 - { 6.628 - list_add_tail(&netif->list, &net_schedule_list); 6.629 - netif_get(netif); 6.630 - } 6.631 - spin_unlock_irq(&net_schedule_list_lock); 6.632 + spin_lock_irq(&net_schedule_list_lock); 6.633 + if (!__on_net_schedule_list(netif) && netif->active) { 6.634 + list_add_tail(&netif->list, &net_schedule_list); 6.635 + netif_get(netif); 6.636 + } 6.637 + spin_unlock_irq(&net_schedule_list_lock); 6.638 } 6.639 6.640 void netif_schedule_work(netif_t *netif) 6.641 { 6.642 - if ( (netif->tx_req_cons != netif->tx->req_prod) && 6.643 - ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) ) 6.644 - { 6.645 - add_to_net_schedule_list_tail(netif); 6.646 - maybe_schedule_tx_action(); 6.647 - } 6.648 + if ((netif->tx_req_cons != netif->tx->req_prod) && 6.649 + ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE)) { 6.650 + add_to_net_schedule_list_tail(netif); 6.651 + maybe_schedule_tx_action(); 6.652 + } 6.653 } 6.654 6.655 void netif_deschedule_work(netif_t *netif) 6.656 { 6.657 - remove_from_net_schedule_list(netif); 6.658 + remove_from_net_schedule_list(netif); 6.659 } 6.660 6.661 6.662 static void tx_credit_callback(unsigned long data) 6.663 { 6.664 - netif_t *netif = (netif_t *)data; 6.665 - netif->remaining_credit = netif->credit_bytes; 6.666 - netif_schedule_work(netif); 6.667 + netif_t *netif = (netif_t *)data; 6.668 + netif->remaining_credit = netif->credit_bytes; 6.669 + netif_schedule_work(netif); 6.670 } 6.671 6.672 inline static void net_tx_action_dealloc(void) 6.673 { 6.674 #ifdef CONFIG_XEN_NETDEV_GRANT 6.675 - gnttab_unmap_grant_ref_t *gop; 6.676 + gnttab_unmap_grant_ref_t *gop; 6.677 #else 6.678 - multicall_entry_t *mcl; 6.679 + multicall_entry_t *mcl; 6.680 #endif 6.681 - u16 pending_idx; 6.682 - PEND_RING_IDX dc, dp; 6.683 - netif_t *netif; 6.684 + u16 
pending_idx; 6.685 + PEND_RING_IDX dc, dp; 6.686 + netif_t *netif; 6.687 6.688 - dc = dealloc_cons; 6.689 - dp = dealloc_prod; 6.690 + dc = dealloc_cons; 6.691 + dp = dealloc_prod; 6.692 6.693 #ifdef CONFIG_XEN_NETDEV_GRANT 6.694 - /* 6.695 - * Free up any grants we have finished using 6.696 - */ 6.697 - gop = tx_unmap_ops; 6.698 - while ( dc != dp ) 6.699 - { 6.700 - pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)]; 6.701 - gop->host_addr = MMAP_VADDR(pending_idx); 6.702 - gop->dev_bus_addr = 0; 6.703 - gop->handle = grant_tx_ref[pending_idx]; 6.704 - grant_tx_ref[pending_idx] = GRANT_INVALID_REF; 6.705 - gop++; 6.706 - } 6.707 - BUG_ON(HYPERVISOR_grant_table_op( 6.708 - GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops)); 6.709 + /* 6.710 + * Free up any grants we have finished using 6.711 + */ 6.712 + gop = tx_unmap_ops; 6.713 + while (dc != dp) { 6.714 + pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)]; 6.715 + gop->host_addr = MMAP_VADDR(pending_idx); 6.716 + gop->dev_bus_addr = 0; 6.717 + gop->handle = grant_tx_ref[pending_idx]; 6.718 + grant_tx_ref[pending_idx] = GRANT_INVALID_REF; 6.719 + gop++; 6.720 + } 6.721 + BUG_ON(HYPERVISOR_grant_table_op( 6.722 + GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops)); 6.723 #else 6.724 - mcl = tx_mcl; 6.725 - while ( dc != dp ) 6.726 - { 6.727 - pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)]; 6.728 - MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx), 6.729 - __pte(0), 0); 6.730 - mcl++; 6.731 - } 6.732 + mcl = tx_mcl; 6.733 + while (dc != dp) { 6.734 + pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)]; 6.735 + MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx), 6.736 + __pte(0), 0); 6.737 + mcl++; 6.738 + } 6.739 6.740 - mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 6.741 - if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) ) 6.742 - BUG(); 6.743 + mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 6.744 + BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0); 6.745 6.746 - mcl = tx_mcl; 6.747 + mcl = tx_mcl; 6.748 #endif 6.749 - while ( dealloc_cons != dp ) 6.750 - { 6.751 + while (dealloc_cons != dp) { 6.752 #ifndef CONFIG_XEN_NETDEV_GRANT 6.753 - /* The update_va_mapping() must not fail. */ 6.754 - BUG_ON(mcl[0].result != 0); 6.755 + /* The update_va_mapping() must not fail. */ 6.756 + BUG_ON(mcl[0].result != 0); 6.757 #endif 6.758 6.759 - pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)]; 6.760 + pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)]; 6.761 6.762 - netif = pending_tx_info[pending_idx].netif; 6.763 + netif = pending_tx_info[pending_idx].netif; 6.764 6.765 - make_tx_response(netif, pending_tx_info[pending_idx].req.id, 6.766 - NETIF_RSP_OKAY); 6.767 + make_tx_response(netif, pending_tx_info[pending_idx].req.id, 6.768 + NETIF_RSP_OKAY); 6.769 6.770 - pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 6.771 + pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 6.772 6.773 - /* 6.774 - * Scheduling checks must happen after the above response is posted. 6.775 - * This avoids a possible race with a guest OS on another CPU if that 6.776 - * guest is testing against 'resp_prod' when deciding whether to notify 6.777 - * us when it queues additional packets. 
6.778 - */ 6.779 - mb(); 6.780 - if ( (netif->tx_req_cons != netif->tx->req_prod) && 6.781 - ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) ) 6.782 - add_to_net_schedule_list_tail(netif); 6.783 + /* 6.784 + * Scheduling checks must happen after the above response is 6.785 + * posted. This avoids a possible race with a guest OS on 6.786 + * another CPU if that guest is testing against 'resp_prod' 6.787 + * when deciding whether to notify us when it queues additional 6.788 + * packets. 6.789 + */ 6.790 + mb(); 6.791 + if ((netif->tx_req_cons != netif->tx->req_prod) && 6.792 + ((netif->tx_req_cons-netif->tx_resp_prod) != 6.793 + NETIF_TX_RING_SIZE)) 6.794 + add_to_net_schedule_list_tail(netif); 6.795 6.796 - netif_put(netif); 6.797 + netif_put(netif); 6.798 6.799 #ifndef CONFIG_XEN_NETDEV_GRANT 6.800 - mcl++; 6.801 + mcl++; 6.802 #endif 6.803 - } 6.804 - 6.805 + } 6.806 } 6.807 6.808 /* Called after netfront has transmitted */ 6.809 static void net_tx_action(unsigned long unused) 6.810 { 6.811 - struct list_head *ent; 6.812 - struct sk_buff *skb; 6.813 - netif_t *netif; 6.814 - netif_tx_request_t txreq; 6.815 - u16 pending_idx; 6.816 - NETIF_RING_IDX i; 6.817 + struct list_head *ent; 6.818 + struct sk_buff *skb; 6.819 + netif_t *netif; 6.820 + netif_tx_request_t txreq; 6.821 + u16 pending_idx; 6.822 + NETIF_RING_IDX i; 6.823 #ifdef CONFIG_XEN_NETDEV_GRANT 6.824 - gnttab_map_grant_ref_t *mop; 6.825 + gnttab_map_grant_ref_t *mop; 6.826 #else 6.827 - multicall_entry_t *mcl; 6.828 + multicall_entry_t *mcl; 6.829 #endif 6.830 - unsigned int data_len; 6.831 + unsigned int data_len; 6.832 6.833 - if ( dealloc_cons != dealloc_prod ) 6.834 - net_tx_action_dealloc(); 6.835 + if (dealloc_cons != dealloc_prod) 6.836 + net_tx_action_dealloc(); 6.837 6.838 #ifdef CONFIG_XEN_NETDEV_GRANT 6.839 - mop = tx_map_ops; 6.840 + mop = tx_map_ops; 6.841 #else 6.842 - mcl = tx_mcl; 6.843 + mcl = tx_mcl; 6.844 #endif 6.845 - while ( (NR_PENDING_REQS < MAX_PENDING_REQS) && 6.846 - !list_empty(&net_schedule_list) ) 6.847 - { 6.848 - /* Get a netif from the list with work to do. */ 6.849 - ent = net_schedule_list.next; 6.850 - netif = list_entry(ent, netif_t, list); 6.851 - netif_get(netif); 6.852 - remove_from_net_schedule_list(netif); 6.853 + while ((NR_PENDING_REQS < MAX_PENDING_REQS) && 6.854 + !list_empty(&net_schedule_list)) { 6.855 + /* Get a netif from the list with work to do. */ 6.856 + ent = net_schedule_list.next; 6.857 + netif = list_entry(ent, netif_t, list); 6.858 + netif_get(netif); 6.859 + remove_from_net_schedule_list(netif); 6.860 6.861 - /* Work to do? */ 6.862 - i = netif->tx_req_cons; 6.863 - if ( (i == netif->tx->req_prod) || 6.864 - ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) ) 6.865 - { 6.866 - netif_put(netif); 6.867 - continue; 6.868 - } 6.869 + /* Work to do? */ 6.870 + i = netif->tx_req_cons; 6.871 + if ((i == netif->tx->req_prod) || 6.872 + ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE)) { 6.873 + netif_put(netif); 6.874 + continue; 6.875 + } 6.876 6.877 - rmb(); /* Ensure that we see the request before we copy it. */ 6.878 - memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 6.879 - sizeof(txreq)); 6.880 - /* Credit-based scheduling. */ 6.881 - if ( txreq.size > netif->remaining_credit ) 6.882 - { 6.883 - unsigned long now = jiffies; 6.884 - unsigned long next_credit = 6.885 - netif->credit_timeout.expires + 6.886 - msecs_to_jiffies(netif->credit_usec / 1000); 6.887 + rmb(); /* Ensure that we see the request before we copy it. 
*/ 6.888 + memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 6.889 + sizeof(txreq)); 6.890 + /* Credit-based scheduling. */ 6.891 + if (txreq.size > netif->remaining_credit) { 6.892 + unsigned long now = jiffies; 6.893 + unsigned long next_credit = 6.894 + netif->credit_timeout.expires + 6.895 + msecs_to_jiffies(netif->credit_usec / 1000); 6.896 6.897 - /* Timer could already be pending in some rare cases. */ 6.898 - if ( timer_pending(&netif->credit_timeout) ) 6.899 - break; 6.900 + /* Timer could already be pending in rare cases. */ 6.901 + if (timer_pending(&netif->credit_timeout)) 6.902 + break; 6.903 6.904 - /* Already passed the point at which we can replenish credit? */ 6.905 - if ( time_after_eq(now, next_credit) ) 6.906 - { 6.907 - netif->credit_timeout.expires = now; 6.908 - netif->remaining_credit = netif->credit_bytes; 6.909 - } 6.910 + /* Passed the point where we can replenish credit? */ 6.911 + if (time_after_eq(now, next_credit)) { 6.912 + netif->credit_timeout.expires = now; 6.913 + netif->remaining_credit = netif->credit_bytes; 6.914 + } 6.915 6.916 - /* Still too big to send right now? Then set a timer callback. */ 6.917 - if ( txreq.size > netif->remaining_credit ) 6.918 - { 6.919 - netif->remaining_credit = 0; 6.920 - netif->credit_timeout.expires = next_credit; 6.921 - netif->credit_timeout.data = (unsigned long)netif; 6.922 - netif->credit_timeout.function = tx_credit_callback; 6.923 - add_timer_on(&netif->credit_timeout, smp_processor_id()); 6.924 - break; 6.925 - } 6.926 - } 6.927 - netif->remaining_credit -= txreq.size; 6.928 + /* Still too big to send right now? Set a callback. */ 6.929 + if (txreq.size > netif->remaining_credit) { 6.930 + netif->remaining_credit = 0; 6.931 + netif->credit_timeout.expires = 6.932 + next_credit; 6.933 + netif->credit_timeout.data = 6.934 + (unsigned long)netif; 6.935 + netif->credit_timeout.function = 6.936 + tx_credit_callback; 6.937 + add_timer_on(&netif->credit_timeout, 6.938 + smp_processor_id()); 6.939 + break; 6.940 + } 6.941 + } 6.942 + netif->remaining_credit -= txreq.size; 6.943 6.944 - /* 6.945 - * Why the barrier? It ensures that the frontend sees updated req_cons 6.946 - * before we check for more work to schedule. 6.947 - */ 6.948 - netif->tx->req_cons = ++netif->tx_req_cons; 6.949 - mb(); 6.950 + /* 6.951 + * Why the barrier? It ensures that the frontend sees updated 6.952 + * req_cons before we check for more work to schedule. 6.953 + */ 6.954 + netif->tx->req_cons = ++netif->tx_req_cons; 6.955 + mb(); 6.956 6.957 - netif_schedule_work(netif); 6.958 + netif_schedule_work(netif); 6.959 6.960 - if ( unlikely(txreq.size < ETH_HLEN) || 6.961 - unlikely(txreq.size > ETH_FRAME_LEN) ) 6.962 - { 6.963 - DPRINTK("Bad packet size: %d\n", txreq.size); 6.964 - make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.965 - netif_put(netif); 6.966 - continue; 6.967 - } 6.968 + if (unlikely(txreq.size < ETH_HLEN) || 6.969 + unlikely(txreq.size > ETH_FRAME_LEN)) { 6.970 + DPRINTK("Bad packet size: %d\n", txreq.size); 6.971 + make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.972 + netif_put(netif); 6.973 + continue; 6.974 + } 6.975 + 6.976 + /* No crossing a page as the payload mustn't fragment. 
*/ 6.977 + if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= 6.978 + PAGE_SIZE)) { 6.979 + DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 6.980 + txreq.addr, txreq.size, 6.981 + (txreq.addr &~PAGE_MASK) + txreq.size); 6.982 + make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.983 + netif_put(netif); 6.984 + continue; 6.985 + } 6.986 6.987 - /* No crossing a page boundary as the payload mustn't fragment. */ 6.988 - if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) ) 6.989 - { 6.990 - DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n", 6.991 - txreq.addr, txreq.size, 6.992 - (txreq.addr &~PAGE_MASK) + txreq.size); 6.993 - make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.994 - netif_put(netif); 6.995 - continue; 6.996 - } 6.997 + pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; 6.998 + 6.999 + data_len = (txreq.size > PKT_PROT_LEN) ? 6.1000 + PKT_PROT_LEN : txreq.size; 6.1001 6.1002 - pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)]; 6.1003 - 6.1004 - data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size; 6.1005 + skb = alloc_skb(data_len+16, GFP_ATOMIC); 6.1006 + if (unlikely(skb == NULL)) { 6.1007 + DPRINTK("Can't allocate a skb in start_xmit.\n"); 6.1008 + make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.1009 + netif_put(netif); 6.1010 + break; 6.1011 + } 6.1012 6.1013 - if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) ) 6.1014 - { 6.1015 - DPRINTK("Can't allocate a skb in start_xmit.\n"); 6.1016 - make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.1017 - netif_put(netif); 6.1018 - break; 6.1019 - } 6.1020 - 6.1021 - /* Packets passed to netif_rx() must have some headroom. */ 6.1022 - skb_reserve(skb, 16); 6.1023 + /* Packets passed to netif_rx() must have some headroom. */ 6.1024 + skb_reserve(skb, 16); 6.1025 #ifdef CONFIG_XEN_NETDEV_GRANT 6.1026 - mop->host_addr = MMAP_VADDR(pending_idx); 6.1027 - mop->dom = netif->domid; 6.1028 - mop->ref = txreq.addr >> PAGE_SHIFT; 6.1029 - mop->flags = GNTMAP_host_map | GNTMAP_readonly; 6.1030 - mop++; 6.1031 + mop->host_addr = MMAP_VADDR(pending_idx); 6.1032 + mop->dom = netif->domid; 6.1033 + mop->ref = txreq.addr >> PAGE_SHIFT; 6.1034 + mop->flags = GNTMAP_host_map | GNTMAP_readonly; 6.1035 + mop++; 6.1036 #else 6.1037 - MULTI_update_va_mapping_otherdomain( 6.1038 - mcl, MMAP_VADDR(pending_idx), 6.1039 - pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL), 6.1040 - 0, netif->domid); 6.1041 + MULTI_update_va_mapping_otherdomain( 6.1042 + mcl, MMAP_VADDR(pending_idx), 6.1043 + pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL), 6.1044 + 0, netif->domid); 6.1045 6.1046 - mcl++; 6.1047 + mcl++; 6.1048 #endif 6.1049 6.1050 - memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq)); 6.1051 - pending_tx_info[pending_idx].netif = netif; 6.1052 - *((u16 *)skb->data) = pending_idx; 6.1053 + memcpy(&pending_tx_info[pending_idx].req, 6.1054 + &txreq, sizeof(txreq)); 6.1055 + pending_tx_info[pending_idx].netif = netif; 6.1056 + *((u16 *)skb->data) = pending_idx; 6.1057 6.1058 - __skb_queue_tail(&tx_queue, skb); 6.1059 + __skb_queue_tail(&tx_queue, skb); 6.1060 6.1061 - pending_cons++; 6.1062 + pending_cons++; 6.1063 6.1064 #ifdef CONFIG_XEN_NETDEV_GRANT 6.1065 - if ( (mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops) ) 6.1066 - break; 6.1067 + if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops)) 6.1068 + break; 6.1069 #else 6.1070 - /* Filled the batch queue? */ 6.1071 - if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) ) 6.1072 - break; 6.1073 + /* Filled the batch queue? 
*/ 6.1074 + if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl)) 6.1075 + break; 6.1076 #endif 6.1077 - } 6.1078 + } 6.1079 6.1080 #ifdef CONFIG_XEN_NETDEV_GRANT 6.1081 - if ( mop == tx_map_ops ) 6.1082 - return; 6.1083 + if (mop == tx_map_ops) 6.1084 + return; 6.1085 6.1086 - BUG_ON(HYPERVISOR_grant_table_op( 6.1087 - GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops)); 6.1088 + BUG_ON(HYPERVISOR_grant_table_op( 6.1089 + GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops)); 6.1090 6.1091 - mop = tx_map_ops; 6.1092 + mop = tx_map_ops; 6.1093 #else 6.1094 - if ( mcl == tx_mcl ) 6.1095 - return; 6.1096 + if (mcl == tx_mcl) 6.1097 + return; 6.1098 6.1099 - BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0); 6.1100 + BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0); 6.1101 6.1102 - mcl = tx_mcl; 6.1103 + mcl = tx_mcl; 6.1104 #endif 6.1105 - while ( (skb = __skb_dequeue(&tx_queue)) != NULL ) 6.1106 - { 6.1107 - pending_idx = *((u16 *)skb->data); 6.1108 - netif = pending_tx_info[pending_idx].netif; 6.1109 - memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq)); 6.1110 + while ((skb = __skb_dequeue(&tx_queue)) != NULL) { 6.1111 + pending_idx = *((u16 *)skb->data); 6.1112 + netif = pending_tx_info[pending_idx].netif; 6.1113 + memcpy(&txreq, &pending_tx_info[pending_idx].req, 6.1114 + sizeof(txreq)); 6.1115 6.1116 - /* Check the remap error code. */ 6.1117 + /* Check the remap error code. */ 6.1118 #ifdef CONFIG_XEN_NETDEV_GRANT 6.1119 - if ( unlikely(mop->handle < 0) ) 6.1120 - { 6.1121 - printk(KERN_ALERT "#### netback grant fails\n"); 6.1122 - make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.1123 - netif_put(netif); 6.1124 - kfree_skb(skb); 6.1125 - mop++; 6.1126 - pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 6.1127 - continue; 6.1128 - } 6.1129 - phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] = 6.1130 - FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT); 6.1131 - grant_tx_ref[pending_idx] = mop->handle; 6.1132 + if (unlikely(mop->handle < 0)) { 6.1133 + printk(KERN_ALERT "#### netback grant fails\n"); 6.1134 + make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.1135 + netif_put(netif); 6.1136 + kfree_skb(skb); 6.1137 + mop++; 6.1138 + pending_ring[MASK_PEND_IDX(pending_prod++)] = 6.1139 + pending_idx; 6.1140 + continue; 6.1141 + } 6.1142 + phys_to_machine_mapping[ 6.1143 + __pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] = 6.1144 + FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT); 6.1145 + grant_tx_ref[pending_idx] = mop->handle; 6.1146 #else 6.1147 - if ( unlikely(mcl[0].result != 0) ) 6.1148 - { 6.1149 - DPRINTK("Bad page frame\n"); 6.1150 - make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.1151 - netif_put(netif); 6.1152 - kfree_skb(skb); 6.1153 - mcl++; 6.1154 - pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx; 6.1155 - continue; 6.1156 - } 6.1157 + if (unlikely(mcl[0].result != 0)) { 6.1158 + DPRINTK("Bad page frame\n"); 6.1159 + make_tx_response(netif, txreq.id, NETIF_RSP_ERROR); 6.1160 + netif_put(netif); 6.1161 + kfree_skb(skb); 6.1162 + mcl++; 6.1163 + pending_ring[MASK_PEND_IDX(pending_prod++)] = 6.1164 + pending_idx; 6.1165 + continue; 6.1166 + } 6.1167 6.1168 - phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] = 6.1169 - FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT); 6.1170 + phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> 6.1171 + PAGE_SHIFT] = 6.1172 + FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT); 6.1173 #endif 6.1174 6.1175 - data_len = (txreq.size > PKT_PROT_LEN) ? 
PKT_PROT_LEN : txreq.size; 6.1176 + data_len = (txreq.size > PKT_PROT_LEN) ? 6.1177 + PKT_PROT_LEN : txreq.size; 6.1178 6.1179 - __skb_put(skb, data_len); 6.1180 - memcpy(skb->data, 6.1181 - (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)), 6.1182 - data_len); 6.1183 - if ( data_len < txreq.size ) 6.1184 - { 6.1185 - /* Append the packet payload as a fragment. */ 6.1186 - skb_shinfo(skb)->frags[0].page = 6.1187 - virt_to_page(MMAP_VADDR(pending_idx)); 6.1188 - skb_shinfo(skb)->frags[0].size = txreq.size - data_len; 6.1189 - skb_shinfo(skb)->frags[0].page_offset = 6.1190 - (txreq.addr + data_len) & ~PAGE_MASK; 6.1191 - skb_shinfo(skb)->nr_frags = 1; 6.1192 - } 6.1193 - else 6.1194 - { 6.1195 - /* Schedule a response immediately. */ 6.1196 - netif_idx_release(pending_idx); 6.1197 - } 6.1198 + __skb_put(skb, data_len); 6.1199 + memcpy(skb->data, 6.1200 + (void *)(MMAP_VADDR(pending_idx)| 6.1201 + (txreq.addr&~PAGE_MASK)), 6.1202 + data_len); 6.1203 + if (data_len < txreq.size) { 6.1204 + /* Append the packet payload as a fragment. */ 6.1205 + skb_shinfo(skb)->frags[0].page = 6.1206 + virt_to_page(MMAP_VADDR(pending_idx)); 6.1207 + skb_shinfo(skb)->frags[0].size = 6.1208 + txreq.size - data_len; 6.1209 + skb_shinfo(skb)->frags[0].page_offset = 6.1210 + (txreq.addr + data_len) & ~PAGE_MASK; 6.1211 + skb_shinfo(skb)->nr_frags = 1; 6.1212 + } else { 6.1213 + /* Schedule a response immediately. */ 6.1214 + netif_idx_release(pending_idx); 6.1215 + } 6.1216 6.1217 - skb->data_len = txreq.size - data_len; 6.1218 - skb->len += skb->data_len; 6.1219 + skb->data_len = txreq.size - data_len; 6.1220 + skb->len += skb->data_len; 6.1221 6.1222 - skb->dev = netif->dev; 6.1223 - skb->protocol = eth_type_trans(skb, skb->dev); 6.1224 + skb->dev = netif->dev; 6.1225 + skb->protocol = eth_type_trans(skb, skb->dev); 6.1226 6.1227 - /* No checking needed on localhost, but remember the field is blank. */ 6.1228 - skb->ip_summed = CHECKSUM_UNNECESSARY; 6.1229 - skb->proto_csum_valid = 1; 6.1230 - skb->proto_csum_blank = txreq.csum_blank; 6.1231 + /* 6.1232 + * No checking needed on localhost, but remember the field is 6.1233 + * blank. 
6.1234 + */ 6.1235 + skb->ip_summed = CHECKSUM_UNNECESSARY; 6.1236 + skb->proto_csum_valid = 1; 6.1237 + skb->proto_csum_blank = txreq.csum_blank; 6.1238 6.1239 - netif->stats.rx_bytes += txreq.size; 6.1240 - netif->stats.rx_packets++; 6.1241 + netif->stats.rx_bytes += txreq.size; 6.1242 + netif->stats.rx_packets++; 6.1243 6.1244 - netif_rx(skb); 6.1245 - netif->dev->last_rx = jiffies; 6.1246 + netif_rx(skb); 6.1247 + netif->dev->last_rx = jiffies; 6.1248 6.1249 #ifdef CONFIG_XEN_NETDEV_GRANT 6.1250 - mop++; 6.1251 + mop++; 6.1252 #else 6.1253 - mcl++; 6.1254 + mcl++; 6.1255 #endif 6.1256 - } 6.1257 + } 6.1258 } 6.1259 6.1260 static void netif_idx_release(u16 pending_idx) 6.1261 { 6.1262 - static spinlock_t _lock = SPIN_LOCK_UNLOCKED; 6.1263 - unsigned long flags; 6.1264 + static spinlock_t _lock = SPIN_LOCK_UNLOCKED; 6.1265 + unsigned long flags; 6.1266 6.1267 - spin_lock_irqsave(&_lock, flags); 6.1268 - dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx; 6.1269 - spin_unlock_irqrestore(&_lock, flags); 6.1270 + spin_lock_irqsave(&_lock, flags); 6.1271 + dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx; 6.1272 + spin_unlock_irqrestore(&_lock, flags); 6.1273 6.1274 - tasklet_schedule(&net_tx_tasklet); 6.1275 + tasklet_schedule(&net_tx_tasklet); 6.1276 } 6.1277 6.1278 static void netif_page_release(struct page *page) 6.1279 { 6.1280 - u16 pending_idx = page - virt_to_page(mmap_vstart); 6.1281 + u16 pending_idx = page - virt_to_page(mmap_vstart); 6.1282 6.1283 - /* Ready for next use. */ 6.1284 - set_page_count(page, 1); 6.1285 + /* Ready for next use. */ 6.1286 + set_page_count(page, 1); 6.1287 6.1288 - netif_idx_release(pending_idx); 6.1289 + netif_idx_release(pending_idx); 6.1290 } 6.1291 6.1292 irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs) 6.1293 { 6.1294 - netif_t *netif = dev_id; 6.1295 - if ( tx_work_exists(netif) ) 6.1296 - { 6.1297 - add_to_net_schedule_list_tail(netif); 6.1298 - maybe_schedule_tx_action(); 6.1299 - } 6.1300 - return IRQ_HANDLED; 6.1301 + netif_t *netif = dev_id; 6.1302 + if (tx_work_exists(netif)) { 6.1303 + add_to_net_schedule_list_tail(netif); 6.1304 + maybe_schedule_tx_action(); 6.1305 + } 6.1306 + return IRQ_HANDLED; 6.1307 } 6.1308 6.1309 static void make_tx_response(netif_t *netif, 6.1310 u16 id, 6.1311 s8 st) 6.1312 { 6.1313 - NETIF_RING_IDX i = netif->tx_resp_prod; 6.1314 - netif_tx_response_t *resp; 6.1315 + NETIF_RING_IDX i = netif->tx_resp_prod; 6.1316 + netif_tx_response_t *resp; 6.1317 6.1318 - resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp; 6.1319 - resp->id = id; 6.1320 - resp->status = st; 6.1321 - wmb(); 6.1322 - netif->tx->resp_prod = netif->tx_resp_prod = ++i; 6.1323 + resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp; 6.1324 + resp->id = id; 6.1325 + resp->status = st; 6.1326 + wmb(); 6.1327 + netif->tx->resp_prod = netif->tx_resp_prod = ++i; 6.1328 6.1329 - mb(); /* Update producer before checking event threshold. */ 6.1330 - if ( i == netif->tx->event ) 6.1331 - notify_via_evtchn(netif->evtchn); 6.1332 + mb(); /* Update producer before checking event threshold. 
*/ 6.1333 + if (i == netif->tx->event) 6.1334 + notify_via_evtchn(netif->evtchn); 6.1335 } 6.1336 6.1337 static int make_rx_response(netif_t *netif, 6.1338 @@ -882,110 +878,120 @@ static int make_rx_response(netif_t *net 6.1339 u16 size, 6.1340 u16 csum_valid) 6.1341 { 6.1342 - NETIF_RING_IDX i = netif->rx_resp_prod; 6.1343 - netif_rx_response_t *resp; 6.1344 + NETIF_RING_IDX i = netif->rx_resp_prod; 6.1345 + netif_rx_response_t *resp; 6.1346 6.1347 - resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp; 6.1348 - resp->addr = addr; 6.1349 - resp->csum_valid = csum_valid; 6.1350 - resp->id = id; 6.1351 - resp->status = (s16)size; 6.1352 - if ( st < 0 ) 6.1353 - resp->status = (s16)st; 6.1354 - wmb(); 6.1355 - netif->rx->resp_prod = netif->rx_resp_prod = ++i; 6.1356 + resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp; 6.1357 + resp->addr = addr; 6.1358 + resp->csum_valid = csum_valid; 6.1359 + resp->id = id; 6.1360 + resp->status = (s16)size; 6.1361 + if (st < 0) 6.1362 + resp->status = (s16)st; 6.1363 + wmb(); 6.1364 + netif->rx->resp_prod = netif->rx_resp_prod = ++i; 6.1365 6.1366 - mb(); /* Update producer before checking event threshold. */ 6.1367 - return (i == netif->rx->event); 6.1368 + mb(); /* Update producer before checking event threshold. */ 6.1369 + return (i == netif->rx->event); 6.1370 } 6.1371 6.1372 static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs) 6.1373 { 6.1374 - struct list_head *ent; 6.1375 - netif_t *netif; 6.1376 - int i = 0; 6.1377 + struct list_head *ent; 6.1378 + netif_t *netif; 6.1379 + int i = 0; 6.1380 6.1381 - printk(KERN_ALERT "netif_schedule_list:\n"); 6.1382 - spin_lock_irq(&net_schedule_list_lock); 6.1383 + printk(KERN_ALERT "netif_schedule_list:\n"); 6.1384 + spin_lock_irq(&net_schedule_list_lock); 6.1385 6.1386 - list_for_each ( ent, &net_schedule_list ) 6.1387 - { 6.1388 - netif = list_entry(ent, netif_t, list); 6.1389 - printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n", 6.1390 - i, netif->rx_req_cons, netif->rx_resp_prod); 6.1391 - printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n", 6.1392 - netif->tx_req_cons, netif->tx_resp_prod); 6.1393 - printk(KERN_ALERT " shared(rx_req_prod=%08x rx_resp_prod=%08x\n", 6.1394 - netif->rx->req_prod, netif->rx->resp_prod); 6.1395 - printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n", 6.1396 - netif->rx->event, netif->tx->req_prod); 6.1397 - printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n", 6.1398 - netif->tx->resp_prod, netif->tx->event); 6.1399 - i++; 6.1400 - } 6.1401 + list_for_each (ent, &net_schedule_list) { 6.1402 + netif = list_entry(ent, netif_t, list); 6.1403 + printk(KERN_ALERT " %d: private(rx_req_cons=%08x " 6.1404 + "rx_resp_prod=%08x\n", 6.1405 + i, netif->rx_req_cons, netif->rx_resp_prod); 6.1406 + printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n", 6.1407 + netif->tx_req_cons, netif->tx_resp_prod); 6.1408 + printk(KERN_ALERT " shared(rx_req_prod=%08x " 6.1409 + "rx_resp_prod=%08x\n", 6.1410 + netif->rx->req_prod, netif->rx->resp_prod); 6.1411 + printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n", 6.1412 + netif->rx->event, netif->tx->req_prod); 6.1413 + printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n", 6.1414 + netif->tx->resp_prod, netif->tx->event); 6.1415 + i++; 6.1416 + } 6.1417 6.1418 - spin_unlock_irq(&net_schedule_list_lock); 6.1419 - printk(KERN_ALERT " ** End of netif_schedule_list **\n"); 6.1420 + spin_unlock_irq(&net_schedule_list_lock); 6.1421 + printk(KERN_ALERT " ** End of netif_schedule_list **\n"); 6.1422 
6.1423 - return IRQ_HANDLED; 6.1424 + return IRQ_HANDLED; 6.1425 } 6.1426 6.1427 static int __init netback_init(void) 6.1428 { 6.1429 - int i; 6.1430 - struct page *page; 6.1431 + int i; 6.1432 + struct page *page; 6.1433 6.1434 - if ( !(xen_start_info->flags & SIF_NET_BE_DOMAIN) && 6.1435 - !(xen_start_info->flags & SIF_INITDOMAIN) ) 6.1436 - return 0; 6.1437 + if (!(xen_start_info->flags & SIF_NET_BE_DOMAIN) && 6.1438 + !(xen_start_info->flags & SIF_INITDOMAIN)) 6.1439 + return 0; 6.1440 6.1441 - IPRINTK("Initialising Xen netif backend.\n"); 6.1442 + IPRINTK("Initialising Xen netif backend.\n"); 6.1443 #ifdef CONFIG_XEN_NETDEV_GRANT 6.1444 - IPRINTK("Using grant tables.\n"); 6.1445 + IPRINTK("Using grant tables.\n"); 6.1446 #endif 6.1447 6.1448 - /* We can increase reservation by this much in net_rx_action(). */ 6.1449 - balloon_update_driver_allowance(NETIF_RX_RING_SIZE); 6.1450 + /* We can increase reservation by this much in net_rx_action(). */ 6.1451 + balloon_update_driver_allowance(NETIF_RX_RING_SIZE); 6.1452 6.1453 - skb_queue_head_init(&rx_queue); 6.1454 - skb_queue_head_init(&tx_queue); 6.1455 + skb_queue_head_init(&rx_queue); 6.1456 + skb_queue_head_init(&tx_queue); 6.1457 6.1458 - init_timer(&net_timer); 6.1459 - net_timer.data = 0; 6.1460 - net_timer.function = net_alarm; 6.1461 + init_timer(&net_timer); 6.1462 + net_timer.data = 0; 6.1463 + net_timer.function = net_alarm; 6.1464 6.1465 - page = balloon_alloc_empty_page_range(MAX_PENDING_REQS); 6.1466 - BUG_ON(page == NULL); 6.1467 - mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 6.1468 + page = balloon_alloc_empty_page_range(MAX_PENDING_REQS); 6.1469 + BUG_ON(page == NULL); 6.1470 + mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); 6.1471 6.1472 - for ( i = 0; i < MAX_PENDING_REQS; i++ ) 6.1473 - { 6.1474 - page = virt_to_page(MMAP_VADDR(i)); 6.1475 - set_page_count(page, 1); 6.1476 - SetPageForeign(page, netif_page_release); 6.1477 - } 6.1478 + for (i = 0; i < MAX_PENDING_REQS; i++) { 6.1479 + page = virt_to_page(MMAP_VADDR(i)); 6.1480 + set_page_count(page, 1); 6.1481 + SetPageForeign(page, netif_page_release); 6.1482 + } 6.1483 + 6.1484 + pending_cons = 0; 6.1485 + pending_prod = MAX_PENDING_REQS; 6.1486 + for (i = 0; i < MAX_PENDING_REQS; i++) 6.1487 + pending_ring[i] = i; 6.1488 6.1489 - pending_cons = 0; 6.1490 - pending_prod = MAX_PENDING_REQS; 6.1491 - for ( i = 0; i < MAX_PENDING_REQS; i++ ) 6.1492 - pending_ring[i] = i; 6.1493 + spin_lock_init(&net_schedule_list_lock); 6.1494 + INIT_LIST_HEAD(&net_schedule_list); 6.1495 6.1496 - spin_lock_init(&net_schedule_list_lock); 6.1497 - INIT_LIST_HEAD(&net_schedule_list); 6.1498 + netif_xenbus_init(); 6.1499 6.1500 - netif_xenbus_init(); 6.1501 + (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG), 6.1502 + netif_be_dbg, SA_SHIRQ, 6.1503 + "net-be-dbg", &netif_be_dbg); 6.1504 6.1505 - (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG), 6.1506 - netif_be_dbg, SA_SHIRQ, 6.1507 - "net-be-dbg", &netif_be_dbg); 6.1508 - 6.1509 - return 0; 6.1510 + return 0; 6.1511 } 6.1512 6.1513 static void netback_cleanup(void) 6.1514 { 6.1515 - BUG(); 6.1516 + BUG(); 6.1517 } 6.1518 6.1519 module_init(netback_init); 6.1520 module_exit(netback_cleanup); 6.1521 + 6.1522 +/* 6.1523 + * Local variables: 6.1524 + * c-file-style: "linux" 6.1525 + * indent-tabs-mode: t 6.1526 + * c-indent-level: 8 6.1527 + * c-basic-offset: 8 6.1528 + * tab-width: 8 6.1529 + * End: 6.1530 + */
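[Editorial sketch — not part of the changeset.] The netback.c hunks above are almost entirely an indentation/brace-style conversion; the behaviour that matters and is preserved is the barrier discipline around the shared TX ring: fill in the response slot, wmb(), publish the new resp_prod, mb(), and only then compare against the frontend's event threshold (make_tx_response), and likewise mb() before re-checking req_prod for more work (net_tx_action). Below is a minimal user-space sketch of that ordering. It is an illustration under stated assumptions, not driver code: wmb()/mb() are mapped to a GCC full-barrier builtin, and post_tx_response()/notify_frontend() are hypothetical stand-ins for make_tx_response()/notify_via_evtchn().

/*
 * Minimal sketch of the response/notify ordering used by make_tx_response().
 * All names here are illustrative; the barrier macros are stand-ins for the
 * kernel's wmb()/mb().
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256
#define MASK(i)   ((i) & (RING_SIZE - 1))

#define wmb() __sync_synchronize()
#define mb()  __sync_synchronize()

struct tx_resp {
    uint16_t id;
    int8_t   status;
};

struct shared_ring {
    volatile uint32_t resp_prod;  /* bumped by the backend, read by the frontend */
    volatile uint32_t event;      /* frontend asks to be kicked at this index */
    struct tx_resp    ring[RING_SIZE];
};

/* Stand-in for notify_via_evtchn(): just report the kick. */
static void notify_frontend(void)
{
    printf("kick frontend\n");
}

/*
 * Mirror of the ordering above: fill the response, wmb(), publish the
 * producer index, mb(), then test the event threshold.
 */
static void post_tx_response(struct shared_ring *sh, uint32_t *resp_prod_priv,
                             uint16_t id, int8_t status)
{
    uint32_t i = *resp_prod_priv;
    struct tx_resp *resp = &sh->ring[MASK(i)];

    resp->id = id;
    resp->status = status;
    wmb();                              /* response visible before producer bump */
    sh->resp_prod = *resp_prod_priv = i + 1;

    mb();                               /* producer bump visible before event check */
    if (i + 1 == sh->event)             /* only kick when we cross the threshold */
        notify_frontend();
}

int main(void)
{
    static struct shared_ring sh = { .event = 1 };
    uint32_t resp_prod_priv = 0;

    post_tx_response(&sh, &resp_prod_priv, 42, 0);  /* crosses event == 1, so it kicks */
    return 0;
}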
7.1 --- a/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Fri Sep 16 18:06:42 2005 +0000 7.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/xenbus.c Fri Sep 16 18:07:50 2005 +0000 7.3 @@ -294,3 +294,13 @@ void netif_xenbus_init(void) 7.4 { 7.5 xenbus_register_backend(&netback); 7.6 } 7.7 + 7.8 +/* 7.9 + * Local variables: 7.10 + * c-file-style: "linux" 7.11 + * indent-tabs-mode: t 7.12 + * c-indent-level: 8 7.13 + * c-basic-offset: 8 7.14 + * tab-width: 8 7.15 + * End: 7.16 + */
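[Editorial sketch — not part of the changeset.] Besides the reindentation, the netfront.c hunks that follow make one structural change: the grant-reference state (gref_tx_head, grant_tx_ref[], gref_rx_head, grant_rx_ref[]) moves from file-scope statics into struct net_private, so each interface owns its own pool instead of all devices sharing one. The toy model below illustrates that per-device freelist pattern only; claim_ref()/release_ref() are hypothetical stand-ins for gnttab_claim_grant_reference()/gnttab_release_grant_reference(), and the sizes and field names are illustrative.

/*
 * Toy, user-space model of per-device grant-reference bookkeeping.
 * Not the Xen grant-table API: claim_ref()/release_ref() stand in for the
 * gnttab_* calls used in the hunks below.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TX_RING_SIZE 256
#define INVALID_REF  0xFFFF

typedef uint16_t ref_t;

struct vif {
    /* Simple LIFO freelist of references owned by this device only. */
    ref_t free_stack[TX_RING_SIZE];
    int   free_top;
    ref_t tx_ref[TX_RING_SIZE + 1];   /* per-slot ref, cf. np->grant_tx_ref[] */
};

static void vif_init(struct vif *vif, ref_t base)
{
    vif->free_top = 0;
    for (int i = 0; i < TX_RING_SIZE; i++)
        vif->free_stack[vif->free_top++] = base + i;
    for (int i = 0; i <= TX_RING_SIZE; i++)
        vif->tx_ref[i] = INVALID_REF;
}

/* cf. gnttab_claim_grant_reference(&np->gref_tx_head) */
static ref_t claim_ref(struct vif *vif)
{
    assert(vif->free_top > 0);
    return vif->free_stack[--vif->free_top];
}

/* cf. gnttab_release_grant_reference(&np->gref_tx_head, ref) */
static void release_ref(struct vif *vif, ref_t r)
{
    vif->free_stack[vif->free_top++] = r;
}

int main(void)
{
    struct vif a, b;

    vif_init(&a, 0);
    vif_init(&b, 1000);               /* two devices no longer share one global pool */

    int slot = 7;
    a.tx_ref[slot] = claim_ref(&a);   /* on transmit: take a ref for this slot */
    printf("vif a slot %d uses ref %u\n", slot, a.tx_ref[slot]);

    release_ref(&a, a.tx_ref[slot]);  /* on tx completion: put it back */
    a.tx_ref[slot] = INVALID_REF;
    return 0;
}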
8.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Fri Sep 16 18:06:42 2005 +0000 8.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Fri Sep 16 18:07:50 2005 +0000 8.3 @@ -54,44 +54,11 @@ 8.4 #include <asm-xen/balloon.h> 8.5 #include <asm/page.h> 8.6 #include <asm/uaccess.h> 8.7 - 8.8 -#ifdef CONFIG_XEN_NETDEV_GRANT 8.9 #include <asm-xen/xen-public/grant_table.h> 8.10 #include <asm-xen/gnttab.h> 8.11 8.12 -static grant_ref_t gref_tx_head; 8.13 -static grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 8.14 - 8.15 -static grant_ref_t gref_rx_head; 8.16 -static grant_ref_t grant_rx_ref[NETIF_RX_RING_SIZE + 1]; 8.17 - 8.18 #define GRANT_INVALID_REF (0xFFFF) 8.19 8.20 -#ifdef GRANT_DEBUG 8.21 -static void 8.22 -dump_packet(int tag, void *addr, u32 ap) 8.23 -{ 8.24 - unsigned char *p = (unsigned char *)ap; 8.25 - int i; 8.26 - 8.27 - printk(KERN_ALERT "#### rx_poll %c %08x ", tag & 0xff, addr); 8.28 - for (i = 0; i < 20; i++) { 8.29 - printk("%02x", p[i]); 8.30 - } 8.31 - printk("\n"); 8.32 -} 8.33 - 8.34 -#define GDPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \ 8.35 - __FILE__ , __LINE__ , ## _a ) 8.36 -#else 8.37 -#define dump_packet(x,y,z) ((void)0) 8.38 -#define GDPRINTK(_f, _a...) ((void)0) 8.39 -#endif 8.40 - 8.41 -#endif 8.42 - 8.43 - 8.44 - 8.45 #ifndef __GFP_NOWARN 8.46 #define __GFP_NOWARN 0 8.47 #endif 8.48 @@ -124,7 +91,6 @@ dump_packet(int tag, void *addr, u32 ap) 8.49 #define NETIF_STATE_DISCONNECTED 0 8.50 #define NETIF_STATE_CONNECTED 1 8.51 8.52 - 8.53 static unsigned int netif_state = NETIF_STATE_DISCONNECTED; 8.54 8.55 static void network_tx_buf_gc(struct net_device *dev); 8.56 @@ -147,45 +113,50 @@ static void xennet_proc_delif(struct net 8.57 #define netfront_info net_private 8.58 struct net_private 8.59 { 8.60 - struct list_head list; 8.61 - struct net_device *netdev; 8.62 + struct list_head list; 8.63 + struct net_device *netdev; 8.64 8.65 - struct net_device_stats stats; 8.66 - NETIF_RING_IDX rx_resp_cons, tx_resp_cons; 8.67 - unsigned int tx_full; 8.68 + struct net_device_stats stats; 8.69 + NETIF_RING_IDX rx_resp_cons, tx_resp_cons; 8.70 + unsigned int tx_full; 8.71 8.72 - netif_tx_interface_t *tx; 8.73 - netif_rx_interface_t *rx; 8.74 + netif_tx_interface_t *tx; 8.75 + netif_rx_interface_t *rx; 8.76 8.77 - spinlock_t tx_lock; 8.78 - spinlock_t rx_lock; 8.79 + spinlock_t tx_lock; 8.80 + spinlock_t rx_lock; 8.81 8.82 - unsigned int handle; 8.83 - unsigned int evtchn; 8.84 + unsigned int handle; 8.85 + unsigned int evtchn; 8.86 8.87 - /* What is the status of our connection to the remote backend? */ 8.88 + /* What is the status of our connection to the remote backend? */ 8.89 #define BEST_CLOSED 0 8.90 #define BEST_DISCONNECTED 1 8.91 #define BEST_CONNECTED 2 8.92 - unsigned int backend_state; 8.93 + unsigned int backend_state; 8.94 8.95 - /* Is this interface open or closed (down or up)? */ 8.96 + /* Is this interface open or closed (down or up)? */ 8.97 #define UST_CLOSED 0 8.98 #define UST_OPEN 1 8.99 - unsigned int user_state; 8.100 + unsigned int user_state; 8.101 8.102 - /* Receive-ring batched refills. */ 8.103 + /* Receive-ring batched refills. */ 8.104 #define RX_MIN_TARGET 8 8.105 #define RX_MAX_TARGET NETIF_RX_RING_SIZE 8.106 - int rx_min_target, rx_max_target, rx_target; 8.107 - struct sk_buff_head rx_batch; 8.108 + int rx_min_target, rx_max_target, rx_target; 8.109 + struct sk_buff_head rx_batch; 8.110 8.111 - /* 8.112 - * {tx,rx}_skbs store outstanding skbuffs. 
The first entry in each 8.113 - * array is an index into a chain of free entries. 8.114 - */ 8.115 - struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1]; 8.116 - struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1]; 8.117 + /* 8.118 + * {tx,rx}_skbs store outstanding skbuffs. The first entry in each 8.119 + * array is an index into a chain of free entries. 8.120 + */ 8.121 + struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1]; 8.122 + struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1]; 8.123 + 8.124 + grant_ref_t gref_tx_head; 8.125 + grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 8.126 + grant_ref_t gref_rx_head; 8.127 + grant_ref_t grant_rx_ref[NETIF_TX_RING_SIZE + 1]; 8.128 8.129 struct xenbus_device *xbdev; 8.130 char *backend; 8.131 @@ -197,32 +168,32 @@ struct net_private 8.132 }; 8.133 8.134 /* Access macros for acquiring freeing slots in {tx,rx}_skbs[]. */ 8.135 -#define ADD_ID_TO_FREELIST(_list, _id) \ 8.136 - (_list)[(_id)] = (_list)[0]; \ 8.137 - (_list)[0] = (void *)(unsigned long)(_id); 8.138 -#define GET_ID_FROM_FREELIST(_list) \ 8.139 - ({ unsigned long _id = (unsigned long)(_list)[0]; \ 8.140 - (_list)[0] = (_list)[_id]; \ 8.141 - (unsigned short)_id; }) 8.142 +#define ADD_ID_TO_FREELIST(_list, _id) \ 8.143 + (_list)[(_id)] = (_list)[0]; \ 8.144 + (_list)[0] = (void *)(unsigned long)(_id); 8.145 +#define GET_ID_FROM_FREELIST(_list) \ 8.146 + ({ unsigned long _id = (unsigned long)(_list)[0]; \ 8.147 + (_list)[0] = (_list)[_id]; \ 8.148 + (unsigned short)_id; }) 8.149 8.150 #ifdef DEBUG 8.151 static char *be_state_name[] = { 8.152 - [BEST_CLOSED] = "closed", 8.153 - [BEST_DISCONNECTED] = "disconnected", 8.154 - [BEST_CONNECTED] = "connected", 8.155 + [BEST_CLOSED] = "closed", 8.156 + [BEST_DISCONNECTED] = "disconnected", 8.157 + [BEST_CONNECTED] = "connected", 8.158 }; 8.159 #endif 8.160 8.161 #ifdef DEBUG 8.162 #define DPRINTK(fmt, args...) \ 8.163 - printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args) 8.164 + printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args) 8.165 #else 8.166 #define DPRINTK(fmt, args...) ((void)0) 8.167 #endif 8.168 #define IPRINTK(fmt, args...) \ 8.169 - printk(KERN_INFO "xen_net: " fmt, ##args) 8.170 + printk(KERN_INFO "xen_net: " fmt, ##args) 8.171 #define WPRINTK(fmt, args...) \ 8.172 - printk(KERN_WARNING "xen_net: " fmt, ##args) 8.173 + printk(KERN_WARNING "xen_net: " fmt, ##args) 8.174 8.175 /** Send a packet on a net device to encourage switches to learn the 8.176 * MAC. We send a fake ARP request. 8.177 @@ -232,625 +203,627 @@ static char *be_state_name[] = { 8.178 */ 8.179 static int send_fake_arp(struct net_device *dev) 8.180 { 8.181 - struct sk_buff *skb; 8.182 - u32 src_ip, dst_ip; 8.183 + struct sk_buff *skb; 8.184 + u32 src_ip, dst_ip; 8.185 8.186 - dst_ip = INADDR_BROADCAST; 8.187 - src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); 8.188 + dst_ip = INADDR_BROADCAST; 8.189 + src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK); 8.190 8.191 - /* No IP? Then nothing to do. */ 8.192 - if (src_ip == 0) 8.193 - return 0; 8.194 + /* No IP? Then nothing to do. 
*/ 8.195 + if (src_ip == 0) 8.196 + return 0; 8.197 8.198 - skb = arp_create(ARPOP_REPLY, ETH_P_ARP, 8.199 - dst_ip, dev, src_ip, 8.200 - /*dst_hw*/ NULL, /*src_hw*/ NULL, 8.201 - /*target_hw*/ dev->dev_addr); 8.202 - if (skb == NULL) 8.203 - return -ENOMEM; 8.204 + skb = arp_create(ARPOP_REPLY, ETH_P_ARP, 8.205 + dst_ip, dev, src_ip, 8.206 + /*dst_hw*/ NULL, /*src_hw*/ NULL, 8.207 + /*target_hw*/ dev->dev_addr); 8.208 + if (skb == NULL) 8.209 + return -ENOMEM; 8.210 8.211 - return dev_queue_xmit(skb); 8.212 + return dev_queue_xmit(skb); 8.213 } 8.214 8.215 static int network_open(struct net_device *dev) 8.216 { 8.217 - struct net_private *np = netdev_priv(dev); 8.218 + struct net_private *np = netdev_priv(dev); 8.219 8.220 - memset(&np->stats, 0, sizeof(np->stats)); 8.221 + memset(&np->stats, 0, sizeof(np->stats)); 8.222 8.223 - np->user_state = UST_OPEN; 8.224 + np->user_state = UST_OPEN; 8.225 8.226 - network_alloc_rx_buffers(dev); 8.227 - np->rx->event = np->rx_resp_cons + 1; 8.228 + network_alloc_rx_buffers(dev); 8.229 + np->rx->event = np->rx_resp_cons + 1; 8.230 8.231 - netif_start_queue(dev); 8.232 + netif_start_queue(dev); 8.233 8.234 - return 0; 8.235 + return 0; 8.236 } 8.237 8.238 static void network_tx_buf_gc(struct net_device *dev) 8.239 { 8.240 - NETIF_RING_IDX i, prod; 8.241 - unsigned short id; 8.242 - struct net_private *np = netdev_priv(dev); 8.243 - struct sk_buff *skb; 8.244 + NETIF_RING_IDX i, prod; 8.245 + unsigned short id; 8.246 + struct net_private *np = netdev_priv(dev); 8.247 + struct sk_buff *skb; 8.248 8.249 - if (np->backend_state != BEST_CONNECTED) 8.250 - return; 8.251 + if (np->backend_state != BEST_CONNECTED) 8.252 + return; 8.253 8.254 - do { 8.255 - prod = np->tx->resp_prod; 8.256 - rmb(); /* Ensure we see responses up to 'rp'. */ 8.257 + do { 8.258 + prod = np->tx->resp_prod; 8.259 + rmb(); /* Ensure we see responses up to 'rp'. 
*/ 8.260 8.261 - for (i = np->tx_resp_cons; i != prod; i++) { 8.262 - id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id; 8.263 - skb = np->tx_skbs[id]; 8.264 + for (i = np->tx_resp_cons; i != prod; i++) { 8.265 + id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id; 8.266 + skb = np->tx_skbs[id]; 8.267 #ifdef CONFIG_XEN_NETDEV_GRANT 8.268 - if (unlikely(gnttab_query_foreign_access(grant_tx_ref[id]) != 0)) { 8.269 - /* other domain is still using this grant - shouldn't happen 8.270 - but if it does, we'll try to reclaim the grant later */ 8.271 - printk(KERN_ALERT "network_tx_buf_gc: warning -- grant " 8.272 - "still in use by backend domain.\n"); 8.273 - goto out; 8.274 - } 8.275 - gnttab_end_foreign_access_ref(grant_tx_ref[id], GNTMAP_readonly); 8.276 - gnttab_release_grant_reference(&gref_tx_head, grant_tx_ref[id]); 8.277 - grant_tx_ref[id] = GRANT_INVALID_REF; 8.278 + if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) { 8.279 + printk(KERN_ALERT "network_tx_buf_gc: warning " 8.280 + "-- grant still in use by backend " 8.281 + "domain.\n"); 8.282 + goto out; 8.283 + } 8.284 + gnttab_end_foreign_access_ref( 8.285 + np->grant_tx_ref[id], GNTMAP_readonly); 8.286 + gnttab_release_grant_reference( 8.287 + &np->gref_tx_head, np->grant_tx_ref[id]); 8.288 + np->grant_tx_ref[id] = GRANT_INVALID_REF; 8.289 #endif 8.290 - ADD_ID_TO_FREELIST(np->tx_skbs, id); 8.291 - dev_kfree_skb_irq(skb); 8.292 - } 8.293 + ADD_ID_TO_FREELIST(np->tx_skbs, id); 8.294 + dev_kfree_skb_irq(skb); 8.295 + } 8.296 8.297 - np->tx_resp_cons = prod; 8.298 + np->tx_resp_cons = prod; 8.299 8.300 - /* 8.301 - * Set a new event, then check for race with update of tx_cons. Note 8.302 - * that it is essential to schedule a callback, no matter how few 8.303 - * buffers are pending. Even if there is space in the transmit ring, 8.304 - * higher layers may be blocked because too much data is outstanding: 8.305 - * in such cases notification from Xen is likely to be the only kick 8.306 - * that we'll get. 8.307 - */ 8.308 - np->tx->event = 8.309 - prod + ((np->tx->req_prod - prod) >> 1) + 1; 8.310 - mb(); 8.311 - } while (prod != np->tx->resp_prod); 8.312 + /* 8.313 + * Set a new event, then check for race with update of tx_cons. 8.314 + * Note that it is essential to schedule a callback, no matter 8.315 + * how few buffers are pending. Even if there is space in the 8.316 + * transmit ring, higher layers may be blocked because too much 8.317 + * data is outstanding: in such cases notification from Xen is 8.318 + * likely to be the only kick that we'll get. 
8.319 + */ 8.320 + np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1; 8.321 + mb(); 8.322 + } while (prod != np->tx->resp_prod); 8.323 8.324 #ifdef CONFIG_XEN_NETDEV_GRANT 8.325 - out: 8.326 + out: 8.327 #endif 8.328 8.329 - if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) { 8.330 - np->tx_full = 0; 8.331 - if (np->user_state == UST_OPEN) 8.332 - netif_wake_queue(dev); 8.333 - } 8.334 + if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) { 8.335 + np->tx_full = 0; 8.336 + if (np->user_state == UST_OPEN) 8.337 + netif_wake_queue(dev); 8.338 + } 8.339 } 8.340 8.341 8.342 static void network_alloc_rx_buffers(struct net_device *dev) 8.343 { 8.344 - unsigned short id; 8.345 - struct net_private *np = netdev_priv(dev); 8.346 - struct sk_buff *skb; 8.347 - int i, batch_target; 8.348 - NETIF_RING_IDX req_prod = np->rx->req_prod; 8.349 - struct xen_memory_reservation reservation; 8.350 + unsigned short id; 8.351 + struct net_private *np = netdev_priv(dev); 8.352 + struct sk_buff *skb; 8.353 + int i, batch_target; 8.354 + NETIF_RING_IDX req_prod = np->rx->req_prod; 8.355 + struct xen_memory_reservation reservation; 8.356 #ifdef CONFIG_XEN_NETDEV_GRANT 8.357 - grant_ref_t ref; 8.358 + grant_ref_t ref; 8.359 #endif 8.360 8.361 - if (unlikely(np->backend_state != BEST_CONNECTED)) 8.362 - return; 8.363 + if (unlikely(np->backend_state != BEST_CONNECTED)) 8.364 + return; 8.365 8.366 - /* 8.367 - * Allocate skbuffs greedily, even though we batch updates to the 8.368 - * receive ring. This creates a less bursty demand on the memory allocator, 8.369 - * so should reduce the chance of failed allocation requests both for 8.370 - * ourself and for other kernel subsystems. 8.371 - */ 8.372 - batch_target = np->rx_target - (req_prod - np->rx_resp_cons); 8.373 - for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { 8.374 - if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL)) 8.375 - break; 8.376 - __skb_queue_tail(&np->rx_batch, skb); 8.377 - } 8.378 - 8.379 - /* Is the batch large enough to be worthwhile? */ 8.380 - if (i < (np->rx_target/2)) 8.381 - return; 8.382 + /* 8.383 + * Allocate skbuffs greedily, even though we batch updates to the 8.384 + * receive ring. This creates a less bursty demand on the memory 8.385 + * allocator, so should reduce the chance of failed allocation requests 8.386 + * both for ourself and for other kernel subsystems. 8.387 + */ 8.388 + batch_target = np->rx_target - (req_prod - np->rx_resp_cons); 8.389 + for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { 8.390 + skb = alloc_xen_skb(dev->mtu + RX_HEADROOM); 8.391 + if (skb == NULL) 8.392 + break; 8.393 + __skb_queue_tail(&np->rx_batch, skb); 8.394 + } 8.395 8.396 - for (i = 0; ; i++) { 8.397 - if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) 8.398 - break; 8.399 + /* Is the batch large enough to be worthwhile? 
*/ 8.400 + if (i < (np->rx_target/2)) 8.401 + return; 8.402 8.403 - skb->dev = dev; 8.404 + for (i = 0; ; i++) { 8.405 + if ((skb = __skb_dequeue(&np->rx_batch)) == NULL) 8.406 + break; 8.407 8.408 - id = GET_ID_FROM_FREELIST(np->rx_skbs); 8.409 + skb->dev = dev; 8.410 8.411 - np->rx_skbs[id] = skb; 8.412 + id = GET_ID_FROM_FREELIST(np->rx_skbs); 8.413 + 8.414 + np->rx_skbs[id] = skb; 8.415 8.416 - np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id; 8.417 + np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id; 8.418 #ifdef CONFIG_XEN_NETDEV_GRANT 8.419 - ref = gnttab_claim_grant_reference(&gref_rx_head); 8.420 - if (unlikely((signed short)ref < 0)) { 8.421 - printk(KERN_ALERT "#### netfront can't claim rx reference\n"); 8.422 - BUG(); 8.423 - } 8.424 - grant_rx_ref[id] = ref; 8.425 - gnttab_grant_foreign_transfer_ref(ref, np->backend_id); 8.426 - np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref; 8.427 + ref = gnttab_claim_grant_reference(&np->gref_rx_head); 8.428 + BUG_ON((signed short)ref < 0); 8.429 + np->grant_rx_ref[id] = ref; 8.430 + gnttab_grant_foreign_transfer_ref(ref, np->backend_id); 8.431 + np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref; 8.432 #endif 8.433 - rx_pfn_array[i] = virt_to_mfn(skb->head); 8.434 + rx_pfn_array[i] = virt_to_mfn(skb->head); 8.435 8.436 - /* Remove this page from pseudo phys map before passing back to Xen. */ 8.437 - phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 8.438 - = INVALID_P2M_ENTRY; 8.439 + /* Remove this page from map before passing back to Xen. */ 8.440 + phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] 8.441 + = INVALID_P2M_ENTRY; 8.442 8.443 - MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head, 8.444 - __pte(0), 0); 8.445 - } 8.446 + MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head, 8.447 + __pte(0), 0); 8.448 + } 8.449 8.450 - /* After all PTEs have been zapped we blow away stale TLB entries. */ 8.451 - rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 8.452 + /* After all PTEs have been zapped we blow away stale TLB entries. */ 8.453 + rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 8.454 8.455 - /* Give away a batch of pages. */ 8.456 - rx_mcl[i].op = __HYPERVISOR_memory_op; 8.457 - rx_mcl[i].args[0] = XENMEM_decrease_reservation; 8.458 - rx_mcl[i].args[1] = (unsigned long)&reservation; 8.459 + /* Give away a batch of pages. */ 8.460 + rx_mcl[i].op = __HYPERVISOR_memory_op; 8.461 + rx_mcl[i].args[0] = XENMEM_decrease_reservation; 8.462 + rx_mcl[i].args[1] = (unsigned long)&reservation; 8.463 8.464 - reservation.extent_start = rx_pfn_array; 8.465 - reservation.nr_extents = i; 8.466 - reservation.extent_order = 0; 8.467 - reservation.address_bits = 0; 8.468 - reservation.domid = DOMID_SELF; 8.469 + reservation.extent_start = rx_pfn_array; 8.470 + reservation.nr_extents = i; 8.471 + reservation.extent_order = 0; 8.472 + reservation.address_bits = 0; 8.473 + reservation.domid = DOMID_SELF; 8.474 8.475 - /* Tell the ballon driver what is going on. */ 8.476 - balloon_update_driver_allowance(i); 8.477 + /* Tell the ballon driver what is going on. */ 8.478 + balloon_update_driver_allowance(i); 8.479 8.480 - /* Zap PTEs and give away pages in one big multicall. */ 8.481 - (void)HYPERVISOR_multicall(rx_mcl, i+1); 8.482 + /* Zap PTEs and give away pages in one big multicall. */ 8.483 + (void)HYPERVISOR_multicall(rx_mcl, i+1); 8.484 8.485 - /* Check return status of HYPERVISOR_memory_op(). 
*/ 8.486 - if (unlikely(rx_mcl[i].result != i)) 8.487 - panic("Unable to reduce memory reservation\n"); 8.488 + /* Check return status of HYPERVISOR_memory_op(). */ 8.489 + if (unlikely(rx_mcl[i].result != i)) 8.490 + panic("Unable to reduce memory reservation\n"); 8.491 8.492 - /* Above is a suitable barrier to ensure backend will see requests. */ 8.493 - np->rx->req_prod = req_prod + i; 8.494 + /* Above is a suitable barrier to ensure backend will see requests. */ 8.495 + np->rx->req_prod = req_prod + i; 8.496 8.497 - /* Adjust our floating fill target if we risked running out of buffers. */ 8.498 - if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) && 8.499 - ((np->rx_target *= 2) > np->rx_max_target)) 8.500 - np->rx_target = np->rx_max_target; 8.501 + /* Adjust our fill target if we risked running out of buffers. */ 8.502 + if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) && 8.503 + ((np->rx_target *= 2) > np->rx_max_target)) 8.504 + np->rx_target = np->rx_max_target; 8.505 } 8.506 8.507 8.508 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) 8.509 { 8.510 - unsigned short id; 8.511 - struct net_private *np = netdev_priv(dev); 8.512 - netif_tx_request_t *tx; 8.513 - NETIF_RING_IDX i; 8.514 + unsigned short id; 8.515 + struct net_private *np = netdev_priv(dev); 8.516 + netif_tx_request_t *tx; 8.517 + NETIF_RING_IDX i; 8.518 #ifdef CONFIG_XEN_NETDEV_GRANT 8.519 - grant_ref_t ref; 8.520 - unsigned long mfn; 8.521 + grant_ref_t ref; 8.522 + unsigned long mfn; 8.523 #endif 8.524 8.525 - if (unlikely(np->tx_full)) { 8.526 - printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name); 8.527 - netif_stop_queue(dev); 8.528 - goto drop; 8.529 - } 8.530 + if (unlikely(np->tx_full)) { 8.531 + printk(KERN_ALERT "%s: full queue wasn't stopped!\n", 8.532 + dev->name); 8.533 + netif_stop_queue(dev); 8.534 + goto drop; 8.535 + } 8.536 8.537 - if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >= 8.538 - PAGE_SIZE)) { 8.539 - struct sk_buff *nskb; 8.540 - if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL)) 8.541 - goto drop; 8.542 - skb_put(nskb, skb->len); 8.543 - memcpy(nskb->data, skb->data, skb->len); 8.544 - nskb->dev = skb->dev; 8.545 - dev_kfree_skb(skb); 8.546 - skb = nskb; 8.547 - } 8.548 + if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >= 8.549 + PAGE_SIZE)) { 8.550 + struct sk_buff *nskb; 8.551 + if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL)) 8.552 + goto drop; 8.553 + skb_put(nskb, skb->len); 8.554 + memcpy(nskb->data, skb->data, skb->len); 8.555 + nskb->dev = skb->dev; 8.556 + dev_kfree_skb(skb); 8.557 + skb = nskb; 8.558 + } 8.559 8.560 - spin_lock_irq(&np->tx_lock); 8.561 + spin_lock_irq(&np->tx_lock); 8.562 8.563 - if (np->backend_state != BEST_CONNECTED) { 8.564 - spin_unlock_irq(&np->tx_lock); 8.565 - goto drop; 8.566 - } 8.567 + if (np->backend_state != BEST_CONNECTED) { 8.568 + spin_unlock_irq(&np->tx_lock); 8.569 + goto drop; 8.570 + } 8.571 8.572 - i = np->tx->req_prod; 8.573 + i = np->tx->req_prod; 8.574 8.575 - id = GET_ID_FROM_FREELIST(np->tx_skbs); 8.576 - np->tx_skbs[id] = skb; 8.577 + id = GET_ID_FROM_FREELIST(np->tx_skbs); 8.578 + np->tx_skbs[id] = skb; 8.579 8.580 - tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req; 8.581 + tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req; 8.582 8.583 - tx->id = id; 8.584 + tx->id = id; 8.585 #ifdef CONFIG_XEN_NETDEV_GRANT 8.586 - ref = gnttab_claim_grant_reference(&gref_tx_head); 8.587 - if (unlikely((signed short)ref < 0)) { 8.588 - printk(KERN_ALERT 
"#### netfront can't claim tx grant reference\n"); 8.589 - BUG(); 8.590 - } 8.591 - mfn = virt_to_mfn(skb->data); 8.592 - gnttab_grant_foreign_access_ref(ref, np->backend_id, mfn, GNTMAP_readonly); 8.593 - tx->addr = ref << PAGE_SHIFT; 8.594 - grant_tx_ref[id] = ref; 8.595 + ref = gnttab_claim_grant_reference(&np->gref_tx_head); 8.596 + BUG_ON((signed short)ref < 0); 8.597 + mfn = virt_to_mfn(skb->data); 8.598 + gnttab_grant_foreign_access_ref( 8.599 + ref, np->backend_id, mfn, GNTMAP_readonly); 8.600 + tx->addr = ref << PAGE_SHIFT; 8.601 + np->grant_tx_ref[id] = ref; 8.602 #else 8.603 - tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT; 8.604 + tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT; 8.605 #endif 8.606 - tx->addr |= (unsigned long)skb->data & ~PAGE_MASK; 8.607 - tx->size = skb->len; 8.608 - tx->csum_blank = (skb->ip_summed == CHECKSUM_HW); 8.609 + tx->addr |= (unsigned long)skb->data & ~PAGE_MASK; 8.610 + tx->size = skb->len; 8.611 + tx->csum_blank = (skb->ip_summed == CHECKSUM_HW); 8.612 8.613 - wmb(); /* Ensure that backend will see the request. */ 8.614 - np->tx->req_prod = i + 1; 8.615 + wmb(); /* Ensure that backend will see the request. */ 8.616 + np->tx->req_prod = i + 1; 8.617 8.618 - network_tx_buf_gc(dev); 8.619 + network_tx_buf_gc(dev); 8.620 8.621 - if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) { 8.622 - np->tx_full = 1; 8.623 - netif_stop_queue(dev); 8.624 - } 8.625 + if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) { 8.626 + np->tx_full = 1; 8.627 + netif_stop_queue(dev); 8.628 + } 8.629 8.630 - spin_unlock_irq(&np->tx_lock); 8.631 + spin_unlock_irq(&np->tx_lock); 8.632 8.633 - np->stats.tx_bytes += skb->len; 8.634 - np->stats.tx_packets++; 8.635 + np->stats.tx_bytes += skb->len; 8.636 + np->stats.tx_packets++; 8.637 8.638 - /* Only notify Xen if we really have to. */ 8.639 - mb(); 8.640 - if (np->tx->TX_TEST_IDX == i) 8.641 - notify_via_evtchn(np->evtchn); 8.642 + /* Only notify Xen if we really have to. 
*/ 8.643 + mb(); 8.644 + if (np->tx->TX_TEST_IDX == i) 8.645 + notify_via_evtchn(np->evtchn); 8.646 8.647 - return 0; 8.648 + return 0; 8.649 8.650 drop: 8.651 - np->stats.tx_dropped++; 8.652 - dev_kfree_skb(skb); 8.653 - return 0; 8.654 + np->stats.tx_dropped++; 8.655 + dev_kfree_skb(skb); 8.656 + return 0; 8.657 } 8.658 8.659 static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs) 8.660 { 8.661 - struct net_device *dev = dev_id; 8.662 - struct net_private *np = netdev_priv(dev); 8.663 - unsigned long flags; 8.664 + struct net_device *dev = dev_id; 8.665 + struct net_private *np = netdev_priv(dev); 8.666 + unsigned long flags; 8.667 8.668 - spin_lock_irqsave(&np->tx_lock, flags); 8.669 - network_tx_buf_gc(dev); 8.670 - spin_unlock_irqrestore(&np->tx_lock, flags); 8.671 + spin_lock_irqsave(&np->tx_lock, flags); 8.672 + network_tx_buf_gc(dev); 8.673 + spin_unlock_irqrestore(&np->tx_lock, flags); 8.674 8.675 - if((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN)) 8.676 - netif_rx_schedule(dev); 8.677 + if ((np->rx_resp_cons != np->rx->resp_prod) && 8.678 + (np->user_state == UST_OPEN)) 8.679 + netif_rx_schedule(dev); 8.680 8.681 - return IRQ_HANDLED; 8.682 + return IRQ_HANDLED; 8.683 } 8.684 8.685 8.686 static int netif_poll(struct net_device *dev, int *pbudget) 8.687 { 8.688 - struct net_private *np = netdev_priv(dev); 8.689 - struct sk_buff *skb, *nskb; 8.690 - netif_rx_response_t *rx; 8.691 - NETIF_RING_IDX i, rp; 8.692 - mmu_update_t *mmu = rx_mmu; 8.693 - multicall_entry_t *mcl = rx_mcl; 8.694 - int work_done, budget, more_to_do = 1; 8.695 - struct sk_buff_head rxq; 8.696 - unsigned long flags; 8.697 + struct net_private *np = netdev_priv(dev); 8.698 + struct sk_buff *skb, *nskb; 8.699 + netif_rx_response_t *rx; 8.700 + NETIF_RING_IDX i, rp; 8.701 + mmu_update_t *mmu = rx_mmu; 8.702 + multicall_entry_t *mcl = rx_mcl; 8.703 + int work_done, budget, more_to_do = 1; 8.704 + struct sk_buff_head rxq; 8.705 + unsigned long flags; 8.706 #ifdef CONFIG_XEN_NETDEV_GRANT 8.707 - unsigned long mfn; 8.708 - grant_ref_t ref; 8.709 + unsigned long mfn; 8.710 + grant_ref_t ref; 8.711 #endif 8.712 8.713 - spin_lock(&np->rx_lock); 8.714 + spin_lock(&np->rx_lock); 8.715 8.716 - if (np->backend_state != BEST_CONNECTED) { 8.717 - spin_unlock(&np->rx_lock); 8.718 - return 0; 8.719 - } 8.720 + if (np->backend_state != BEST_CONNECTED) { 8.721 + spin_unlock(&np->rx_lock); 8.722 + return 0; 8.723 + } 8.724 8.725 - skb_queue_head_init(&rxq); 8.726 + skb_queue_head_init(&rxq); 8.727 8.728 - if ((budget = *pbudget) > dev->quota) 8.729 - budget = dev->quota; 8.730 - rp = np->rx->resp_prod; 8.731 - rmb(); /* Ensure we see queued responses up to 'rp'. */ 8.732 + if ((budget = *pbudget) > dev->quota) 8.733 + budget = dev->quota; 8.734 + rp = np->rx->resp_prod; 8.735 + rmb(); /* Ensure we see queued responses up to 'rp'. */ 8.736 8.737 - for (i = np->rx_resp_cons, work_done = 0; 8.738 - (i != rp) && (work_done < budget); 8.739 - i++, work_done++) { 8.740 - rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp; 8.741 - /* 8.742 - * An error here is very odd. Usually indicates a backend bug, 8.743 - * low-memory condition, or that we didn't have reservation headroom. 
8.744 - */ 8.745 - if (unlikely(rx->status <= 0)) { 8.746 - if (net_ratelimit()) 8.747 - printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n"); 8.748 - np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id; 8.749 - wmb(); 8.750 - np->rx->req_prod++; 8.751 - work_done--; 8.752 - continue; 8.753 - } 8.754 + for (i = np->rx_resp_cons, work_done = 0; 8.755 + (i != rp) && (work_done < budget); 8.756 + i++, work_done++) { 8.757 + rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp; 8.758 + /* 8.759 + * An error here is very odd. Usually indicates a backend bug, 8.760 + * low-mem condition, or we didn't have reservation headroom. 8.761 + */ 8.762 + if (unlikely(rx->status <= 0)) { 8.763 + if (net_ratelimit()) 8.764 + printk(KERN_WARNING "Bad rx buffer " 8.765 + "(memory squeeze?).\n"); 8.766 + np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)]. 8.767 + req.id = rx->id; 8.768 + wmb(); 8.769 + np->rx->req_prod++; 8.770 + work_done--; 8.771 + continue; 8.772 + } 8.773 8.774 #ifdef CONFIG_XEN_NETDEV_GRANT 8.775 - ref = grant_rx_ref[rx->id]; 8.776 + ref = np->grant_rx_ref[rx->id]; 8.777 8.778 - if(ref == GRANT_INVALID_REF) { 8.779 - printk(KERN_WARNING "Bad rx grant reference %d from dom %d.\n", 8.780 - ref, np->backend_id); 8.781 - np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id; 8.782 - wmb(); 8.783 - np->rx->req_prod++; 8.784 - work_done--; 8.785 - continue; 8.786 - } 8.787 + if(ref == GRANT_INVALID_REF) { 8.788 + printk(KERN_WARNING "Bad rx grant reference %d " 8.789 + "from dom %d.\n", 8.790 + ref, np->backend_id); 8.791 + np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)]. 8.792 + req.id = rx->id; 8.793 + wmb(); 8.794 + np->rx->req_prod++; 8.795 + work_done--; 8.796 + continue; 8.797 + } 8.798 8.799 - grant_rx_ref[rx->id] = GRANT_INVALID_REF; 8.800 - mfn = gnttab_end_foreign_transfer_ref(ref); 8.801 - gnttab_release_grant_reference(&gref_rx_head, ref); 8.802 + np->grant_rx_ref[rx->id] = GRANT_INVALID_REF; 8.803 + mfn = gnttab_end_foreign_transfer_ref(ref); 8.804 + gnttab_release_grant_reference(&np->gref_rx_head, ref); 8.805 #endif 8.806 8.807 - skb = np->rx_skbs[rx->id]; 8.808 - ADD_ID_TO_FREELIST(np->rx_skbs, rx->id); 8.809 + skb = np->rx_skbs[rx->id]; 8.810 + ADD_ID_TO_FREELIST(np->rx_skbs, rx->id); 8.811 8.812 - /* NB. We handle skb overflow later. */ 8.813 + /* NB. We handle skb overflow later. */ 8.814 #ifdef CONFIG_XEN_NETDEV_GRANT 8.815 - skb->data = skb->head + rx->addr; 8.816 + skb->data = skb->head + rx->addr; 8.817 #else 8.818 - skb->data = skb->head + (rx->addr & ~PAGE_MASK); 8.819 + skb->data = skb->head + (rx->addr & ~PAGE_MASK); 8.820 #endif 8.821 - skb->len = rx->status; 8.822 - skb->tail = skb->data + skb->len; 8.823 + skb->len = rx->status; 8.824 + skb->tail = skb->data + skb->len; 8.825 8.826 - if ( rx->csum_valid ) 8.827 - skb->ip_summed = CHECKSUM_UNNECESSARY; 8.828 + if ( rx->csum_valid ) 8.829 + skb->ip_summed = CHECKSUM_UNNECESSARY; 8.830 8.831 - np->stats.rx_packets++; 8.832 - np->stats.rx_bytes += rx->status; 8.833 + np->stats.rx_packets++; 8.834 + np->stats.rx_bytes += rx->status; 8.835 8.836 - /* Remap the page. */ 8.837 + /* Remap the page. 
*/ 8.838 #ifdef CONFIG_XEN_NETDEV_GRANT 8.839 - mmu->ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; 8.840 + mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; 8.841 #else 8.842 - mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE; 8.843 + mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE; 8.844 #endif 8.845 - mmu->val = __pa(skb->head) >> PAGE_SHIFT; 8.846 - mmu++; 8.847 + mmu->val = __pa(skb->head) >> PAGE_SHIFT; 8.848 + mmu++; 8.849 #ifdef CONFIG_XEN_NETDEV_GRANT 8.850 - MULTI_update_va_mapping(mcl, (unsigned long)skb->head, 8.851 - pfn_pte_ma(mfn, PAGE_KERNEL), 0); 8.852 + MULTI_update_va_mapping(mcl, (unsigned long)skb->head, 8.853 + pfn_pte_ma(mfn, PAGE_KERNEL), 0); 8.854 #else 8.855 - MULTI_update_va_mapping(mcl, (unsigned long)skb->head, 8.856 - pfn_pte_ma(rx->addr >> PAGE_SHIFT, 8.857 - PAGE_KERNEL), 0); 8.858 + MULTI_update_va_mapping(mcl, (unsigned long)skb->head, 8.859 + pfn_pte_ma(rx->addr >> PAGE_SHIFT, 8.860 + PAGE_KERNEL), 0); 8.861 #endif 8.862 - mcl++; 8.863 + mcl++; 8.864 8.865 #ifdef CONFIG_XEN_NETDEV_GRANT 8.866 - phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn; 8.867 - GDPRINTK("#### rx_poll enqueue vdata=%p mfn=%lu ref=%x\n", 8.868 - skb->data, mfn, ref); 8.869 + phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn; 8.870 #else 8.871 - phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 8.872 - rx->addr >> PAGE_SHIFT; 8.873 + phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = 8.874 + rx->addr >> PAGE_SHIFT; 8.875 #endif 8.876 8.877 8.878 - __skb_queue_tail(&rxq, skb); 8.879 - } 8.880 + __skb_queue_tail(&rxq, skb); 8.881 + } 8.882 8.883 - 8.884 - /* Some pages are no longer absent... */ 8.885 - balloon_update_driver_allowance(-work_done); 8.886 + /* Some pages are no longer absent... */ 8.887 + balloon_update_driver_allowance(-work_done); 8.888 8.889 - /* Do all the remapping work, and M->P updates, in one big hypercall. */ 8.890 - if (likely((mcl - rx_mcl) != 0)) { 8.891 - mcl->op = __HYPERVISOR_mmu_update; 8.892 - mcl->args[0] = (unsigned long)rx_mmu; 8.893 - mcl->args[1] = mmu - rx_mmu; 8.894 - mcl->args[2] = 0; 8.895 - mcl->args[3] = DOMID_SELF; 8.896 - mcl++; 8.897 - (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl); 8.898 - } 8.899 + /* Do all the remapping work, and M2P updates, in one big hypercall. */ 8.900 + if (likely((mcl - rx_mcl) != 0)) { 8.901 + mcl->op = __HYPERVISOR_mmu_update; 8.902 + mcl->args[0] = (unsigned long)rx_mmu; 8.903 + mcl->args[1] = mmu - rx_mmu; 8.904 + mcl->args[2] = 0; 8.905 + mcl->args[3] = DOMID_SELF; 8.906 + mcl++; 8.907 + (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl); 8.908 + } 8.909 8.910 - while ((skb = __skb_dequeue(&rxq)) != NULL) { 8.911 -#ifdef CONFIG_XEN_NETDEV_GRANT 8.912 - GDPRINTK("#### rx_poll dequeue vdata=%p mfn=%lu\n", 8.913 - skb->data, virt_to_mfn(skb->data)); 8.914 - dump_packet('d', skb->data, (unsigned long)skb->data); 8.915 -#endif 8.916 - /* 8.917 - * Enough room in skbuff for the data we were passed? Also, Linux 8.918 - * expects at least 16 bytes headroom in each receive buffer. 8.919 - */ 8.920 - if (unlikely(skb->tail > skb->end) || 8.921 - unlikely((skb->data - skb->head) < 16)) { 8.922 - nskb = NULL; 8.923 - 8.924 + while ((skb = __skb_dequeue(&rxq)) != NULL) { 8.925 + /* 8.926 + * Enough room in skbuff for the data we were passed? Also, 8.927 + * Linux expects at least 16 bytes headroom in each rx buffer. 
8.928 + */ 8.929 + if (unlikely(skb->tail > skb->end) || 8.930 + unlikely((skb->data - skb->head) < 16)) { 8.931 + nskb = NULL; 8.932 8.933 - /* Only copy the packet if it fits in the current MTU. */ 8.934 - if (skb->len <= (dev->mtu + ETH_HLEN)) { 8.935 - if ((skb->tail > skb->end) && net_ratelimit()) 8.936 - printk(KERN_INFO "Received packet needs %zd bytes more " 8.937 - "headroom.\n", skb->tail - skb->end); 8.938 + /* Only copy the packet if it fits in the MTU. */ 8.939 + if (skb->len <= (dev->mtu + ETH_HLEN)) { 8.940 + if ((skb->tail > skb->end) && net_ratelimit()) 8.941 + printk(KERN_INFO "Received packet " 8.942 + "needs %zd bytes more " 8.943 + "headroom.\n", 8.944 + skb->tail - skb->end); 8.945 8.946 - if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) { 8.947 - skb_reserve(nskb, 2); 8.948 - skb_put(nskb, skb->len); 8.949 - memcpy(nskb->data, skb->data, skb->len); 8.950 - nskb->dev = skb->dev; 8.951 - } 8.952 - } 8.953 - else if (net_ratelimit()) 8.954 - printk(KERN_INFO "Received packet too big for MTU " 8.955 - "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu); 8.956 + nskb = alloc_xen_skb(skb->len + 2); 8.957 + if (nskb != NULL) { 8.958 + skb_reserve(nskb, 2); 8.959 + skb_put(nskb, skb->len); 8.960 + memcpy(nskb->data, 8.961 + skb->data, 8.962 + skb->len); 8.963 + nskb->dev = skb->dev; 8.964 + } 8.965 + } 8.966 + else if (net_ratelimit()) 8.967 + printk(KERN_INFO "Received packet too big for " 8.968 + "MTU (%d > %d)\n", 8.969 + skb->len - ETH_HLEN, dev->mtu); 8.970 8.971 - /* Reinitialise and then destroy the old skbuff. */ 8.972 - skb->len = 0; 8.973 - skb->tail = skb->data; 8.974 - init_skb_shinfo(skb); 8.975 - dev_kfree_skb(skb); 8.976 + /* Reinitialise and then destroy the old skbuff. */ 8.977 + skb->len = 0; 8.978 + skb->tail = skb->data; 8.979 + init_skb_shinfo(skb); 8.980 + dev_kfree_skb(skb); 8.981 8.982 - /* Switch old for new, if we copied the buffer. */ 8.983 - if ((skb = nskb) == NULL) 8.984 - continue; 8.985 - } 8.986 + /* Switch old for new, if we copied the buffer. */ 8.987 + if ((skb = nskb) == NULL) 8.988 + continue; 8.989 + } 8.990 8.991 - /* Set the shared-info area, which is hidden behind the real data. */ 8.992 - init_skb_shinfo(skb); 8.993 - /* Ethernet-specific work. Delayed to here as it peeks the header. */ 8.994 - skb->protocol = eth_type_trans(skb, dev); 8.995 + /* Set the shinfo area, which is hidden behind the data. */ 8.996 + init_skb_shinfo(skb); 8.997 + /* Ethernet work: Delayed to here as it peeks the header. */ 8.998 + skb->protocol = eth_type_trans(skb, dev); 8.999 8.1000 - /* Pass it up. */ 8.1001 - netif_receive_skb(skb); 8.1002 - dev->last_rx = jiffies; 8.1003 - } 8.1004 + /* Pass it up. */ 8.1005 + netif_receive_skb(skb); 8.1006 + dev->last_rx = jiffies; 8.1007 + } 8.1008 8.1009 - np->rx_resp_cons = i; 8.1010 + np->rx_resp_cons = i; 8.1011 8.1012 - /* If we get a callback with very few responses, reduce fill target. */ 8.1013 - /* NB. Note exponential increase, linear decrease. */ 8.1014 - if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) && 8.1015 - (--np->rx_target < np->rx_min_target)) 8.1016 - np->rx_target = np->rx_min_target; 8.1017 + /* If we get a callback with very few responses, reduce fill target. */ 8.1018 + /* NB. Note exponential increase, linear decrease. 
*/ 8.1019 + if (((np->rx->req_prod - np->rx->resp_prod) > 8.1020 + ((3*np->rx_target) / 4)) && 8.1021 + (--np->rx_target < np->rx_min_target)) 8.1022 + np->rx_target = np->rx_min_target; 8.1023 8.1024 - network_alloc_rx_buffers(dev); 8.1025 + network_alloc_rx_buffers(dev); 8.1026 8.1027 - *pbudget -= work_done; 8.1028 - dev->quota -= work_done; 8.1029 + *pbudget -= work_done; 8.1030 + dev->quota -= work_done; 8.1031 8.1032 - if (work_done < budget) { 8.1033 - local_irq_save(flags); 8.1034 + if (work_done < budget) { 8.1035 + local_irq_save(flags); 8.1036 8.1037 - np->rx->event = i + 1; 8.1038 + np->rx->event = i + 1; 8.1039 8.1040 - /* Deal with hypervisor racing our resetting of rx_event. */ 8.1041 - mb(); 8.1042 - if (np->rx->resp_prod == i) { 8.1043 - __netif_rx_complete(dev); 8.1044 - more_to_do = 0; 8.1045 - } 8.1046 + /* Deal with hypervisor racing our resetting of rx_event. */ 8.1047 + mb(); 8.1048 + if (np->rx->resp_prod == i) { 8.1049 + __netif_rx_complete(dev); 8.1050 + more_to_do = 0; 8.1051 + } 8.1052 8.1053 - local_irq_restore(flags); 8.1054 - } 8.1055 + local_irq_restore(flags); 8.1056 + } 8.1057 8.1058 - spin_unlock(&np->rx_lock); 8.1059 + spin_unlock(&np->rx_lock); 8.1060 8.1061 - return more_to_do; 8.1062 + return more_to_do; 8.1063 } 8.1064 8.1065 8.1066 static int network_close(struct net_device *dev) 8.1067 { 8.1068 - struct net_private *np = netdev_priv(dev); 8.1069 - np->user_state = UST_CLOSED; 8.1070 - netif_stop_queue(np->netdev); 8.1071 - return 0; 8.1072 + struct net_private *np = netdev_priv(dev); 8.1073 + np->user_state = UST_CLOSED; 8.1074 + netif_stop_queue(np->netdev); 8.1075 + return 0; 8.1076 } 8.1077 8.1078 8.1079 static struct net_device_stats *network_get_stats(struct net_device *dev) 8.1080 { 8.1081 - struct net_private *np = netdev_priv(dev); 8.1082 - return &np->stats; 8.1083 + struct net_private *np = netdev_priv(dev); 8.1084 + return &np->stats; 8.1085 } 8.1086 8.1087 static void network_connect(struct net_device *dev) 8.1088 { 8.1089 - struct net_private *np; 8.1090 - int i, requeue_idx; 8.1091 - netif_tx_request_t *tx; 8.1092 + struct net_private *np; 8.1093 + int i, requeue_idx; 8.1094 + netif_tx_request_t *tx; 8.1095 8.1096 - np = netdev_priv(dev); 8.1097 - spin_lock_irq(&np->tx_lock); 8.1098 - spin_lock(&np->rx_lock); 8.1099 + np = netdev_priv(dev); 8.1100 + spin_lock_irq(&np->tx_lock); 8.1101 + spin_lock(&np->rx_lock); 8.1102 8.1103 - /* Recovery procedure: */ 8.1104 + /* Recovery procedure: */ 8.1105 8.1106 - /* Step 1: Reinitialise variables. */ 8.1107 - np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0; 8.1108 - np->rx->event = np->tx->event = 1; 8.1109 + /* Step 1: Reinitialise variables. */ 8.1110 + np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0; 8.1111 + np->rx->event = np->tx->event = 1; 8.1112 8.1113 - /* Step 2: Rebuild the RX and TX ring contents. 8.1114 - * NB. We could just free the queued TX packets now but we hope 8.1115 - * that sending them out might do some good. We have to rebuild 8.1116 - * the RX ring because some of our pages are currently flipped out 8.1117 - * so we can't just free the RX skbs. 8.1118 - * NB2. Freelist index entries are always going to be less than 8.1119 - * __PAGE_OFFSET, whereas pointers to skbs will always be equal or 8.1120 - * greater than __PAGE_OFFSET: we use this property to distinguish 8.1121 - * them. 8.1122 - */ 8.1123 + /* Step 2: Rebuild the RX and TX ring contents. 8.1124 + * NB. 
We could just free the queued TX packets now but we hope 8.1125 + * that sending them out might do some good. We have to rebuild 8.1126 + * the RX ring because some of our pages are currently flipped out 8.1127 + * so we can't just free the RX skbs. 8.1128 + * NB2. Freelist index entries are always going to be less than 8.1129 + * __PAGE_OFFSET, whereas pointers to skbs will always be equal or 8.1130 + * greater than __PAGE_OFFSET: we use this property to distinguish 8.1131 + * them. 8.1132 + */ 8.1133 8.1134 - /* Rebuild the TX buffer freelist and the TX ring itself. 8.1135 - * NB. This reorders packets. We could keep more private state 8.1136 - * to avoid this but maybe it doesn't matter so much given the 8.1137 - * interface has been down. 8.1138 - */ 8.1139 - for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) { 8.1140 - if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) { 8.1141 - struct sk_buff *skb = np->tx_skbs[i]; 8.1142 + /* Rebuild the TX buffer freelist and the TX ring itself. 8.1143 + * NB. This reorders packets. We could keep more private state 8.1144 + * to avoid this but maybe it doesn't matter so much given the 8.1145 + * interface has been down. 8.1146 + */ 8.1147 + for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) { 8.1148 + if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) { 8.1149 + struct sk_buff *skb = np->tx_skbs[i]; 8.1150 8.1151 - tx = &np->tx->ring[requeue_idx++].req; 8.1152 + tx = &np->tx->ring[requeue_idx++].req; 8.1153 8.1154 - tx->id = i; 8.1155 + tx->id = i; 8.1156 #ifdef CONFIG_XEN_NETDEV_GRANT 8.1157 - gnttab_grant_foreign_access_ref(grant_tx_ref[i], np->backend_id, 8.1158 - virt_to_mfn(np->tx_skbs[i]->data), 8.1159 - GNTMAP_readonly); 8.1160 - tx->addr = grant_tx_ref[i] << PAGE_SHIFT; 8.1161 + gnttab_grant_foreign_access_ref( 8.1162 + np->grant_tx_ref[i], np->backend_id, 8.1163 + virt_to_mfn(np->tx_skbs[i]->data), 8.1164 + GNTMAP_readonly); 8.1165 + tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT; 8.1166 #else 8.1167 - tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT; 8.1168 + tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT; 8.1169 #endif 8.1170 - tx->addr |= (unsigned long)skb->data & ~PAGE_MASK; 8.1171 - tx->size = skb->len; 8.1172 + tx->addr |= (unsigned long)skb->data & ~PAGE_MASK; 8.1173 + tx->size = skb->len; 8.1174 8.1175 - np->stats.tx_bytes += skb->len; 8.1176 - np->stats.tx_packets++; 8.1177 - } 8.1178 - } 8.1179 - wmb(); 8.1180 - np->tx->req_prod = requeue_idx; 8.1181 + np->stats.tx_bytes += skb->len; 8.1182 + np->stats.tx_packets++; 8.1183 + } 8.1184 + } 8.1185 + wmb(); 8.1186 + np->tx->req_prod = requeue_idx; 8.1187 8.1188 - /* Rebuild the RX buffer freelist and the RX ring itself. */ 8.1189 - for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 8.1190 - if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) { 8.1191 + /* Rebuild the RX buffer freelist and the RX ring itself. */ 8.1192 + for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 8.1193 + if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) { 8.1194 #ifdef CONFIG_XEN_NETDEV_GRANT 8.1195 - /* Reinstate the grant ref so backend can transfer mfn to us. 
*/ 8.1196 - gnttab_grant_foreign_transfer_ref(grant_rx_ref[i], np->backend_id); 8.1197 - np->rx->ring[requeue_idx].req.gref = grant_rx_ref[i]; 8.1198 + gnttab_grant_foreign_transfer_ref( 8.1199 + np->grant_rx_ref[i], np->backend_id); 8.1200 + np->rx->ring[requeue_idx].req.gref = 8.1201 + np->grant_rx_ref[i]; 8.1202 #endif 8.1203 - np->rx->ring[requeue_idx].req.id = i; 8.1204 - requeue_idx++; 8.1205 - } 8.1206 - } 8.1207 + np->rx->ring[requeue_idx].req.id = i; 8.1208 + requeue_idx++; 8.1209 + } 8.1210 + } 8.1211 8.1212 - wmb(); 8.1213 - np->rx->req_prod = requeue_idx; 8.1214 + wmb(); 8.1215 + np->rx->req_prod = requeue_idx; 8.1216 8.1217 - /* Step 3: All public and private state should now be sane. Get 8.1218 - * ready to start sending and receiving packets and give the driver 8.1219 - * domain a kick because we've probably just requeued some 8.1220 - * packets. 8.1221 - */ 8.1222 - np->backend_state = BEST_CONNECTED; 8.1223 - wmb(); 8.1224 - notify_via_evtchn(np->evtchn); 8.1225 - network_tx_buf_gc(dev); 8.1226 + /* Step 3: All public and private state should now be sane. Get 8.1227 + * ready to start sending and receiving packets and give the driver 8.1228 + * domain a kick because we've probably just requeued some 8.1229 + * packets. 8.1230 + */ 8.1231 + np->backend_state = BEST_CONNECTED; 8.1232 + wmb(); 8.1233 + notify_via_evtchn(np->evtchn); 8.1234 + network_tx_buf_gc(dev); 8.1235 8.1236 - if (np->user_state == UST_OPEN) 8.1237 - netif_start_queue(dev); 8.1238 + if (np->user_state == UST_OPEN) 8.1239 + netif_start_queue(dev); 8.1240 8.1241 - spin_unlock(&np->rx_lock); 8.1242 - spin_unlock_irq(&np->tx_lock); 8.1243 + spin_unlock(&np->rx_lock); 8.1244 + spin_unlock_irq(&np->tx_lock); 8.1245 } 8.1246 8.1247 static void show_device(struct net_private *np) 8.1248 @@ -887,6 +860,15 @@ connect_device(struct net_private *np, u 8.1249 show_device(np); 8.1250 } 8.1251 8.1252 +static void netif_uninit(struct net_device *dev) 8.1253 +{ 8.1254 +#ifdef CONFIG_XEN_NETDEV_GRANT 8.1255 + struct net_private *np = netdev_priv(dev); 8.1256 + gnttab_free_grant_references(np->gref_tx_head); 8.1257 + gnttab_free_grant_references(np->gref_rx_head); 8.1258 +#endif 8.1259 +} 8.1260 + 8.1261 static struct ethtool_ops network_ethtool_ops = 8.1262 { 8.1263 .get_tx_csum = ethtool_op_get_tx_csum, 8.1264 @@ -901,84 +883,107 @@ static struct ethtool_ops network_ethtoo 8.1265 static int create_netdev(int handle, struct xenbus_device *dev, 8.1266 struct net_device **val) 8.1267 { 8.1268 - int i, err = 0; 8.1269 - struct net_device *netdev = NULL; 8.1270 - struct net_private *np = NULL; 8.1271 + int i, err = 0; 8.1272 + struct net_device *netdev = NULL; 8.1273 + struct net_private *np = NULL; 8.1274 8.1275 - if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) { 8.1276 - printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__); 8.1277 - err = -ENOMEM; 8.1278 - goto exit; 8.1279 - } 8.1280 + if ((netdev = alloc_etherdev(sizeof(struct net_private))) == NULL) { 8.1281 + printk(KERN_WARNING "%s> alloc_etherdev failed.\n", 8.1282 + __FUNCTION__); 8.1283 + err = -ENOMEM; 8.1284 + goto exit; 8.1285 + } 8.1286 8.1287 - np = netdev_priv(netdev); 8.1288 - np->backend_state = BEST_CLOSED; 8.1289 - np->user_state = UST_CLOSED; 8.1290 - np->handle = handle; 8.1291 - np->xbdev = dev; 8.1292 + np = netdev_priv(netdev); 8.1293 + np->backend_state = BEST_CLOSED; 8.1294 + np->user_state = UST_CLOSED; 8.1295 + np->handle = handle; 8.1296 + np->xbdev = dev; 8.1297 8.1298 - spin_lock_init(&np->tx_lock); 8.1299 - 
spin_lock_init(&np->rx_lock); 8.1300 + spin_lock_init(&np->tx_lock); 8.1301 + spin_lock_init(&np->rx_lock); 8.1302 8.1303 - skb_queue_head_init(&np->rx_batch); 8.1304 - np->rx_target = RX_MIN_TARGET; 8.1305 - np->rx_min_target = RX_MIN_TARGET; 8.1306 - np->rx_max_target = RX_MAX_TARGET; 8.1307 + skb_queue_head_init(&np->rx_batch); 8.1308 + np->rx_target = RX_MIN_TARGET; 8.1309 + np->rx_min_target = RX_MIN_TARGET; 8.1310 + np->rx_max_target = RX_MAX_TARGET; 8.1311 8.1312 - /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */ 8.1313 - for (i = 0; i <= NETIF_TX_RING_SIZE; i++) { 8.1314 - np->tx_skbs[i] = (void *)((unsigned long) i+1); 8.1315 + /* Initialise {tx,rx}_skbs as a free chain containing every entry. */ 8.1316 + for (i = 0; i <= NETIF_TX_RING_SIZE; i++) { 8.1317 + np->tx_skbs[i] = (void *)((unsigned long) i+1); 8.1318 #ifdef CONFIG_XEN_NETDEV_GRANT 8.1319 - grant_tx_ref[i] = GRANT_INVALID_REF; 8.1320 + np->grant_tx_ref[i] = GRANT_INVALID_REF; 8.1321 #endif 8.1322 - } 8.1323 + } 8.1324 + 8.1325 + for (i = 0; i <= NETIF_RX_RING_SIZE; i++) { 8.1326 + np->rx_skbs[i] = (void *)((unsigned long) i+1); 8.1327 +#ifdef CONFIG_XEN_NETDEV_GRANT 8.1328 + np->grant_rx_ref[i] = GRANT_INVALID_REF; 8.1329 +#endif 8.1330 + } 8.1331 8.1332 - for (i = 0; i <= NETIF_RX_RING_SIZE; i++) { 8.1333 - np->rx_skbs[i] = (void *)((unsigned long) i+1); 8.1334 #ifdef CONFIG_XEN_NETDEV_GRANT 8.1335 - grant_rx_ref[i] = GRANT_INVALID_REF; 8.1336 + /* A grant for every tx ring slot */ 8.1337 + if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE, 8.1338 + &np->gref_tx_head) < 0) { 8.1339 + printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); 8.1340 + goto exit; 8.1341 + } 8.1342 + /* A grant for every rx ring slot */ 8.1343 + if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE, 8.1344 + &np->gref_rx_head) < 0) { 8.1345 + printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); 8.1346 + gnttab_free_grant_references(np->gref_tx_head); 8.1347 + goto exit; 8.1348 + } 8.1349 #endif 8.1350 - } 8.1351 8.1352 - netdev->open = network_open; 8.1353 - netdev->hard_start_xmit = network_start_xmit; 8.1354 - netdev->stop = network_close; 8.1355 - netdev->get_stats = network_get_stats; 8.1356 - netdev->poll = netif_poll; 8.1357 - netdev->weight = 64; 8.1358 - netdev->features = NETIF_F_IP_CSUM; 8.1359 + netdev->open = network_open; 8.1360 + netdev->hard_start_xmit = network_start_xmit; 8.1361 + netdev->stop = network_close; 8.1362 + netdev->get_stats = network_get_stats; 8.1363 + netdev->poll = netif_poll; 8.1364 + netdev->uninit = netif_uninit; 8.1365 + netdev->weight = 64; 8.1366 + netdev->features = NETIF_F_IP_CSUM; 8.1367 8.1368 - SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); 8.1369 + SET_ETHTOOL_OPS(netdev, &network_ethtool_ops); 8.1370 8.1371 - if ((err = register_netdev(netdev)) != 0) { 8.1372 - printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err); 8.1373 - goto exit; 8.1374 - } 8.1375 + if ((err = register_netdev(netdev)) != 0) { 8.1376 + printk(KERN_WARNING "%s> register_netdev err=%d\n", 8.1377 + __FUNCTION__, err); 8.1378 + goto exit_free_grefs; 8.1379 + } 8.1380 + 8.1381 + if ((err = xennet_proc_addif(netdev)) != 0) { 8.1382 + unregister_netdev(netdev); 8.1383 + goto exit_free_grefs; 8.1384 + } 8.1385 + 8.1386 + np->netdev = netdev; 8.1387 8.1388 - if ((err = xennet_proc_addif(netdev)) != 0) { 8.1389 - unregister_netdev(netdev); 8.1390 - goto exit; 8.1391 - } 8.1392 - 8.1393 - np->netdev = netdev; 8.1394 + exit: 8.1395 + if ((err != 0) && (netdev != NULL)) 8.1396 + 
kfree(netdev); 8.1397 + else if (val != NULL) 8.1398 + *val = netdev; 8.1399 + return err; 8.1400 8.1401 - exit: 8.1402 - if ((err != 0) && (netdev != NULL)) 8.1403 - kfree(netdev); 8.1404 - else if (val != NULL) 8.1405 - *val = netdev; 8.1406 - return err; 8.1407 + exit_free_grefs: 8.1408 +#ifdef CONFIG_XEN_NETDEV_GRANT 8.1409 + gnttab_free_grant_references(np->gref_tx_head); 8.1410 + gnttab_free_grant_references(np->gref_rx_head); 8.1411 +#endif 8.1412 + goto exit; 8.1413 } 8.1414 8.1415 static int destroy_netdev(struct net_device *netdev) 8.1416 { 8.1417 - 8.1418 #ifdef CONFIG_PROC_FS 8.1419 xennet_proc_delif(netdev); 8.1420 #endif 8.1421 - 8.1422 unregister_netdev(netdev); 8.1423 - 8.1424 return 0; 8.1425 } 8.1426 8.1427 @@ -989,20 +994,20 @@ static int destroy_netdev(struct net_dev 8.1428 static int 8.1429 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr) 8.1430 { 8.1431 - struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 8.1432 - struct net_device *dev = ifa->ifa_dev->dev; 8.1433 + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 8.1434 + struct net_device *dev = ifa->ifa_dev->dev; 8.1435 8.1436 - /* UP event and is it one of our devices? */ 8.1437 - if (event == NETDEV_UP && dev->open == network_open) 8.1438 - (void)send_fake_arp(dev); 8.1439 + /* UP event and is it one of our devices? */ 8.1440 + if (event == NETDEV_UP && dev->open == network_open) 8.1441 + (void)send_fake_arp(dev); 8.1442 8.1443 - return NOTIFY_DONE; 8.1444 + return NOTIFY_DONE; 8.1445 } 8.1446 8.1447 static struct notifier_block notifier_inetdev = { 8.1448 - .notifier_call = inetdev_notify, 8.1449 - .next = NULL, 8.1450 - .priority = 0 8.1451 + .notifier_call = inetdev_notify, 8.1452 + .next = NULL, 8.1453 + .priority = 0 8.1454 }; 8.1455 8.1456 static struct xenbus_device_id netfront_ids[] = { 8.1457 @@ -1341,72 +1346,50 @@ static void __init init_net_xenbus(void) 8.1458 8.1459 static int wait_for_netif(void) 8.1460 { 8.1461 - int err = 0; 8.1462 - int i; 8.1463 + int err = 0; 8.1464 + int i; 8.1465 8.1466 - /* 8.1467 - * We should figure out how many and which devices we need to 8.1468 - * proceed and only wait for those. For now, continue once the 8.1469 - * first device is around. 8.1470 - */ 8.1471 - for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ ) 8.1472 - { 8.1473 - set_current_state(TASK_INTERRUPTIBLE); 8.1474 - schedule_timeout(1); 8.1475 - } 8.1476 + /* 8.1477 + * We should figure out how many and which devices we need to 8.1478 + * proceed and only wait for those. For now, continue once the 8.1479 + * first device is around. 
8.1480 + */ 8.1481 + for ( i=0; netif_state != NETIF_STATE_CONNECTED && (i < 10*HZ); i++ ) 8.1482 + { 8.1483 + set_current_state(TASK_INTERRUPTIBLE); 8.1484 + schedule_timeout(1); 8.1485 + } 8.1486 8.1487 - if (netif_state != NETIF_STATE_CONNECTED) { 8.1488 - WPRINTK("Timeout connecting to device!\n"); 8.1489 - err = -ENOSYS; 8.1490 - } 8.1491 - return err; 8.1492 + if (netif_state != NETIF_STATE_CONNECTED) { 8.1493 + WPRINTK("Timeout connecting to device!\n"); 8.1494 + err = -ENOSYS; 8.1495 + } 8.1496 + return err; 8.1497 } 8.1498 8.1499 static int __init netif_init(void) 8.1500 { 8.1501 - int err = 0; 8.1502 - 8.1503 - if (xen_start_info->flags & SIF_INITDOMAIN) 8.1504 - return 0; 8.1505 + int err = 0; 8.1506 8.1507 - if ((err = xennet_proc_init()) != 0) 8.1508 - return err; 8.1509 + if (xen_start_info->flags & SIF_INITDOMAIN) 8.1510 + return 0; 8.1511 8.1512 - IPRINTK("Initialising virtual ethernet driver.\n"); 8.1513 - 8.1514 -#ifdef CONFIG_XEN_NETDEV_GRANT 8.1515 - IPRINTK("Using grant tables.\n"); 8.1516 + if ((err = xennet_proc_init()) != 0) 8.1517 + return err; 8.1518 8.1519 - /* A grant for every tx ring slot */ 8.1520 - if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE, 8.1521 - &gref_tx_head) < 0) { 8.1522 - printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); 8.1523 - return 1; 8.1524 - } 8.1525 - /* A grant for every rx ring slot */ 8.1526 - if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE, 8.1527 - &gref_rx_head) < 0) { 8.1528 - printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n"); 8.1529 - return 1; 8.1530 - } 8.1531 -#endif 8.1532 + IPRINTK("Initialising virtual ethernet driver.\n"); 8.1533 + 8.1534 + (void)register_inetaddr_notifier(¬ifier_inetdev); 8.1535 8.1536 - 8.1537 - (void)register_inetaddr_notifier(¬ifier_inetdev); 8.1538 + init_net_xenbus(); 8.1539 8.1540 - init_net_xenbus(); 8.1541 + wait_for_netif(); 8.1542 8.1543 - wait_for_netif(); 8.1544 - 8.1545 - return err; 8.1546 + return err; 8.1547 } 8.1548 8.1549 static void netif_exit(void) 8.1550 { 8.1551 -#ifdef CONFIG_XEN_NETDEV_GRANT 8.1552 - gnttab_free_grant_references(gref_tx_head); 8.1553 - gnttab_free_grant_references(gref_rx_head); 8.1554 -#endif 8.1555 } 8.1556 8.1557 #ifdef CONFIG_PROC_FS 8.1558 @@ -1416,147 +1399,159 @@ static void netif_exit(void) 8.1559 #define TARGET_CUR 2UL 8.1560 8.1561 static int xennet_proc_read( 8.1562 - char *page, char **start, off_t off, int count, int *eof, void *data) 8.1563 + char *page, char **start, off_t off, int count, int *eof, void *data) 8.1564 { 8.1565 - struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL); 8.1566 - struct net_private *np = netdev_priv(dev); 8.1567 - int len = 0, which_target = (long)data & 3; 8.1568 + struct net_device *dev = 8.1569 + (struct net_device *)((unsigned long)data & ~3UL); 8.1570 + struct net_private *np = netdev_priv(dev); 8.1571 + int len = 0, which_target = (long)data & 3; 8.1572 8.1573 - switch (which_target) 8.1574 - { 8.1575 - case TARGET_MIN: 8.1576 - len = sprintf(page, "%d\n", np->rx_min_target); 8.1577 - break; 8.1578 - case TARGET_MAX: 8.1579 - len = sprintf(page, "%d\n", np->rx_max_target); 8.1580 - break; 8.1581 - case TARGET_CUR: 8.1582 - len = sprintf(page, "%d\n", np->rx_target); 8.1583 - break; 8.1584 - } 8.1585 + switch (which_target) 8.1586 + { 8.1587 + case TARGET_MIN: 8.1588 + len = sprintf(page, "%d\n", np->rx_min_target); 8.1589 + break; 8.1590 + case TARGET_MAX: 8.1591 + len = sprintf(page, "%d\n", np->rx_max_target); 8.1592 + break; 8.1593 + case TARGET_CUR: 
8.1594 + len = sprintf(page, "%d\n", np->rx_target); 8.1595 + break; 8.1596 + } 8.1597 8.1598 - *eof = 1; 8.1599 - return len; 8.1600 + *eof = 1; 8.1601 + return len; 8.1602 } 8.1603 8.1604 static int xennet_proc_write( 8.1605 - struct file *file, const char __user *buffer, 8.1606 - unsigned long count, void *data) 8.1607 + struct file *file, const char __user *buffer, 8.1608 + unsigned long count, void *data) 8.1609 { 8.1610 - struct net_device *dev = (struct net_device *)((unsigned long)data & ~3UL); 8.1611 - struct net_private *np = netdev_priv(dev); 8.1612 - int which_target = (long)data & 3; 8.1613 - char string[64]; 8.1614 - long target; 8.1615 + struct net_device *dev = 8.1616 + (struct net_device *)((unsigned long)data & ~3UL); 8.1617 + struct net_private *np = netdev_priv(dev); 8.1618 + int which_target = (long)data & 3; 8.1619 + char string[64]; 8.1620 + long target; 8.1621 8.1622 - if (!capable(CAP_SYS_ADMIN)) 8.1623 - return -EPERM; 8.1624 + if (!capable(CAP_SYS_ADMIN)) 8.1625 + return -EPERM; 8.1626 8.1627 - if (count <= 1) 8.1628 - return -EBADMSG; /* runt */ 8.1629 - if (count > sizeof(string)) 8.1630 - return -EFBIG; /* too long */ 8.1631 + if (count <= 1) 8.1632 + return -EBADMSG; /* runt */ 8.1633 + if (count > sizeof(string)) 8.1634 + return -EFBIG; /* too long */ 8.1635 8.1636 - if (copy_from_user(string, buffer, count)) 8.1637 - return -EFAULT; 8.1638 - string[sizeof(string)-1] = '\0'; 8.1639 + if (copy_from_user(string, buffer, count)) 8.1640 + return -EFAULT; 8.1641 + string[sizeof(string)-1] = '\0'; 8.1642 8.1643 - target = simple_strtol(string, NULL, 10); 8.1644 - if (target < RX_MIN_TARGET) 8.1645 - target = RX_MIN_TARGET; 8.1646 - if (target > RX_MAX_TARGET) 8.1647 - target = RX_MAX_TARGET; 8.1648 + target = simple_strtol(string, NULL, 10); 8.1649 + if (target < RX_MIN_TARGET) 8.1650 + target = RX_MIN_TARGET; 8.1651 + if (target > RX_MAX_TARGET) 8.1652 + target = RX_MAX_TARGET; 8.1653 8.1654 - spin_lock(&np->rx_lock); 8.1655 + spin_lock(&np->rx_lock); 8.1656 8.1657 - switch (which_target) 8.1658 - { 8.1659 - case TARGET_MIN: 8.1660 - if (target > np->rx_max_target) 8.1661 - np->rx_max_target = target; 8.1662 - np->rx_min_target = target; 8.1663 - if (target > np->rx_target) 8.1664 - np->rx_target = target; 8.1665 - break; 8.1666 - case TARGET_MAX: 8.1667 - if (target < np->rx_min_target) 8.1668 - np->rx_min_target = target; 8.1669 - np->rx_max_target = target; 8.1670 - if (target < np->rx_target) 8.1671 - np->rx_target = target; 8.1672 - break; 8.1673 - case TARGET_CUR: 8.1674 - break; 8.1675 - } 8.1676 + switch (which_target) 8.1677 + { 8.1678 + case TARGET_MIN: 8.1679 + if (target > np->rx_max_target) 8.1680 + np->rx_max_target = target; 8.1681 + np->rx_min_target = target; 8.1682 + if (target > np->rx_target) 8.1683 + np->rx_target = target; 8.1684 + break; 8.1685 + case TARGET_MAX: 8.1686 + if (target < np->rx_min_target) 8.1687 + np->rx_min_target = target; 8.1688 + np->rx_max_target = target; 8.1689 + if (target < np->rx_target) 8.1690 + np->rx_target = target; 8.1691 + break; 8.1692 + case TARGET_CUR: 8.1693 + break; 8.1694 + } 8.1695 8.1696 - network_alloc_rx_buffers(dev); 8.1697 + network_alloc_rx_buffers(dev); 8.1698 8.1699 - spin_unlock(&np->rx_lock); 8.1700 + spin_unlock(&np->rx_lock); 8.1701 8.1702 - return count; 8.1703 + return count; 8.1704 } 8.1705 8.1706 static int xennet_proc_init(void) 8.1707 { 8.1708 - if (proc_mkdir("xen/net", NULL) == NULL) 8.1709 - return -ENOMEM; 8.1710 - return 0; 8.1711 + if (proc_mkdir("xen/net", NULL) == NULL) 8.1712 
+ return -ENOMEM; 8.1713 + return 0; 8.1714 } 8.1715 8.1716 static int xennet_proc_addif(struct net_device *dev) 8.1717 { 8.1718 - struct proc_dir_entry *dir, *min, *max, *cur; 8.1719 - char name[30]; 8.1720 + struct proc_dir_entry *dir, *min, *max, *cur; 8.1721 + char name[30]; 8.1722 8.1723 - sprintf(name, "xen/net/%s", dev->name); 8.1724 + sprintf(name, "xen/net/%s", dev->name); 8.1725 8.1726 - dir = proc_mkdir(name, NULL); 8.1727 - if (!dir) 8.1728 - goto nomem; 8.1729 + dir = proc_mkdir(name, NULL); 8.1730 + if (!dir) 8.1731 + goto nomem; 8.1732 8.1733 - min = create_proc_entry("rxbuf_min", 0644, dir); 8.1734 - max = create_proc_entry("rxbuf_max", 0644, dir); 8.1735 - cur = create_proc_entry("rxbuf_cur", 0444, dir); 8.1736 - if (!min || !max || !cur) 8.1737 - goto nomem; 8.1738 + min = create_proc_entry("rxbuf_min", 0644, dir); 8.1739 + max = create_proc_entry("rxbuf_max", 0644, dir); 8.1740 + cur = create_proc_entry("rxbuf_cur", 0444, dir); 8.1741 + if (!min || !max || !cur) 8.1742 + goto nomem; 8.1743 8.1744 - min->read_proc = xennet_proc_read; 8.1745 - min->write_proc = xennet_proc_write; 8.1746 - min->data = (void *)((unsigned long)dev | TARGET_MIN); 8.1747 + min->read_proc = xennet_proc_read; 8.1748 + min->write_proc = xennet_proc_write; 8.1749 + min->data = (void *)((unsigned long)dev | TARGET_MIN); 8.1750 8.1751 - max->read_proc = xennet_proc_read; 8.1752 - max->write_proc = xennet_proc_write; 8.1753 - max->data = (void *)((unsigned long)dev | TARGET_MAX); 8.1754 + max->read_proc = xennet_proc_read; 8.1755 + max->write_proc = xennet_proc_write; 8.1756 + max->data = (void *)((unsigned long)dev | TARGET_MAX); 8.1757 8.1758 - cur->read_proc = xennet_proc_read; 8.1759 - cur->write_proc = xennet_proc_write; 8.1760 - cur->data = (void *)((unsigned long)dev | TARGET_CUR); 8.1761 + cur->read_proc = xennet_proc_read; 8.1762 + cur->write_proc = xennet_proc_write; 8.1763 + cur->data = (void *)((unsigned long)dev | TARGET_CUR); 8.1764 8.1765 - return 0; 8.1766 + return 0; 8.1767 8.1768 nomem: 8.1769 - xennet_proc_delif(dev); 8.1770 - return -ENOMEM; 8.1771 + xennet_proc_delif(dev); 8.1772 + return -ENOMEM; 8.1773 } 8.1774 8.1775 static void xennet_proc_delif(struct net_device *dev) 8.1776 { 8.1777 - char name[30]; 8.1778 + char name[30]; 8.1779 8.1780 - sprintf(name, "xen/net/%s/rxbuf_min", dev->name); 8.1781 - remove_proc_entry(name, NULL); 8.1782 + sprintf(name, "xen/net/%s/rxbuf_min", dev->name); 8.1783 + remove_proc_entry(name, NULL); 8.1784 8.1785 - sprintf(name, "xen/net/%s/rxbuf_max", dev->name); 8.1786 - remove_proc_entry(name, NULL); 8.1787 + sprintf(name, "xen/net/%s/rxbuf_max", dev->name); 8.1788 + remove_proc_entry(name, NULL); 8.1789 8.1790 - sprintf(name, "xen/net/%s/rxbuf_cur", dev->name); 8.1791 - remove_proc_entry(name, NULL); 8.1792 + sprintf(name, "xen/net/%s/rxbuf_cur", dev->name); 8.1793 + remove_proc_entry(name, NULL); 8.1794 8.1795 - sprintf(name, "xen/net/%s", dev->name); 8.1796 - remove_proc_entry(name, NULL); 8.1797 + sprintf(name, "xen/net/%s", dev->name); 8.1798 + remove_proc_entry(name, NULL); 8.1799 } 8.1800 8.1801 #endif 8.1802 8.1803 module_init(netif_init); 8.1804 module_exit(netif_exit); 8.1805 + 8.1806 +/* 8.1807 + * Local variables: 8.1808 + * c-file-style: "linux" 8.1809 + * indent-tabs-mode: t 8.1810 + * c-indent-level: 8 8.1811 + * c-basic-offset: 8 8.1812 + * tab-width: 8 8.1813 + * End: 8.1814 + */
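The netfront hunks above replace the old file-scope gref_tx_head/gref_rx_head globals with per-device fields in struct net_private, allocate them in create_netdev(), and release them from the new netif_uninit() hook (and from the exit_free_grefs error path). A condensed sketch of that allocate-on-create / free-on-uninit pattern, using hypothetical names (my_priv, my_create, my_uninit) and assuming the asm-xen/gnttab.h helpers behave as they are used in the diff:

#include <asm-xen/gnttab.h>

struct my_priv {
	grant_ref_t gref_tx_head;
	grant_ref_t gref_rx_head;
};

/* Reserve one grant reference per tx ring slot and one per rx ring slot. */
static int my_create(struct my_priv *np)
{
	if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
					  &np->gref_tx_head) < 0)
		return -ENOMEM;
	if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
					  &np->gref_rx_head) < 0) {
		/* Unwind the tx allocation, as exit_free_grefs does. */
		gnttab_free_grant_references(np->gref_tx_head);
		return -ENOMEM;
	}
	return 0;
}

/* Mirror of netif_uninit(): give both reference ranges back. */
static void my_uninit(struct my_priv *np)
{
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}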
9.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Fri Sep 16 18:06:42 2005 +0000 9.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Fri Sep 16 18:07:50 2005 +0000 9.3 @@ -130,12 +130,12 @@ static int privcmd_ioctl(struct inode *i 9.4 if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end ) 9.5 return -EINVAL; 9.6 9.7 - if ( (rc = direct_remap_pfn_range(vma->vm_mm, 9.8 - msg[j].va&PAGE_MASK, 9.9 - msg[j].mfn, 9.10 - msg[j].npages<<PAGE_SHIFT, 9.11 - vma->vm_page_prot, 9.12 - mmapcmd.dom)) < 0 ) 9.13 + if ( (rc = direct_remap_pfn_range(vma, 9.14 + msg[j].va&PAGE_MASK, 9.15 + msg[j].mfn, 9.16 + msg[j].npages<<PAGE_SHIFT, 9.17 + vma->vm_page_prot, 9.18 + mmapcmd.dom)) < 0 ) 9.19 return rc; 9.20 } 9.21 }
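As the privcmd hunk shows, callers of direct_remap_pfn_range() now hand over the vm_area_struct itself instead of vma->vm_mm. A minimal, hypothetical character-device mmap method written against the new prototype (foo_mmap and the frame-number choice are illustrative, not part of the changeset):

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long mfn  = vma->vm_pgoff;                /* frame supplied by userspace */
	unsigned long size = vma->vm_end - vma->vm_start;  /* map the whole VMA */

	if (direct_remap_pfn_range(vma, vma->vm_start, mfn, size,
				   vma->vm_page_prot, DOMID_IO))
		return -EAGAIN;

	return 0;
}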
10.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h Fri Sep 16 18:06:42 2005 +0000 10.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h Fri Sep 16 18:07:50 2005 +0000 10.3 @@ -460,7 +460,7 @@ void make_pages_writable(void *va, unsig 10.4 #define kern_addr_valid(addr) (1) 10.5 #endif /* !CONFIG_DISCONTIGMEM */ 10.6 10.7 -int direct_remap_pfn_range(struct mm_struct *mm, 10.8 +int direct_remap_pfn_range(struct vm_area_struct *vma, 10.9 unsigned long address, 10.10 unsigned long mfn, 10.11 unsigned long size, 10.12 @@ -474,10 +474,10 @@ int touch_pte_range(struct mm_struct *mm 10.13 unsigned long size); 10.14 10.15 #define io_remap_page_range(vma,from,phys,size,prot) \ 10.16 -direct_remap_pfn_range(vma->vm_mm,from,phys>>PAGE_SHIFT,size,prot,DOMID_IO) 10.17 +direct_remap_pfn_range(vma,from,(phys)>>PAGE_SHIFT,size,prot,DOMID_IO) 10.18 10.19 #define io_remap_pfn_range(vma,from,pfn,size,prot) \ 10.20 -direct_remap_pfn_range(vma->vm_mm,from,pfn,size,prot,DOMID_IO) 10.21 +direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO) 10.22 10.23 #define MK_IOSPACE_PFN(space, pfn) (pfn) 10.24 #define GET_IOSPACE(pfn) 0
11.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Fri Sep 16 18:06:42 2005 +0000 11.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Fri Sep 16 18:07:50 2005 +0000 11.3 @@ -526,7 +526,7 @@ extern int kern_addr_valid(unsigned long 11.4 11.5 #define DOMID_LOCAL (0xFFFFU) 11.6 11.7 -int direct_remap_pfn_range(struct mm_struct *mm, 11.8 +int direct_remap_pfn_range(struct vm_area_struct *vma, 11.9 unsigned long address, 11.10 unsigned long mfn, 11.11 unsigned long size, 11.12 @@ -542,10 +542,10 @@ int touch_pte_range(struct mm_struct *mm 11.13 unsigned long size); 11.14 11.15 #define io_remap_page_range(vma, vaddr, paddr, size, prot) \ 11.16 - direct_remap_pfn_range((vma)->vm_mm,vaddr,paddr>>PAGE_SHIFT,size,prot,DOMID_IO) 11.17 + direct_remap_pfn_range(vma,vaddr,(paddr)>>PAGE_SHIFT,size,prot,DOMID_IO) 11.18 11.19 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 11.20 - direct_remap_pfn_range((vma)->vm_mm,vaddr,pfn,size,prot,DOMID_IO) 11.21 + direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO) 11.22 11.23 #define MK_IOSPACE_PFN(space, pfn) (pfn) 11.24 #define GET_IOSPACE(pfn) 0
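Besides switching their first argument from vma->vm_mm to the vma, the rewritten io_remap_page_range() macros also parenthesise the physical-address argument before shifting it. That only matters when a caller passes an expression built from an operator that binds less tightly than >>; with a hypothetical base | off argument the unparenthesised form would shift off alone. A sketch of the two expansions:

/* Hypothetical call site composing a physical address: */
io_remap_page_range(vma, vaddr, base | off, size, prot);

/*
 * Old macro body, phys>>PAGE_SHIFT, expands the shift as:
 *	base | (off >> PAGE_SHIFT)	<- only 'off' is converted to a frame
 * New macro body, (phys)>>PAGE_SHIFT, expands as intended:
 *	(base | off) >> PAGE_SHIFT
 */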