ia64/xen-unstable

changeset 231:5262380c8422

bitkeeper revision 1.89 (3e58aeffGldVBDUrCPQ3AX0iBm4OYw)

Many files:
Ripped out lots of code from Xen's network transmit path, pending a new scheduler.
devinit.c:
Rename: xen-2.4.16/net/sch_generic.c -> xen-2.4.16/net/devinit.c
sch_generic.c:
Rename: BitKeeper/deleted/.del-sch_generic.c~c8d7d9959cc80952 -> xen-2.4.16/net/sch_generic.c
.del-sch_generic.c~c8d7d9959cc80952:
Delete: xen-2.4.16/net/sch_generic.c
.del-utils.c~e7553afc72d1b648:
Delete: xen-2.4.16/net/utils.c
.del-pkt_sched.h~e13e384d6b974c61:
Delete: xen-2.4.16/include/xeno/pkt_sched.h
author kaf24@labyrinth.cl.cam.ac.uk
date Sun Feb 23 11:22:39 2003 +0000 (2003-02-23)
parents a9994f1785cb
children f73ef0280d7e
files .rootkeys xen-2.4.16/common/network.c xen-2.4.16/drivers/net/net_init.c xen-2.4.16/drivers/scsi/aacraid/aacraid.h xen-2.4.16/drivers/scsi/aacraid/commsup.c xen-2.4.16/drivers/scsi/aacraid/rx.c xen-2.4.16/include/xeno/if_vlan.h xen-2.4.16/include/xeno/interrupt.h xen-2.4.16/include/xeno/netdevice.h xen-2.4.16/include/xeno/pkt_sched.h xen-2.4.16/include/xeno/skbuff.h xen-2.4.16/include/xeno/sockios.h xen-2.4.16/include/xeno/vif.h xen-2.4.16/net/dev.c xen-2.4.16/net/devinit.c xen-2.4.16/net/sch_generic.c xen-2.4.16/net/skbuff.c xen-2.4.16/net/utils.c
line diff
     1.1 --- a/.rootkeys	Fri Feb 21 16:04:44 2003 +0000
     1.2 +++ b/.rootkeys	Sun Feb 23 11:22:39 2003 +0000
     1.3 @@ -260,7 +260,6 @@ 3ddb79c2Fg44_PBPVxHSC0gTOMq4Ow xen-2.4.1
     1.4  3ddb79c0MOVXq8qZDQRGb6z64_xAwg xen-2.4.16/include/xeno/pci_ids.h
     1.5  3e54c38dlSCVdyVM4PKcrSfzLLxWUQ xen-2.4.16/include/xeno/perfc.h
     1.6  3e54c38de9SUSYSAwxDf_DwkpAnQFA xen-2.4.16/include/xeno/perfc_defn.h
     1.7 -3ddb79c2byJwwNNkiES__A9H4Cvc4g xen-2.4.16/include/xeno/pkt_sched.h
     1.8  3ddb79c04nQVR3EYM5L4zxDV_MCo1g xen-2.4.16/include/xeno/prefetch.h
     1.9  3e4540ccU1sgCx8seIMGlahmMfv7yQ xen-2.4.16/include/xeno/reboot.h
    1.10  3ddb79c0LzqqS0LhAQ50ekgj4oGl7Q xen-2.4.16/include/xeno/sched.h
    1.11 @@ -279,10 +278,9 @@ 3ddb79c2Ae5KpzhC9LCYG7mP_Vi4Aw xen-2.4.1
    1.12  3ddb79c4YQCQ6r0xNLLu0jfbM7pVmA xen-2.4.16/net/Makefile
    1.13  3ddb79c4AkfDkTCw0comx4L8wsUOMg xen-2.4.16/net/dev.c
    1.14  3ddb79c4x1L_soh8b-r_1jQW_37Icw xen-2.4.16/net/dev_mcast.c
    1.15 +3ddb79c4KZhNxUuYJ7lul8cc-wRkyg xen-2.4.16/net/devinit.c
    1.16  3ddb79c4NSDwiQ-AmrYdxcRAwLPzwQ xen-2.4.16/net/eth.c
    1.17 -3ddb79c4KZhNxUuYJ7lul8cc-wRkyg xen-2.4.16/net/sch_generic.c
    1.18  3ddb79c4TZj1wXPKQt36O72SddtBNQ xen-2.4.16/net/skbuff.c
    1.19 -3ddb79c4ARyIHqv3Y6YFckIUbyA8Tw xen-2.4.16/net/utils.c
    1.20  3ddb79c4x8dvwPtzclghWAKFWpEBFA xen-2.4.16/tools/Makefile
    1.21  3ddb79c4yGZ7_22QAFFwPzqP4NSHwA xen-2.4.16/tools/elf-reloc.c
    1.22  3ddb79bbYMXGmQTsr5BeGS_RuZ5f_w xenolinux-2.4.16-sparse/Makefile
     2.1 --- a/xen-2.4.16/common/network.c	Fri Feb 21 16:04:44 2003 +0000
     2.2 +++ b/xen-2.4.16/common/network.c	Sun Feb 23 11:22:39 2003 +0000
     2.3 @@ -54,29 +54,27 @@ net_vif_t *create_net_vif(int domain)
     2.4      net_shadow_ring_t *shadow_ring;
     2.5      struct task_struct *dom_task;
     2.6      
     2.7 -    if ( !(dom_task = find_domain_by_id(domain)) ) 
     2.8 -    {
     2.9 -            return NULL;
    2.10 -    }
    2.11 +    if ( !(dom_task = find_domain_by_id(domain)) )
    2.12 +        return NULL;
    2.13      
    2.14      if ( (new_vif = kmem_cache_alloc(net_vif_cache, GFP_KERNEL)) == NULL )
    2.15 -    {
    2.16 -            return NULL;
    2.17 -    }
    2.18 +        return NULL;
    2.19      
    2.20      new_ring = dom_task->net_ring_base + dom_task->num_net_vifs;
    2.21      memset(new_ring, 0, sizeof(net_ring_t));
    2.22  
    2.23      shadow_ring = kmalloc(sizeof(net_shadow_ring_t), GFP_KERNEL);
    2.24 -    if (shadow_ring == NULL) goto fail;
    2.25 +    if ( shadow_ring == NULL ) goto fail;
    2.26      
    2.27      shadow_ring->rx_ring = kmalloc(RX_RING_SIZE
    2.28                      * sizeof(rx_shadow_entry_t), GFP_KERNEL);
    2.29 -    if ( shadow_ring->rx_ring == NULL )
    2.30 +    shadow_ring->tx_ring = kmalloc(TX_RING_SIZE
    2.31 +                    * sizeof(tx_shadow_entry_t), GFP_KERNEL);
    2.32 +    if ( (shadow_ring->rx_ring == NULL) || (shadow_ring->tx_ring == NULL) )
    2.33              goto fail;
    2.34  
    2.35      shadow_ring->rx_prod = shadow_ring->rx_cons = shadow_ring->rx_idx = 0;
    2.36 -    shadow_ring->tx_cons = 0;
    2.37 +    shadow_ring->tx_prod = shadow_ring->tx_cons = shadow_ring->tx_idx = 0;
    2.38      
    2.39      /* Fill in the new vif struct. */
    2.40      
    2.41 @@ -98,7 +96,13 @@ net_vif_t *create_net_vif(int domain)
    2.42      return new_vif;
    2.43      
    2.44  fail:
    2.45 -    printk("VIF allocation failed!\n");
    2.46 +    kmem_cache_free(net_vif_cache, new_vif);
    2.47 +    if ( shadow_ring != NULL )
    2.48 +    {
    2.49 +        if ( shadow_ring->rx_ring ) kfree(shadow_ring->rx_ring);
    2.50 +        if ( shadow_ring->tx_ring ) kfree(shadow_ring->tx_ring);
    2.51 +        kfree(shadow_ring);
    2.52 +    }
    2.53      return NULL;
    2.54  }
    2.55  
    2.56 @@ -125,6 +129,7 @@ void destroy_net_vif(struct task_struct 
    2.57      sys_vif_list[p->net_vif_list[i]->id] = NULL; // system vif list not gc'ed
    2.58      write_unlock(&sys_vif_lock);        
    2.59     
    2.60 +    kfree(p->net_vif_list[i]->shadow_ring->tx_ring);
    2.61      kfree(p->net_vif_list[i]->shadow_ring->rx_ring);
    2.62      kfree(p->net_vif_list[i]->shadow_ring);
    2.63      kmem_cache_free(net_vif_cache, p->net_vif_list[i]);
     3.1 --- a/xen-2.4.16/drivers/net/net_init.c	Fri Feb 21 16:04:44 2003 +0000
     3.2 +++ b/xen-2.4.16/drivers/net/net_init.c	Sun Feb 23 11:22:39 2003 +0000
     3.3 @@ -423,8 +423,7 @@ void ether_setup(struct net_device *dev)
     3.4  	dev->hard_header_len 	= ETH_HLEN;
     3.5  	dev->mtu		= 1500; /* eth_mtu */
     3.6  	dev->addr_len		= ETH_ALEN;
     3.7 -	dev->tx_queue_len	= 100;	/* Ethernet wants good queues */	
     3.8 -	
     3.9 +
    3.10  	memset(dev->broadcast,0xFF, ETH_ALEN);
    3.11  
    3.12  	/* New-style flags. */
    3.13 @@ -449,7 +448,6 @@ void fddi_setup(struct net_device *dev)
    3.14  	dev->hard_header_len	= FDDI_K_SNAP_HLEN+3;	/* Assume 802.2 SNAP hdr len + 3 pad bytes */
    3.15  	dev->mtu				= FDDI_K_SNAP_DLEN;		/* Assume max payload of 802.2 SNAP frame */
    3.16  	dev->addr_len			= FDDI_K_ALEN;
    3.17 -	dev->tx_queue_len		= 100;	/* Long queues on FDDI */
    3.18  	
    3.19  	memset(dev->broadcast, 0xFF, FDDI_K_ALEN);
    3.20  
    3.21 @@ -482,10 +480,9 @@ void hippi_setup(struct net_device *dev)
    3.22  	dev->hard_header_len 	= HIPPI_HLEN;
    3.23  	dev->mtu		= 65280;
    3.24  	dev->addr_len		= HIPPI_ALEN;
    3.25 -	dev->tx_queue_len	= 25 /* 5 */;
    3.26 +
    3.27  	memset(dev->broadcast, 0xFF, HIPPI_ALEN);
    3.28  
    3.29 -
    3.30  	/*
    3.31  	 * HIPPI doesn't support broadcast+multicast and we only use
    3.32  	 * static ARP tables. ARP is disabled by hippi_neigh_setup_dev. 
    3.33 @@ -523,7 +520,6 @@ void ltalk_setup(struct net_device *dev)
    3.34  	dev->hard_header_len 	= LTALK_HLEN;
    3.35  	dev->mtu		= LTALK_MTU;
    3.36  	dev->addr_len		= LTALK_ALEN;
    3.37 -	dev->tx_queue_len	= 10;	
    3.38  	
    3.39  	dev->broadcast[0]	= 0xFF;
    3.40  
    3.41 @@ -594,7 +590,6 @@ void tr_setup(struct net_device *dev)
    3.42  	dev->hard_header_len	= TR_HLEN;
    3.43  	dev->mtu		= 2000;
    3.44  	dev->addr_len		= TR_ALEN;
    3.45 -	dev->tx_queue_len	= 100;	/* Long queues on tr */
    3.46  	
    3.47  	memset(dev->broadcast,0xFF, TR_ALEN);
    3.48  
    3.49 @@ -671,7 +666,6 @@ void fc_setup(struct net_device *dev)
    3.50  	dev->hard_header_len    =        FC_HLEN;
    3.51          dev->mtu                =        2024;
    3.52          dev->addr_len           =        FC_ALEN;
    3.53 -        dev->tx_queue_len       =        100; /* Long queues on fc */
    3.54  
    3.55          memset(dev->broadcast,0xFF, FC_ALEN);
    3.56  
     4.1 --- a/xen-2.4.16/drivers/scsi/aacraid/aacraid.h	Fri Feb 21 16:04:44 2003 +0000
     4.2 +++ b/xen-2.4.16/drivers/scsi/aacraid/aacraid.h	Sun Feb 23 11:22:39 2003 +0000
     4.3 @@ -6,10 +6,10 @@
     4.4  
     4.5  #include <asm/byteorder.h>
     4.6  
     4.7 -#define TRY_SOFTIRQ
     4.8 -#ifdef TRY_SOFTIRQ
     4.9 +#define TRY_TASKLET
    4.10 +#ifdef TRY_TASKLET
    4.11  /* XXX SMH: trying to use softirqs to trigger stuff done prev by threads */
    4.12 -#include <xeno/interrupt.h>  /* for softirq stuff */
    4.13 +#include <xeno/interrupt.h>  /* for tasklet/softirq stuff */
    4.14  #endif
    4.15  
    4.16  /*------------------------------------------------------------------------------
    4.17 @@ -1408,8 +1408,9 @@ int aac_rx_init(struct aac_dev *dev, uns
    4.18  int aac_sa_init(struct aac_dev *dev, unsigned long devNumber);
    4.19  unsigned int aac_response_normal(struct aac_queue * q);
    4.20  unsigned int aac_command_normal(struct aac_queue * q);
    4.21 -#ifdef TRY_SOFTIRQ
    4.22 -int aac_command_thread(struct softirq_action *h); 
    4.23 +#ifdef TRY_TASKLET
    4.24 +extern struct tasklet_struct aac_command_tasklet;
    4.25 +int aac_command_thread(unsigned long data);
    4.26  #else
    4.27  int aac_command_thread(struct aac_dev * dev);
    4.28  #endif
     5.1 --- a/xen-2.4.16/drivers/scsi/aacraid/commsup.c	Fri Feb 21 16:04:44 2003 +0000
     5.2 +++ b/xen-2.4.16/drivers/scsi/aacraid/commsup.c	Sun Feb 23 11:22:39 2003 +0000
     5.3 @@ -39,7 +39,7 @@
     5.4  #include <xeno/pci.h>
     5.5  #include <xeno/spinlock.h>
     5.6  
     5.7 -#include <xeno/interrupt.h> // for softirq stuff 
     5.8 +#include <xeno/interrupt.h> /* tasklet stuff */
     5.9  
    5.10  /*  #include <xeno/slab.h> */
    5.11  /*  #include <xeno/completion.h> */
    5.12 @@ -529,10 +529,15 @@ int fib_send(u16 command, struct fib * f
    5.13  #if 0
    5.14  	down(&fibptr->event_wait);
    5.15  #endif
    5.16 -#ifdef TRY_SOFTIRQ
    5.17 +#ifdef TRY_TASKLET
    5.18 +        /*
    5.19 +         * XXX KAF: Well, this is pretty gross. We should probably
    5.20 +         * do_softirq() after scheduling the tasklet, as long as we
    5.21 +         * are _sure_ we hold no locks here...
    5.22 +         */
    5.23  	printk("about to softirq aac_command_thread...\n"); 
    5.24  	while (!fibptr->done) { 
    5.25 -	    raise_softirq(SCSI_LOW_SOFTIRQ); 
    5.26 +            tasklet_schedule(&aac_command_tasklet);
    5.27  	    mdelay(100); 
    5.28  	}
    5.29  	printk("back from softirq cmd thread and fibptr->done!\n"); 
    5.30 @@ -837,13 +842,14 @@ static void aac_handle_aif(struct aac_de
    5.31   *	more FIBs.
    5.32   */
    5.33   
    5.34 -#ifndef TRY_SOFTIRQ
    5.35 +#ifndef TRY_TASKLET
    5.36 +DECLARE_TASKLET_DISABLED(aac_command_tasklet, aac_command_thread, 0);
    5.37  int aac_command_thread(struct aac_dev * dev)
    5.38  {
    5.39  #else
    5.40 -int aac_command_thread(struct softirq_action *h)
    5.41 +int aac_command_thread(unsigned long data)
    5.42  {   
    5.43 -    struct aac_dev *dev = (struct aac_dev *)h->data; 
    5.44 +    struct aac_dev *dev = (struct aac_dev *)data; 
    5.45  #endif
    5.46      struct hw_fib *fib, *newfib;
    5.47      struct fib fibptr; /* for error logging */
     6.1 --- a/xen-2.4.16/drivers/scsi/aacraid/rx.c	Fri Feb 21 16:04:44 2003 +0000
     6.2 +++ b/xen-2.4.16/drivers/scsi/aacraid/rx.c	Sun Feb 23 11:22:39 2003 +0000
     6.3 @@ -437,16 +437,16 @@ int aac_rx_init(struct aac_dev *dev, uns
     6.4      
     6.5      if (aac_init_adapter(dev) == NULL)
     6.6  	return -1;
     6.7 -#if 0
     6.8 +#ifdef TRY_TASKLET
     6.9 +    aac_command_tasklet.data = (unsigned long)dev;
    6.10 +    tasklet_enable(&aac_command_tasklet);
    6.11 +#else
    6.12      /*
    6.13       *	Start any kernel threads needed
    6.14       */
    6.15      dev->thread_pid = kernel_thread((int (*)(void *))aac_command_thread, 
    6.16  				    dev, 0);
    6.17 -#else 
    6.18 -    /* XXX SMH: just put in a softirq handler instead... */
    6.19 -    open_softirq(SCSI_LOW_SOFTIRQ, aac_command_thread, dev); 
    6.20 -#endif
    6.21 +#endif 
    6.22  
    6.23      /*
    6.24       *	Tell the adapter that all is configured, and it can start
     7.1 --- a/xen-2.4.16/include/xeno/if_vlan.h	Fri Feb 21 16:04:44 2003 +0000
     7.2 +++ b/xen-2.4.16/include/xeno/if_vlan.h	Sun Feb 23 11:22:39 2003 +0000
     7.3 @@ -135,12 +135,19 @@ struct vlan_skb_tx_cookie {
     7.4  	u32	vlan_tag;
     7.5  };
     7.6  
     7.7 +#if 0
     7.8  #define VLAN_TX_COOKIE_MAGIC	0x564c414e	/* "VLAN" in ascii. */
     7.9  #define VLAN_TX_SKB_CB(__skb)	((struct vlan_skb_tx_cookie *)&((__skb)->cb[0]))
    7.10  #define vlan_tx_tag_present(__skb) \
    7.11  	(VLAN_TX_SKB_CB(__skb)->magic == VLAN_TX_COOKIE_MAGIC)
    7.12  #define vlan_tx_tag_get(__skb)	(VLAN_TX_SKB_CB(__skb)->vlan_tag)
    7.13 +#else /* XXX KAF: We don't support vlan tagging at the moment. */
    7.14 +#define VLAN_TX_SKB_CB(__skb)	   NULL
    7.15 +#define vlan_tx_tag_present(__skb) 0
    7.16 +#define vlan_tx_tag_get(__skb)	   0
    7.17 +#endif
    7.18  
    7.19 +#if 0
    7.20  /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
    7.21  static inline int __vlan_hwaccel_rx(struct sk_buff *skb,
    7.22  				    struct vlan_group *grp,
    7.23 @@ -203,6 +210,9 @@ static inline int vlan_hwaccel_receive_s
    7.24  {
    7.25  	return __vlan_hwaccel_rx(skb, grp, vlan_tag, 1);
    7.26  }
    7.27 +#else
    7.28 +#define vlan_hwaccel_rx(_skb, _grp, _tag) (netif_rx(_skb))
    7.29 +#endif
    7.30  #endif /* __KERNEL__ */
    7.31  
    7.32  /* VLAN IOCTLs are found in sockios.h */
     8.1 --- a/xen-2.4.16/include/xeno/interrupt.h	Fri Feb 21 16:04:44 2003 +0000
     8.2 +++ b/xen-2.4.16/include/xeno/interrupt.h	Sun Feb 23 11:22:39 2003 +0000
     8.3 @@ -27,19 +27,8 @@ struct irqaction {
     8.4  enum {
     8.5  	TIMER_BH = 0,
     8.6  	TQUEUE_BH,
     8.7 -	DIGI_BH,
     8.8 -	SERIAL_BH,
     8.9 -	RISCOM8_BH,
    8.10 -	SPECIALIX_BH,
    8.11 -	AURORA_BH,
    8.12 -	ESP_BH,
    8.13  	SCSI_BH,
    8.14 -	IMMEDIATE_BH,
    8.15 -	CYCLADES_BH,
    8.16 -	CM206_BH,
    8.17 -	JS_BH,
    8.18 -	MACSERIAL_BH,
    8.19 -	ISICOM_BH
    8.20 +	IMMEDIATE_BH
    8.21  };
    8.22  
    8.23  #include <asm/hardirq.h>
    8.24 @@ -56,10 +45,7 @@ enum {
    8.25  enum
    8.26  {
    8.27  	HI_SOFTIRQ=0,
    8.28 -	NET_TX_SOFTIRQ,
    8.29 -	NET_RX_SOFTIRQ,
    8.30 -	TASKLET_SOFTIRQ, 
    8.31 -	SCSI_LOW_SOFTIRQ,
    8.32 +	TASKLET_SOFTIRQ
    8.33  };
    8.34  
    8.35  /* softirq mask and active fields moved to irq_cpustat_t in
     9.1 --- a/xen-2.4.16/include/xeno/netdevice.h	Fri Feb 21 16:04:44 2003 +0000
     9.2 +++ b/xen-2.4.16/include/xeno/netdevice.h	Sun Feb 23 11:22:39 2003 +0000
     9.3 @@ -25,17 +25,18 @@
     9.4  #ifndef _LINUX_NETDEVICE_H
     9.5  #define _LINUX_NETDEVICE_H
     9.6  
     9.7 -#include <linux/if.h>
     9.8 -#include <linux/if_ether.h>
     9.9 -#include <linux/if_packet.h>
    9.10 -#include <linux/sched.h>
    9.11 +#include <xeno/if.h>
    9.12 +#include <xeno/if_ether.h>
    9.13 +#include <xeno/if_packet.h>
    9.14 +#include <xeno/sched.h>
    9.15 +#include <xeno/interrupt.h>
    9.16  
    9.17  #include <asm/atomic.h>
    9.18  #include <asm/cache.h>
    9.19  #include <asm/byteorder.h>
    9.20  
    9.21  #ifdef __KERNEL__
    9.22 -#include <linux/config.h>
    9.23 +#include <xeno/config.h>
    9.24  
    9.25  struct divert_blk;
    9.26  struct vlan_group;
    9.27 @@ -66,28 +67,6 @@ struct vlan_group;
    9.28  #define MAX_ADDR_LEN	8		/* Largest hardware address length */
    9.29  
    9.30  /*
    9.31 - *	Compute the worst case header length according to the protocols
    9.32 - *	used.
    9.33 - */
    9.34 - 
    9.35 -#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
    9.36 -#define LL_MAX_HEADER	32
    9.37 -#else
    9.38 -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
    9.39 -#define LL_MAX_HEADER	96
    9.40 -#else
    9.41 -#define LL_MAX_HEADER	48
    9.42 -#endif
    9.43 -#endif
    9.44 -
    9.45 -#if !defined(CONFIG_NET_IPIP) && \
    9.46 -    !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE)
    9.47 -#define MAX_HEADER LL_MAX_HEADER
    9.48 -#else
    9.49 -#define MAX_HEADER (LL_MAX_HEADER + 48)
    9.50 -#endif
    9.51 -
    9.52 -/*
    9.53   *	Network device statistics. Akin to the 2.0 ether stats but
    9.54   *	with byte counters.
    9.55   */
    9.56 @@ -141,8 +120,8 @@ enum {
    9.57  
    9.58  extern const char *if_port_text[];
    9.59  
    9.60 -#include <linux/cache.h>
    9.61 -#include <linux/skbuff.h>
    9.62 +#include <xeno/cache.h>
    9.63 +#include <xeno/skbuff.h>
    9.64  
    9.65  struct neighbour;
    9.66  struct neigh_parms;
    9.67 @@ -311,18 +290,6 @@ struct net_device
    9.68  	void                    *dn_ptr;        /* DECnet specific data */
    9.69  	void                    *ip6_ptr;       /* IPv6 specific data */
    9.70  	void			*ec_ptr;	/* Econet specific data	*/
    9.71 -#if 0
    9.72 -						/* IAP: add fields but
    9.73 -						nothing else */		
    9.74 -	struct list_head        poll_list;      /* Link to poll list    */
    9.75 -	int                     quota;
    9.76 -	int                     weight;
    9.77 -#endif
    9.78 -	struct Qdisc		*qdisc;
    9.79 -	struct Qdisc		*qdisc_sleeping;
    9.80 -	struct Qdisc		*qdisc_list;
    9.81 -	struct Qdisc		*qdisc_ingress;
    9.82 -	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
    9.83  
    9.84  	/* hard_start_xmit synchronizer */
    9.85  	spinlock_t		xmit_lock;
    9.86 @@ -425,12 +392,10 @@ struct packet_type
    9.87  };
    9.88  
    9.89  
    9.90 -#include <linux/interrupt.h>
    9.91 -//#include <linux/notifier.h>
    9.92 +#include <xeno/interrupt.h>
    9.93  
    9.94 -extern struct net_device		loopback_dev;		/* The loopback */
    9.95 -extern struct net_device		*dev_base;		/* All devices */
    9.96 -extern rwlock_t				dev_base_lock;		/* Device list lock */
    9.97 +extern struct net_device		*dev_base;      /* All devices */
    9.98 +extern rwlock_t				dev_base_lock;	/* Device list lock */
    9.99  
   9.100  extern int			netdev_boot_setup_add(char *name, struct ifmap *map);
   9.101  extern int 			netdev_boot_setup_check(struct net_device *dev);
   9.102 @@ -447,8 +412,10 @@ extern int		dev_close(struct net_device 
   9.103  extern int		dev_queue_xmit(struct sk_buff *skb);
   9.104  extern int		register_netdevice(struct net_device *dev);
   9.105  extern int		unregister_netdevice(struct net_device *dev);
   9.106 -//extern int 		register_netdevice_notifier(struct notifier_block *nb);
   9.107 -//extern int		unregister_netdevice_notifier(struct notifier_block *nb);
   9.108 +extern void dev_shutdown(struct net_device *dev);
   9.109 +extern void dev_activate(struct net_device *dev);
   9.110 +extern void dev_deactivate(struct net_device *dev);
   9.111 +extern void dev_init_scheduler(struct net_device *dev);
   9.112  extern int		dev_new_index(void);
   9.113  extern struct net_device	*dev_get_by_index(int ifindex);
   9.114  extern struct net_device	*__dev_get_by_index(int ifindex);
   9.115 @@ -461,17 +428,11 @@ static inline int unregister_gifconf(uns
   9.116  	return register_gifconf(family, 0);
   9.117  }
   9.118  
   9.119 -/*
   9.120 - * Incoming packets are placed on per-cpu queues so that
   9.121 - * no locking is needed.
   9.122 - */
   9.123 +extern struct tasklet_struct net_tx_tasklet;
   9.124 +
   9.125  
   9.126  struct softnet_data
   9.127  {
   9.128 -	int			throttle;
   9.129 -	int			cng_level;
   9.130 -	int			avg_blog;
   9.131 -	struct sk_buff_head	input_pkt_queue;
   9.132  	struct net_device	*output_queue;
   9.133  	struct sk_buff		*completion_queue;
   9.134  } __attribute__((__aligned__(SMP_CACHE_BYTES)));
   9.135 @@ -490,7 +451,7 @@ static inline void __netif_schedule(stru
   9.136  		local_irq_save(flags);
   9.137  		dev->next_sched = softnet_data[cpu].output_queue;
   9.138  		softnet_data[cpu].output_queue = dev;
   9.139 -		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
   9.140 +                tasklet_schedule(&net_tx_tasklet);
   9.141  		local_irq_restore(flags);
   9.142  	}
   9.143  }
   9.144 @@ -533,13 +494,13 @@ static inline int netif_running(struct n
   9.145  static inline void dev_kfree_skb_irq(struct sk_buff *skb)
   9.146  {
   9.147  	if (atomic_dec_and_test(&skb->users)) {
   9.148 -		int cpu =smp_processor_id();
   9.149 +		int cpu = smp_processor_id();
   9.150  		unsigned long flags;
   9.151  
   9.152  		local_irq_save(flags);
   9.153  		skb->next = softnet_data[cpu].completion_queue;
   9.154  		softnet_data[cpu].completion_queue = skb;
   9.155 -		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
   9.156 +                tasklet_schedule(&net_tx_tasklet);
   9.157  		local_irq_restore(flags);
   9.158  	}
   9.159  }
   9.160 @@ -577,11 +538,6 @@ static inline int netif_rx_ni(struct sk_
   9.161         return err;
   9.162  }
   9.163  
   9.164 -static inline void dev_init_buffers(struct net_device *dev)
   9.165 -{
   9.166 -	/* WILL BE REMOVED IN 2.5.0 */
   9.167 -}
   9.168 -
   9.169  extern int netdev_finish_unregister(struct net_device *dev);
   9.170  
   9.171  static inline void dev_put(struct net_device *dev)
    10.1 --- a/xen-2.4.16/include/xeno/pkt_sched.h	Fri Feb 21 16:04:44 2003 +0000
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,816 +0,0 @@
    10.4 -#ifndef __NET_PKT_SCHED_H
    10.5 -#define __NET_PKT_SCHED_H
    10.6 -
    10.7 -#define PSCHED_GETTIMEOFDAY	1
    10.8 -#define PSCHED_JIFFIES 		2
    10.9 -#define PSCHED_CPU 		3
   10.10 -
   10.11 -#define PSCHED_CLOCK_SOURCE	PSCHED_JIFFIES
   10.12 -
   10.13 -#include <linux/config.h>
   10.14 -#include <linux/pkt_sched.h>
   10.15 -//#include <net/pkt_cls.h>
   10.16 -
   10.17 -#ifdef CONFIG_X86_TSC
   10.18 -#include <asm/msr.h>
   10.19 -#endif
   10.20 -
   10.21 -struct rtattr;
   10.22 -struct Qdisc;
   10.23 -
   10.24 -struct qdisc_walker
   10.25 -{
   10.26 -	int	stop;
   10.27 -	int	skip;
   10.28 -	int	count;
   10.29 -	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
   10.30 -};
   10.31 -
   10.32 -struct Qdisc_class_ops
   10.33 -{
   10.34 -	/* Child qdisc manipulation */
   10.35 -	int			(*graft)(struct Qdisc *, unsigned long cl, struct Qdisc *, struct Qdisc **);
   10.36 -	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
   10.37 -
   10.38 -	/* Class manipulation routines */
   10.39 -	unsigned long		(*get)(struct Qdisc *, u32 classid);
   10.40 -	void			(*put)(struct Qdisc *, unsigned long);
   10.41 -	int			(*change)(struct Qdisc *, u32, u32, struct rtattr **, unsigned long *);
   10.42 -	int			(*delete)(struct Qdisc *, unsigned long);
   10.43 -	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);
   10.44 -
   10.45 -#if 0
   10.46 -	/* Filter manipulation */
   10.47 -	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
   10.48 -	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long, u32 classid);
   10.49 -	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
   10.50 -#endif
   10.51 -};
   10.52 -
   10.53 -struct Qdisc_ops
   10.54 -{
   10.55 -	struct Qdisc_ops	*next;
   10.56 -	struct Qdisc_class_ops	*cl_ops;
   10.57 -	char			id[IFNAMSIZ];
   10.58 -	int			priv_size;
   10.59 -
   10.60 -	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
   10.61 -	struct sk_buff *	(*dequeue)(struct Qdisc *);
   10.62 -	int 			(*requeue)(struct sk_buff *, struct Qdisc *);
   10.63 -	int			(*drop)(struct Qdisc *);
   10.64 -
   10.65 -	int			(*init)(struct Qdisc *, struct rtattr *arg);
   10.66 -	void			(*reset)(struct Qdisc *);
   10.67 -	void			(*destroy)(struct Qdisc *);
   10.68 -	int			(*change)(struct Qdisc *, struct rtattr *arg);
   10.69 -
   10.70 -	int			(*dump)(struct Qdisc *, struct sk_buff *);
   10.71 -};
   10.72 -
   10.73 -extern rwlock_t qdisc_tree_lock;
   10.74 -
   10.75 -struct Qdisc
   10.76 -{
   10.77 -	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
   10.78 -	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
   10.79 -	unsigned		flags;
   10.80 -#define TCQ_F_BUILTIN	1
   10.81 -#define TCQ_F_THROTTLED	2
   10.82 -#define TCQ_F_INGRES	4
   10.83 -	struct Qdisc_ops	*ops;
   10.84 -	struct Qdisc		*next;
   10.85 -	u32			handle;
   10.86 -	atomic_t		refcnt;
   10.87 -	struct sk_buff_head	q;
   10.88 -	struct net_device	*dev;
   10.89 -
   10.90 -    //struct tc_stats		stats;
   10.91 -	int			(*reshape_fail)(struct sk_buff *skb, struct Qdisc *q);
   10.92 -
   10.93 -	/* This field is deprecated, but it is still used by CBQ
   10.94 -	 * and it will live until better solution will be invented.
   10.95 -	 */
   10.96 -	struct Qdisc		*__parent;
   10.97 -
   10.98 -	char			data[0];
   10.99 -};
  10.100 -
  10.101 -struct qdisc_rate_table
  10.102 -{
  10.103 -    //struct tc_ratespec rate;
  10.104 -	u32		data[256];
  10.105 -	struct qdisc_rate_table *next;
  10.106 -	int		refcnt;
  10.107 -};
  10.108 -
  10.109 -static inline void sch_tree_lock(struct Qdisc *q)
  10.110 -{
  10.111 -	write_lock(&qdisc_tree_lock);
  10.112 -	spin_lock_bh(&q->dev->queue_lock);
  10.113 -}
  10.114 -
  10.115 -static inline void sch_tree_unlock(struct Qdisc *q)
  10.116 -{
  10.117 -	spin_unlock_bh(&q->dev->queue_lock);
  10.118 -	write_unlock(&qdisc_tree_lock);
  10.119 -}
  10.120 -
  10.121 -#if 0
  10.122 -static inline void tcf_tree_lock(struct tcf_proto *tp)
  10.123 -{
  10.124 -	write_lock(&qdisc_tree_lock);
  10.125 -	spin_lock_bh(&tp->q->dev->queue_lock);
  10.126 -}
  10.127 -
  10.128 -static inline void tcf_tree_unlock(struct tcf_proto *tp)
  10.129 -{
  10.130 -	spin_unlock_bh(&tp->q->dev->queue_lock);
  10.131 -	write_unlock(&qdisc_tree_lock);
  10.132 -}
  10.133 -
  10.134 -static inline unsigned long
  10.135 -cls_set_class(struct tcf_proto *tp, unsigned long *clp, unsigned long cl)
  10.136 -{
  10.137 -	unsigned long old_cl;
  10.138 -
  10.139 -	tcf_tree_lock(tp);
  10.140 -	old_cl = *clp;
  10.141 -	*clp = cl;
  10.142 -	tcf_tree_unlock(tp);
  10.143 -	return old_cl;
  10.144 -}
  10.145 -
  10.146 -static inline unsigned long
  10.147 -__cls_set_class(unsigned long *clp, unsigned long cl)
  10.148 -{
  10.149 -	unsigned long old_cl;
  10.150 -
  10.151 -	old_cl = *clp;
  10.152 -	*clp = cl;
  10.153 -	return old_cl;
  10.154 -}
  10.155 -#endif
  10.156 -
  10.157 -
  10.158 -/* 
  10.159 -   Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth
  10.160 -   
  10.161 -   Normal IP packet size ~ 512byte, hence:
  10.162 -
  10.163 -   0.5Kbyte/1Mbyte/sec = 0.5msec, so that we need 50usec timer for
  10.164 -   10Mbit ethernet.
  10.165 -
  10.166 -   10msec resolution -> <50Kbit/sec.
  10.167 -   
  10.168 -   The result: [34]86 is not good choice for QoS router :-(
  10.169 -
  10.170 -   The things are not so bad, because we may use artifical
  10.171 -   clock evaluated by integration of network data flow
  10.172 -   in the most critical places.
  10.173 -
  10.174 -   Note: we do not use fastgettimeofday.
  10.175 -   The reason is that, when it is not the same thing as
  10.176 -   gettimeofday, it returns invalid timestamp, which is
  10.177 -   not updated, when net_bh is active.
  10.178 -
  10.179 -   So, use PSCHED_CLOCK_SOURCE = PSCHED_CPU on alpha and pentiums
  10.180 -   with rtdsc. And PSCHED_JIFFIES on all other architectures, including [34]86
  10.181 -   and pentiums without rtdsc.
  10.182 -   You can use PSCHED_GETTIMEOFDAY on another architectures,
  10.183 -   which have fast and precise clock source, but it is too expensive.
  10.184 - */
  10.185 -
  10.186 -/* General note about internal clock.
  10.187 -
  10.188 -   Any clock source returns time intervals, measured in units
  10.189 -   close to 1usec. With source PSCHED_GETTIMEOFDAY it is precisely
  10.190 -   microseconds, otherwise something close but different chosen to minimize
  10.191 -   arithmetic cost. Ratio usec/internal untis in form nominator/denominator
  10.192 -   may be read from /proc/net/psched.
  10.193 - */
  10.194 -
  10.195 -
  10.196 -#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
  10.197 -
  10.198 -typedef struct timeval	psched_time_t;
  10.199 -typedef long		psched_tdiff_t;
  10.200 -
  10.201 -#define PSCHED_GET_TIME(stamp) do_gettimeofday(&(stamp))
  10.202 -#define PSCHED_US2JIFFIE(usecs) (((usecs)+(1000000/HZ-1))/(1000000/HZ))
  10.203 -
  10.204 -#define PSCHED_EXPORTLIST EXPORT_SYMBOL(psched_tod_diff);
  10.205 -
  10.206 -#else /* PSCHED_CLOCK_SOURCE != PSCHED_GETTIMEOFDAY */
  10.207 -
  10.208 -#define PSCHED_EXPORTLIST PSCHED_EXPORTLIST_1 PSCHED_EXPORTLIST_2
  10.209 -
  10.210 -typedef u64	psched_time_t;
  10.211 -typedef long	psched_tdiff_t;
  10.212 -
  10.213 -extern psched_time_t	psched_time_base;
  10.214 -
  10.215 -#if PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES
  10.216 -
  10.217 -#if HZ == 100
  10.218 -#define PSCHED_JSCALE 13
  10.219 -#elif HZ == 1024
  10.220 -#define PSCHED_JSCALE 10
  10.221 -#else
  10.222 -#define PSCHED_JSCALE 0
  10.223 -#endif
  10.224 -
  10.225 -#define PSCHED_EXPORTLIST_2
  10.226 -
  10.227 -#if ~0UL == 0xFFFFFFFF
  10.228 -
  10.229 -#define PSCHED_WATCHER unsigned long
  10.230 -
  10.231 -extern PSCHED_WATCHER psched_time_mark;
  10.232 -
  10.233 -#define PSCHED_GET_TIME(stamp) ((stamp) = psched_time_base + (((unsigned long)(jiffies-psched_time_mark))<<PSCHED_JSCALE))
  10.234 -
  10.235 -#define PSCHED_EXPORTLIST_1 EXPORT_SYMBOL(psched_time_base); \
  10.236 -                            EXPORT_SYMBOL(psched_time_mark);
  10.237 -
  10.238 -#else
  10.239 -
  10.240 -#define PSCHED_GET_TIME(stamp) ((stamp) = (jiffies<<PSCHED_JSCALE))
  10.241 -
  10.242 -#define PSCHED_EXPORTLIST_1 
  10.243 -
  10.244 -#endif
  10.245 -
  10.246 -#define PSCHED_US2JIFFIE(delay) (((delay)+(1<<PSCHED_JSCALE)-1)>>PSCHED_JSCALE)
  10.247 -
  10.248 -#elif PSCHED_CLOCK_SOURCE == PSCHED_CPU
  10.249 -
  10.250 -extern psched_tdiff_t psched_clock_per_hz;
  10.251 -extern int psched_clock_scale;
  10.252 -
  10.253 -#define PSCHED_EXPORTLIST_2 EXPORT_SYMBOL(psched_clock_per_hz); \
  10.254 -                            EXPORT_SYMBOL(psched_clock_scale);
  10.255 -
  10.256 -#define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
  10.257 -
  10.258 -#ifdef CONFIG_X86_TSC
  10.259 -
  10.260 -#define PSCHED_GET_TIME(stamp) \
  10.261 -({ u64 __cur; \
  10.262 -   rdtscll(__cur); \
  10.263 -   (stamp) = __cur>>psched_clock_scale; \
  10.264 -})
  10.265 -
  10.266 -#define PSCHED_EXPORTLIST_1
  10.267 -
  10.268 -#elif defined (__alpha__)
  10.269 -
  10.270 -#define PSCHED_WATCHER u32
  10.271 -
  10.272 -extern PSCHED_WATCHER psched_time_mark;
  10.273 -
  10.274 -#define PSCHED_GET_TIME(stamp) \
  10.275 -({ u32 __res; \
  10.276 -   __asm__ __volatile__ ("rpcc %0" : "r="(__res)); \
  10.277 -   if (__res <= psched_time_mark) psched_time_base += 0x100000000UL; \
  10.278 -   psched_time_mark = __res; \
  10.279 -   (stamp) = (psched_time_base + __res)>>psched_clock_scale; \
  10.280 -})
  10.281 -
  10.282 -#define PSCHED_EXPORTLIST_1 EXPORT_SYMBOL(psched_time_base); \
  10.283 -                            EXPORT_SYMBOL(psched_time_mark);
  10.284 -
  10.285 -#else
  10.286 -
  10.287 -#error PSCHED_CLOCK_SOURCE=PSCHED_CPU is not supported on this arch.
  10.288 -
  10.289 -#endif /* ARCH */
  10.290 -
  10.291 -#endif /* PSCHED_CLOCK_SOURCE == PSCHED_JIFFIES */
  10.292 -
  10.293 -#endif /* PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY */
  10.294 -
  10.295 -#if PSCHED_CLOCK_SOURCE == PSCHED_GETTIMEOFDAY
  10.296 -#define PSCHED_TDIFF(tv1, tv2) \
  10.297 -({ \
  10.298 -	   int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
  10.299 -	   int __delta = (tv1).tv_usec - (tv2).tv_usec; \
  10.300 -	   if (__delta_sec) { \
  10.301 -	           switch (__delta_sec) { \
  10.302 -		   default: \
  10.303 -			   __delta = 0; \
  10.304 -		   case 2: \
  10.305 -			   __delta += 1000000; \
  10.306 -		   case 1: \
  10.307 -			   __delta += 1000000; \
  10.308 -	           } \
  10.309 -	   } \
  10.310 -	   __delta; \
  10.311 -})
  10.312 -
  10.313 -extern int psched_tod_diff(int delta_sec, int bound);
  10.314 -
  10.315 -#define PSCHED_TDIFF_SAFE(tv1, tv2, bound, guard) \
  10.316 -({ \
  10.317 -	   int __delta_sec = (tv1).tv_sec - (tv2).tv_sec; \
  10.318 -	   int __delta = (tv1).tv_usec - (tv2).tv_usec; \
  10.319 -	   switch (__delta_sec) { \
  10.320 -	   default: \
  10.321 -		   __delta = psched_tod_diff(__delta_sec, bound); guard; break; \
  10.322 -	   case 2: \
  10.323 -		   __delta += 1000000; \
  10.324 -	   case 1: \
  10.325 -		   __delta += 1000000; \
  10.326 -	   case 0: ; \
  10.327 -	   } \
  10.328 -	   __delta; \
  10.329 -})
  10.330 -
  10.331 -#define PSCHED_TLESS(tv1, tv2) (((tv1).tv_usec < (tv2).tv_usec && \
  10.332 -				(tv1).tv_sec <= (tv2).tv_sec) || \
  10.333 -				 (tv1).tv_sec < (tv2).tv_sec)
  10.334 -
  10.335 -#define PSCHED_TADD2(tv, delta, tv_res) \
  10.336 -({ \
  10.337 -	   int __delta = (tv).tv_usec + (delta); \
  10.338 -	   (tv_res).tv_sec = (tv).tv_sec; \
  10.339 -	   if (__delta > 1000000) { (tv_res).tv_sec++; __delta -= 1000000; } \
  10.340 -	   (tv_res).tv_usec = __delta; \
  10.341 -})
  10.342 -
  10.343 -#define PSCHED_TADD(tv, delta) \
  10.344 -({ \
  10.345 -	   (tv).tv_usec += (delta); \
  10.346 -	   if ((tv).tv_usec > 1000000) { (tv).tv_sec++; \
  10.347 -		 (tv).tv_usec -= 1000000; } \
  10.348 -})
  10.349 -
  10.350 -/* Set/check that time is in the "past perfect";
  10.351 -   it depends on concrete representation of system time
  10.352 - */
  10.353 -
  10.354 -#define PSCHED_SET_PASTPERFECT(t)	((t).tv_sec = 0)
  10.355 -#define PSCHED_IS_PASTPERFECT(t)	((t).tv_sec == 0)
  10.356 -
  10.357 -#define	PSCHED_AUDIT_TDIFF(t) ({ if ((t) > 2000000) (t) = 2000000; })
  10.358 -
  10.359 -#else
  10.360 -
  10.361 -#define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
  10.362 -#define PSCHED_TDIFF_SAFE(tv1, tv2, bound, guard) \
  10.363 -({ \
  10.364 -	   long __delta = (tv1) - (tv2); \
  10.365 -	   if ( __delta > (bound)) {  __delta = (bound); guard; } \
  10.366 -	   __delta; \
  10.367 -})
  10.368 -
  10.369 -
  10.370 -#define PSCHED_TLESS(tv1, tv2) ((tv1) < (tv2))
  10.371 -#define PSCHED_TADD2(tv, delta, tv_res) ((tv_res) = (tv) + (delta))
  10.372 -#define PSCHED_TADD(tv, delta) ((tv) += (delta))
  10.373 -#define PSCHED_SET_PASTPERFECT(t)	((t) = 0)
  10.374 -#define PSCHED_IS_PASTPERFECT(t)	((t) == 0)
  10.375 -#define	PSCHED_AUDIT_TDIFF(t)
  10.376 -
  10.377 -#endif
  10.378 -
  10.379 -struct tcf_police
  10.380 -{
  10.381 -	struct tcf_police *next;
  10.382 -	int		refcnt;
  10.383 -	u32		index;
  10.384 -
  10.385 -	int		action;
  10.386 -	int		result;
  10.387 -	u32		ewma_rate;
  10.388 -	u32		burst;
  10.389 -	u32		mtu;
  10.390 -
  10.391 -	u32		toks;
  10.392 -	u32		ptoks;
  10.393 -	psched_time_t	t_c;
  10.394 -	spinlock_t	lock;
  10.395 -	struct qdisc_rate_table *R_tab;
  10.396 -	struct qdisc_rate_table *P_tab;
  10.397 -
  10.398 -    //struct tc_stats	stats;
  10.399 -};
  10.400 -
  10.401 -//extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st);
  10.402 -extern void tcf_police_destroy(struct tcf_police *p);
  10.403 -extern struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est);
  10.404 -extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p);
  10.405 -extern int tcf_police(struct sk_buff *skb, struct tcf_police *p);
  10.406 -
  10.407 -static inline void tcf_police_release(struct tcf_police *p)
  10.408 -{
  10.409 -	if (p && --p->refcnt == 0)
  10.410 -		tcf_police_destroy(p);
  10.411 -}
  10.412 -
  10.413 -extern struct Qdisc noop_qdisc;
  10.414 -extern struct Qdisc_ops noop_qdisc_ops;
  10.415 -extern struct Qdisc_ops pfifo_qdisc_ops;
  10.416 -extern struct Qdisc_ops bfifo_qdisc_ops;
  10.417 -
  10.418 -int register_qdisc(struct Qdisc_ops *qops);
  10.419 -int unregister_qdisc(struct Qdisc_ops *qops);
  10.420 -struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
  10.421 -struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
  10.422 -void dev_init_scheduler(struct net_device *dev);
  10.423 -void dev_shutdown(struct net_device *dev);
  10.424 -void dev_activate(struct net_device *dev);
  10.425 -void dev_deactivate(struct net_device *dev);
  10.426 -void qdisc_reset(struct Qdisc *qdisc);
  10.427 -void qdisc_destroy(struct Qdisc *qdisc);
  10.428 -struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops);
  10.429 -//int qdisc_new_estimator(struct tc_stats *stats, struct rtattr *opt);
  10.430 -//void qdisc_kill_estimator(struct tc_stats *stats);
  10.431 -//struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab);
  10.432 -void qdisc_put_rtab(struct qdisc_rate_table *tab);
  10.433 -int teql_init(void);
  10.434 -int tc_filter_init(void);
  10.435 -int pktsched_init(void);
  10.436 -
  10.437 -extern int qdisc_restart(struct net_device *dev);
  10.438 -
  10.439 -static inline void qdisc_run(struct net_device *dev)
  10.440 -{
  10.441 -	while (!netif_queue_stopped(dev) &&
  10.442 -	       qdisc_restart(dev)<0)
  10.443 -		/* NOTHING */;
  10.444 -}
  10.445 -
  10.446 -/* Calculate maximal size of packet seen by hard_start_xmit
  10.447 -   routine of this device.
  10.448 - */
  10.449 -static inline unsigned psched_mtu(struct net_device *dev)
  10.450 -{
  10.451 -	unsigned mtu = dev->mtu;
  10.452 -	return dev->hard_header ? mtu + dev->hard_header_len : mtu;
  10.453 -}
  10.454 -
  10.455 -
  10.456 -/******************************************************************************
  10.457 - * XXXXXXXXX Rest of this file is real linux/pkt_sched.h XXXXX
  10.458 - */
  10.459 -
  10.460 -
  10.461 -/* Logical priority bands not depending on specific packet scheduler.
  10.462 -   Every scheduler will map them to real traffic classes, if it has
  10.463 -   no more precise mechanism to classify packets.
  10.464 -
  10.465 -   These numbers have no special meaning, though their coincidence
  10.466 -   with obsolete IPv6 values is not occasional :-). New IPv6 drafts
  10.467 -   preferred full anarchy inspired by diffserv group.
  10.468 -
  10.469 -   Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
  10.470 -   class, actually, as rule it will be handled with more care than
  10.471 -   filler or even bulk.
  10.472 - */
  10.473 -
  10.474 -#define TC_PRIO_BESTEFFORT		0
  10.475 -#define TC_PRIO_FILLER			1
  10.476 -#define TC_PRIO_BULK			2
  10.477 -#define TC_PRIO_INTERACTIVE_BULK	4
  10.478 -#define TC_PRIO_INTERACTIVE		6
  10.479 -#define TC_PRIO_CONTROL			7
  10.480 -
  10.481 -#define TC_PRIO_MAX			15
  10.482 -
  10.483 -/* Generic queue statistics, available for all the elements.
  10.484 -   Particular schedulers may have also their private records.
  10.485 - */
  10.486 -
  10.487 -struct tc_stats
  10.488 -{
  10.489 -	__u64	bytes;			/* NUmber of enqueues bytes */
  10.490 -	__u32	packets;		/* Number of enqueued packets	*/
  10.491 -	__u32	drops;			/* Packets dropped because of lack of resources */
  10.492 -	__u32	overlimits;		/* Number of throttle events when this
  10.493 -					 * flow goes out of allocated bandwidth */
  10.494 -	__u32	bps;			/* Current flow byte rate */
  10.495 -	__u32	pps;			/* Current flow packet rate */
  10.496 -	__u32	qlen;
  10.497 -	__u32	backlog;
  10.498 -#ifdef __KERNEL__
  10.499 -	spinlock_t *lock;
  10.500 -#endif
  10.501 -};
  10.502 -
  10.503 -struct tc_estimator
  10.504 -{
  10.505 -	char		interval;
  10.506 -	unsigned char	ewma_log;
  10.507 -};
  10.508 -
  10.509 -/* "Handles"
  10.510 -   ---------
  10.511 -
  10.512 -    All the traffic control objects have 32bit identifiers, or "handles".
  10.513 -
  10.514 -    They can be considered as opaque numbers from user API viewpoint,
  10.515 -    but actually they always consist of two fields: major and
  10.516 -    minor numbers, which are interpreted by kernel specially,
  10.517 -    that may be used by applications, though not recommended.
  10.518 -
  10.519 -    F.e. qdisc handles always have minor number equal to zero,
  10.520 -    classes (or flows) have major equal to parent qdisc major, and
  10.521 -    minor uniquely identifying class inside qdisc.
  10.522 -
  10.523 -    Macros to manipulate handles:
  10.524 - */
  10.525 -
  10.526 -#define TC_H_MAJ_MASK (0xFFFF0000U)
  10.527 -#define TC_H_MIN_MASK (0x0000FFFFU)
  10.528 -#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
  10.529 -#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
  10.530 -#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
  10.531 -
  10.532 -#define TC_H_UNSPEC	(0U)
  10.533 -#define TC_H_ROOT	(0xFFFFFFFFU)
  10.534 -#define TC_H_INGRESS    (0xFFFFFFF1U)
  10.535 -
  10.536 -struct tc_ratespec
  10.537 -{
  10.538 -	unsigned char	cell_log;
  10.539 -	unsigned char	__reserved;
  10.540 -	unsigned short	feature;
  10.541 -	short		addend;
  10.542 -	unsigned short	mpu;
  10.543 -	__u32		rate;
  10.544 -};
  10.545 -
  10.546 -/* FIFO section */
  10.547 -
  10.548 -struct tc_fifo_qopt
  10.549 -{
  10.550 -	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
  10.551 -};
  10.552 -
  10.553 -/* PRIO section */
  10.554 -
  10.555 -#define TCQ_PRIO_BANDS	16
  10.556 -
  10.557 -struct tc_prio_qopt
  10.558 -{
  10.559 -	int	bands;			/* Number of bands */
  10.560 -	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
  10.561 -};
  10.562 -
  10.563 -/* CSZ section */
  10.564 -
  10.565 -struct tc_csz_qopt
  10.566 -{
  10.567 -	int		flows;		/* Maximal number of guaranteed flows */
  10.568 -	unsigned char	R_log;		/* Fixed point position for round number */
  10.569 -	unsigned char	delta_log;	/* Log of maximal managed time interval */
  10.570 -	__u8		priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> CSZ band */
  10.571 -};
  10.572 -
  10.573 -struct tc_csz_copt
  10.574 -{
  10.575 -	struct tc_ratespec slice;
  10.576 -	struct tc_ratespec rate;
  10.577 -	struct tc_ratespec peakrate;
  10.578 -	__u32		limit;
  10.579 -	__u32		buffer;
  10.580 -	__u32		mtu;
  10.581 -};
  10.582 -
  10.583 -enum
  10.584 -{
  10.585 -	TCA_CSZ_UNSPEC,
  10.586 -	TCA_CSZ_PARMS,
  10.587 -	TCA_CSZ_RTAB,
  10.588 -	TCA_CSZ_PTAB,
  10.589 -};
  10.590 -
  10.591 -/* TBF section */
  10.592 -
  10.593 -struct tc_tbf_qopt
  10.594 -{
  10.595 -	struct tc_ratespec rate;
  10.596 -	struct tc_ratespec peakrate;
  10.597 -	__u32		limit;
  10.598 -	__u32		buffer;
  10.599 -	__u32		mtu;
  10.600 -};
  10.601 -
  10.602 -enum
  10.603 -{
  10.604 -	TCA_TBF_UNSPEC,
  10.605 -	TCA_TBF_PARMS,
  10.606 -	TCA_TBF_RTAB,
  10.607 -	TCA_TBF_PTAB,
  10.608 -};
  10.609 -
  10.610 -
  10.611 -/* TEQL section */
  10.612 -
  10.613 -/* TEQL does not require any parameters */
  10.614 -
  10.615 -/* SFQ section */
  10.616 -
  10.617 -struct tc_sfq_qopt
  10.618 -{
  10.619 -	unsigned	quantum;	/* Bytes per round allocated to flow */
  10.620 -	int		perturb_period;	/* Period of hash perturbation */
  10.621 -	__u32		limit;		/* Maximal packets in queue */
  10.622 -	unsigned	divisor;	/* Hash divisor  */
  10.623 -	unsigned	flows;		/* Maximal number of flows  */
  10.624 -};
  10.625 -
  10.626 -/*
  10.627 - *  NOTE: limit, divisor and flows are hardwired to code at the moment.
  10.628 - *
  10.629 - *	limit=flows=128, divisor=1024;
  10.630 - *
  10.631 - *	The only reason for this is efficiency, it is possible
  10.632 - *	to change these parameters in compile time.
  10.633 - */
  10.634 -
  10.635 -/* RED section */
  10.636 -
  10.637 -enum
  10.638 -{
  10.639 -	TCA_RED_UNSPEC,
  10.640 -	TCA_RED_PARMS,
  10.641 -	TCA_RED_STAB,
  10.642 -};
  10.643 -
  10.644 -struct tc_red_qopt
  10.645 -{
  10.646 -	__u32		limit;		/* HARD maximal queue length (bytes)	*/
  10.647 -	__u32		qth_min;	/* Min average length threshold (bytes) */
  10.648 -	__u32		qth_max;	/* Max average length threshold (bytes) */
  10.649 -	unsigned char   Wlog;		/* log(W)		*/
  10.650 -	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
  10.651 -	unsigned char   Scell_log;	/* cell size for idle damping */
  10.652 -	unsigned char	flags;
  10.653 -#define TC_RED_ECN	1
  10.654 -};
  10.655 -
  10.656 -struct tc_red_xstats
  10.657 -{
  10.658 -	__u32           early;          /* Early drops */
  10.659 -	__u32           pdrop;          /* Drops due to queue limits */
  10.660 -	__u32           other;          /* Drops due to drop() calls */
  10.661 -	__u32           marked;         /* Marked packets */
  10.662 -};
  10.663 -
  10.664 -/* GRED section */
  10.665 -
  10.666 -#define MAX_DPs 16
  10.667 -
  10.668 -enum
  10.669 -{
  10.670 -       TCA_GRED_UNSPEC,
  10.671 -       TCA_GRED_PARMS,
  10.672 -       TCA_GRED_STAB,
  10.673 -       TCA_GRED_DPS,
  10.674 -};
  10.675 -
  10.676 -#define TCA_SET_OFF TCA_GRED_PARMS
  10.677 -struct tc_gred_qopt
  10.678 -{
  10.679 -       __u32           limit;          /* HARD maximal queue length (bytes)    
  10.680 -*/
  10.681 -       __u32           qth_min;        /* Min average length threshold (bytes) 
  10.682 -*/
  10.683 -       __u32           qth_max;        /* Max average length threshold (bytes) 
  10.684 -*/
  10.685 -       __u32           DP;             /* upto 2^32 DPs */
  10.686 -       __u32           backlog;        
  10.687 -       __u32           qave;   
  10.688 -       __u32           forced; 
  10.689 -       __u32           early;  
  10.690 -       __u32           other;  
  10.691 -       __u32           pdrop;  
  10.692 -
  10.693 -       unsigned char   Wlog;           /* log(W)               */
  10.694 -       unsigned char   Plog;           /* log(P_max/(qth_max-qth_min)) */
  10.695 -       unsigned char   Scell_log;      /* cell size for idle damping */
  10.696 -       __u8            prio;		/* prio of this VQ */
  10.697 -       __u32	packets;
  10.698 -       __u32	bytesin;
  10.699 -};
  10.700 -/* gred setup */
  10.701 -struct tc_gred_sopt
  10.702 -{
  10.703 -       __u32           DPs;
  10.704 -       __u32           def_DP;
  10.705 -       __u8            grio;
  10.706 -};
  10.707 -
  10.708 -/* CBQ section */
  10.709 -
  10.710 -#define TC_CBQ_MAXPRIO		8
  10.711 -#define TC_CBQ_MAXLEVEL		8
  10.712 -#define TC_CBQ_DEF_EWMA		5
  10.713 -
  10.714 -struct tc_cbq_lssopt
  10.715 -{
  10.716 -	unsigned char	change;
  10.717 -	unsigned char	flags;
  10.718 -#define TCF_CBQ_LSS_BOUNDED	1
  10.719 -#define TCF_CBQ_LSS_ISOLATED	2
  10.720 -	unsigned char  	ewma_log;
  10.721 -	unsigned char  	level;
  10.722 -#define TCF_CBQ_LSS_FLAGS	1
  10.723 -#define TCF_CBQ_LSS_EWMA	2
  10.724 -#define TCF_CBQ_LSS_MAXIDLE	4
  10.725 -#define TCF_CBQ_LSS_MINIDLE	8
  10.726 -#define TCF_CBQ_LSS_OFFTIME	0x10
  10.727 -#define TCF_CBQ_LSS_AVPKT	0x20
  10.728 -	__u32		maxidle;
  10.729 -	__u32		minidle;
  10.730 -	__u32		offtime;
  10.731 -	__u32		avpkt;
  10.732 -};
  10.733 -
  10.734 -struct tc_cbq_wrropt
  10.735 -{
  10.736 -	unsigned char	flags;
  10.737 -	unsigned char	priority;
  10.738 -	unsigned char	cpriority;
  10.739 -	unsigned char	__reserved;
  10.740 -	__u32		allot;
  10.741 -	__u32		weight;
  10.742 -};
  10.743 -
  10.744 -struct tc_cbq_ovl
  10.745 -{
  10.746 -	unsigned char	strategy;
  10.747 -#define	TC_CBQ_OVL_CLASSIC	0
  10.748 -#define	TC_CBQ_OVL_DELAY	1
  10.749 -#define	TC_CBQ_OVL_LOWPRIO	2
  10.750 -#define	TC_CBQ_OVL_DROP		3
  10.751 -#define	TC_CBQ_OVL_RCLASSIC	4
  10.752 -	unsigned char	priority2;
  10.753 -	__u32		penalty;
  10.754 -};
  10.755 -
  10.756 -struct tc_cbq_police
  10.757 -{
  10.758 -	unsigned char	police;
  10.759 -	unsigned char	__res1;
  10.760 -	unsigned short	__res2;
  10.761 -};
  10.762 -
  10.763 -struct tc_cbq_fopt
  10.764 -{
  10.765 -	__u32		split;
  10.766 -	__u32		defmap;
  10.767 -	__u32		defchange;
  10.768 -};
  10.769 -
  10.770 -struct tc_cbq_xstats
  10.771 -{
  10.772 -	__u32		borrows;
  10.773 -	__u32		overactions;
  10.774 -	__s32		avgidle;
  10.775 -	__s32		undertime;
  10.776 -};
  10.777 -
  10.778 -enum
  10.779 -{
  10.780 -	TCA_CBQ_UNSPEC,
  10.781 -	TCA_CBQ_LSSOPT,
  10.782 -	TCA_CBQ_WRROPT,
  10.783 -	TCA_CBQ_FOPT,
  10.784 -	TCA_CBQ_OVL_STRATEGY,
  10.785 -	TCA_CBQ_RATE,
  10.786 -	TCA_CBQ_RTAB,
  10.787 -	TCA_CBQ_POLICE,
  10.788 -};
  10.789 -
  10.790 -#define TCA_CBQ_MAX	TCA_CBQ_POLICE
  10.791 -
  10.792 -/* dsmark section */
  10.793 -
  10.794 -enum {
  10.795 -	TCA_DSMARK_UNSPEC,
  10.796 -	TCA_DSMARK_INDICES,
  10.797 -	TCA_DSMARK_DEFAULT_INDEX,
  10.798 -	TCA_DSMARK_SET_TC_INDEX,
  10.799 -	TCA_DSMARK_MASK,
  10.800 -	TCA_DSMARK_VALUE
  10.801 -};
  10.802 -
  10.803 -#define TCA_DSMARK_MAX TCA_DSMARK_VALUE
  10.804 -
  10.805 -/* ATM  section */
  10.806 -
  10.807 -enum {
  10.808 -	TCA_ATM_UNSPEC,
  10.809 -	TCA_ATM_FD,		/* file/socket descriptor */
  10.810 -	TCA_ATM_PTR,		/* pointer to descriptor - later */
  10.811 -	TCA_ATM_HDR,		/* LL header */
  10.812 -	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP)  */
  10.813 -	TCA_ATM_ADDR,		/* PVC address (for output only) */
  10.814 -	TCA_ATM_STATE		/* VC state (ATM_VS_*; for output only) */
  10.815 -};
  10.816 -
  10.817 -#define TCA_ATM_MAX	TCA_ATM_STATE
  10.818 -
  10.819 -#endif
    11.1 --- a/xen-2.4.16/include/xeno/skbuff.h	Fri Feb 21 16:04:44 2003 +0000
    11.2 +++ b/xen-2.4.16/include/xeno/skbuff.h	Sun Feb 23 11:22:39 2003 +0000
    11.3 @@ -16,8 +16,6 @@
    11.4  
    11.5  #include <linux/config.h>
    11.6  #include <linux/lib.h>
    11.7 -//#include <linux/kernel.h>
    11.8 -//#include <linux/sched.h>
    11.9  #include <linux/time.h>
   11.10  #include <linux/timer.h>
   11.11  #include <linux/cache.h>
   11.12 @@ -26,7 +24,6 @@
   11.13  #include <asm/types.h>
   11.14  #include <linux/spinlock.h>
   11.15  #include <linux/mm.h>
   11.16 -//#include <linux/highmem.h>
   11.17  
   11.18  // vif special values.
   11.19  #define VIF_PHYSICAL_INTERFACE  -1
   11.20 @@ -144,8 +141,6 @@ struct sk_buff {
   11.21  	struct sk_buff	* prev;			/* Previous buffer in list 			*/
   11.22  
   11.23  	struct sk_buff_head * list;		/* List we are on				*/
   11.24 -	struct sock	*sk;			/* Socket we are owned by 			*/
   11.25 -	struct timeval	stamp;			/* Time we arrived				*/
   11.26  	struct net_device	*dev;		/* Device we arrived on/are leaving by		*/
   11.27  
   11.28  	/* Transport layer header */
   11.29 @@ -177,66 +172,25 @@ struct sk_buff {
   11.30  	  	unsigned char 	*raw;
   11.31  	} mac;
   11.32  
   11.33 -//	struct  dst_entry *dst;
   11.34 -
   11.35 -	/* 
   11.36 -	 * This is the control buffer. It is free to use for every
   11.37 -	 * layer. Please put your private variables there. If you
   11.38 -	 * want to keep them across layers you have to do a skb_clone()
   11.39 -	 * first. This is owned by whoever has the skb queued ATM.
   11.40 -	 */ 
   11.41 -	char		cb[48];	 
   11.42 -
   11.43  	unsigned int 	len;			/* Length of actual data			*/
   11.44   	unsigned int 	data_len;
   11.45  	unsigned int	csum;			/* Checksum 					*/
   11.46 -	unsigned char 	__unused,		/* Dead field, may be reused			*/
   11.47 +	unsigned char 	skb_type,
   11.48  			cloned, 		/* head may be cloned (check refcnt to be sure) */
   11.49    			pkt_type,		/* Packet class					*/
   11.50    			ip_summed;		/* Driver fed us an IP checksum			*/
   11.51 -	__u32		priority;		/* Packet queueing priority			*/
   11.52  	atomic_t	users;			/* User count - see datagram.c,tcp.c 		*/
   11.53  	unsigned short	protocol;		/* Packet protocol from driver. 		*/
   11.54 -	unsigned short	security;		/* Security level of packet			*/
   11.55 -	unsigned int	truesize;		/* Buffer size 					*/
   11.56 -
   11.57  	unsigned char	*head;			/* Head of buffer 				*/
   11.58  	unsigned char	*data;			/* Data head pointer				*/
   11.59  	unsigned char	*tail;			/* Tail pointer					*/
   11.60  	unsigned char 	*end;			/* End pointer					*/
   11.61  
   11.62  	void 		(*destructor)(struct sk_buff *);	/* Destruct function		*/
   11.63 -
   11.64 -        unsigned int    skb_type;               /* SKB_NORMAL or SKB_ZERO_COPY                  */
   11.65          struct pfn_info *pf;                    /* record of physical pf address for freeing    */
   11.66          int src_vif;                            /* vif we came from                             */
   11.67          int dst_vif;                            /* vif we are bound for                         */
   11.68          struct skb_shared_info shinfo;          /* shared info is no longer shared in Xen.      */
   11.69 -        
   11.70 -
   11.71 -                
   11.72 -        
   11.73 -#ifdef CONFIG_NETFILTER
   11.74 -	/* Can be used for communication between hooks. */
   11.75 -        unsigned long	nfmark;
   11.76 -	/* Cache info */
   11.77 -	__u32		nfcache;
   11.78 -	/* Associated connection, if any */
   11.79 -	struct nf_ct_info *nfct;
   11.80 -#ifdef CONFIG_NETFILTER_DEBUG
   11.81 -        unsigned int nf_debug;
   11.82 -#endif
   11.83 -#endif /*CONFIG_NETFILTER*/
   11.84 -
   11.85 -#if defined(CONFIG_HIPPI)
   11.86 -	union{
   11.87 -		__u32	ifield;
   11.88 -	} private;
   11.89 -#endif
   11.90 -
   11.91 -#ifdef CONFIG_NET_SCHED
   11.92 -       __u32           tc_index;               /* traffic control index */
   11.93 -#endif
   11.94  };
   11.95  
   11.96  #define SK_WMEM_MAX	65535
   11.97 @@ -1000,7 +954,6 @@ static inline void skb_orphan(struct sk_
   11.98  	if (skb->destructor)
   11.99  		skb->destructor(skb);
  11.100  	skb->destructor = NULL;
  11.101 -	skb->sk = NULL;
  11.102  }
  11.103  
  11.104  /**
  11.105 @@ -1130,10 +1083,8 @@ static inline void kunmap_skb_frag(void 
  11.106  		     skb=skb->next)
  11.107  
  11.108  
  11.109 -extern struct sk_buff *		skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
  11.110  extern int			skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
  11.111  extern int			skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
  11.112 -extern void			skb_free_datagram(struct sock * sk, struct sk_buff *skb);
  11.113  
  11.114  extern unsigned int		skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
  11.115  extern int			skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
    12.1 --- a/xen-2.4.16/include/xeno/sockios.h	Fri Feb 21 16:04:44 2003 +0000
    12.2 +++ b/xen-2.4.16/include/xeno/sockios.h	Sun Feb 23 11:22:39 2003 +0000
    12.3 @@ -69,9 +69,6 @@
    12.4  #define SIOCGIFBR	0x8940		/* Bridging support		*/
    12.5  #define SIOCSIFBR	0x8941		/* Set bridging options 	*/
    12.6  
    12.7 -#define SIOCGIFTXQLEN	0x8942		/* Get the tx queue length	*/
    12.8 -#define SIOCSIFTXQLEN	0x8943		/* Set the tx queue length 	*/
    12.9 -
   12.10  #define SIOCGIFDIVERT	0x8944		/* Frame diversion support */
   12.11  #define SIOCSIFDIVERT	0x8945		/* Set frame diversion options */
   12.12  
    13.1 --- a/xen-2.4.16/include/xeno/vif.h	Fri Feb 21 16:04:44 2003 +0000
    13.2 +++ b/xen-2.4.16/include/xeno/vif.h	Sun Feb 23 11:22:39 2003 +0000
    13.3 @@ -32,10 +32,17 @@ typedef struct rx_shadow_entry_st {
    13.4      unsigned long flush_count;
    13.5  } rx_shadow_entry_t;
    13.6  
    13.7 +typedef struct tx_shadow_entry_st {
    13.8 +    unsigned long addr;
    13.9 +    unsigned long size;
   13.10 +    int           status;
   13.11 +} tx_shadow_entry_t;
   13.12 +
   13.13  typedef struct net_shadow_ring_st {
   13.14      rx_shadow_entry_t *rx_ring;
   13.15 +    tx_shadow_entry_t *tx_ring;
   13.16      unsigned int rx_prod, rx_cons, rx_idx;
   13.17 -    unsigned int tx_cons; /* ahead of shared tx_cons */
   13.18 +    unsigned int tx_prod, tx_cons, tx_idx;
   13.19  } net_shadow_ring_t;
   13.20  
   13.21  typedef struct net_vif_st {
    14.1 --- a/xen-2.4.16/net/dev.c	Fri Feb 21 16:04:44 2003 +0000
    14.2 +++ b/xen-2.4.16/net/dev.c	Sun Feb 23 11:22:39 2003 +0000
    14.3 @@ -27,7 +27,6 @@
    14.4  #include <linux/brlock.h>
    14.5  #include <linux/init.h>
    14.6  #include <linux/module.h>
    14.7 -#include <linux/pkt_sched.h>
    14.8  
    14.9  #include <linux/event.h>
   14.10  #include <asm/domain_page.h>
   14.11 @@ -60,12 +59,6 @@ struct net_device *the_dev = NULL;
   14.12  struct softnet_data softnet_data[NR_CPUS] __cacheline_aligned;
   14.13  
   14.14  
   14.15 -/*********************************************************************************
   14.16 -
   14.17 -			    Device Interface Subroutines
   14.18 -
   14.19 -**********************************************************************************/
   14.20 -
   14.21  /**
   14.22   *	__dev_get_by_name	- find a device by its name 
   14.23   *	@name: name to find
   14.24 @@ -80,13 +73,13 @@ struct softnet_data softnet_data[NR_CPUS
   14.25  
   14.26  struct net_device *__dev_get_by_name(const char *name)
   14.27  {
   14.28 -	struct net_device *dev;
   14.29 +    struct net_device *dev;
   14.30  
   14.31 -	for (dev = dev_base; dev != NULL; dev = dev->next) {
   14.32 -		if (strncmp(dev->name, name, IFNAMSIZ) == 0)
   14.33 -			return dev;
   14.34 -	}
   14.35 -	return NULL;
   14.36 +    for (dev = dev_base; dev != NULL; dev = dev->next) {
   14.37 +        if (strncmp(dev->name, name, IFNAMSIZ) == 0)
   14.38 +            return dev;
   14.39 +    }
   14.40 +    return NULL;
   14.41  }
   14.42  
   14.43  /**
   14.44 @@ -102,14 +95,14 @@ struct net_device *__dev_get_by_name(con
   14.45  
   14.46  struct net_device *dev_get_by_name(const char *name)
   14.47  {
   14.48 -	struct net_device *dev;
   14.49 +    struct net_device *dev;
   14.50  
   14.51 -	read_lock(&dev_base_lock);
   14.52 -	dev = __dev_get_by_name(name);
   14.53 -	if (dev)
   14.54 -		dev_hold(dev);
   14.55 -	read_unlock(&dev_base_lock);
   14.56 -	return dev;
   14.57 +    read_lock(&dev_base_lock);
   14.58 +    dev = __dev_get_by_name(name);
   14.59 +    if (dev)
   14.60 +        dev_hold(dev);
   14.61 +    read_unlock(&dev_base_lock);
   14.62 +    return dev;
   14.63  }
   14.64  
   14.65  /* 
   14.66 @@ -134,12 +127,12 @@ struct net_device *dev_get_by_name(const
   14.67   
   14.68  int dev_get(const char *name)
   14.69  {
   14.70 -	struct net_device *dev;
   14.71 +    struct net_device *dev;
   14.72  
   14.73 -	read_lock(&dev_base_lock);
   14.74 -	dev = __dev_get_by_name(name);
   14.75 -	read_unlock(&dev_base_lock);
   14.76 -	return dev != NULL;
   14.77 +    read_lock(&dev_base_lock);
   14.78 +    dev = __dev_get_by_name(name);
   14.79 +    read_unlock(&dev_base_lock);
   14.80 +    return dev != NULL;
   14.81  }
   14.82  
   14.83  /**
   14.84 @@ -155,13 +148,13 @@ int dev_get(const char *name)
   14.85  
   14.86  struct net_device * __dev_get_by_index(int ifindex)
   14.87  {
   14.88 -	struct net_device *dev;
   14.89 +    struct net_device *dev;
   14.90  
   14.91 -	for (dev = dev_base; dev != NULL; dev = dev->next) {
   14.92 -		if (dev->ifindex == ifindex)
   14.93 -			return dev;
   14.94 -	}
   14.95 -	return NULL;
   14.96 +    for (dev = dev_base; dev != NULL; dev = dev->next) {
   14.97 +        if (dev->ifindex == ifindex)
   14.98 +            return dev;
   14.99 +    }
  14.100 +    return NULL;
  14.101  }
  14.102  
  14.103  
  14.104 @@ -177,14 +170,14 @@ struct net_device * __dev_get_by_index(i
  14.105  
  14.106  struct net_device * dev_get_by_index(int ifindex)
  14.107  {
  14.108 -	struct net_device *dev;
  14.109 +    struct net_device *dev;
  14.110  
  14.111 -	read_lock(&dev_base_lock);
  14.112 -	dev = __dev_get_by_index(ifindex);
  14.113 -	if (dev)
  14.114 -		dev_hold(dev);
  14.115 -	read_unlock(&dev_base_lock);
  14.116 -	return dev;
  14.117 +    read_lock(&dev_base_lock);
  14.118 +    dev = __dev_get_by_index(ifindex);
  14.119 +    if (dev)
  14.120 +        dev_hold(dev);
  14.121 +    read_unlock(&dev_base_lock);
  14.122 +    return dev;
  14.123  }
  14.124  
  14.125  /**
  14.126 @@ -203,14 +196,14 @@ struct net_device * dev_get_by_index(int
  14.127  
  14.128  struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
  14.129  {
  14.130 -	struct net_device *dev;
  14.131 +    struct net_device *dev;
  14.132  
  14.133 -	for (dev = dev_base; dev != NULL; dev = dev->next) {
  14.134 -		if (dev->type == type &&
  14.135 -		    memcmp(dev->dev_addr, ha, dev->addr_len) == 0)
  14.136 -			return dev;
  14.137 -	}
  14.138 -	return NULL;
  14.139 +    for (dev = dev_base; dev != NULL; dev = dev->next) {
  14.140 +        if (dev->type == type &&
  14.141 +            memcmp(dev->dev_addr, ha, dev->addr_len) == 0)
  14.142 +            return dev;
  14.143 +    }
  14.144 +    return NULL;
  14.145  }
  14.146  
  14.147  /**
  14.148 @@ -227,30 +220,30 @@ struct net_device *dev_getbyhwaddr(unsig
  14.149  
  14.150  int dev_alloc_name(struct net_device *dev, const char *name)
  14.151  {
  14.152 -	int i;
  14.153 -	char buf[32];
  14.154 -	char *p;
  14.155 +    int i;
  14.156 +    char buf[32];
  14.157 +    char *p;
  14.158  
  14.159 -	/*
  14.160 -	 * Verify the string as this thing may have come from
  14.161 -	 * the user.  There must be either one "%d" and no other "%"
  14.162 -	 * characters, or no "%" characters at all.
  14.163 -	 */
  14.164 -	p = strchr(name, '%');
  14.165 -	if (p && (p[1] != 'd' || strchr(p+2, '%')))
  14.166 -		return -EINVAL;
  14.167 +    /*
  14.168 +     * Verify the string as this thing may have come from
  14.169 +     * the user.  There must be either one "%d" and no other "%"
  14.170 +     * characters, or no "%" characters at all.
  14.171 +     */
  14.172 +    p = strchr(name, '%');
  14.173 +    if (p && (p[1] != 'd' || strchr(p+2, '%')))
  14.174 +        return -EINVAL;
  14.175  
  14.176 -	/*
  14.177 -	 * If you need over 100 please also fix the algorithm...
  14.178 -	 */
  14.179 -	for (i = 0; i < 100; i++) {
  14.180 -		snprintf(buf,sizeof(buf),name,i);
  14.181 -		if (__dev_get_by_name(buf) == NULL) {
  14.182 -			strcpy(dev->name, buf);
  14.183 -			return i;
  14.184 -		}
  14.185 -	}
  14.186 -	return -ENFILE;	/* Over 100 of the things .. bail out! */
  14.187 +    /*
  14.188 +     * If you need over 100 please also fix the algorithm...
  14.189 +     */
  14.190 +    for (i = 0; i < 100; i++) {
  14.191 +        snprintf(buf,sizeof(buf),name,i);
  14.192 +        if (__dev_get_by_name(buf) == NULL) {
  14.193 +            strcpy(dev->name, buf);
  14.194 +            return i;
  14.195 +        }
  14.196 +    }
  14.197 +    return -ENFILE;	/* Over 100 of the things .. bail out! */
  14.198  }
  14.199  
  14.200  /**
  14.201 @@ -271,18 +264,18 @@ int dev_alloc_name(struct net_device *de
  14.202  
  14.203  struct net_device *dev_alloc(const char *name, int *err)
  14.204  {
  14.205 -	struct net_device *dev=kmalloc(sizeof(struct net_device), GFP_KERNEL);
  14.206 -	if (dev == NULL) {
  14.207 -		*err = -ENOBUFS;
  14.208 -		return NULL;
  14.209 -	}
  14.210 -	memset(dev, 0, sizeof(struct net_device));
  14.211 -	*err = dev_alloc_name(dev, name);
  14.212 -	if (*err < 0) {
  14.213 -		kfree(dev);
  14.214 -		return NULL;
  14.215 -	}
  14.216 -	return dev;
  14.217 +    struct net_device *dev=kmalloc(sizeof(struct net_device), GFP_KERNEL);
  14.218 +    if (dev == NULL) {
  14.219 +        *err = -ENOBUFS;
  14.220 +        return NULL;
  14.221 +    }
  14.222 +    memset(dev, 0, sizeof(struct net_device));
  14.223 +    *err = dev_alloc_name(dev, name);
  14.224 +    if (*err < 0) {
  14.225 +        kfree(dev);
  14.226 +        return NULL;
  14.227 +    }
  14.228 +    return dev;
  14.229  }
  14.230  
  14.231  /**
  14.232 @@ -296,10 +289,10 @@ struct net_device *dev_alloc(const char 
  14.233   
  14.234  void netdev_state_change(struct net_device *dev)
  14.235  {
  14.236 -	if (dev->flags&IFF_UP) {
  14.237 -		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
  14.238 -		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
  14.239 -	}
  14.240 +    if (dev->flags&IFF_UP) {
  14.241 +        notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
  14.242 +        rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
  14.243 +    }
  14.244  }
  14.245  
  14.246  
  14.247 @@ -316,8 +309,8 @@ void netdev_state_change(struct net_devi
  14.248  
  14.249  void dev_load(const char *name)
  14.250  {
  14.251 -	if (!dev_get(name) && capable(CAP_SYS_MODULE))
  14.252 -		request_module(name);
  14.253 +    if (!dev_get(name) && capable(CAP_SYS_MODULE))
  14.254 +        request_module(name);
  14.255  }
  14.256  
  14.257  #else
  14.258 @@ -328,9 +321,10 @@ extern inline void dev_load(const char *
  14.259  
  14.260  static int default_rebuild_header(struct sk_buff *skb)
  14.261  {
  14.262 -	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n", skb->dev ? skb->dev->name : "NULL!!!");
  14.263 -	kfree_skb(skb);
  14.264 -	return 1;
  14.265 +    printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n", 
  14.266 +           skb->dev ? skb->dev->name : "NULL!!!");
  14.267 +    kfree_skb(skb);
  14.268 +    return 1;
  14.269  }
  14.270  
  14.271  /**
  14.272 @@ -348,63 +342,63 @@ static int default_rebuild_header(struct
  14.273   
  14.274  int dev_open(struct net_device *dev)
  14.275  {
  14.276 -	int ret = 0;
  14.277 +    int ret = 0;
  14.278  
  14.279 -	/*
  14.280 -	 *	Is it already up?
  14.281 -	 */
  14.282 +    /*
  14.283 +     *	Is it already up?
  14.284 +     */
  14.285  
  14.286 -	if (dev->flags&IFF_UP)
  14.287 -		return 0;
  14.288 +    if (dev->flags&IFF_UP)
  14.289 +        return 0;
  14.290  
  14.291 -	/*
  14.292 -	 *	Is it even present?
  14.293 -	 */
  14.294 -	if (!netif_device_present(dev))
  14.295 -		return -ENODEV;
  14.296 +    /*
  14.297 +     *	Is it even present?
  14.298 +     */
  14.299 +    if (!netif_device_present(dev))
  14.300 +        return -ENODEV;
  14.301  
  14.302 -	/*
  14.303 -	 *	Call device private open method
  14.304 -	 */
  14.305 -	if (try_inc_mod_count(dev->owner)) {
  14.306 -		if (dev->open) {
  14.307 -			ret = dev->open(dev);
  14.308 -			if (ret != 0 && dev->owner)
  14.309 -				__MOD_DEC_USE_COUNT(dev->owner);
  14.310 -		}
  14.311 -	} else {
  14.312 -		ret = -ENODEV;
  14.313 -	}
  14.314 +    /*
  14.315 +     *	Call device private open method
  14.316 +     */
  14.317 +    if (try_inc_mod_count(dev->owner)) {
  14.318 +        if (dev->open) {
  14.319 +            ret = dev->open(dev);
  14.320 +            if (ret != 0 && dev->owner)
  14.321 +                __MOD_DEC_USE_COUNT(dev->owner);
  14.322 +        }
  14.323 +    } else {
  14.324 +        ret = -ENODEV;
  14.325 +    }
  14.326  
  14.327 -	/*
  14.328 -	 *	If it went open OK then:
  14.329 -	 */
  14.330 +    /*
  14.331 +     *	If it went open OK then:
  14.332 +     */
  14.333  	 
  14.334 -	if (ret == 0) 
  14.335 -	{
  14.336 -		/*
  14.337 -		 *	Set the flags.
  14.338 -		 */
  14.339 -		dev->flags |= IFF_UP;
  14.340 +    if (ret == 0) 
  14.341 +    {
  14.342 +        /*
  14.343 +         *	Set the flags.
  14.344 +         */
  14.345 +        dev->flags |= IFF_UP;
  14.346  
  14.347 -		set_bit(__LINK_STATE_START, &dev->state);
  14.348 +        set_bit(__LINK_STATE_START, &dev->state);
  14.349  
  14.350 -		/*
  14.351 -		 *	Initialize multicasting status 
  14.352 -		 */
  14.353 -		dev_mc_upload(dev);
  14.354 +        /*
  14.355 +         *	Initialize multicasting status 
  14.356 +         */
  14.357 +        dev_mc_upload(dev);
  14.358  
  14.359 -		/*
  14.360 -		 *	Wakeup transmit queue engine
  14.361 -		 */
  14.362 -		dev_activate(dev);
  14.363 +        /*
  14.364 +         *	Wakeup transmit queue engine
  14.365 +         */
  14.366 +        dev_activate(dev);
  14.367  
  14.368 -		/*
  14.369 -		 *	... and announce new interface.
  14.370 -		 */
  14.371 -		notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
  14.372 -	}
  14.373 -	return(ret);
  14.374 +        /*
  14.375 +         *	... and announce new interface.
  14.376 +         */
  14.377 +        notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
  14.378 +    }
  14.379 +    return(ret);
  14.380  }
  14.381  
  14.382  
  14.383 @@ -420,48 +414,48 @@ int dev_open(struct net_device *dev)
  14.384   
  14.385  int dev_close(struct net_device *dev)
  14.386  {
  14.387 -	if (!(dev->flags&IFF_UP))
  14.388 -		return 0;
  14.389 +    if (!(dev->flags&IFF_UP))
  14.390 +        return 0;
  14.391  
  14.392 -	/*
  14.393 -	 *	Tell people we are going down, so that they can
  14.394 -	 *	prepare to death, when device is still operating.
  14.395 -	 */
  14.396 -	notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
  14.397 +    /*
  14.398 +     *	Tell people we are going down, so that they can
  14.399 +     *	prepare to death, when device is still operating.
  14.400 +     */
  14.401 +    notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
  14.402  
  14.403 -	dev_deactivate(dev);
  14.404 +    dev_deactivate(dev);
  14.405  
  14.406 -	clear_bit(__LINK_STATE_START, &dev->state);
  14.407 +    clear_bit(__LINK_STATE_START, &dev->state);
  14.408  
  14.409 -	/*
  14.410 -	 *	Call the device specific close. This cannot fail.
  14.411 -	 *	Only if device is UP
  14.412 -	 *
  14.413 -	 *	We allow it to be called even after a DETACH hot-plug
  14.414 -	 *	event.
  14.415 -	 */
  14.416 +    /*
  14.417 +     *	Call the device specific close. This cannot fail.
  14.418 +     *	Only if device is UP
  14.419 +     *
  14.420 +     *	We allow it to be called even after a DETACH hot-plug
  14.421 +     *	event.
  14.422 +     */
  14.423  	 
  14.424 -	if (dev->stop)
  14.425 -		dev->stop(dev);
  14.426 +    if (dev->stop)
  14.427 +        dev->stop(dev);
  14.428  
  14.429 -	/*
  14.430 -	 *	Device is now down.
  14.431 -	 */
  14.432 +    /*
  14.433 +     *	Device is now down.
  14.434 +     */
  14.435  
  14.436 -	dev->flags &= ~IFF_UP;
  14.437 +    dev->flags &= ~IFF_UP;
  14.438  
  14.439 -	/*
  14.440 -	 *	Tell people we are down
  14.441 -	 */
  14.442 -	notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
  14.443 +    /*
  14.444 +     *	Tell people we are down
  14.445 +     */
  14.446 +    notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
  14.447  
  14.448 -	/*
  14.449 -	 * Drop the module refcount
  14.450 -	 */
  14.451 -	if (dev->owner)
  14.452 -		__MOD_DEC_USE_COUNT(dev->owner);
  14.453 +    /*
  14.454 +     * Drop the module refcount
  14.455 +     */
  14.456 +    if (dev->owner)
  14.457 +        __MOD_DEC_USE_COUNT(dev->owner);
  14.458  
  14.459 -	return(0);
  14.460 +    return(0);
  14.461  }
  14.462  
  14.463  
  14.464 @@ -474,113 +468,77 @@ int dev_close(struct net_device *dev)
  14.465  static inline int
  14.466  illegal_highdma(struct net_device *dev, struct sk_buff *skb)
  14.467  {
  14.468 -	int i;
  14.469 +    int i;
  14.470  
  14.471 -	if (dev->features&NETIF_F_HIGHDMA)
  14.472 -		return 0;
  14.473 +    if (dev->features&NETIF_F_HIGHDMA)
  14.474 +        return 0;
  14.475  
  14.476 -	for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
  14.477 -		if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
  14.478 -			return 1;
  14.479 +    for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
  14.480 +        if (skb_shinfo(skb)->frags[i].page >= highmem_start_page)
  14.481 +            return 1;
  14.482  
  14.483 -	return 0;
  14.484 +    return 0;
  14.485  }
  14.486  #else
  14.487  #define illegal_highdma(dev, skb)	(0)
  14.488  #endif
  14.489  
  14.490 -/**
  14.491 - *	dev_queue_xmit - transmit a buffer
  14.492 - *	@skb: buffer to transmit
  14.493 +/*
  14.494 + * dev_queue_xmit - transmit a buffer
  14.495 + * @skb: buffer to transmit
  14.496   *	
  14.497 - *	Queue a buffer for transmission to a network device. The caller must
  14.498 - *	have set the device and priority and built the buffer before calling this 
  14.499 - *	function. The function can be called from an interrupt.
  14.500 + * Queue a buffer for transmission to a network device. The caller must
  14.501 + * have set the device and priority and built the buffer before calling this 
  14.502 + * function. The function can be called from an interrupt.
  14.503   *
  14.504 - *	A negative errno code is returned on a failure. A success does not
  14.505 - *	guarantee the frame will be transmitted as it may be dropped due
  14.506 - *	to congestion or traffic shaping.
  14.507 + * A negative errno code is returned on a failure. A success does not
  14.508 + * guarantee the frame will be transmitted as it may be dropped due
  14.509 + * to congestion or traffic shaping.
  14.510   */
  14.511  
  14.512  int dev_queue_xmit(struct sk_buff *skb)
  14.513  {
  14.514 -	struct net_device *dev = skb->dev;
  14.515 -	struct Qdisc  *q;
  14.516 +    struct net_device *dev = skb->dev;
  14.517          
  14.518 -        if (!(dev->features&NETIF_F_SG)) 
  14.519 -        {
  14.520 -            printk("NIC doesn't do scatter-gather!\n");
  14.521 -            BUG();
  14.522 -        }
  14.523 +    if (!(dev->features&NETIF_F_SG)) 
  14.524 +    {
  14.525 +        printk("NIC doesn't do scatter-gather!\n");
  14.526 +        BUG();
  14.527 +    }
  14.528          
  14.529 -	if (skb_shinfo(skb)->frag_list &&
  14.530 -	    !(dev->features&NETIF_F_FRAGLIST) &&
  14.531 -	    skb_linearize(skb, GFP_ATOMIC) != 0) {
  14.532 -		kfree_skb(skb);
  14.533 -		return -ENOMEM;
  14.534 -	}
  14.535 +    if (skb_shinfo(skb)->frag_list &&
  14.536 +        !(dev->features&NETIF_F_FRAGLIST) &&
  14.537 +        skb_linearize(skb, GFP_ATOMIC) != 0) {
  14.538 +        kfree_skb(skb);
  14.539 +        return -ENOMEM;
  14.540 +    }
  14.541  
  14.542 -	/* Fragmented skb is linearized if device does not support SG,
  14.543 -	 * or if at least one of fragments is in highmem and device
  14.544 -	 * does not support DMA from it.
  14.545 -	 */
  14.546 -	if (skb_shinfo(skb)->nr_frags &&
  14.547 -	    (!(dev->features&NETIF_F_SG) || illegal_highdma(dev, skb)) &&
  14.548 -	    skb_linearize(skb, GFP_ATOMIC) != 0) {
  14.549 -		kfree_skb(skb);
  14.550 -		return -ENOMEM;
  14.551 -	}
  14.552 -
  14.553 -	/* Grab device queue */
  14.554 -	spin_lock_bh(&dev->queue_lock);
  14.555 -	q = dev->qdisc;
  14.556 -	if (q->enqueue) {
  14.557 -		int ret = q->enqueue(skb, q);
  14.558 -
  14.559 -		qdisc_run(dev);
  14.560 -
  14.561 -		spin_unlock_bh(&dev->queue_lock);
  14.562 -		return ret == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : ret;
  14.563 -	}
  14.564 +    spin_lock_bh(&dev->queue_lock);
  14.565 +    if (dev->flags&IFF_UP) {
  14.566 +        int cpu = smp_processor_id();
  14.567  
  14.568 -	/* The device has no queue. Common case for software devices:
  14.569 -	   loopback, all the sorts of tunnels...
  14.570 -
  14.571 -	   Really, it is unlikely that xmit_lock protection is necessary here.
  14.572 -	   (f.e. loopback and IP tunnels are clean ignoring statistics counters.)
  14.573 -	   However, it is possible, that they rely on protection
  14.574 -	   made by us here.
  14.575 -
  14.576 -	   Check this and shot the lock. It is not prone from deadlocks.
  14.577 -	   Either shot noqueue qdisc, it is even simpler 8)
  14.578 -	 */
  14.579 -	if (dev->flags&IFF_UP) {
  14.580 -		int cpu = smp_processor_id();
  14.581 -
  14.582 -		if (dev->xmit_lock_owner != cpu) {
  14.583 -			spin_unlock(&dev->queue_lock);
  14.584 -			spin_lock(&dev->xmit_lock);
  14.585 -			dev->xmit_lock_owner = cpu;
  14.586 +        if (dev->xmit_lock_owner != cpu) {
  14.587 +            spin_unlock(&dev->queue_lock);
  14.588 +            spin_lock(&dev->xmit_lock);
  14.589 +            dev->xmit_lock_owner = cpu;
  14.590  
  14.591 -			if (!netif_queue_stopped(dev)) {
  14.592 -				if (dev->hard_start_xmit(skb, dev) == 0) {
  14.593 -					dev->xmit_lock_owner = -1;
  14.594 -					spin_unlock_bh(&dev->xmit_lock);
  14.595 -					return 0;
  14.596 -				}
  14.597 -			}
  14.598 -			dev->xmit_lock_owner = -1;
  14.599 -			spin_unlock_bh(&dev->xmit_lock);
  14.600 -			kfree_skb(skb);
  14.601 -			return -ENETDOWN;
  14.602 -		} else {
  14.603 -			/* Recursion is detected! It is possible, unfortunately */
  14.604 -		}
  14.605 -	}
  14.606 -	spin_unlock_bh(&dev->queue_lock);
  14.607 +            if (!netif_queue_stopped(dev)) {
  14.608 +                if (dev->hard_start_xmit(skb, dev) == 0) {
  14.609 +                    dev->xmit_lock_owner = -1;
  14.610 +                    spin_unlock_bh(&dev->xmit_lock);
  14.611 +                    return 0;
  14.612 +                }
  14.613 +            }
  14.614 +            dev->xmit_lock_owner = -1;
  14.615 +            spin_unlock_bh(&dev->xmit_lock);
  14.616 +            kfree_skb(skb);
  14.617 +            return -ENETDOWN;
  14.618 +        }
  14.619 +    }
  14.620 +    spin_unlock_bh(&dev->queue_lock);
  14.621  
  14.622 -	kfree_skb(skb);
  14.623 -	return -ENETDOWN;
  14.624 +    kfree_skb(skb);
  14.625 +    return -ENETDOWN;
  14.626  }
  14.627  
  14.628  
  14.629 @@ -609,133 +567,114 @@ spinlock_t netdev_fc_lock = SPIN_LOCK_UN
  14.630  
  14.631  static struct
  14.632  {
  14.633 -	void (*stimul)(struct net_device *);
  14.634 -	struct net_device *dev;
  14.635 +    void (*stimul)(struct net_device *);
  14.636 +    struct net_device *dev;
  14.637  } netdev_fc_slots[BITS_PER_LONG];
  14.638  
  14.639 -int netdev_register_fc(struct net_device *dev, void (*stimul)(struct net_device *dev))
  14.640 +int netdev_register_fc(struct net_device *dev, 
  14.641 +                       void (*stimul)(struct net_device *dev))
  14.642  {
  14.643 -	int bit = 0;
  14.644 -	unsigned long flags;
  14.645 +    int bit = 0;
  14.646 +    unsigned long flags;
  14.647  
  14.648 -	spin_lock_irqsave(&netdev_fc_lock, flags);
  14.649 -	if (netdev_fc_mask != ~0UL) {
  14.650 -		bit = ffz(netdev_fc_mask);
  14.651 -		netdev_fc_slots[bit].stimul = stimul;
  14.652 -		netdev_fc_slots[bit].dev = dev;
  14.653 -		set_bit(bit, &netdev_fc_mask);
  14.654 -		clear_bit(bit, &netdev_fc_xoff);
  14.655 -	}
  14.656 -	spin_unlock_irqrestore(&netdev_fc_lock, flags);
  14.657 -	return bit;
  14.658 +    spin_lock_irqsave(&netdev_fc_lock, flags);
  14.659 +    if (netdev_fc_mask != ~0UL) {
  14.660 +        bit = ffz(netdev_fc_mask);
  14.661 +        netdev_fc_slots[bit].stimul = stimul;
  14.662 +        netdev_fc_slots[bit].dev = dev;
  14.663 +        set_bit(bit, &netdev_fc_mask);
  14.664 +        clear_bit(bit, &netdev_fc_xoff);
  14.665 +    }
  14.666 +    spin_unlock_irqrestore(&netdev_fc_lock, flags);
  14.667 +    return bit;
  14.668  }
  14.669  
  14.670  void netdev_unregister_fc(int bit)
  14.671  {
  14.672 -	unsigned long flags;
  14.673 +    unsigned long flags;
  14.674  
  14.675 -	spin_lock_irqsave(&netdev_fc_lock, flags);
  14.676 -	if (bit > 0) {
  14.677 -		netdev_fc_slots[bit].stimul = NULL;
  14.678 -		netdev_fc_slots[bit].dev = NULL;
  14.679 -		clear_bit(bit, &netdev_fc_mask);
  14.680 -		clear_bit(bit, &netdev_fc_xoff);
  14.681 -	}
  14.682 -	spin_unlock_irqrestore(&netdev_fc_lock, flags);
  14.683 +    spin_lock_irqsave(&netdev_fc_lock, flags);
  14.684 +    if (bit > 0) {
  14.685 +        netdev_fc_slots[bit].stimul = NULL;
  14.686 +        netdev_fc_slots[bit].dev = NULL;
  14.687 +        clear_bit(bit, &netdev_fc_mask);
  14.688 +        clear_bit(bit, &netdev_fc_xoff);
  14.689 +    }
  14.690 +    spin_unlock_irqrestore(&netdev_fc_lock, flags);
  14.691  }
  14.692  
  14.693  static void netdev_wakeup(void)
  14.694  {
  14.695 -	unsigned long xoff;
  14.696 +    unsigned long xoff;
  14.697  
  14.698 -	spin_lock(&netdev_fc_lock);
  14.699 -	xoff = netdev_fc_xoff;
  14.700 -	netdev_fc_xoff = 0;
  14.701 -	while (xoff) {
  14.702 -		int i = ffz(~xoff);
  14.703 -		xoff &= ~(1<<i);
  14.704 -		netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
  14.705 -	}
  14.706 -	spin_unlock(&netdev_fc_lock);
  14.707 +    spin_lock(&netdev_fc_lock);
  14.708 +    xoff = netdev_fc_xoff;
  14.709 +    netdev_fc_xoff = 0;
  14.710 +    while (xoff) {
  14.711 +        int i = ffz(~xoff);
  14.712 +        xoff &= ~(1<<i);
  14.713 +        netdev_fc_slots[i].stimul(netdev_fc_slots[i].dev);
  14.714 +    }
  14.715 +    spin_unlock(&netdev_fc_lock);
  14.716  }
  14.717  #endif
  14.718  
  14.719 -static void get_sample_stats(int cpu)
  14.720 -{
  14.721 -	int blog = softnet_data[cpu].input_pkt_queue.qlen;
  14.722 -	int avg_blog = softnet_data[cpu].avg_blog;
  14.723 -
  14.724 -	avg_blog = (avg_blog >> 1)+ (blog >> 1);
  14.725 -
  14.726 -	if (avg_blog > mod_cong) {
  14.727 -		/* Above moderate congestion levels. */
  14.728 -		softnet_data[cpu].cng_level = NET_RX_CN_HIGH;
  14.729 -	} else if (avg_blog > lo_cong) {
  14.730 -		softnet_data[cpu].cng_level = NET_RX_CN_MOD;
  14.731 -	} else if (avg_blog > no_cong) 
  14.732 -		softnet_data[cpu].cng_level = NET_RX_CN_LOW;
  14.733 -	else  /* no congestion */
  14.734 -		softnet_data[cpu].cng_level = NET_RX_SUCCESS;
  14.735 -
  14.736 -	softnet_data[cpu].avg_blog = avg_blog;
  14.737 -}
  14.738 -
  14.739  void deliver_packet(struct sk_buff *skb, net_vif_t *vif)
  14.740  {
  14.741 -        net_shadow_ring_t *shadow_ring;
  14.742 -        rx_shadow_entry_t *rx;
  14.743 -        unsigned long *g_pte; 
  14.744 -        struct pfn_info *g_pfn, *h_pfn;
  14.745 -        unsigned int i; 
  14.746 +    net_shadow_ring_t *shadow_ring;
  14.747 +    rx_shadow_entry_t *rx;
  14.748 +    unsigned long *g_pte; 
  14.749 +    struct pfn_info *g_pfn, *h_pfn;
  14.750 +    unsigned int i; 
  14.751  
  14.752 -        memset(skb->mac.ethernet->h_dest, 0, ETH_ALEN);
  14.753 -        if ( ntohs(skb->mac.ethernet->h_proto) == ETH_P_ARP )
  14.754 -        {
  14.755 -            memset(skb->nh.raw + 18, 0, ETH_ALEN);
  14.756 -        }
  14.757 -        shadow_ring = vif->shadow_ring;
  14.758 +    memset(skb->mac.ethernet->h_dest, 0, ETH_ALEN);
  14.759 +    if ( ntohs(skb->mac.ethernet->h_proto) == ETH_P_ARP )
  14.760 +    {
  14.761 +        memset(skb->nh.raw + 18, 0, ETH_ALEN);
  14.762 +    }
  14.763 +    shadow_ring = vif->shadow_ring;
  14.764  
  14.765 -        if ( (i = shadow_ring->rx_cons) == shadow_ring->rx_prod )
  14.766 -        {
  14.767 -            return;
  14.768 -        }
  14.769 +    if ( (i = shadow_ring->rx_cons) == shadow_ring->rx_prod )
  14.770 +    {
  14.771 +        return;
  14.772 +    }
  14.773  
  14.774 -        if ( shadow_ring->rx_ring[i].status != RING_STATUS_OK )
  14.775 -        {
  14.776 -            DPRINTK("Bad buffer in deliver_packet()\n");
  14.777 -            shadow_ring->rx_cons = RX_RING_INC(i);
  14.778 -            return;
  14.779 -        }
  14.780 +    if ( shadow_ring->rx_ring[i].status != RING_STATUS_OK )
  14.781 +    {
  14.782 +        DPRINTK("Bad buffer in deliver_packet()\n");
  14.783 +        shadow_ring->rx_cons = RX_RING_INC(i);
  14.784 +        return;
  14.785 +    }
  14.786  
  14.787 -        rx = shadow_ring->rx_ring + i;
  14.788 -        if ( (skb->len + ETH_HLEN) < rx->size )
  14.789 -            rx->size = skb->len + ETH_HLEN;
  14.790 +    rx = shadow_ring->rx_ring + i;
  14.791 +    if ( (skb->len + ETH_HLEN) < rx->size )
  14.792 +        rx->size = skb->len + ETH_HLEN;
  14.793              
  14.794 -        g_pte = map_domain_mem(rx->addr);
  14.795 +    g_pte = map_domain_mem(rx->addr);
  14.796  
  14.797 -        g_pfn = frame_table + (*g_pte >> PAGE_SHIFT);
  14.798 -        h_pfn = skb->pf;
  14.799 +    g_pfn = frame_table + (*g_pte >> PAGE_SHIFT);
  14.800 +    h_pfn = skb->pf;
  14.801          
  14.802 -        h_pfn->tot_count = h_pfn->type_count = 1;
  14.803 -        g_pfn->tot_count = g_pfn->type_count = 0;
  14.804 -        h_pfn->flags = g_pfn->flags & (~PG_type_mask);
  14.805 +    h_pfn->tot_count = h_pfn->type_count = 1;
  14.806 +    g_pfn->tot_count = g_pfn->type_count = 0;
  14.807 +    h_pfn->flags = g_pfn->flags & (~PG_type_mask);
  14.808          
  14.809 -        if (*g_pte & _PAGE_RW) h_pfn->flags |= PGT_writeable_page;
  14.810 -        g_pfn->flags = 0;
  14.811 +    if (*g_pte & _PAGE_RW) h_pfn->flags |= PGT_writeable_page;
  14.812 +    g_pfn->flags = 0;
  14.813          
  14.814 -        /* Point the guest at the new machine frame. */
  14.815 -        machine_to_phys_mapping[h_pfn - frame_table] 
  14.816 -            = machine_to_phys_mapping[g_pfn - frame_table];        
  14.817 -        *g_pte = (*g_pte & ~PAGE_MASK) 
  14.818 -            | (((h_pfn - frame_table) << PAGE_SHIFT) & PAGE_MASK);
  14.819 -        *g_pte |= _PAGE_PRESENT;
  14.820 +    /* Point the guest at the new machine frame. */
  14.821 +    machine_to_phys_mapping[h_pfn - frame_table] 
  14.822 +        = machine_to_phys_mapping[g_pfn - frame_table];        
  14.823 +    *g_pte = (*g_pte & ~PAGE_MASK) 
  14.824 +        | (((h_pfn - frame_table) << PAGE_SHIFT) & PAGE_MASK);
  14.825 +    *g_pte |= _PAGE_PRESENT;
  14.826          
  14.827 -        unmap_domain_mem(g_pte);
  14.828 +    unmap_domain_mem(g_pte);
  14.829  
  14.830 -        /* Our skbuff now points at the guest's old frame. */
  14.831 -        skb->pf = g_pfn;
  14.832 +    /* Our skbuff now points at the guest's old frame. */
  14.833 +    skb->pf = g_pfn;
  14.834          
  14.835 -        shadow_ring->rx_cons = RX_RING_INC(i);
  14.836 +    shadow_ring->rx_cons = RX_RING_INC(i);
  14.837  }
  14.838  
  14.839  /* Deliver skb to an old protocol, which is not threaded well
  14.840 @@ -763,211 +702,207 @@ void deliver_packet(struct sk_buff *skb,
  14.841  int netif_rx(struct sk_buff *skb)
  14.842  {
  14.843  #ifdef CONFIG_SMP
  14.844 -        unsigned long cpu_mask;
  14.845 +    unsigned long cpu_mask;
  14.846  #endif
  14.847          
  14.848 -        struct task_struct *p;
  14.849 -	int this_cpu = smp_processor_id();
  14.850 -	struct softnet_data *queue;
  14.851 -	unsigned long flags;
  14.852 -        net_vif_t *vif;
  14.853 -
  14.854 -	local_irq_save(flags);
  14.855 +    struct task_struct *p;
  14.856 +    int this_cpu = smp_processor_id();
  14.857 +    struct softnet_data *queue;
  14.858 +    unsigned long flags;
  14.859 +    net_vif_t *vif;
  14.860  
  14.861 -        if (skb->skb_type != SKB_ZERO_COPY) 
  14.862 -            BUG();
  14.863 -                
  14.864 -	if (skb->stamp.tv_sec == 0)
  14.865 -	    get_fast_time(&skb->stamp);
  14.866 +    local_irq_save(flags);
  14.867  
  14.868 -        if ( (skb->data - skb->head) != (18 + ETH_HLEN) )
  14.869 -            BUG();
  14.870 +    ASSERT(skb->skb_type == SKB_ZERO_COPY);
  14.871 +    ASSERT((skb->data - skb->head) == (18 + ETH_HLEN));
  14.872          
  14.873 -        skb->head = (u8 *)map_domain_mem(((skb->pf - frame_table) << PAGE_SHIFT));
  14.874 +    skb->head = (u8 *)map_domain_mem(((skb->pf - frame_table) << PAGE_SHIFT));
  14.875  
  14.876 -        /*
  14.877 -         * remapping this address really screws up all the skb pointers.  We
  14.878 -         * need to map them all here sufficiently to get the packet
  14.879 -         * demultiplexed. this remapping happens more than once in the code and
  14.880 -         * is grim.  It will be fixed in a later update -- drivers should be
  14.881 -         * able to align the packet arbitrarily.
  14.882 -         */
  14.883 +    /*
  14.884 +     * remapping this address really screws up all the skb pointers.  We
  14.885 +     * need to map them all here sufficiently to get the packet
  14.886 +     * demultiplexed. this remapping happens more than once in the code and
  14.887 +     * is grim.  It will be fixed in a later update -- drivers should be
  14.888 +     * able to align the packet arbitrarily.
  14.889 +     */
  14.890                  
  14.891 -        skb->data = skb->head;
  14.892 -        skb_reserve(skb,18); /* 18 is the 16 from dev_alloc_skb plus 2 for
  14.893 -                                IP header alignment. */
  14.894 -        skb->mac.raw = skb->data;
  14.895 -        skb->data += ETH_HLEN;
  14.896 -        skb->nh.raw = skb->data;
  14.897 +    skb->data = skb->head;
  14.898 +    skb_reserve(skb,18); /* 18 is the 16 from dev_alloc_skb plus 2 for
  14.899 +                            IP header alignment. */
  14.900 +    skb->mac.raw = skb->data;
  14.901 +    skb->data += ETH_HLEN;
  14.902 +    skb->nh.raw = skb->data;
  14.903 +        
  14.904 +    queue = &softnet_data[this_cpu];
  14.905          
  14.906 -	queue = &softnet_data[this_cpu];
  14.907 -        
  14.908 -	netdev_rx_stat[this_cpu].total++;
  14.909 +    netdev_rx_stat[this_cpu].total++;
  14.910  
  14.911 -        if ( skb->src_vif == VIF_UNKNOWN_INTERFACE )
  14.912 -            skb->src_vif = VIF_PHYSICAL_INTERFACE;
  14.913 +    if ( skb->src_vif == VIF_UNKNOWN_INTERFACE )
  14.914 +        skb->src_vif = VIF_PHYSICAL_INTERFACE;
  14.915                  
  14.916 -        if ( skb->dst_vif == VIF_UNKNOWN_INTERFACE )
  14.917 -            skb->dst_vif = __net_get_target_vif(skb->mac.raw, skb->len, skb->src_vif);
  14.918 +    if ( skb->dst_vif == VIF_UNKNOWN_INTERFACE )
  14.919 +        skb->dst_vif = __net_get_target_vif(skb->mac.raw, 
  14.920 +                                            skb->len, skb->src_vif);
  14.921          
  14.922 -        if ( (vif = sys_vif_list[skb->dst_vif]) == NULL )
  14.923 -            goto drop;
  14.924 +    if ( (vif = sys_vif_list[skb->dst_vif]) == NULL )
  14.925 +        goto drop;
  14.926  
  14.927 -        /*
  14.928 -         * This lock-and-walk of the task list isn't really necessary, and is
  14.929 -         * an artifact of the old code.  The vif contains a pointer to the skb
  14.930 -         * list we are going to queue the packet in, so the lock and the inner
  14.931 -         * loop could be removed. The argument against this is a possible race
  14.932 -         * in which a domain is killed as packets are being delivered to it.
  14.933 -         * This would result in the dest vif vanishing before we can deliver to
  14.934 -         * it.
  14.935 -         */
  14.936 +    /*
  14.937 +     * This lock-and-walk of the task list isn't really necessary, and is
  14.938 +     * an artifact of the old code.  The vif contains a pointer to the skb
  14.939 +     * list we are going to queue the packet in, so the lock and the inner
  14.940 +     * loop could be removed. The argument against this is a possible race
  14.941 +     * in which a domain is killed as packets are being delivered to it.
  14.942 +     * This would result in the dest vif vanishing before we can deliver to
  14.943 +     * it.
  14.944 +     */
  14.945          
  14.946 -        if ( skb->dst_vif >= VIF_PHYSICAL_INTERFACE )
  14.947 -        {
  14.948 -            read_lock(&tasklist_lock);
  14.949 -            p = &idle0_task;
  14.950 -            do {
  14.951 -                if ( p->domain != vif->domain ) continue;
  14.952 -                if ( vif->skb_list.qlen > 100 ) break;
  14.953 -                deliver_packet(skb, vif);
  14.954 -                cpu_mask = mark_hyp_event(p, _HYP_EVENT_NET_RX);
  14.955 -                read_unlock(&tasklist_lock);
  14.956 -                goto found;
  14.957 -            }
  14.958 -            while ( (p = p->next_task) != &idle0_task );
  14.959 -            read_unlock(&tasklist_lock); 
  14.960 -            goto drop;
  14.961 +    if ( skb->dst_vif >= VIF_PHYSICAL_INTERFACE )
  14.962 +    {
  14.963 +        read_lock(&tasklist_lock);
  14.964 +        p = &idle0_task;
  14.965 +        do {
  14.966 +            if ( p->domain != vif->domain ) continue;
  14.967 +            if ( vif->skb_list.qlen > 100 ) break;
  14.968 +            deliver_packet(skb, vif);
  14.969 +            cpu_mask = mark_hyp_event(p, _HYP_EVENT_NET_RX);
  14.970 +            read_unlock(&tasklist_lock);
  14.971 +            goto found;
  14.972          }
  14.973 -
  14.974 -drop:
  14.975 -	netdev_rx_stat[this_cpu].dropped++;
  14.976 -        unmap_domain_mem(skb->head);
  14.977 -	kfree_skb(skb);
  14.978 -        local_irq_restore(flags);
  14.979 -	return NET_RX_DROP;
  14.980 +        while ( (p = p->next_task) != &idle0_task );
  14.981 +        read_unlock(&tasklist_lock); 
  14.982 +        goto drop;
  14.983 +    }
  14.984  
  14.985 -found:
  14.986 -        unmap_domain_mem(skb->head);
  14.987 -        skb->head = skb->data = skb->tail = (void *)0xdeadbeef;
  14.988 -        kfree_skb(skb);
  14.989 -        hyp_event_notify(cpu_mask);
  14.990 -        local_irq_restore(flags);
  14.991 -        return 0;
  14.992 + drop:
  14.993 +    netdev_rx_stat[this_cpu].dropped++;
  14.994 +    unmap_domain_mem(skb->head);
  14.995 +    kfree_skb(skb);
  14.996 +    local_irq_restore(flags);
  14.997 +    return NET_RX_DROP;
  14.998 +
  14.999 + found:
 14.1000 +    unmap_domain_mem(skb->head);
 14.1001 +    skb->head = skb->data = skb->tail = (void *)0xdeadbeef;
 14.1002 +    kfree_skb(skb);
 14.1003 +    hyp_event_notify(cpu_mask);
 14.1004 +    local_irq_restore(flags);
 14.1005 +    return 0;
 14.1006  }
 14.1007  
 14.1008  
 14.1009 -static int deliver_to_old_ones(struct packet_type *pt, struct sk_buff *skb, int last)
 14.1010 +static int deliver_to_old_ones(struct packet_type *pt, 
 14.1011 +                               struct sk_buff *skb, int last)
 14.1012  {
 14.1013 -	static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED;
 14.1014 -	int ret = NET_RX_DROP;
 14.1015 +    static spinlock_t net_bh_lock = SPIN_LOCK_UNLOCKED;
 14.1016 +    int ret = NET_RX_DROP;
 14.1017  
 14.1018  
 14.1019 -	if (!last) {
 14.1020 -		skb = skb_clone(skb, GFP_ATOMIC);
 14.1021 -		if (skb == NULL)
 14.1022 -			return ret;
 14.1023 -	}
 14.1024 -	if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC) != 0) {
 14.1025 -		kfree_skb(skb);
 14.1026 -		return ret;
 14.1027 -	}
 14.1028 +    if (!last) {
 14.1029 +        skb = skb_clone(skb, GFP_ATOMIC);
 14.1030 +        if (skb == NULL)
 14.1031 +            return ret;
 14.1032 +    }
 14.1033 +    if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC) != 0) {
 14.1034 +        kfree_skb(skb);
 14.1035 +        return ret;
 14.1036 +    }
 14.1037  
 14.1038 -	/* The assumption (correct one) is that old protocols
 14.1039 -	   did not depened on BHs different of NET_BH and TIMER_BH.
 14.1040 -	 */
 14.1041 +    /* The assumption (correct one) is that old protocols
 14.1042 +       did not depened on BHs different of NET_BH and TIMER_BH.
 14.1043 +    */
 14.1044  
 14.1045 -	/* Emulate NET_BH with special spinlock */
 14.1046 -	spin_lock(&net_bh_lock);
 14.1047 +    /* Emulate NET_BH with special spinlock */
 14.1048 +    spin_lock(&net_bh_lock);
 14.1049  
 14.1050 -	/* Disable timers and wait for all timers completion */
 14.1051 -	tasklet_disable(bh_task_vec+TIMER_BH);
 14.1052 +    /* Disable timers and wait for all timers completion */
 14.1053 +    tasklet_disable(bh_task_vec+TIMER_BH);
 14.1054  
 14.1055 -	ret = pt->func(skb, skb->dev, pt);
 14.1056 +    ret = pt->func(skb, skb->dev, pt);
 14.1057  
 14.1058 -	tasklet_hi_enable(bh_task_vec+TIMER_BH);
 14.1059 -	spin_unlock(&net_bh_lock);
 14.1060 -	return ret;
 14.1061 +    tasklet_hi_enable(bh_task_vec+TIMER_BH);
 14.1062 +    spin_unlock(&net_bh_lock);
 14.1063 +    return ret;
 14.1064  }
 14.1065  
 14.1066 -static void net_tx_action(struct softirq_action *h)
 14.1067 +static void net_tx_action(unsigned long unused)
 14.1068  {
 14.1069 -	int cpu = smp_processor_id();
 14.1070 +    int cpu = smp_processor_id();
 14.1071  
 14.1072 -	if (softnet_data[cpu].completion_queue) {
 14.1073 -		struct sk_buff *clist;
 14.1074 +    if (softnet_data[cpu].completion_queue) {
 14.1075 +        struct sk_buff *clist;
 14.1076  
 14.1077 -		local_irq_disable();
 14.1078 -		clist = softnet_data[cpu].completion_queue;
 14.1079 -		softnet_data[cpu].completion_queue = NULL;
 14.1080 -		local_irq_enable();
 14.1081 +        local_irq_disable();
 14.1082 +        clist = softnet_data[cpu].completion_queue;
 14.1083 +        softnet_data[cpu].completion_queue = NULL;
 14.1084 +        local_irq_enable();
 14.1085  
 14.1086 -		while (clist != NULL) {
 14.1087 -			struct sk_buff *skb = clist;
 14.1088 -			clist = clist->next;
 14.1089 +        while (clist != NULL) {
 14.1090 +            struct sk_buff *skb = clist;
 14.1091 +            clist = clist->next;
 14.1092  
 14.1093 -			BUG_TRAP(atomic_read(&skb->users) == 0);
 14.1094 -			__kfree_skb(skb);
 14.1095 -		}
 14.1096 -	}
 14.1097 +            BUG_TRAP(atomic_read(&skb->users) == 0);
 14.1098 +            __kfree_skb(skb);
 14.1099 +        }
 14.1100 +    }
 14.1101  
 14.1102 -	if (softnet_data[cpu].output_queue) {
 14.1103 -		struct net_device *head;
 14.1104 +    if (softnet_data[cpu].output_queue) {
 14.1105 +        struct net_device *head;
 14.1106  
 14.1107 -		local_irq_disable();
 14.1108 -		head = softnet_data[cpu].output_queue;
 14.1109 -		softnet_data[cpu].output_queue = NULL;
 14.1110 -		local_irq_enable();
 14.1111 +        local_irq_disable();
 14.1112 +        head = softnet_data[cpu].output_queue;
 14.1113 +        softnet_data[cpu].output_queue = NULL;
 14.1114 +        local_irq_enable();
 14.1115  
 14.1116 -		while (head != NULL) {
 14.1117 -			struct net_device *dev = head;
 14.1118 -			head = head->next_sched;
 14.1119 +        while (head != NULL) {
 14.1120 +            struct net_device *dev = head;
 14.1121 +            head = head->next_sched;
 14.1122  
 14.1123 -			smp_mb__before_clear_bit();
 14.1124 -			clear_bit(__LINK_STATE_SCHED, &dev->state);
 14.1125 +            smp_mb__before_clear_bit();
 14.1126 +            clear_bit(__LINK_STATE_SCHED, &dev->state);
 14.1127  
 14.1128 -			if (spin_trylock(&dev->queue_lock)) {
 14.1129 -				qdisc_run(dev);
 14.1130 -				spin_unlock(&dev->queue_lock);
 14.1131 -			} else {
 14.1132 -				netif_schedule(dev);
 14.1133 -			}
 14.1134 -		}
 14.1135 -	}
 14.1136 +            if (spin_trylock(&dev->queue_lock)) {
 14.1137 +				/*qdisc_run(dev); XXX KAF */
 14.1138 +                spin_unlock(&dev->queue_lock);
 14.1139 +            } else {
 14.1140 +                netif_schedule(dev);
 14.1141 +            }
 14.1142 +        }
 14.1143 +    }
 14.1144  }
 14.1145 -
 14.1146 +DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
 14.1147  
 14.1148  #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 14.1149  void (*br_handle_frame_hook)(struct sk_buff *skb) = NULL;
 14.1150  #endif
 14.1151  
 14.1152  static __inline__ int handle_bridge(struct sk_buff *skb,
 14.1153 -				     struct packet_type *pt_prev)
 14.1154 +                                    struct packet_type *pt_prev)
 14.1155  {
 14.1156 -	int ret = NET_RX_DROP;
 14.1157 +    int ret = NET_RX_DROP;
 14.1158  
 14.1159 -	if (pt_prev) {
 14.1160 -		if (!pt_prev->data)
 14.1161 -			ret = deliver_to_old_ones(pt_prev, skb, 0);
 14.1162 -		else {
 14.1163 -			atomic_inc(&skb->users);
 14.1164 -			ret = pt_prev->func(skb, skb->dev, pt_prev);
 14.1165 -		}
 14.1166 -	}
 14.1167 +    if (pt_prev) {
 14.1168 +        if (!pt_prev->data)
 14.1169 +            ret = deliver_to_old_ones(pt_prev, skb, 0);
 14.1170 +        else {
 14.1171 +            atomic_inc(&skb->users);
 14.1172 +            ret = pt_prev->func(skb, skb->dev, pt_prev);
 14.1173 +        }
 14.1174 +    }
 14.1175  
 14.1176  #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 14.1177 -	br_handle_frame_hook(skb);
 14.1178 +    br_handle_frame_hook(skb);
 14.1179  #endif
 14.1180 -	return ret;
 14.1181 +    return ret;
 14.1182  }
 14.1183  
 14.1184  
 14.1185  #ifdef CONFIG_NET_DIVERT
 14.1186  static inline void handle_diverter(struct sk_buff *skb)
 14.1187  {
 14.1188 -	/* if diversion is supported on device, then divert */
 14.1189 -	if (skb->dev->divert && skb->dev->divert->divert)
 14.1190 -		divert_frame(skb);
 14.1191 +    /* if diversion is supported on device, then divert */
 14.1192 +    if (skb->dev->divert && skb->dev->divert->divert)
 14.1193 +        divert_frame(skb);
 14.1194  }
 14.1195  #endif   /* CONFIG_NET_DIVERT */
 14.1196  
 14.1197 @@ -1031,29 +966,29 @@ void update_shared_ring(void)
 14.1198  
 14.1199  static int dev_ifname(struct ifreq *arg)
 14.1200  {
 14.1201 -	struct net_device *dev;
 14.1202 -	struct ifreq ifr;
 14.1203 +    struct net_device *dev;
 14.1204 +    struct ifreq ifr;
 14.1205  
 14.1206 -	/*
 14.1207 -	 *	Fetch the caller's info block. 
 14.1208 -	 */
 14.1209 +    /*
 14.1210 +     *	Fetch the caller's info block. 
 14.1211 +     */
 14.1212  	
 14.1213 -	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 14.1214 -		return -EFAULT;
 14.1215 +    if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 14.1216 +        return -EFAULT;
 14.1217  
 14.1218 -	read_lock(&dev_base_lock);
 14.1219 -	dev = __dev_get_by_index(ifr.ifr_ifindex);
 14.1220 -	if (!dev) {
 14.1221 -		read_unlock(&dev_base_lock);
 14.1222 -		return -ENODEV;
 14.1223 -	}
 14.1224 +    read_lock(&dev_base_lock);
 14.1225 +    dev = __dev_get_by_index(ifr.ifr_ifindex);
 14.1226 +    if (!dev) {
 14.1227 +        read_unlock(&dev_base_lock);
 14.1228 +        return -ENODEV;
 14.1229 +    }
 14.1230  
 14.1231 -	strcpy(ifr.ifr_name, dev->name);
 14.1232 -	read_unlock(&dev_base_lock);
 14.1233 +    strcpy(ifr.ifr_name, dev->name);
 14.1234 +    read_unlock(&dev_base_lock);
 14.1235  
 14.1236 -	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.1237 -		return -EFAULT;
 14.1238 -	return 0;
 14.1239 +    if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.1240 +        return -EFAULT;
 14.1241 +    return 0;
 14.1242  }
 14.1243  
 14.1244  
 14.1245 @@ -1071,28 +1006,28 @@ static int dev_ifname(struct ifreq *arg)
 14.1246   
 14.1247  int netdev_set_master(struct net_device *slave, struct net_device *master)
 14.1248  {
 14.1249 -	struct net_device *old = slave->master;
 14.1250 +    struct net_device *old = slave->master;
 14.1251  
 14.1252 -	if (master) {
 14.1253 -		if (old)
 14.1254 -			return -EBUSY;
 14.1255 -		dev_hold(master);
 14.1256 -	}
 14.1257 +    if (master) {
 14.1258 +        if (old)
 14.1259 +            return -EBUSY;
 14.1260 +        dev_hold(master);
 14.1261 +    }
 14.1262  
 14.1263 -	br_write_lock_bh(BR_NETPROTO_LOCK);
 14.1264 -	slave->master = master;
 14.1265 -	br_write_unlock_bh(BR_NETPROTO_LOCK);
 14.1266 +    br_write_lock_bh(BR_NETPROTO_LOCK);
 14.1267 +    slave->master = master;
 14.1268 +    br_write_unlock_bh(BR_NETPROTO_LOCK);
 14.1269  
 14.1270 -	if (old)
 14.1271 -		dev_put(old);
 14.1272 +    if (old)
 14.1273 +        dev_put(old);
 14.1274  
 14.1275 -	if (master)
 14.1276 -		slave->flags |= IFF_SLAVE;
 14.1277 -	else
 14.1278 -		slave->flags &= ~IFF_SLAVE;
 14.1279 +    if (master)
 14.1280 +        slave->flags |= IFF_SLAVE;
 14.1281 +    else
 14.1282 +        slave->flags &= ~IFF_SLAVE;
 14.1283  
 14.1284 -	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
 14.1285 -	return 0;
 14.1286 +    rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
 14.1287 +    return 0;
 14.1288  }
 14.1289  
 14.1290  /**
 14.1291 @@ -1108,23 +1043,23 @@ int netdev_set_master(struct net_device 
 14.1292   
 14.1293  void dev_set_promiscuity(struct net_device *dev, int inc)
 14.1294  {
 14.1295 -	unsigned short old_flags = dev->flags;
 14.1296 +    unsigned short old_flags = dev->flags;
 14.1297  
 14.1298 -	dev->flags |= IFF_PROMISC;
 14.1299 -	if ((dev->promiscuity += inc) == 0)
 14.1300 -		dev->flags &= ~IFF_PROMISC;
 14.1301 -	if (dev->flags^old_flags) {
 14.1302 +    dev->flags |= IFF_PROMISC;
 14.1303 +    if ((dev->promiscuity += inc) == 0)
 14.1304 +        dev->flags &= ~IFF_PROMISC;
 14.1305 +    if (dev->flags^old_flags) {
 14.1306  #ifdef CONFIG_NET_FASTROUTE
 14.1307 -		if (dev->flags&IFF_PROMISC) {
 14.1308 -			netdev_fastroute_obstacles++;
 14.1309 -			dev_clear_fastroute(dev);
 14.1310 -		} else
 14.1311 -			netdev_fastroute_obstacles--;
 14.1312 +        if (dev->flags&IFF_PROMISC) {
 14.1313 +            netdev_fastroute_obstacles++;
 14.1314 +            dev_clear_fastroute(dev);
 14.1315 +        } else
 14.1316 +            netdev_fastroute_obstacles--;
 14.1317  #endif
 14.1318 -		dev_mc_upload(dev);
 14.1319 -		printk(KERN_INFO "device %s %s promiscuous mode\n",
 14.1320 -		       dev->name, (dev->flags&IFF_PROMISC) ? "entered" : "left");
 14.1321 -	}
 14.1322 +        dev_mc_upload(dev);
 14.1323 +        printk(KERN_INFO "device %s %s promiscuous mode\n",
 14.1324 +               dev->name, (dev->flags&IFF_PROMISC) ? "entered" : "left");
 14.1325 +    }
 14.1326  }
 14.1327  
 14.1328  /**
 14.1329 @@ -1141,73 +1076,74 @@ void dev_set_promiscuity(struct net_devi
 14.1330  
 14.1331  void dev_set_allmulti(struct net_device *dev, int inc)
 14.1332  {
 14.1333 -	unsigned short old_flags = dev->flags;
 14.1334 +    unsigned short old_flags = dev->flags;
 14.1335  
 14.1336 -	dev->flags |= IFF_ALLMULTI;
 14.1337 -	if ((dev->allmulti += inc) == 0)
 14.1338 -		dev->flags &= ~IFF_ALLMULTI;
 14.1339 -	if (dev->flags^old_flags)
 14.1340 -		dev_mc_upload(dev);
 14.1341 +    dev->flags |= IFF_ALLMULTI;
 14.1342 +    if ((dev->allmulti += inc) == 0)
 14.1343 +        dev->flags &= ~IFF_ALLMULTI;
 14.1344 +    if (dev->flags^old_flags)
 14.1345 +        dev_mc_upload(dev);
 14.1346  }
 14.1347  
 14.1348  int dev_change_flags(struct net_device *dev, unsigned flags)
 14.1349  {
 14.1350 -	int ret;
 14.1351 -	int old_flags = dev->flags;
 14.1352 +    int ret;
 14.1353 +    int old_flags = dev->flags;
 14.1354  
 14.1355 -	/*
 14.1356 -	 *	Set the flags on our device.
 14.1357 -	 */
 14.1358 +    /*
 14.1359 +     *	Set the flags on our device.
 14.1360 +     */
 14.1361  
 14.1362 -	dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_NOARP|IFF_DYNAMIC|
 14.1363 -			       IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) |
 14.1364 -				       (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI));
 14.1365 +    dev->flags = (flags & (IFF_DEBUG|IFF_NOTRAILERS|IFF_NOARP|IFF_DYNAMIC|
 14.1366 +                           IFF_MULTICAST|IFF_PORTSEL|IFF_AUTOMEDIA)) |
 14.1367 +        (dev->flags & (IFF_UP|IFF_VOLATILE|IFF_PROMISC|IFF_ALLMULTI));
 14.1368  
 14.1369 -	/*
 14.1370 -	 *	Load in the correct multicast list now the flags have changed.
 14.1371 -	 */				
 14.1372 +    /*
 14.1373 +     *	Load in the correct multicast list now the flags have changed.
 14.1374 +     */				
 14.1375  
 14.1376 -	dev_mc_upload(dev);
 14.1377 +    dev_mc_upload(dev);
 14.1378  
 14.1379 -	/*
 14.1380 -	 *	Have we downed the interface. We handle IFF_UP ourselves
 14.1381 -	 *	according to user attempts to set it, rather than blindly
 14.1382 -	 *	setting it.
 14.1383 -	 */
 14.1384 +    /*
 14.1385 +     *	Have we downed the interface. We handle IFF_UP ourselves
 14.1386 +     *	according to user attempts to set it, rather than blindly
 14.1387 +     *	setting it.
 14.1388 +     */
 14.1389  
 14.1390 -	ret = 0;
 14.1391 -	if ((old_flags^flags)&IFF_UP)	/* Bit is different  ? */
 14.1392 -	{
 14.1393 -		ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
 14.1394 +    ret = 0;
 14.1395 +    if ((old_flags^flags)&IFF_UP)	/* Bit is different  ? */
 14.1396 +    {
 14.1397 +        ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
 14.1398  
 14.1399 -		if (ret == 0) 
 14.1400 -			dev_mc_upload(dev);
 14.1401 -	}
 14.1402 +        if (ret == 0) 
 14.1403 +            dev_mc_upload(dev);
 14.1404 +    }
 14.1405  
 14.1406 -	if (dev->flags&IFF_UP &&
 14.1407 -	    ((old_flags^dev->flags)&~(IFF_UP|IFF_PROMISC|IFF_ALLMULTI|IFF_VOLATILE)))
 14.1408 -		notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
 14.1409 +    if (dev->flags&IFF_UP &&
 14.1410 +        ((old_flags^dev->flags)&
 14.1411 +         ~(IFF_UP|IFF_PROMISC|IFF_ALLMULTI|IFF_VOLATILE)))
 14.1412 +        notifier_call_chain(&netdev_chain, NETDEV_CHANGE, dev);
 14.1413  
 14.1414 -	if ((flags^dev->gflags)&IFF_PROMISC) {
 14.1415 -		int inc = (flags&IFF_PROMISC) ? +1 : -1;
 14.1416 -		dev->gflags ^= IFF_PROMISC;
 14.1417 -		dev_set_promiscuity(dev, inc);
 14.1418 -	}
 14.1419 +    if ((flags^dev->gflags)&IFF_PROMISC) {
 14.1420 +        int inc = (flags&IFF_PROMISC) ? +1 : -1;
 14.1421 +        dev->gflags ^= IFF_PROMISC;
 14.1422 +        dev_set_promiscuity(dev, inc);
 14.1423 +    }
 14.1424  
 14.1425 -	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 14.1426 -	   is important. Some (broken) drivers set IFF_PROMISC, when
 14.1427 -	   IFF_ALLMULTI is requested not asking us and not reporting.
 14.1428 -	 */
 14.1429 -	if ((flags^dev->gflags)&IFF_ALLMULTI) {
 14.1430 -		int inc = (flags&IFF_ALLMULTI) ? +1 : -1;
 14.1431 -		dev->gflags ^= IFF_ALLMULTI;
 14.1432 -		dev_set_allmulti(dev, inc);
 14.1433 -	}
 14.1434 +    /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 14.1435 +       is important. Some (broken) drivers set IFF_PROMISC, when
 14.1436 +       IFF_ALLMULTI is requested not asking us and not reporting.
 14.1437 +    */
 14.1438 +    if ((flags^dev->gflags)&IFF_ALLMULTI) {
 14.1439 +        int inc = (flags&IFF_ALLMULTI) ? +1 : -1;
 14.1440 +        dev->gflags ^= IFF_ALLMULTI;
 14.1441 +        dev_set_allmulti(dev, inc);
 14.1442 +    }
 14.1443  
 14.1444 -	if (old_flags^dev->flags)
 14.1445 -		rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags^dev->flags);
 14.1446 +    if (old_flags^dev->flags)
 14.1447 +        rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags^dev->flags);
 14.1448  
 14.1449 -	return ret;
 14.1450 +    return ret;
 14.1451  }
 14.1452  
 14.1453  /*
 14.1454 @@ -1216,190 +1152,180 @@ int dev_change_flags(struct net_device *
 14.1455   
 14.1456  static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 14.1457  {
 14.1458 -	struct net_device *dev;
 14.1459 -	int err;
 14.1460 +    struct net_device *dev;
 14.1461 +    int err;
 14.1462  
 14.1463 -	if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL)
 14.1464 -		return -ENODEV;
 14.1465 +    if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL)
 14.1466 +        return -ENODEV;
 14.1467  
 14.1468 -	switch(cmd) 
 14.1469 -	{
 14.1470 -		case SIOCGIFFLAGS:	/* Get interface flags */
 14.1471 -			ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI|IFF_RUNNING))
 14.1472 -				|(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI));
 14.1473 -			if (netif_running(dev) && netif_carrier_ok(dev))
 14.1474 -				ifr->ifr_flags |= IFF_RUNNING;
 14.1475 -			return 0;
 14.1476 +    switch(cmd) 
 14.1477 +    {
 14.1478 +    case SIOCGIFFLAGS:	/* Get interface flags */
 14.1479 +        ifr->ifr_flags = (dev->flags&~(IFF_PROMISC|IFF_ALLMULTI|IFF_RUNNING))
 14.1480 +            |(dev->gflags&(IFF_PROMISC|IFF_ALLMULTI));
 14.1481 +        if (netif_running(dev) && netif_carrier_ok(dev))
 14.1482 +            ifr->ifr_flags |= IFF_RUNNING;
 14.1483 +        return 0;
 14.1484  
 14.1485 -		case SIOCSIFFLAGS:	/* Set interface flags */
 14.1486 -			return dev_change_flags(dev, ifr->ifr_flags);
 14.1487 +    case SIOCSIFFLAGS:	/* Set interface flags */
 14.1488 +        return dev_change_flags(dev, ifr->ifr_flags);
 14.1489  		
 14.1490 -		case SIOCGIFMETRIC:	/* Get the metric on the interface (currently unused) */
 14.1491 -			ifr->ifr_metric = 0;
 14.1492 -			return 0;
 14.1493 +    case SIOCGIFMETRIC:	/* Get the metric on the interface */
 14.1494 +        ifr->ifr_metric = 0;
 14.1495 +        return 0;
 14.1496  			
 14.1497 -		case SIOCSIFMETRIC:	/* Set the metric on the interface (currently unused) */
 14.1498 -			return -EOPNOTSUPP;
 14.1499 +    case SIOCSIFMETRIC:	/* Set the metric on the interface */
 14.1500 +        return -EOPNOTSUPP;
 14.1501  	
 14.1502 -		case SIOCGIFMTU:	/* Get the MTU of a device */
 14.1503 -			ifr->ifr_mtu = dev->mtu;
 14.1504 -			return 0;
 14.1505 +    case SIOCGIFMTU:	/* Get the MTU of a device */
 14.1506 +        ifr->ifr_mtu = dev->mtu;
 14.1507 +        return 0;
 14.1508  	
 14.1509 -		case SIOCSIFMTU:	/* Set the MTU of a device */
 14.1510 -			if (ifr->ifr_mtu == dev->mtu)
 14.1511 -				return 0;
 14.1512 +    case SIOCSIFMTU:	/* Set the MTU of a device */
 14.1513 +        if (ifr->ifr_mtu == dev->mtu)
 14.1514 +            return 0;
 14.1515  
 14.1516 -			/*
 14.1517 -			 *	MTU must be positive.
 14.1518 -			 */
 14.1519 +        /*
 14.1520 +         *	MTU must be positive.
 14.1521 +         */
 14.1522  			 
 14.1523 -			if (ifr->ifr_mtu<0)
 14.1524 -				return -EINVAL;
 14.1525 +        if (ifr->ifr_mtu<0)
 14.1526 +            return -EINVAL;
 14.1527  
 14.1528 -			if (!netif_device_present(dev))
 14.1529 -				return -ENODEV;
 14.1530 +        if (!netif_device_present(dev))
 14.1531 +            return -ENODEV;
 14.1532  
 14.1533 -			if (dev->change_mtu)
 14.1534 -				err = dev->change_mtu(dev, ifr->ifr_mtu);
 14.1535 -			else {
 14.1536 -				dev->mtu = ifr->ifr_mtu;
 14.1537 -				err = 0;
 14.1538 -			}
 14.1539 -			if (!err && dev->flags&IFF_UP)
 14.1540 -				notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU, dev);
 14.1541 -			return err;
 14.1542 +        if (dev->change_mtu)
 14.1543 +            err = dev->change_mtu(dev, ifr->ifr_mtu);
 14.1544 +        else {
 14.1545 +            dev->mtu = ifr->ifr_mtu;
 14.1546 +            err = 0;
 14.1547 +        }
 14.1548 +        if (!err && dev->flags&IFF_UP)
 14.1549 +            notifier_call_chain(&netdev_chain, NETDEV_CHANGEMTU, dev);
 14.1550 +        return err;
 14.1551  
 14.1552 -		case SIOCGIFHWADDR:
 14.1553 -			memcpy(ifr->ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
 14.1554 -			ifr->ifr_hwaddr.sa_family=dev->type;
 14.1555 -			return 0;
 14.1556 +    case SIOCGIFHWADDR:
 14.1557 +        memcpy(ifr->ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
 14.1558 +        ifr->ifr_hwaddr.sa_family=dev->type;
 14.1559 +        return 0;
 14.1560  				
 14.1561 -		case SIOCSIFHWADDR:
 14.1562 -			if (dev->set_mac_address == NULL)
 14.1563 -				return -EOPNOTSUPP;
 14.1564 -			if (ifr->ifr_hwaddr.sa_family!=dev->type)
 14.1565 -				return -EINVAL;
 14.1566 -			if (!netif_device_present(dev))
 14.1567 -				return -ENODEV;
 14.1568 -			err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
 14.1569 -			if (!err)
 14.1570 -				notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
 14.1571 -			return err;
 14.1572 +    case SIOCSIFHWADDR:
 14.1573 +        if (dev->set_mac_address == NULL)
 14.1574 +            return -EOPNOTSUPP;
 14.1575 +        if (ifr->ifr_hwaddr.sa_family!=dev->type)
 14.1576 +            return -EINVAL;
 14.1577 +        if (!netif_device_present(dev))
 14.1578 +            return -ENODEV;
 14.1579 +        err = dev->set_mac_address(dev, &ifr->ifr_hwaddr);
 14.1580 +        if (!err)
 14.1581 +            notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
 14.1582 +        return err;
 14.1583  			
 14.1584 -		case SIOCSIFHWBROADCAST:
 14.1585 -			if (ifr->ifr_hwaddr.sa_family!=dev->type)
 14.1586 -				return -EINVAL;
 14.1587 -			memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, MAX_ADDR_LEN);
 14.1588 -			notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
 14.1589 -			return 0;
 14.1590 +    case SIOCSIFHWBROADCAST:
 14.1591 +        if (ifr->ifr_hwaddr.sa_family!=dev->type)
 14.1592 +            return -EINVAL;
 14.1593 +        memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, MAX_ADDR_LEN);
 14.1594 +        notifier_call_chain(&netdev_chain, NETDEV_CHANGEADDR, dev);
 14.1595 +        return 0;
 14.1596  
 14.1597 -		case SIOCGIFMAP:
 14.1598 -			ifr->ifr_map.mem_start=dev->mem_start;
 14.1599 -			ifr->ifr_map.mem_end=dev->mem_end;
 14.1600 -			ifr->ifr_map.base_addr=dev->base_addr;
 14.1601 -			ifr->ifr_map.irq=dev->irq;
 14.1602 -			ifr->ifr_map.dma=dev->dma;
 14.1603 -			ifr->ifr_map.port=dev->if_port;
 14.1604 -			return 0;
 14.1605 -			
 14.1606 -		case SIOCSIFMAP:
 14.1607 -			if (dev->set_config) {
 14.1608 -				if (!netif_device_present(dev))
 14.1609 -					return -ENODEV;
 14.1610 -				return dev->set_config(dev,&ifr->ifr_map);
 14.1611 -			}
 14.1612 -			return -EOPNOTSUPP;
 14.1613 +    case SIOCGIFMAP:
 14.1614 +        ifr->ifr_map.mem_start=dev->mem_start;
 14.1615 +        ifr->ifr_map.mem_end=dev->mem_end;
 14.1616 +        ifr->ifr_map.base_addr=dev->base_addr;
 14.1617 +        ifr->ifr_map.irq=dev->irq;
 14.1618 +        ifr->ifr_map.dma=dev->dma;
 14.1619 +        ifr->ifr_map.port=dev->if_port;
 14.1620 +        return 0;
 14.1621  			
 14.1622 -		case SIOCADDMULTI:
 14.1623 -			if (dev->set_multicast_list == NULL ||
 14.1624 -			    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
 14.1625 -				return -EINVAL;
 14.1626 -			if (!netif_device_present(dev))
 14.1627 -				return -ENODEV;
 14.1628 -			dev_mc_add(dev,ifr->ifr_hwaddr.sa_data, dev->addr_len, 1);
 14.1629 -			return 0;
 14.1630 -
 14.1631 -		case SIOCDELMULTI:
 14.1632 -			if (dev->set_multicast_list == NULL ||
 14.1633 -			    ifr->ifr_hwaddr.sa_family!=AF_UNSPEC)
 14.1634 -				return -EINVAL;
 14.1635 -			if (!netif_device_present(dev))
 14.1636 -				return -ENODEV;
 14.1637 -			dev_mc_delete(dev,ifr->ifr_hwaddr.sa_data,dev->addr_len, 1);
 14.1638 -			return 0;
 14.1639 +    case SIOCSIFMAP:
 14.1640 +        if (dev->set_config) {
 14.1641 +            if (!netif_device_present(dev))
 14.1642 +                return -ENODEV;
 14.1643 +            return dev->set_config(dev,&ifr->ifr_map);
 14.1644 +        }
 14.1645 +        return -EOPNOTSUPP;
 14.1646 +			
 14.1647 +    case SIOCADDMULTI:
 14.1648 +        if (dev->set_multicast_list == NULL ||
 14.1649 +            ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
 14.1650 +            return -EINVAL;
 14.1651 +        if (!netif_device_present(dev))
 14.1652 +            return -ENODEV;
 14.1653 +        dev_mc_add(dev,ifr->ifr_hwaddr.sa_data, dev->addr_len, 1);
 14.1654 +        return 0;
 14.1655  
 14.1656 -		case SIOCGIFINDEX:
 14.1657 -			ifr->ifr_ifindex = dev->ifindex;
 14.1658 -			return 0;
 14.1659 -
 14.1660 -		case SIOCGIFTXQLEN:
 14.1661 -			ifr->ifr_qlen = dev->tx_queue_len;
 14.1662 -			return 0;
 14.1663 +    case SIOCDELMULTI:
 14.1664 +        if (dev->set_multicast_list == NULL ||
 14.1665 +            ifr->ifr_hwaddr.sa_family!=AF_UNSPEC)
 14.1666 +            return -EINVAL;
 14.1667 +        if (!netif_device_present(dev))
 14.1668 +            return -ENODEV;
 14.1669 +        dev_mc_delete(dev,ifr->ifr_hwaddr.sa_data,dev->addr_len, 1);
 14.1670 +        return 0;
 14.1671  
 14.1672 -		case SIOCSIFTXQLEN:
 14.1673 -			if (ifr->ifr_qlen<0)
 14.1674 -				return -EINVAL;
 14.1675 -			dev->tx_queue_len = ifr->ifr_qlen;
 14.1676 -			return 0;
 14.1677 +    case SIOCGIFINDEX:
 14.1678 +        ifr->ifr_ifindex = dev->ifindex;
 14.1679 +        return 0;
 14.1680  
 14.1681 -		case SIOCSIFNAME:
 14.1682 -			if (dev->flags&IFF_UP)
 14.1683 -				return -EBUSY;
 14.1684 -			if (__dev_get_by_name(ifr->ifr_newname))
 14.1685 -				return -EEXIST;
 14.1686 -			memcpy(dev->name, ifr->ifr_newname, IFNAMSIZ);
 14.1687 -			dev->name[IFNAMSIZ-1] = 0;
 14.1688 -			notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
 14.1689 -			return 0;
 14.1690 +    case SIOCSIFNAME:
 14.1691 +        if (dev->flags&IFF_UP)
 14.1692 +            return -EBUSY;
 14.1693 +        if (__dev_get_by_name(ifr->ifr_newname))
 14.1694 +            return -EEXIST;
 14.1695 +        memcpy(dev->name, ifr->ifr_newname, IFNAMSIZ);
 14.1696 +        dev->name[IFNAMSIZ-1] = 0;
 14.1697 +        notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
 14.1698 +        return 0;
 14.1699  
 14.1700  #ifdef WIRELESS_EXT
 14.1701 -		case SIOCGIWSTATS:
 14.1702 -			return dev_iwstats(dev, ifr);
 14.1703 +    case SIOCGIWSTATS:
 14.1704 +        return dev_iwstats(dev, ifr);
 14.1705  #endif	/* WIRELESS_EXT */
 14.1706  
 14.1707 -		/*
 14.1708 -		 *	Unknown or private ioctl
 14.1709 -		 */
 14.1710 +        /*
 14.1711 +         *	Unknown or private ioctl
 14.1712 +         */
 14.1713  
 14.1714 -		default:
 14.1715 -			if ((cmd >= SIOCDEVPRIVATE &&
 14.1716 -			    cmd <= SIOCDEVPRIVATE + 15) ||
 14.1717 -			    cmd == SIOCBONDENSLAVE ||
 14.1718 -			    cmd == SIOCBONDRELEASE ||
 14.1719 -			    cmd == SIOCBONDSETHWADDR ||
 14.1720 -			    cmd == SIOCBONDSLAVEINFOQUERY ||
 14.1721 -			    cmd == SIOCBONDINFOQUERY ||
 14.1722 -			    cmd == SIOCBONDCHANGEACTIVE ||
 14.1723 -			    cmd == SIOCETHTOOL ||
 14.1724 -			    cmd == SIOCGMIIPHY ||
 14.1725 -			    cmd == SIOCGMIIREG ||
 14.1726 -			    cmd == SIOCSMIIREG) {
 14.1727 -				if (dev->do_ioctl) {
 14.1728 -					if (!netif_device_present(dev))
 14.1729 -						return -ENODEV;
 14.1730 -					return dev->do_ioctl(dev, ifr, cmd);
 14.1731 -				}
 14.1732 -				return -EOPNOTSUPP;
 14.1733 -			}
 14.1734 +    default:
 14.1735 +        if ((cmd >= SIOCDEVPRIVATE &&
 14.1736 +             cmd <= SIOCDEVPRIVATE + 15) ||
 14.1737 +            cmd == SIOCBONDENSLAVE ||
 14.1738 +            cmd == SIOCBONDRELEASE ||
 14.1739 +            cmd == SIOCBONDSETHWADDR ||
 14.1740 +            cmd == SIOCBONDSLAVEINFOQUERY ||
 14.1741 +            cmd == SIOCBONDINFOQUERY ||
 14.1742 +            cmd == SIOCBONDCHANGEACTIVE ||
 14.1743 +            cmd == SIOCETHTOOL ||
 14.1744 +            cmd == SIOCGMIIPHY ||
 14.1745 +            cmd == SIOCGMIIREG ||
 14.1746 +            cmd == SIOCSMIIREG) {
 14.1747 +            if (dev->do_ioctl) {
 14.1748 +                if (!netif_device_present(dev))
 14.1749 +                    return -ENODEV;
 14.1750 +                return dev->do_ioctl(dev, ifr, cmd);
 14.1751 +            }
 14.1752 +            return -EOPNOTSUPP;
 14.1753 +        }
 14.1754  
 14.1755  #ifdef WIRELESS_EXT
 14.1756 -			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
 14.1757 -				if (dev->do_ioctl) {
 14.1758 -					if (!netif_device_present(dev))
 14.1759 -						return -ENODEV;
 14.1760 -					return dev->do_ioctl(dev, ifr, cmd);
 14.1761 -				}
 14.1762 -				return -EOPNOTSUPP;
 14.1763 -			}
 14.1764 +        if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
 14.1765 +            if (dev->do_ioctl) {
 14.1766 +                if (!netif_device_present(dev))
 14.1767 +                    return -ENODEV;
 14.1768 +                return dev->do_ioctl(dev, ifr, cmd);
 14.1769 +            }
 14.1770 +            return -EOPNOTSUPP;
 14.1771 +        }
 14.1772  #endif	/* WIRELESS_EXT */
 14.1773  
 14.1774 -	}
 14.1775 -	return -EINVAL;
 14.1776 +    }
 14.1777 +    return -EINVAL;
 14.1778  }
 14.1779  
 14.1780  /*
 14.1781 - *	This function handles all "interface"-type I/O control requests. The actual
 14.1782 - *	'doing' part of this is dev_ifsioc above.
 14.1783 + * This function handles all "interface"-type I/O control requests. The actual
 14.1784 + * 'doing' part of this is dev_ifsioc above.
 14.1785   */
 14.1786  
 14.1787  /**
 14.1788 @@ -1415,172 +1341,171 @@ static int dev_ifsioc(struct ifreq *ifr,
 14.1789  
 14.1790  int dev_ioctl(unsigned int cmd, void *arg)
 14.1791  {
 14.1792 -	struct ifreq ifr;
 14.1793 -	int ret;
 14.1794 -	char *colon;
 14.1795 +    struct ifreq ifr;
 14.1796 +    int ret;
 14.1797 +    char *colon;
 14.1798  
 14.1799 -	/* One special case: SIOCGIFCONF takes ifconf argument
 14.1800 -	   and requires shared lock, because it sleeps writing
 14.1801 -	   to user space.
 14.1802 -	 */
 14.1803 +    /* One special case: SIOCGIFCONF takes ifconf argument
 14.1804 +       and requires shared lock, because it sleeps writing
 14.1805 +       to user space.
 14.1806 +    */
 14.1807  	   
 14.1808 -	if (cmd == SIOCGIFCONF) {
 14.1809 -            return -ENOSYS;
 14.1810 -	}
 14.1811 -	if (cmd == SIOCGIFNAME) {
 14.1812 -		return dev_ifname((struct ifreq *)arg);
 14.1813 -	}
 14.1814 +    if (cmd == SIOCGIFCONF) {
 14.1815 +        return -ENOSYS;
 14.1816 +    }
 14.1817 +    if (cmd == SIOCGIFNAME) {
 14.1818 +        return dev_ifname((struct ifreq *)arg);
 14.1819 +    }
 14.1820  
 14.1821 -	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 14.1822 -		return -EFAULT;
 14.1823 +    if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 14.1824 +        return -EFAULT;
 14.1825  
 14.1826 -	ifr.ifr_name[IFNAMSIZ-1] = 0;
 14.1827 +    ifr.ifr_name[IFNAMSIZ-1] = 0;
 14.1828  
 14.1829 -	colon = strchr(ifr.ifr_name, ':');
 14.1830 -	if (colon)
 14.1831 -		*colon = 0;
 14.1832 +    colon = strchr(ifr.ifr_name, ':');
 14.1833 +    if (colon)
 14.1834 +        *colon = 0;
 14.1835  
 14.1836 -	/*
 14.1837 -	 *	See which interface the caller is talking about. 
 14.1838 -	 */
 14.1839 +    /*
 14.1840 +     *	See which interface the caller is talking about. 
 14.1841 +     */
 14.1842  	 
 14.1843 -	switch(cmd) 
 14.1844 -	{
 14.1845 -		/*
 14.1846 -		 *	These ioctl calls:
 14.1847 -		 *	- can be done by all.
 14.1848 -		 *	- atomic and do not require locking.
 14.1849 -		 *	- return a value
 14.1850 -		 */
 14.1851 +    switch(cmd) 
 14.1852 +    {
 14.1853 +        /*
 14.1854 +         *	These ioctl calls:
 14.1855 +         *	- can be done by all.
 14.1856 +         *	- atomic and do not require locking.
 14.1857 +         *	- return a value
 14.1858 +         */
 14.1859  		 
 14.1860 -		case SIOCGIFFLAGS:
 14.1861 -		case SIOCGIFMETRIC:
 14.1862 -		case SIOCGIFMTU:
 14.1863 -		case SIOCGIFHWADDR:
 14.1864 -		case SIOCGIFSLAVE:
 14.1865 -		case SIOCGIFMAP:
 14.1866 -		case SIOCGIFINDEX:
 14.1867 -		case SIOCGIFTXQLEN:
 14.1868 -			dev_load(ifr.ifr_name);
 14.1869 -			read_lock(&dev_base_lock);
 14.1870 -			ret = dev_ifsioc(&ifr, cmd);
 14.1871 -			read_unlock(&dev_base_lock);
 14.1872 -			if (!ret) {
 14.1873 -				if (colon)
 14.1874 -					*colon = ':';
 14.1875 -				if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.1876 -					return -EFAULT;
 14.1877 -			}
 14.1878 -			return ret;
 14.1879 +    case SIOCGIFFLAGS:
 14.1880 +    case SIOCGIFMETRIC:
 14.1881 +    case SIOCGIFMTU:
 14.1882 +    case SIOCGIFHWADDR:
 14.1883 +    case SIOCGIFSLAVE:
 14.1884 +    case SIOCGIFMAP:
 14.1885 +    case SIOCGIFINDEX:
 14.1886 +        dev_load(ifr.ifr_name);
 14.1887 +        read_lock(&dev_base_lock);
 14.1888 +        ret = dev_ifsioc(&ifr, cmd);
 14.1889 +        read_unlock(&dev_base_lock);
 14.1890 +        if (!ret) {
 14.1891 +            if (colon)
 14.1892 +                *colon = ':';
 14.1893 +            if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.1894 +                return -EFAULT;
 14.1895 +        }
 14.1896 +        return ret;
 14.1897  
 14.1898 -		/*
 14.1899 -		 *	These ioctl calls:
 14.1900 -		 *	- require superuser power.
 14.1901 -		 *	- require strict serialization.
 14.1902 -		 *	- return a value
 14.1903 -		 */
 14.1904 +        /*
 14.1905 +         *	These ioctl calls:
 14.1906 +         *	- require superuser power.
 14.1907 +         *	- require strict serialization.
 14.1908 +         *	- return a value
 14.1909 +         */
 14.1910  		 
 14.1911 -		case SIOCETHTOOL:
 14.1912 -		case SIOCGMIIPHY:
 14.1913 -		case SIOCGMIIREG:
 14.1914 -			if (!capable(CAP_NET_ADMIN))
 14.1915 -				return -EPERM;
 14.1916 -			dev_load(ifr.ifr_name);
 14.1917 -			dev_probe_lock();
 14.1918 -			rtnl_lock();
 14.1919 -			ret = dev_ifsioc(&ifr, cmd);
 14.1920 -			rtnl_unlock();
 14.1921 -			dev_probe_unlock();
 14.1922 -			if (!ret) {
 14.1923 -				if (colon)
 14.1924 -					*colon = ':';
 14.1925 -				if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.1926 -					return -EFAULT;
 14.1927 -			}
 14.1928 -			return ret;
 14.1929 +    case SIOCETHTOOL:
 14.1930 +    case SIOCGMIIPHY:
 14.1931 +    case SIOCGMIIREG:
 14.1932 +        if (!capable(CAP_NET_ADMIN))
 14.1933 +            return -EPERM;
 14.1934 +        dev_load(ifr.ifr_name);
 14.1935 +        dev_probe_lock();
 14.1936 +        rtnl_lock();
 14.1937 +        ret = dev_ifsioc(&ifr, cmd);
 14.1938 +        rtnl_unlock();
 14.1939 +        dev_probe_unlock();
 14.1940 +        if (!ret) {
 14.1941 +            if (colon)
 14.1942 +                *colon = ':';
 14.1943 +            if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.1944 +                return -EFAULT;
 14.1945 +        }
 14.1946 +        return ret;
 14.1947  
 14.1948 -		/*
 14.1949 -		 *	These ioctl calls:
 14.1950 -		 *	- require superuser power.
 14.1951 -		 *	- require strict serialization.
 14.1952 -		 *	- do not return a value
 14.1953 -		 */
 14.1954 +        /*
 14.1955 +         *	These ioctl calls:
 14.1956 +         *	- require superuser power.
 14.1957 +         *	- require strict serialization.
 14.1958 +         *	- do not return a value
 14.1959 +         */
 14.1960  		 
 14.1961 -		case SIOCSIFFLAGS:
 14.1962 -		case SIOCSIFMETRIC:
 14.1963 -		case SIOCSIFMTU:
 14.1964 -		case SIOCSIFMAP:
 14.1965 -		case SIOCSIFHWADDR:
 14.1966 -		case SIOCSIFSLAVE:
 14.1967 -		case SIOCADDMULTI:
 14.1968 -		case SIOCDELMULTI:
 14.1969 -		case SIOCSIFHWBROADCAST:
 14.1970 -		case SIOCSIFTXQLEN:
 14.1971 -		case SIOCSIFNAME:
 14.1972 -		case SIOCSMIIREG:
 14.1973 -		case SIOCBONDENSLAVE:
 14.1974 -		case SIOCBONDRELEASE:
 14.1975 -		case SIOCBONDSETHWADDR:
 14.1976 -		case SIOCBONDSLAVEINFOQUERY:
 14.1977 -		case SIOCBONDINFOQUERY:
 14.1978 -		case SIOCBONDCHANGEACTIVE:
 14.1979 -			if (!capable(CAP_NET_ADMIN))
 14.1980 -				return -EPERM;
 14.1981 -			dev_load(ifr.ifr_name);
 14.1982 -			dev_probe_lock();
 14.1983 -			rtnl_lock();
 14.1984 -			ret = dev_ifsioc(&ifr, cmd);
 14.1985 -			rtnl_unlock();
 14.1986 -			dev_probe_unlock();
 14.1987 -			return ret;
 14.1988 +    case SIOCSIFFLAGS:
 14.1989 +    case SIOCSIFMETRIC:
 14.1990 +    case SIOCSIFMTU:
 14.1991 +    case SIOCSIFMAP:
 14.1992 +    case SIOCSIFHWADDR:
 14.1993 +    case SIOCSIFSLAVE:
 14.1994 +    case SIOCADDMULTI:
 14.1995 +    case SIOCDELMULTI:
 14.1996 +    case SIOCSIFHWBROADCAST:
 14.1997 +    case SIOCSIFNAME:
 14.1998 +    case SIOCSMIIREG:
 14.1999 +    case SIOCBONDENSLAVE:
 14.2000 +    case SIOCBONDRELEASE:
 14.2001 +    case SIOCBONDSETHWADDR:
 14.2002 +    case SIOCBONDSLAVEINFOQUERY:
 14.2003 +    case SIOCBONDINFOQUERY:
 14.2004 +    case SIOCBONDCHANGEACTIVE:
 14.2005 +        if (!capable(CAP_NET_ADMIN))
 14.2006 +            return -EPERM;
 14.2007 +        dev_load(ifr.ifr_name);
 14.2008 +        dev_probe_lock();
 14.2009 +        rtnl_lock();
 14.2010 +        ret = dev_ifsioc(&ifr, cmd);
 14.2011 +        rtnl_unlock();
 14.2012 +        dev_probe_unlock();
 14.2013 +        return ret;
 14.2014  	
 14.2015 -		case SIOCGIFMEM:
 14.2016 -			/* Get the per device memory space. We can add this but currently
 14.2017 -			   do not support it */
 14.2018 -		case SIOCSIFMEM:
 14.2019 -			/* Set the per device memory buffer space. Not applicable in our case */
 14.2020 -		case SIOCSIFLINK:
 14.2021 -			return -EINVAL;
 14.2022 +    case SIOCGIFMEM:
 14.2023 +        /* Get the per device memory space. We can add this but currently
 14.2024 +           do not support it */
 14.2025 +    case SIOCSIFMEM:
 14.2026 +        /* Set the per device memory buffer space. */
 14.2027 +    case SIOCSIFLINK:
 14.2028 +        return -EINVAL;
 14.2029  
 14.2030 -		/*
 14.2031 -		 *	Unknown or private ioctl.
 14.2032 -		 */	
 14.2033 +        /*
 14.2034 +         *	Unknown or private ioctl.
 14.2035 +         */	
 14.2036  		 
 14.2037 -		default:
 14.2038 -			if (cmd >= SIOCDEVPRIVATE &&
 14.2039 -			    cmd <= SIOCDEVPRIVATE + 15) {
 14.2040 -				dev_load(ifr.ifr_name);
 14.2041 -				dev_probe_lock();
 14.2042 -				rtnl_lock();
 14.2043 -				ret = dev_ifsioc(&ifr, cmd);
 14.2044 -				rtnl_unlock();
 14.2045 -				dev_probe_unlock();
 14.2046 -				if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.2047 -					return -EFAULT;
 14.2048 -				return ret;
 14.2049 -			}
 14.2050 +    default:
 14.2051 +        if (cmd >= SIOCDEVPRIVATE &&
 14.2052 +            cmd <= SIOCDEVPRIVATE + 15) {
 14.2053 +            dev_load(ifr.ifr_name);
 14.2054 +            dev_probe_lock();
 14.2055 +            rtnl_lock();
 14.2056 +            ret = dev_ifsioc(&ifr, cmd);
 14.2057 +            rtnl_unlock();
 14.2058 +            dev_probe_unlock();
 14.2059 +            if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.2060 +                return -EFAULT;
 14.2061 +            return ret;
 14.2062 +        }
 14.2063  #ifdef WIRELESS_EXT
 14.2064 -			/* Take care of Wireless Extensions */
 14.2065 -			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
 14.2066 +        /* Take care of Wireless Extensions */
 14.2067 +        if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
 14.2068  				/* If command is `set a parameter', or
 14.2069  				 * `get the encoding parameters', check if
 14.2070  				 * the user has the right to do it */
 14.2071 -				if (IW_IS_SET(cmd) || (cmd == SIOCGIWENCODE)) {
 14.2072 -					if(!capable(CAP_NET_ADMIN))
 14.2073 -						return -EPERM;
 14.2074 -				}
 14.2075 -				dev_load(ifr.ifr_name);
 14.2076 -				rtnl_lock();
 14.2077 -				ret = dev_ifsioc(&ifr, cmd);
 14.2078 -				rtnl_unlock();
 14.2079 -				if (!ret && IW_IS_GET(cmd) &&
 14.2080 -				    copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 14.2081 -					return -EFAULT;
 14.2082 -				return ret;
 14.2083 -			}
 14.2084 +            if (IW_IS_SET(cmd) || (cmd == SIOCGIWENCODE)) {
 14.2085 +                if(!capable(CAP_NET_ADMIN))
 14.2086 +                    return -EPERM;
 14.2087 +            }
 14.2088 +            dev_load(ifr.ifr_name);
 14.2089 +            rtnl_lock();
 14.2090 +            ret = dev_ifsioc(&ifr, cmd);
 14.2091 +            rtnl_unlock();
 14.2092 +            if (!ret && IW_IS_GET(cmd) &&
 14.2093 +                copy_to_user(arg, &ifr, 
 14.2094 +                             sizeof(struct ifreq)))
 14.2095 +                return -EFAULT;
 14.2096 +            return ret;
 14.2097 +        }
 14.2098  #endif	/* WIRELESS_EXT */
 14.2099 -			return -EINVAL;
 14.2100 -	}
 14.2101 +        return -EINVAL;
 14.2102 +    }
 14.2103  }
 14.2104  
 14.2105  
 14.2106 @@ -1594,13 +1519,13 @@ int dev_ioctl(unsigned int cmd, void *ar
 14.2107   
 14.2108  int dev_new_index(void)
 14.2109  {
 14.2110 -	static int ifindex;
 14.2111 -	for (;;) {
 14.2112 -		if (++ifindex <= 0)
 14.2113 -			ifindex=1;
 14.2114 -		if (__dev_get_by_index(ifindex) == NULL)
 14.2115 -			return ifindex;
 14.2116 -	}
 14.2117 +    static int ifindex;
 14.2118 +    for (;;) {
 14.2119 +        if (++ifindex <= 0)
 14.2120 +            ifindex=1;
 14.2121 +        if (__dev_get_by_index(ifindex) == NULL)
 14.2122 +            return ifindex;
 14.2123 +    }
 14.2124  }
 14.2125  
 14.2126  static int dev_boot_phase = 1;
 14.2127 @@ -1627,77 +1552,77 @@ int net_dev_init(void);
 14.2128  
 14.2129  int register_netdevice(struct net_device *dev)
 14.2130  {
 14.2131 -	struct net_device *d, **dp;
 14.2132 +    struct net_device *d, **dp;
 14.2133  #ifdef CONFIG_NET_DIVERT
 14.2134 -	int ret;
 14.2135 +    int ret;
 14.2136  #endif
 14.2137  
 14.2138 -	spin_lock_init(&dev->queue_lock);
 14.2139 -	spin_lock_init(&dev->xmit_lock);
 14.2140 -	dev->xmit_lock_owner = -1;
 14.2141 +    spin_lock_init(&dev->queue_lock);
 14.2142 +    spin_lock_init(&dev->xmit_lock);
 14.2143 +    dev->xmit_lock_owner = -1;
 14.2144  #ifdef CONFIG_NET_FASTROUTE
 14.2145 -	dev->fastpath_lock=RW_LOCK_UNLOCKED;
 14.2146 +    dev->fastpath_lock=RW_LOCK_UNLOCKED;
 14.2147  #endif
 14.2148  
 14.2149 -	if (dev_boot_phase)
 14.2150 -		net_dev_init();
 14.2151 +    if (dev_boot_phase)
 14.2152 +        net_dev_init();
 14.2153  
 14.2154  #ifdef CONFIG_NET_DIVERT
 14.2155 -	ret = alloc_divert_blk(dev);
 14.2156 -	if (ret)
 14.2157 -		return ret;
 14.2158 +    ret = alloc_divert_blk(dev);
 14.2159 +    if (ret)
 14.2160 +        return ret;
 14.2161  #endif /* CONFIG_NET_DIVERT */
 14.2162  	
 14.2163 -	dev->iflink = -1;
 14.2164 +    dev->iflink = -1;
 14.2165  
 14.2166 -	/* Init, if this function is available */
 14.2167 -	if (dev->init && dev->init(dev) != 0) {
 14.2168 +    /* Init, if this function is available */
 14.2169 +    if (dev->init && dev->init(dev) != 0) {
 14.2170  #ifdef CONFIG_NET_DIVERT
 14.2171 -		free_divert_blk(dev);
 14.2172 +        free_divert_blk(dev);
 14.2173  #endif
 14.2174 -		return -EIO;
 14.2175 -	}
 14.2176 +        return -EIO;
 14.2177 +    }
 14.2178  
 14.2179 -	dev->ifindex = dev_new_index();
 14.2180 -	if (dev->iflink == -1)
 14.2181 -		dev->iflink = dev->ifindex;
 14.2182 +    dev->ifindex = dev_new_index();
 14.2183 +    if (dev->iflink == -1)
 14.2184 +        dev->iflink = dev->ifindex;
 14.2185  
 14.2186 -	/* Check for existence, and append to tail of chain */
 14.2187 -	for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
 14.2188 -		if (d == dev || strcmp(d->name, dev->name) == 0) {
 14.2189 +    /* Check for existence, and append to tail of chain */
 14.2190 +    for (dp=&dev_base; (d=*dp) != NULL; dp=&d->next) {
 14.2191 +        if (d == dev || strcmp(d->name, dev->name) == 0) {
 14.2192  #ifdef CONFIG_NET_DIVERT
 14.2193 -			free_divert_blk(dev);
 14.2194 +            free_divert_blk(dev);
 14.2195  #endif
 14.2196 -			return -EEXIST;
 14.2197 -		}
 14.2198 -	}
 14.2199 -	/*
 14.2200 -	 *	nil rebuild_header routine,
 14.2201 -	 *	that should be never called and used as just bug trap.
 14.2202 -	 */
 14.2203 +            return -EEXIST;
 14.2204 +        }
 14.2205 +    }
 14.2206 +    /*
 14.2207 +     *	nil rebuild_header routine,
 14.2208 +     *	that should be never called and used as just bug trap.
 14.2209 +     */
 14.2210  
 14.2211 -	if (dev->rebuild_header == NULL)
 14.2212 -		dev->rebuild_header = default_rebuild_header;
 14.2213 +    if (dev->rebuild_header == NULL)
 14.2214 +        dev->rebuild_header = default_rebuild_header;
 14.2215  
 14.2216 -	/*
 14.2217 -	 *	Default initial state at registry is that the
 14.2218 -	 *	device is present.
 14.2219 -	 */
 14.2220 +    /*
 14.2221 +     *	Default initial state at registry is that the
 14.2222 +     *	device is present.
 14.2223 +     */
 14.2224  
 14.2225 -	set_bit(__LINK_STATE_PRESENT, &dev->state);
 14.2226 +    set_bit(__LINK_STATE_PRESENT, &dev->state);
 14.2227  
 14.2228 -	dev->next = NULL;
 14.2229 -	dev_init_scheduler(dev);
 14.2230 -	write_lock_bh(&dev_base_lock);
 14.2231 -	*dp = dev;
 14.2232 -	dev_hold(dev);
 14.2233 -	dev->deadbeaf = 0;
 14.2234 -	write_unlock_bh(&dev_base_lock);
 14.2235 +    dev->next = NULL;
 14.2236 +    dev_init_scheduler(dev);
 14.2237 +    write_lock_bh(&dev_base_lock);
 14.2238 +    *dp = dev;
 14.2239 +    dev_hold(dev);
 14.2240 +    dev->deadbeaf = 0;
 14.2241 +    write_unlock_bh(&dev_base_lock);
 14.2242  
 14.2243 -	/* Notify protocols, that a new device appeared. */
 14.2244 -	notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
 14.2245 +    /* Notify protocols, that a new device appeared. */
 14.2246 +    notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
 14.2247  
 14.2248 -	return 0;
 14.2249 +    return 0;
 14.2250  }
 14.2251  
 14.2252  /**
 14.2253 @@ -1710,23 +1635,24 @@ int register_netdevice(struct net_device
 14.2254   
 14.2255  int netdev_finish_unregister(struct net_device *dev)
 14.2256  {
 14.2257 -	BUG_TRAP(dev->ip_ptr==NULL);
 14.2258 -	BUG_TRAP(dev->ip6_ptr==NULL);
 14.2259 -	BUG_TRAP(dev->dn_ptr==NULL);
 14.2260 +    BUG_TRAP(dev->ip_ptr==NULL);
 14.2261 +    BUG_TRAP(dev->ip6_ptr==NULL);
 14.2262 +    BUG_TRAP(dev->dn_ptr==NULL);
 14.2263  
 14.2264 -	if (!dev->deadbeaf) {
 14.2265 -		printk(KERN_ERR "Freeing alive device %p, %s\n", dev, dev->name);
 14.2266 -		return 0;
 14.2267 -	}
 14.2268 +    if (!dev->deadbeaf) {
 14.2269 +        printk(KERN_ERR "Freeing alive device %p, %s\n",
 14.2270 +               dev, dev->name);
 14.2271 +        return 0;
 14.2272 +    }
 14.2273  #ifdef NET_REFCNT_DEBUG
 14.2274 -	printk(KERN_DEBUG "netdev_finish_unregister: %s%s.\n", dev->name,
 14.2275 -	       (dev->features & NETIF_F_DYNALLOC)?"":", old style");
 14.2276 +    printk(KERN_DEBUG "netdev_finish_unregister: %s%s.\n", dev->name,
 14.2277 +           (dev->features & NETIF_F_DYNALLOC)?"":", old style");
 14.2278  #endif
 14.2279 -	if (dev->destructor)
 14.2280 -		dev->destructor(dev);
 14.2281 -	if (dev->features & NETIF_F_DYNALLOC)
 14.2282 -		kfree(dev);
 14.2283 -	return 0;
 14.2284 +    if (dev->destructor)
 14.2285 +        dev->destructor(dev);
 14.2286 +    if (dev->features & NETIF_F_DYNALLOC)
 14.2287 +        kfree(dev);
 14.2288 +    return 0;
 14.2289  }
 14.2290  
 14.2291  /**
 14.2292 @@ -1744,118 +1670,121 @@ int netdev_finish_unregister(struct net_
 14.2293  
 14.2294  int unregister_netdevice(struct net_device *dev)
 14.2295  {
 14.2296 -	unsigned long now, warning_time;
 14.2297 -	struct net_device *d, **dp;
 14.2298 +    unsigned long now, warning_time;
 14.2299 +    struct net_device *d, **dp;
 14.2300  
 14.2301 -	/* If device is running, close it first. */
 14.2302 -	if (dev->flags & IFF_UP)
 14.2303 -		dev_close(dev);
 14.2304 +    /* If device is running, close it first. */
 14.2305 +    if (dev->flags & IFF_UP)
 14.2306 +        dev_close(dev);
 14.2307  
 14.2308 -	BUG_TRAP(dev->deadbeaf==0);
 14.2309 -	dev->deadbeaf = 1;
 14.2310 +    BUG_TRAP(dev->deadbeaf==0);
 14.2311 +    dev->deadbeaf = 1;
 14.2312  
 14.2313 -	/* And unlink it from device chain. */
 14.2314 -	for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
 14.2315 -		if (d == dev) {
 14.2316 -			write_lock_bh(&dev_base_lock);
 14.2317 -			*dp = d->next;
 14.2318 -			write_unlock_bh(&dev_base_lock);
 14.2319 -			break;
 14.2320 -		}
 14.2321 -	}
 14.2322 -	if (d == NULL) {
 14.2323 -		printk(KERN_DEBUG "unregister_netdevice: device %s/%p never was registered\n", dev->name, dev);
 14.2324 -		return -ENODEV;
 14.2325 -	}
 14.2326 +    /* And unlink it from device chain. */
 14.2327 +    for (dp = &dev_base; (d=*dp) != NULL; dp=&d->next) {
 14.2328 +        if (d == dev) {
 14.2329 +            write_lock_bh(&dev_base_lock);
 14.2330 +            *dp = d->next;
 14.2331 +            write_unlock_bh(&dev_base_lock);
 14.2332 +            break;
 14.2333 +        }
 14.2334 +    }
 14.2335 +    if (d == NULL) {
 14.2336 +        printk(KERN_DEBUG "unregister_netdevice: device %s/%p"
 14.2337 +               " not registered\n", dev->name, dev);
 14.2338 +        return -ENODEV;
 14.2339 +    }
 14.2340  
 14.2341 -	/* Synchronize to net_rx_action. */
 14.2342 -	br_write_lock_bh(BR_NETPROTO_LOCK);
 14.2343 -	br_write_unlock_bh(BR_NETPROTO_LOCK);
 14.2344 +    /* Synchronize to net_rx_action. */
 14.2345 +    br_write_lock_bh(BR_NETPROTO_LOCK);
 14.2346 +    br_write_unlock_bh(BR_NETPROTO_LOCK);
 14.2347  
 14.2348 -	if (dev_boot_phase == 0) {
 14.2349 +    if (dev_boot_phase == 0) {
 14.2350  
 14.2351 -		/* Shutdown queueing discipline. */
 14.2352 -		dev_shutdown(dev);
 14.2353 +        /* Shutdown queueing discipline. */
 14.2354 +        dev_shutdown(dev);
 14.2355  
 14.2356 -		/* Notify protocols, that we are about to destroy
 14.2357 -		   this device. They should clean all the things.
 14.2358 -		 */
 14.2359 -		notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
 14.2360 +        /* Notify protocols, that we are about to destroy
 14.2361 +           this device. They should clean all the things.
 14.2362 +        */
 14.2363 +        notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
 14.2364  
 14.2365 -		/*
 14.2366 -		 *	Flush the multicast chain
 14.2367 -		 */
 14.2368 -		dev_mc_discard(dev);
 14.2369 -	}
 14.2370 +        /*
 14.2371 +         *	Flush the multicast chain
 14.2372 +         */
 14.2373 +        dev_mc_discard(dev);
 14.2374 +    }
 14.2375  
 14.2376 -	if (dev->uninit)
 14.2377 -		dev->uninit(dev);
 14.2378 +    if (dev->uninit)
 14.2379 +        dev->uninit(dev);
 14.2380  
 14.2381 -	/* Notifier chain MUST detach us from master device. */
 14.2382 -	BUG_TRAP(dev->master==NULL);
 14.2383 +    /* Notifier chain MUST detach us from master device. */
 14.2384 +    BUG_TRAP(dev->master==NULL);
 14.2385  
 14.2386  #ifdef CONFIG_NET_DIVERT
 14.2387 -	free_divert_blk(dev);
 14.2388 +    free_divert_blk(dev);
 14.2389  #endif
 14.2390  
 14.2391 -	if (dev->features & NETIF_F_DYNALLOC) {
 14.2392 +    if (dev->features & NETIF_F_DYNALLOC) {
 14.2393  #ifdef NET_REFCNT_DEBUG
 14.2394 -		if (atomic_read(&dev->refcnt) != 1)
 14.2395 -			printk(KERN_DEBUG "unregister_netdevice: holding %s refcnt=%d\n", dev->name, atomic_read(&dev->refcnt)-1);
 14.2396 +        if (atomic_read(&dev->refcnt) != 1)
 14.2397 +            printk(KERN_DEBUG "unregister_netdevice: holding %s refcnt=%d\n",
 14.2398 +                   dev->name, atomic_read(&dev->refcnt)-1);
 14.2399  #endif
 14.2400 -		dev_put(dev);
 14.2401 -		return 0;
 14.2402 -	}
 14.2403 +        dev_put(dev);
 14.2404 +        return 0;
 14.2405 +    }
 14.2406  
 14.2407 -	/* Last reference is our one */
 14.2408 -	if (atomic_read(&dev->refcnt) == 1) {
 14.2409 -		dev_put(dev);
 14.2410 -		return 0;
 14.2411 -	}
 14.2412 +    /* Last reference is our one */
 14.2413 +    if (atomic_read(&dev->refcnt) == 1) {
 14.2414 +        dev_put(dev);
 14.2415 +        return 0;
 14.2416 +    }
 14.2417  
 14.2418  #ifdef NET_REFCNT_DEBUG
 14.2419 -	printk("unregister_netdevice: waiting %s refcnt=%d\n", dev->name, atomic_read(&dev->refcnt));
 14.2420 +    printk("unregister_netdevice: waiting %s refcnt=%d\n",
 14.2421 +           dev->name, atomic_read(&dev->refcnt));
 14.2422  #endif
 14.2423  
 14.2424 -	/* EXPLANATION. If dev->refcnt is not now 1 (our own reference)
 14.2425 -	   it means that someone in the kernel still has a reference
 14.2426 -	   to this device and we cannot release it.
 14.2427 +    /* EXPLANATION. If dev->refcnt is not now 1 (our own reference)
 14.2428 +       it means that someone in the kernel still has a reference
 14.2429 +       to this device and we cannot release it.
 14.2430  
 14.2431 -	   "New style" devices have destructors, hence we can return from this
 14.2432 -	   function and destructor will do all the work later.  As of kernel 2.4.0
 14.2433 -	   there are very few "New Style" devices.
 14.2434 +       "New style" devices have destructors, hence we can return from this
 14.2435 +       function and destructor will do all the work later.  As of kernel 2.4.0
 14.2436 +       there are very few "New Style" devices.
 14.2437  
 14.2438 -	   "Old style" devices expect that the device is free of any references
 14.2439 -	   upon exit from this function.
 14.2440 -	   We cannot return from this function until all such references have
 14.2441 -	   fallen away.  This is because the caller of this function will probably
 14.2442 -	   immediately kfree(*dev) and then be unloaded via sys_delete_module.
 14.2443 +       "Old style" devices expect that the device is free of any references
 14.2444 +       upon exit from this function.
 14.2445 +       We cannot return from this function until all such references have
 14.2446 +       fallen away.  This is because the caller of this function will probably
 14.2447 +       immediately kfree(*dev) and then be unloaded via sys_delete_module.
 14.2448  
 14.2449 -	   So, we linger until all references fall away.  The duration of the
 14.2450 -	   linger is basically unbounded! It is driven by, for example, the
 14.2451 -	   current setting of sysctl_ipfrag_time.
 14.2452 +       So, we linger until all references fall away.  The duration of the
 14.2453 +       linger is basically unbounded! It is driven by, for example, the
 14.2454 +       current setting of sysctl_ipfrag_time.
 14.2455  
 14.2456 -	   After 1 second, we start to rebroadcast unregister notifications
 14.2457 -	   in hope that careless clients will release the device.
 14.2458 +       After 1 second, we start to rebroadcast unregister notifications
 14.2459 +       in hope that careless clients will release the device.
 14.2460  
 14.2461 -	 */
 14.2462 +    */
 14.2463  
 14.2464 -	now = warning_time = jiffies;
 14.2465 -	while (atomic_read(&dev->refcnt) != 1) {
 14.2466 -		if ((jiffies - now) > 1*HZ) {
 14.2467 -			/* Rebroadcast unregister notification */
 14.2468 -			notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
 14.2469 -		}
 14.2470 -                mdelay(250);
 14.2471 -		if ((jiffies - warning_time) > 10*HZ) {
 14.2472 -			printk(KERN_EMERG "unregister_netdevice: waiting for %s to "
 14.2473 -					"become free. Usage count = %d\n",
 14.2474 -					dev->name, atomic_read(&dev->refcnt));
 14.2475 -			warning_time = jiffies;
 14.2476 -		}
 14.2477 -	}
 14.2478 -	dev_put(dev);
 14.2479 -	return 0;
 14.2480 +    now = warning_time = jiffies;
 14.2481 +    while (atomic_read(&dev->refcnt) != 1) {
 14.2482 +        if ((jiffies - now) > 1*HZ) {
 14.2483 +            /* Rebroadcast unregister notification */
 14.2484 +            notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
 14.2485 +        }
 14.2486 +        mdelay(250);
 14.2487 +        if ((jiffies - warning_time) > 10*HZ) {
 14.2488 +            printk(KERN_EMERG "unregister_netdevice: waiting for %s to "
 14.2489 +                   "become free. Usage count = %d\n",
 14.2490 +                   dev->name, atomic_read(&dev->refcnt));
 14.2491 +            warning_time = jiffies;
 14.2492 +        }
 14.2493 +    }
 14.2494 +    dev_put(dev);
 14.2495 +    return 0;
 14.2496  }
 14.2497  
 14.2498  
 14.2499 @@ -1879,128 +1808,115 @@ extern void dv_init(void);
 14.2500   */
 14.2501  int __init net_dev_init(void)
 14.2502  {
 14.2503 -	struct net_device *dev, **dp;
 14.2504 -	int i;
 14.2505 +    struct net_device *dev, **dp;
 14.2506 +    int i;
 14.2507 +
 14.2508 +    if ( !dev_boot_phase )
 14.2509 +        return 0;
 14.2510 +
 14.2511 +    /* KAF: was done in socket_init, but that top-half stuff is gone. */
 14.2512 +    skb_init();
 14.2513  
 14.2514 -	if (!dev_boot_phase)
 14.2515 -		return 0;
 14.2516 +    /* Initialise the packet receive queues. */
 14.2517 +    for ( i = 0; i < NR_CPUS; i++ )
 14.2518 +    {
 14.2519 +        struct softnet_data *queue;
 14.2520 +        queue = &softnet_data[i];
 14.2521 +        queue->completion_queue = NULL;
 14.2522 +    }
 14.2523 +	
 14.2524 +    /*
 14.2525 +     *	Add the devices.
 14.2526 +     *	If the call to dev->init fails, the dev is removed
 14.2527 +     *	from the chain disconnecting the device until the
 14.2528 +     *	next reboot.
 14.2529 +     *
 14.2530 +     *	NB At boot phase networking is dead. No locking is required.
 14.2531 +     *	But we still preserve dev_base_lock for sanity.
 14.2532 +     */
 14.2533 +    dp = &dev_base;
 14.2534 +    while ((dev = *dp) != NULL) {
 14.2535 +        spin_lock_init(&dev->queue_lock);
 14.2536 +        spin_lock_init(&dev->xmit_lock);
 14.2537 +
 14.2538 +        dev->xmit_lock_owner = -1;
 14.2539 +        dev->iflink = -1;
 14.2540 +        dev_hold(dev);
 14.2541  
 14.2542          /*
 14.2543 -         * KAF: was sone in socket_init, but that top-half stuff is gone.
 14.2544 +         * Allocate name. If the init() fails
 14.2545 +         * the name will be reissued correctly.
 14.2546           */
 14.2547 -        skb_init();
 14.2548 -
 14.2549 -	/*
 14.2550 -	 *	Initialise the packet receive queues.
 14.2551 -	 */
 14.2552 -
 14.2553 -	for (i = 0; i < NR_CPUS; i++) {
 14.2554 -		struct softnet_data *queue;
 14.2555 -
 14.2556 -		queue = &softnet_data[i];
 14.2557 -                skb_queue_head_init(&queue->input_pkt_queue);
 14.2558 -		queue->throttle = 0;
 14.2559 -		queue->cng_level = 0;
 14.2560 -		queue->avg_blog = 10; /* arbitrary non-zero */
 14.2561 -		queue->completion_queue = NULL;
 14.2562 -	}
 14.2563 -	
 14.2564 -	/*
 14.2565 -	 *	Add the devices.
 14.2566 -	 *	If the call to dev->init fails, the dev is removed
 14.2567 -	 *	from the chain disconnecting the device until the
 14.2568 -	 *	next reboot.
 14.2569 -	 *
 14.2570 -	 *	NB At boot phase networking is dead. No locking is required.
 14.2571 -	 *	But we still preserve dev_base_lock for sanity.
 14.2572 -	 */
 14.2573 -
 14.2574 -	dp = &dev_base;
 14.2575 -	while ((dev = *dp) != NULL) {
 14.2576 -		spin_lock_init(&dev->queue_lock);
 14.2577 -		spin_lock_init(&dev->xmit_lock);
 14.2578 -
 14.2579 -		dev->xmit_lock_owner = -1;
 14.2580 -		dev->iflink = -1;
 14.2581 -		dev_hold(dev);
 14.2582 +        if (strchr(dev->name, '%'))
 14.2583 +            dev_alloc_name(dev, dev->name);
 14.2584  
 14.2585 -		/*
 14.2586 -		 * Allocate name. If the init() fails
 14.2587 -		 * the name will be reissued correctly.
 14.2588 -		 */
 14.2589 -		if (strchr(dev->name, '%'))
 14.2590 -			dev_alloc_name(dev, dev->name);
 14.2591 +        if (dev->init && dev->init(dev)) {
 14.2592 +            /*
 14.2593 +             * It failed to come up. It will be unhooked later.
 14.2594 +             * dev_alloc_name can now advance to next suitable
 14.2595 +             * name that is checked next.
 14.2596 +             */
 14.2597 +            dev->deadbeaf = 1;
 14.2598 +            dp = &dev->next;
 14.2599 +        } else {
 14.2600 +            dp = &dev->next;
 14.2601 +            dev->ifindex = dev_new_index();
 14.2602 +            if (dev->iflink == -1)
 14.2603 +                dev->iflink = dev->ifindex;
 14.2604 +            if (dev->rebuild_header == NULL)
 14.2605 +                dev->rebuild_header = default_rebuild_header;
 14.2606 +            dev_init_scheduler(dev);
 14.2607 +            set_bit(__LINK_STATE_PRESENT, &dev->state);
 14.2608 +        }
 14.2609 +    }
 14.2610  
 14.2611 -		if (dev->init && dev->init(dev)) {
 14.2612 -			/*
 14.2613 -			 * It failed to come up. It will be unhooked later.
 14.2614 -			 * dev_alloc_name can now advance to next suitable
 14.2615 -			 * name that is checked next.
 14.2616 -			 */
 14.2617 -			dev->deadbeaf = 1;
 14.2618 -			dp = &dev->next;
 14.2619 -		} else {
 14.2620 -			dp = &dev->next;
 14.2621 -			dev->ifindex = dev_new_index();
 14.2622 -			if (dev->iflink == -1)
 14.2623 -				dev->iflink = dev->ifindex;
 14.2624 -			if (dev->rebuild_header == NULL)
 14.2625 -				dev->rebuild_header = default_rebuild_header;
 14.2626 -			dev_init_scheduler(dev);
 14.2627 -			set_bit(__LINK_STATE_PRESENT, &dev->state);
 14.2628 -		}
 14.2629 -	}
 14.2630 +    /*
 14.2631 +     * Unhook devices that failed to come up
 14.2632 +     */
 14.2633 +    dp = &dev_base;
 14.2634 +    while ((dev = *dp) != NULL) {
 14.2635 +        if (dev->deadbeaf) {
 14.2636 +            write_lock_bh(&dev_base_lock);
 14.2637 +            *dp = dev->next;
 14.2638 +            write_unlock_bh(&dev_base_lock);
 14.2639 +            dev_put(dev);
 14.2640 +        } else {
 14.2641 +            dp = &dev->next;
 14.2642 +        }
 14.2643 +    }
 14.2644  
 14.2645 -	/*
 14.2646 -	 * Unhook devices that failed to come up
 14.2647 -	 */
 14.2648 -	dp = &dev_base;
 14.2649 -	while ((dev = *dp) != NULL) {
 14.2650 -		if (dev->deadbeaf) {
 14.2651 -			write_lock_bh(&dev_base_lock);
 14.2652 -			*dp = dev->next;
 14.2653 -			write_unlock_bh(&dev_base_lock);
 14.2654 -			dev_put(dev);
 14.2655 -		} else {
 14.2656 -			dp = &dev->next;
 14.2657 -		}
 14.2658 -	}
 14.2659 +    dev_boot_phase = 0;
 14.2660  
 14.2661 -	dev_boot_phase = 0;
 14.2662 -
 14.2663 -	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
 14.2664 -	//open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
 14.2665 -
 14.2666 -	dst_init();
 14.2667 -	dev_mcast_init();
 14.2668 +    dst_init();
 14.2669 +    dev_mcast_init();
 14.2670  
 14.2671  #ifdef CONFIG_NET_SCHED
 14.2672 -	pktsched_init();
 14.2673 +    pktsched_init();
 14.2674  #endif
 14.2675  
 14.2676 -	/*
 14.2677 -	 *	Initialise network devices
 14.2678 -	 */
 14.2679 +    /*
 14.2680 +     *	Initialise network devices
 14.2681 +     */
 14.2682  	 
 14.2683 -	net_device_init();
 14.2684 +    net_device_init();
 14.2685  
 14.2686 -	return 0;
 14.2687 +    return 0;
 14.2688  }
 14.2689  
 14.2690  inline int init_tx_header(u8 *data, unsigned int len, struct net_device *dev)
 14.2691  {
 14.2692 -        memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
 14.2693 +    memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
 14.2694          
 14.2695 -        switch ( ntohs(*(unsigned short *)(data + 12)) )
 14.2696 -        {
 14.2697 -        case ETH_P_ARP:
 14.2698 -            if ( len < 42 ) break;
 14.2699 -            memcpy(data + 22, dev->dev_addr, 6);
 14.2700 -            return ETH_P_ARP;
 14.2701 -        case ETH_P_IP:
 14.2702 -            return ETH_P_IP;
 14.2703 -        }
 14.2704 -        return 0;
 14.2705 +    switch ( ntohs(*(unsigned short *)(data + 12)) )
 14.2706 +    {
 14.2707 +    case ETH_P_ARP:
 14.2708 +        if ( len < 42 ) break;
 14.2709 +        memcpy(data + 22, dev->dev_addr, 6);
 14.2710 +        return ETH_P_ARP;
 14.2711 +    case ETH_P_IP:
 14.2712 +        return ETH_P_IP;
 14.2713 +    }
 14.2714 +    return 0;
 14.2715  }
 14.2716  
 14.2717  
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xen-2.4.16/net/devinit.c	Sun Feb 23 11:22:39 2003 +0000
    15.3 @@ -0,0 +1,114 @@
    15.4 +/******************************************************************************
    15.5 + * devinit.c
    15.6 + * 
    15.7 + * These are the watchdog timer routines, ripped from sch_generic.c
    15.8 + * Original copyright notice appears below.
    15.9 + * 
   15.10 + */
   15.11 +
   15.12 +/*
   15.13 + *		This program is free software; you can redistribute it and/or
   15.14 + *		modify it under the terms of the GNU General Public License
   15.15 + *		as published by the Free Software Foundation; either version
   15.16 + *		2 of the License, or (at your option) any later version.
   15.17 + *
   15.18 + * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   15.19 + *              Jamal Hadi Salim, <hadi@nortelnetworks.com> 990601
   15.20 + *              - Ingress support
   15.21 + */
   15.22 +
   15.23 +#include <asm/uaccess.h>
   15.24 +#include <asm/system.h>
   15.25 +#include <asm/bitops.h>
   15.26 +#include <linux/config.h>
   15.27 +#include <linux/types.h>
   15.28 +#include <linux/sched.h>
   15.29 +#include <linux/lib.h>
   15.30 +#include <linux/mm.h>
   15.31 +#include <linux/socket.h>
   15.32 +#include <linux/sockios.h>
   15.33 +#include <linux/errno.h>
   15.34 +#include <linux/interrupt.h>
   15.35 +#include <linux/netdevice.h>
   15.36 +#include <linux/skbuff.h>
   15.37 +#include <linux/init.h>
   15.38 +
   15.39 +static void dev_watchdog(unsigned long arg)
   15.40 +{
   15.41 +    struct net_device *dev = (struct net_device *)arg;
   15.42 +
   15.43 +    spin_lock(&dev->xmit_lock);
   15.44 +    if (netif_device_present(dev) &&
   15.45 +        netif_running(dev) &&
   15.46 +        netif_carrier_ok(dev)) {
   15.47 +        if (netif_queue_stopped(dev) &&
   15.48 +            (jiffies - dev->trans_start) > dev->watchdog_timeo) {
   15.49 +            printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
   15.50 +            dev->tx_timeout(dev);
   15.51 +        }
   15.52 +        if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
   15.53 +            dev_hold(dev);
   15.54 +    }
   15.55 +    spin_unlock(&dev->xmit_lock);
   15.56 +
   15.57 +    dev_put(dev);
   15.58 +}
   15.59 +
   15.60 +static void dev_watchdog_init(struct net_device *dev)
   15.61 +{
   15.62 +    init_timer(&dev->watchdog_timer);
   15.63 +    dev->watchdog_timer.data = (unsigned long)dev;
   15.64 +    dev->watchdog_timer.function = dev_watchdog;
   15.65 +}
   15.66 +
   15.67 +void __netdev_watchdog_up(struct net_device *dev)
   15.68 +{
   15.69 +    if (dev->tx_timeout) {
   15.70 +        if (dev->watchdog_timeo <= 0)
   15.71 +            dev->watchdog_timeo = 5*HZ;
   15.72 +        if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
   15.73 +            dev_hold(dev);
   15.74 +    }
   15.75 +}
   15.76 +
   15.77 +static void dev_watchdog_up(struct net_device *dev)
   15.78 +{
   15.79 +    spin_lock_bh(&dev->xmit_lock);
   15.80 +    __netdev_watchdog_up(dev);
   15.81 +    spin_unlock_bh(&dev->xmit_lock);
   15.82 +}
   15.83 +
   15.84 +static void dev_watchdog_down(struct net_device *dev)
   15.85 +{
   15.86 +    spin_lock_bh(&dev->xmit_lock);
   15.87 +    if (del_timer(&dev->watchdog_timer))
   15.88 +        __dev_put(dev);
   15.89 +    spin_unlock_bh(&dev->xmit_lock);
   15.90 +}
   15.91 +
   15.92 +void dev_activate(struct net_device *dev)
   15.93 +{
   15.94 +    spin_lock_bh(&dev->queue_lock);
   15.95 +    dev->trans_start = jiffies;
   15.96 +    dev_watchdog_up(dev);
   15.97 +    spin_unlock_bh(&dev->queue_lock);
   15.98 +}
   15.99 +
  15.100 +void dev_deactivate(struct net_device *dev)
  15.101 +{
  15.102 +    dev_watchdog_down(dev);
  15.103 +
  15.104 +    while (test_bit(__LINK_STATE_SCHED, &dev->state)) {
  15.105 +        current->policy |= SCHED_YIELD;
  15.106 +        schedule();
  15.107 +    }
  15.108 +}
  15.109 +
  15.110 +void dev_init_scheduler(struct net_device *dev)
  15.111 +{
  15.112 +    dev_watchdog_init(dev);
  15.113 +}
  15.114 +
  15.115 +void dev_shutdown(struct net_device *dev)
  15.116 +{
  15.117 +}
    16.1 --- a/xen-2.4.16/net/sch_generic.c	Fri Feb 21 16:04:44 2003 +0000
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,525 +0,0 @@
    16.4 -/*
    16.5 - * net/sched/sch_generic.c	Generic packet scheduler routines.
    16.6 - *
    16.7 - *		This program is free software; you can redistribute it and/or
    16.8 - *		modify it under the terms of the GNU General Public License
    16.9 - *		as published by the Free Software Foundation; either version
   16.10 - *		2 of the License, or (at your option) any later version.
   16.11 - *
   16.12 - * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   16.13 - *              Jamal Hadi Salim, <hadi@nortelnetworks.com> 990601
   16.14 - *              - Ingress support
   16.15 - */
   16.16 -
   16.17 -#include <asm/uaccess.h>
   16.18 -#include <asm/system.h>
   16.19 -#include <asm/bitops.h>
   16.20 -#include <linux/config.h>
   16.21 -#include <linux/types.h>
   16.22 -//#include <linux/kernel.h>
   16.23 -#include <linux/sched.h>
   16.24 -#include <linux/lib.h>
   16.25 -#include <linux/mm.h>
   16.26 -#include <linux/socket.h>
   16.27 -#include <linux/sockios.h>
   16.28 -//#include <linux/in.h>
   16.29 -#include <linux/errno.h>
   16.30 -#include <linux/interrupt.h>
   16.31 -#include <linux/netdevice.h>
   16.32 -#include <linux/skbuff.h>
   16.33 -//#include <linux/rtnetlink.h>
   16.34 -#include <linux/init.h>
   16.35 -//#include <net/sock.h>
   16.36 -#include <linux/pkt_sched.h>
   16.37 -
   16.38 -extern int net_ratelimit(void);
   16.39 -#define BUG_TRAP ASSERT
   16.40 -
   16.41 -/* Main transmission queue. */
   16.42 -
   16.43 -/* Main qdisc structure lock. 
   16.44 -
   16.45 -   However, modifications
   16.46 -   to data, participating in scheduling must be additionally
   16.47 -   protected with dev->queue_lock spinlock.
   16.48 -
   16.49 -   The idea is the following:
   16.50 -   - enqueue, dequeue are serialized via top level device
   16.51 -     spinlock dev->queue_lock.
   16.52 -   - tree walking is protected by read_lock(qdisc_tree_lock)
   16.53 -     and this lock is used only in process context.
   16.54 -   - updates to tree are made only under rtnl semaphore,
   16.55 -     hence this lock may be made without local bh disabling.
   16.56 -
   16.57 -   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
   16.58 - */
   16.59 -rwlock_t qdisc_tree_lock = RW_LOCK_UNLOCKED;
   16.60 -
   16.61 -/* 
   16.62 -   dev->queue_lock serializes queue accesses for this device
   16.63 -   AND dev->qdisc pointer itself.
   16.64 -
   16.65 -   dev->xmit_lock serializes accesses to device driver.
   16.66 -
   16.67 -   dev->queue_lock and dev->xmit_lock are mutually exclusive,
   16.68 -   if one is grabbed, another must be free.
   16.69 - */
   16.70 -
   16.71 -
   16.72 -/* Kick device.
   16.73 -   Note, that this procedure can be called by a watchdog timer, so that
   16.74 -   we do not check dev->tbusy flag here.
   16.75 -
   16.76 -   Returns:  0  - queue is empty.
   16.77 -            >0  - queue is not empty, but throttled.
   16.78 -	    <0  - queue is not empty. Device is throttled, if dev->tbusy != 0.
   16.79 -
   16.80 -   NOTE: Called under dev->queue_lock with locally disabled BH.
   16.81 -*/
   16.82 -
   16.83 -int qdisc_restart(struct net_device *dev)
   16.84 -{
   16.85 -	struct Qdisc *q = dev->qdisc;
   16.86 -	struct sk_buff *skb;
   16.87 -
   16.88 -	/* Dequeue packet */
   16.89 -	if ((skb = q->dequeue(q)) != NULL) {
   16.90 -		if (spin_trylock(&dev->xmit_lock)) {
   16.91 -			/* Remember that the driver is grabbed by us. */
   16.92 -			dev->xmit_lock_owner = smp_processor_id();
   16.93 -
   16.94 -			/* And release queue */
   16.95 -			spin_unlock(&dev->queue_lock);
   16.96 -
   16.97 -			if (!netif_queue_stopped(dev)) {
   16.98 -#if 0
   16.99 -				if (netdev_nit)
  16.100 -					dev_queue_xmit_nit(skb, dev);
  16.101 -#endif
  16.102 -
  16.103 -				if (dev->hard_start_xmit(skb, dev) == 0) {
  16.104 -					dev->xmit_lock_owner = -1;
  16.105 -					spin_unlock(&dev->xmit_lock);
  16.106 -
  16.107 -					spin_lock(&dev->queue_lock);
  16.108 -					return -1;
  16.109 -				}
  16.110 -			}
  16.111 -
  16.112 -			/* Release the driver */
  16.113 -			dev->xmit_lock_owner = -1;
  16.114 -			spin_unlock(&dev->xmit_lock);
  16.115 -			spin_lock(&dev->queue_lock);
  16.116 -			q = dev->qdisc;
  16.117 -		} else {
  16.118 -			/* So, someone grabbed the driver. */
  16.119 -
  16.120 -			/* It may be transient configuration error,
  16.121 -			   when hard_start_xmit() recurses. We detect
  16.122 -			   it by checking xmit owner and drop the
  16.123 -			   packet when deadloop is detected.
  16.124 -			 */
  16.125 -			if (dev->xmit_lock_owner == smp_processor_id()) {
  16.126 -				kfree_skb(skb);
  16.127 -				if (net_ratelimit())
  16.128 -					printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
  16.129 -				return -1;
  16.130 -			}
  16.131 -			netdev_rx_stat[smp_processor_id()].cpu_collision++;
  16.132 -		}
  16.133 -
  16.134 -		/* Device kicked us out :(
  16.135 -		   This is possible in three cases:
  16.136 -
  16.137 -		   0. driver is locked
  16.138 -		   1. fastroute is enabled
  16.139 -		   2. device cannot determine busy state
  16.140 -		      before start of transmission (f.e. dialout)
  16.141 -		   3. device is buggy (ppp)
  16.142 -		 */
  16.143 -
  16.144 -		q->ops->requeue(skb, q);
  16.145 -		netif_schedule(dev);
  16.146 -		return 1;
  16.147 -	}
  16.148 -	return q->q.qlen;
  16.149 -}
  16.150 -
  16.151 -static void dev_watchdog(unsigned long arg)
  16.152 -{
  16.153 -	struct net_device *dev = (struct net_device *)arg;
  16.154 -
  16.155 -	spin_lock(&dev->xmit_lock);
  16.156 -	if (dev->qdisc != &noop_qdisc) {
  16.157 -		if (netif_device_present(dev) &&
  16.158 -		    netif_running(dev) &&
  16.159 -		    netif_carrier_ok(dev)) {
  16.160 -			if (netif_queue_stopped(dev) &&
  16.161 -			    (jiffies - dev->trans_start) > dev->watchdog_timeo) {
  16.162 -				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name);
  16.163 -				dev->tx_timeout(dev);
  16.164 -			}
  16.165 -			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
  16.166 -				dev_hold(dev);
  16.167 -		}
  16.168 -	}
  16.169 -	spin_unlock(&dev->xmit_lock);
  16.170 -
  16.171 -	dev_put(dev);
  16.172 -}
  16.173 -
  16.174 -static void dev_watchdog_init(struct net_device *dev)
  16.175 -{
  16.176 -	init_timer(&dev->watchdog_timer);
  16.177 -	dev->watchdog_timer.data = (unsigned long)dev;
  16.178 -	dev->watchdog_timer.function = dev_watchdog;
  16.179 -}
  16.180 -
  16.181 -void __netdev_watchdog_up(struct net_device *dev)
  16.182 -{
  16.183 -	if (dev->tx_timeout) {
  16.184 -		if (dev->watchdog_timeo <= 0)
  16.185 -			dev->watchdog_timeo = 5*HZ;
  16.186 -		if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
  16.187 -			dev_hold(dev);
  16.188 -	}
  16.189 -}
  16.190 -
  16.191 -static void dev_watchdog_up(struct net_device *dev)
  16.192 -{
  16.193 -	spin_lock_bh(&dev->xmit_lock);
  16.194 -	__netdev_watchdog_up(dev);
  16.195 -	spin_unlock_bh(&dev->xmit_lock);
  16.196 -}
  16.197 -
  16.198 -static void dev_watchdog_down(struct net_device *dev)
  16.199 -{
  16.200 -	spin_lock_bh(&dev->xmit_lock);
  16.201 -	if (del_timer(&dev->watchdog_timer))
  16.202 -		__dev_put(dev);
  16.203 -	spin_unlock_bh(&dev->xmit_lock);
  16.204 -}
  16.205 -
  16.206 -/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
  16.207 -   under all circumstances. It is difficult to invent anything faster or
  16.208 -   cheaper.
  16.209 - */
  16.210 -
  16.211 -static int
  16.212 -noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
  16.213 -{
  16.214 -	kfree_skb(skb);
  16.215 -	return NET_XMIT_CN;
  16.216 -}
  16.217 -
  16.218 -static struct sk_buff *
  16.219 -noop_dequeue(struct Qdisc * qdisc)
  16.220 -{
  16.221 -	return NULL;
  16.222 -}
  16.223 -
  16.224 -static int
  16.225 -noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
  16.226 -{
  16.227 -	if (net_ratelimit())
  16.228 -		printk(KERN_DEBUG "%s deferred output. It is buggy.\n", skb->dev->name);
  16.229 -	kfree_skb(skb);
  16.230 -	return NET_XMIT_CN;
  16.231 -}
  16.232 -
  16.233 -struct Qdisc_ops noop_qdisc_ops =
  16.234 -{
  16.235 -	NULL,
  16.236 -	NULL,
  16.237 -	"noop",
  16.238 -	0,
  16.239 -
  16.240 -	noop_enqueue,
  16.241 -	noop_dequeue,
  16.242 -	noop_requeue,
  16.243 -};
  16.244 -
  16.245 -struct Qdisc noop_qdisc =
  16.246 -{
  16.247 -	noop_enqueue,
  16.248 -	noop_dequeue,
  16.249 -	TCQ_F_BUILTIN,
  16.250 -	&noop_qdisc_ops,	
  16.251 -};
  16.252 -
  16.253 -
  16.254 -struct Qdisc_ops noqueue_qdisc_ops =
  16.255 -{
  16.256 -	NULL,
  16.257 -	NULL,
  16.258 -	"noqueue",
  16.259 -	0,
  16.260 -
  16.261 -	noop_enqueue,
  16.262 -	noop_dequeue,
  16.263 -	noop_requeue,
  16.264 -
  16.265 -};
  16.266 -
  16.267 -struct Qdisc noqueue_qdisc =
  16.268 -{
  16.269 -	NULL,
  16.270 -	noop_dequeue,
  16.271 -	TCQ_F_BUILTIN,
  16.272 -	&noqueue_qdisc_ops,
  16.273 -};
  16.274 -
  16.275 -
  16.276 -static const u8 prio2band[TC_PRIO_MAX+1] =
  16.277 -{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
  16.278 -
  16.279 -/* 3-band FIFO queue: old style, but should be a bit faster than
  16.280 -   generic prio+fifo combination.
  16.281 - */
  16.282 -
  16.283 -static int
  16.284 -pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
  16.285 -{
  16.286 -	struct sk_buff_head *list;
  16.287 -
  16.288 -	list = ((struct sk_buff_head*)qdisc->data) +
  16.289 -		prio2band[skb->priority&TC_PRIO_MAX];
  16.290 -
  16.291 -	if (list->qlen <= skb->dev->tx_queue_len) {
  16.292 -		__skb_queue_tail(list, skb);
  16.293 -		qdisc->q.qlen++;
  16.294 -		return 0;
  16.295 -	}
  16.296 -	//qdisc->stats.drops++;
  16.297 -	kfree_skb(skb);
  16.298 -	return NET_XMIT_DROP;
  16.299 -}
  16.300 -
  16.301 -static struct sk_buff *
  16.302 -pfifo_fast_dequeue(struct Qdisc* qdisc)
  16.303 -{
  16.304 -	int prio;
  16.305 -	struct sk_buff_head *list = ((struct sk_buff_head*)qdisc->data);
  16.306 -	struct sk_buff *skb;
  16.307 -
  16.308 -	for (prio = 0; prio < 3; prio++, list++) {
  16.309 -		skb = __skb_dequeue(list);
  16.310 -		if (skb) {
  16.311 -			qdisc->q.qlen--;
  16.312 -			return skb;
  16.313 -		}
  16.314 -	}
  16.315 -	return NULL;
  16.316 -}
  16.317 -
  16.318 -static int
  16.319 -pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
  16.320 -{
  16.321 -	struct sk_buff_head *list;
  16.322 -
  16.323 -	list = ((struct sk_buff_head*)qdisc->data) +
  16.324 -		prio2band[skb->priority&TC_PRIO_MAX];
  16.325 -
  16.326 -	__skb_queue_head(list, skb);
  16.327 -	qdisc->q.qlen++;
  16.328 -	return 0;
  16.329 -}
  16.330 -
  16.331 -static void
  16.332 -pfifo_fast_reset(struct Qdisc* qdisc)
  16.333 -{
  16.334 -	int prio;
  16.335 -	struct sk_buff_head *list = ((struct sk_buff_head*)qdisc->data);
  16.336 -
  16.337 -	for (prio=0; prio < 3; prio++)
  16.338 -		skb_queue_purge(list+prio);
  16.339 -	qdisc->q.qlen = 0;
  16.340 -}
  16.341 -
  16.342 -static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
  16.343 -{
  16.344 -	int i;
  16.345 -	struct sk_buff_head *list;
  16.346 -
  16.347 -	list = ((struct sk_buff_head*)qdisc->data);
  16.348 -
  16.349 -	for (i=0; i<3; i++)
  16.350 -		skb_queue_head_init(list+i);
  16.351 -
  16.352 -	return 0;
  16.353 -}
  16.354 -
  16.355 -static struct Qdisc_ops pfifo_fast_ops =
  16.356 -{
  16.357 -	NULL,
  16.358 -	NULL,
  16.359 -	"pfifo_fast",
  16.360 -	3 * sizeof(struct sk_buff_head),
  16.361 -
  16.362 -	pfifo_fast_enqueue,
  16.363 -	pfifo_fast_dequeue,
  16.364 -	pfifo_fast_requeue,
  16.365 -	NULL,
  16.366 -
  16.367 -	pfifo_fast_init,
  16.368 -	pfifo_fast_reset,
  16.369 -};
  16.370 -
  16.371 -struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
  16.372 -{
  16.373 -	struct Qdisc *sch;
  16.374 -	int size = sizeof(*sch) + ops->priv_size;
  16.375 -
  16.376 -	sch = kmalloc(size, GFP_KERNEL);
  16.377 -	if (!sch)
  16.378 -		return NULL;
  16.379 -	memset(sch, 0, size);
  16.380 -
  16.381 -	skb_queue_head_init(&sch->q);
  16.382 -	sch->ops = ops;
  16.383 -	sch->enqueue = ops->enqueue;
  16.384 -	sch->dequeue = ops->dequeue;
  16.385 -	sch->dev = dev;
  16.386 -	//sch->stats.lock = &dev->queue_lock;
  16.387 -	atomic_set(&sch->refcnt, 1);
  16.388 -	if (!ops->init || ops->init(sch, NULL) == 0)
  16.389 -		return sch;
  16.390 -
  16.391 -	kfree(sch);
  16.392 -	return NULL;
  16.393 -}
  16.394 -
  16.395 -/* Under dev->queue_lock and BH! */
  16.396 -
  16.397 -void qdisc_reset(struct Qdisc *qdisc)
  16.398 -{
  16.399 -	struct Qdisc_ops *ops = qdisc->ops;
  16.400 -
  16.401 -	if (ops->reset)
  16.402 -		ops->reset(qdisc);
  16.403 -}
  16.404 -
  16.405 -/* Under dev->queue_lock and BH! */
  16.406 -
  16.407 -void qdisc_destroy(struct Qdisc *qdisc)
  16.408 -{
  16.409 -	struct Qdisc_ops *ops = qdisc->ops;
  16.410 -	struct net_device *dev;
  16.411 -
  16.412 -	if (!atomic_dec_and_test(&qdisc->refcnt))
  16.413 -		return;
  16.414 -
  16.415 -	dev = qdisc->dev;
  16.416 -
  16.417 -#ifdef CONFIG_NET_SCHED
  16.418 -	if (dev) {
  16.419 -		struct Qdisc *q, **qp;
  16.420 -		for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next) {
  16.421 -			if (q == qdisc) {
  16.422 -				*qp = q->next;
  16.423 -				break;
  16.424 -			}
  16.425 -		}
  16.426 -	}
  16.427 -#ifdef CONFIG_NET_ESTIMATOR
  16.428 -	qdisc_kill_estimator(&qdisc->stats);
  16.429 -#endif
  16.430 -#endif
  16.431 -	if (ops->reset)
  16.432 -		ops->reset(qdisc);
  16.433 -	if (ops->destroy)
  16.434 -		ops->destroy(qdisc);
  16.435 -	if (!(qdisc->flags&TCQ_F_BUILTIN))
  16.436 -		kfree(qdisc);
  16.437 -}
  16.438 -
  16.439 -
  16.440 -void dev_activate(struct net_device *dev)
  16.441 -{
  16.442 -	/* No queueing discipline is attached to device;
  16.443 -	   create default one i.e. pfifo_fast for devices,
  16.444 -	   which need queueing and noqueue_qdisc for
  16.445 -	   virtual interfaces
  16.446 -	 */
  16.447 -
  16.448 -	if (dev->qdisc_sleeping == &noop_qdisc) {
  16.449 -		struct Qdisc *qdisc;
  16.450 -		if (dev->tx_queue_len) {
  16.451 -			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
  16.452 -			if (qdisc == NULL) {
  16.453 -				printk(KERN_INFO "%s: activation failed\n", dev->name);
  16.454 -				return;
  16.455 -			}
  16.456 -		} else {
  16.457 -			qdisc =  &noqueue_qdisc;
  16.458 -		}
  16.459 -		write_lock(&qdisc_tree_lock);
  16.460 -		dev->qdisc_sleeping = qdisc;
  16.461 -		write_unlock(&qdisc_tree_lock);
  16.462 -	}
  16.463 -
  16.464 -	spin_lock_bh(&dev->queue_lock);
  16.465 -	if ((dev->qdisc = dev->qdisc_sleeping) != &noqueue_qdisc) {
  16.466 -		dev->trans_start = jiffies;
  16.467 -		dev_watchdog_up(dev);
  16.468 -	}
  16.469 -	spin_unlock_bh(&dev->queue_lock);
  16.470 -}
  16.471 -
  16.472 -void dev_deactivate(struct net_device *dev)
  16.473 -{
  16.474 -	struct Qdisc *qdisc;
  16.475 -
  16.476 -	spin_lock_bh(&dev->queue_lock);
  16.477 -	qdisc = dev->qdisc;
  16.478 -	dev->qdisc = &noop_qdisc;
  16.479 -
  16.480 -	qdisc_reset(qdisc);
  16.481 -
  16.482 -	spin_unlock_bh(&dev->queue_lock);
  16.483 -
  16.484 -	dev_watchdog_down(dev);
  16.485 -
  16.486 -	while (test_bit(__LINK_STATE_SCHED, &dev->state)) {
  16.487 -            current->policy |= SCHED_YIELD;
  16.488 -            schedule();
  16.489 -	}
  16.490 -
  16.491 -	spin_unlock_wait(&dev->xmit_lock);
  16.492 -}
  16.493 -
  16.494 -void dev_init_scheduler(struct net_device *dev)
  16.495 -{
  16.496 -	write_lock(&qdisc_tree_lock);
  16.497 -	spin_lock_bh(&dev->queue_lock);
  16.498 -	dev->qdisc = &noop_qdisc;
  16.499 -	spin_unlock_bh(&dev->queue_lock);
  16.500 -	dev->qdisc_sleeping = &noop_qdisc;
  16.501 -	dev->qdisc_list = NULL;
  16.502 -	write_unlock(&qdisc_tree_lock);
  16.503 -
  16.504 -	dev_watchdog_init(dev);
  16.505 -}
  16.506 -
  16.507 -void dev_shutdown(struct net_device *dev)
  16.508 -{
  16.509 -	struct Qdisc *qdisc;
  16.510 -
  16.511 -	write_lock(&qdisc_tree_lock);
  16.512 -	spin_lock_bh(&dev->queue_lock);
  16.513 -	qdisc = dev->qdisc_sleeping;
  16.514 -	dev->qdisc = &noop_qdisc;
  16.515 -	dev->qdisc_sleeping = &noop_qdisc;
  16.516 -	qdisc_destroy(qdisc);
  16.517 -#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
  16.518 -        if ((qdisc = dev->qdisc_ingress) != NULL) {
  16.519 -		dev->qdisc_ingress = NULL;
  16.520 -		qdisc_destroy(qdisc);
  16.521 -        }
  16.522 -#endif
  16.523 -	BUG_TRAP(dev->qdisc_list == NULL);
  16.524 -	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
  16.525 -	dev->qdisc_list = NULL;
  16.526 -	spin_unlock_bh(&dev->queue_lock);
  16.527 -	write_unlock(&qdisc_tree_lock);
  16.528 -}
    17.1 --- a/xen-2.4.16/net/skbuff.c	Fri Feb 21 16:04:44 2003 +0000
    17.2 +++ b/xen-2.4.16/net/skbuff.c	Sun Feb 23 11:22:39 2003 +0000
    17.3 @@ -40,26 +40,14 @@
    17.4  #include <linux/lib.h>
    17.5  #include <linux/errno.h>
    17.6  #include <linux/types.h>
    17.7 -//#include <linux/kernel.h>
    17.8  #include <linux/sched.h>
    17.9  #include <linux/mm.h>
   17.10  #include <linux/interrupt.h>
   17.11 -//#include <linux/in.h>
   17.12 -//#include <linux/inet.h>
   17.13  #include <linux/slab.h>
   17.14  #include <linux/netdevice.h>
   17.15 -//#include <linux/string.h>
   17.16  #include <linux/skbuff.h>
   17.17  #include <linux/cache.h>
   17.18  #include <linux/init.h>
   17.19 -//#include <linux/highmem.h>
   17.20 -
   17.21 -//#include <net/ip.h>
   17.22 -//#include <net/protocol.h>
   17.23 -//#include <net/dst.h>
   17.24 -//#include <net/tcp.h>
   17.25 -//#include <net/udp.h>
   17.26 -//#include <net/sock.h>
   17.27  
   17.28  #include <asm/uaccess.h>
   17.29  #include <asm/system.h>
   17.30 @@ -162,7 +150,7 @@ static inline u8 *alloc_skb_data_page(st
   17.31  
   17.32          list_ptr = free_list.next;
   17.33          pf = list_entry(list_ptr, struct pfn_info, list);
   17.34 -        pf->flags = 0; // owned by dom0
   17.35 +        pf->flags = 0; /* owned by dom0 */
   17.36          list_del(&pf->list);
   17.37          free_pfns--;
   17.38  
   17.39 @@ -218,14 +206,9 @@ struct sk_buff *alloc_zc_skb(unsigned in
   17.40          if (data == NULL)
   17.41                  goto nodata;
   17.42  
   17.43 -        // This is so that pci_map_single does the right thing in the driver.
   17.44 -        // If you want to ever use this pointer otherwise, you need to regenerate it 
   17.45 -        // based on skb->pf.
   17.46 +        /* A FAKE virtual address, so that pci_map_xxx dor the right thing. */
   17.47          data = phys_to_virt((unsigned long)data); 
   17.48          
   17.49 -        /* XXX: does not include slab overhead */
   17.50 -        skb->truesize = size + sizeof(struct sk_buff);
   17.51 -
   17.52          /* Load the data pointers. */
   17.53          skb->head = data;
   17.54          skb->data = data;
   17.55 @@ -302,9 +285,6 @@ struct sk_buff *alloc_skb(unsigned int s
   17.56  	if (data == NULL)
   17.57  		goto nodata;
   17.58  
   17.59 -	/* XXX: does not include slab overhead */ 
   17.60 -	skb->truesize = size + sizeof(struct sk_buff);
   17.61 -
   17.62  	/* Load the data pointers. */
   17.63  	skb->head = data;
   17.64  	skb->data = data;
   17.65 @@ -343,15 +323,9 @@ static inline void skb_headerinit(void *
   17.66  	skb->next = NULL;
   17.67  	skb->prev = NULL;
   17.68  	skb->list = NULL;
   17.69 -	skb->sk = NULL;
   17.70 -	skb->stamp.tv_sec=0;	/* No idea about time */
   17.71  	skb->dev = NULL;
   17.72 -//	skb->dst = NULL;
   17.73 -	memset(skb->cb, 0, sizeof(skb->cb));
   17.74  	skb->pkt_type = PACKET_HOST;	/* Default type */
   17.75  	skb->ip_summed = 0;
   17.76 -	skb->priority = 0;
   17.77 -	skb->security = 0;	/* By default packets are insecure */
   17.78  	skb->destructor = NULL;
   17.79  
   17.80  #ifdef CONFIG_NETFILTER
   17.81 @@ -411,7 +385,7 @@ static void skb_release_data(struct sk_b
   17.82                  } 
   17.83                  else 
   17.84                  {
   17.85 -                    BUG(); //skb_release_data called with unknown skb type!
   17.86 +                    BUG();
   17.87                  }
   17.88  	}
   17.89  }
   17.90 @@ -442,7 +416,6 @@ void __kfree_skb(struct sk_buff *skb)
   17.91  		BUG();
   17.92  	}
   17.93  
   17.94 -//	dst_release(skb->dst);
   17.95  	if(skb->destructor) {
   17.96  		if (in_irq()) {
   17.97  			printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
   17.98 @@ -487,26 +460,18 @@ struct sk_buff *skb_clone(struct sk_buff
   17.99  
  17.100  	n->next = n->prev = NULL;
  17.101  	n->list = NULL;
  17.102 -	n->sk = NULL;
  17.103 -	C(stamp);
  17.104  	C(dev);
  17.105  	C(h);
  17.106  	C(nh);
  17.107  	C(mac);
  17.108 -//	C(dst);
  17.109 -//	dst_clone(n->dst);
  17.110 -	memcpy(n->cb, skb->cb, sizeof(skb->cb));
  17.111  	C(len);
  17.112  	C(data_len);
  17.113  	C(csum);
  17.114  	n->cloned = 1;
  17.115  	C(pkt_type);
  17.116  	C(ip_summed);
  17.117 -	C(priority);
  17.118  	atomic_set(&n->users, 1);
  17.119  	C(protocol);
  17.120 -	C(security);
  17.121 -	C(truesize);
  17.122  	C(head);
  17.123  	C(data);
  17.124  	C(tail);
  17.125 @@ -543,20 +508,14 @@ static void copy_skb_header(struct sk_bu
  17.126  	unsigned long offset = new->data - old->data;
  17.127  
  17.128  	new->list=NULL;
  17.129 -	new->sk=NULL;
  17.130  	new->dev=old->dev;
  17.131 -	new->priority=old->priority;
  17.132  	new->protocol=old->protocol;
  17.133 -//	new->dst=dst_clone(old->dst);
  17.134  	new->h.raw=old->h.raw+offset;
  17.135  	new->nh.raw=old->nh.raw+offset;
  17.136  	new->mac.raw=old->mac.raw+offset;
  17.137 -	memcpy(new->cb, old->cb, sizeof(old->cb));
  17.138  	atomic_set(&new->users, 1);
  17.139  	new->pkt_type=old->pkt_type;
  17.140 -	new->stamp=old->stamp;
  17.141  	new->destructor = NULL;
  17.142 -	new->security=old->security;
  17.143  #ifdef CONFIG_NETFILTER
  17.144  	new->nfmark=old->nfmark;
  17.145  	new->nfcache=old->nfcache;
    18.1 --- a/xen-2.4.16/net/utils.c	Fri Feb 21 16:04:44 2003 +0000
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,75 +0,0 @@
    18.4 -/*
    18.5 - *	Generic address resultion entity
    18.6 - *
    18.7 - *	Authors:
    18.8 - *	net_random Alan Cox
    18.9 - *	net_ratelimit Andy Kleen
   18.10 - *
   18.11 - *	Created by Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
   18.12 - *
   18.13 - *	This program is free software; you can redistribute it and/or
   18.14 - *      modify it under the terms of the GNU General Public License
   18.15 - *      as published by the Free Software Foundation; either version
   18.16 - *      2 of the License, or (at your option) any later version.
   18.17 - */
   18.18 -
   18.19 -#include <linux/config.h>
   18.20 -#include <linux/lib.h>
   18.21 -#include <asm/uaccess.h>
   18.22 -#include <asm/system.h>
   18.23 -#include <linux/types.h>
   18.24 -//#include <linux/kernel.h>
   18.25 -#include <linux/sched.h>
   18.26 -//#include <linux/string.h>
   18.27 -#include <linux/mm.h>
   18.28 -
   18.29 -static unsigned long net_rand_seed = 152L;
   18.30 -
   18.31 -unsigned long net_random(void)
   18.32 -{
   18.33 -	net_rand_seed=net_rand_seed*69069L+1;
   18.34 -        return net_rand_seed^jiffies;
   18.35 -}
   18.36 -
   18.37 -void net_srandom(unsigned long entropy)
   18.38 -{
   18.39 -	net_rand_seed ^= entropy;
   18.40 -	net_random();
   18.41 -}
   18.42 -
   18.43 -int net_msg_cost = 5*HZ;
   18.44 -int net_msg_burst = 10*5*HZ;
   18.45 -
   18.46 -/* 
   18.47 - * This enforces a rate limit: not more than one kernel message
   18.48 - * every 5secs to make a denial-of-service attack impossible.
   18.49 - *
   18.50 - * All warning printk()s should be guarded by this function. 
   18.51 - */ 
   18.52 -int net_ratelimit(void)
   18.53 -{
   18.54 -	static spinlock_t ratelimit_lock = SPIN_LOCK_UNLOCKED;
   18.55 -	static unsigned long toks = 10*5*HZ;
   18.56 -	static unsigned long last_msg; 
   18.57 -	static int missed;
   18.58 -	unsigned long flags;
   18.59 -	unsigned long now = jiffies;
   18.60 -
   18.61 -	spin_lock_irqsave(&ratelimit_lock, flags);
   18.62 -	toks += now - last_msg;
   18.63 -	last_msg = now;
   18.64 -	if (toks > net_msg_burst)
   18.65 -		toks = net_msg_burst;
   18.66 -	if (toks >= net_msg_cost) {
   18.67 -		int lost = missed;
   18.68 -		missed = 0;
   18.69 -		toks -= net_msg_cost;
   18.70 -		spin_unlock_irqrestore(&ratelimit_lock, flags);
   18.71 -		if (lost)
   18.72 -			printk(KERN_WARNING "NET: %d messages suppressed.\n", lost);
   18.73 -		return 1;
   18.74 -	}
   18.75 -	missed++;
   18.76 -	spin_unlock_irqrestore(&ratelimit_lock, flags);
   18.77 -	return 0;
   18.78 -}