ia64/xen-unstable

changeset 171:cf76e69e6979

bitkeeper revision 1.37 (3e4d1fcceiOur6tqnIwxDzqL7QTPSg)

removed the skbuff.h and skbuff.c overrides from the guest-side sparse tree.
author akw27@boulderdash.cl.cam.ac.uk
date Fri Feb 14 16:56:44 2003 +0000 (2003-02-14)
parents ac77b60e57a6
children 9713c0d10ee7
files .rootkeys xenolinux-2.4.16-sparse/include/linux/skbuff.h xenolinux-2.4.16-sparse/net/core/skbuff.c
line diff
     1.1 --- a/.rootkeys	Fri Feb 14 16:43:07 2003 +0000
     1.2 +++ b/.rootkeys	Fri Feb 14 16:56:44 2003 +0000
     1.3 @@ -441,7 +441,6 @@ 3ddb79bbqhb9X9qWOz5Bv4wOzrkITg xenolinux
     1.4  3ddb79bbA52x94o6uwDYsbzrH2hjzA xenolinux-2.4.16-sparse/include/asm-xeno/xor.h
     1.5  3e4a8cb7ON8EclY3NN3YPXyMT941hA xenolinux-2.4.16-sparse/include/linux/blk.h
     1.6  3e4a8cb7GJrKD0z7EF0VZOhdEa01Mw xenolinux-2.4.16-sparse/include/linux/major.h
     1.7 -3e37c39fVCSGQENtY6g7muaq_THliw xenolinux-2.4.16-sparse/include/linux/skbuff.h
     1.8  3ddb79bb_7YG4U75ZmEic9YXWTW7Vw xenolinux-2.4.16-sparse/include/linux/sunrpc/debug.h
     1.9  3e4a8cb7j05wwb1uPZgY16s68o7qAw xenolinux-2.4.16-sparse/init/main.c
    1.10  3ddb79bcxkVPfWlZ1PQKvDrfArzOVw xenolinux-2.4.16-sparse/kernel/panic.c
    1.11 @@ -449,4 +448,3 @@ 3ddb79bbP31im-mx2NbfthSeqty1Dg xenolinux
    1.12  3e15d52e0_j129JPvo7xfYGndVFpwQ xenolinux-2.4.16-sparse/mm/memory.c
    1.13  3e15d535DLvpzTrLRUIerB69LpJD1g xenolinux-2.4.16-sparse/mm/mremap.c
    1.14  3e15d531m1Y1_W8ki64AFOU_ua4C4w xenolinux-2.4.16-sparse/mm/swapfile.c
    1.15 -3e37c312QFuzIxXsuAgO6IRt3Tp96Q xenolinux-2.4.16-sparse/net/core/skbuff.c
     2.1 --- a/xenolinux-2.4.16-sparse/include/linux/skbuff.h	Fri Feb 14 16:43:07 2003 +0000
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,1186 +0,0 @@
     2.4 -/*
     2.5 - *	Definitions for the 'struct sk_buff' memory handlers.
     2.6 - *
     2.7 - *	Authors:
     2.8 - *		Alan Cox, <gw4pts@gw4pts.ampr.org>
     2.9 - *		Florian La Roche, <rzsfl@rz.uni-sb.de>
    2.10 - *
    2.11 - *	This program is free software; you can redistribute it and/or
    2.12 - *	modify it under the terms of the GNU General Public License
    2.13 - *	as published by the Free Software Foundation; either version
    2.14 - *	2 of the License, or (at your option) any later version.
    2.15 - */
    2.16 - 
    2.17 -#ifndef _LINUX_SKBUFF_H
    2.18 -#define _LINUX_SKBUFF_H
    2.19 -
    2.20 -#include <linux/config.h>
    2.21 -#include <linux/kernel.h>
    2.22 -#include <linux/sched.h>
    2.23 -#include <linux/time.h>
    2.24 -#include <linux/cache.h>
    2.25 -
    2.26 -#include <asm/atomic.h>
    2.27 -#include <asm/types.h>
    2.28 -#include <linux/spinlock.h>
    2.29 -#include <linux/mm.h>
    2.30 -#include <linux/highmem.h>
    2.31 -
    2.32 -/* Zero Copy additions:
    2.33 - *
     2.34 - * (1) there are now two types of skb, as indicated by the skb_type field.
     2.35 - *     This is because, at least for the time being, there are two separate
     2.36 - *     types of memory that may be allocated to skb->data.
    2.37 - *
    2.38 - * (2) until discontiguous memory is fully supported, there will be a free list of pages
    2.39 - *     to be used by the net RX code.  This list will be allocated in the driver init code
    2.40 - *     but is declared here because the socket free code needs to return pages to it.
    2.41 - */
    2.42 -
    2.43 -// for skb->skb_type:
    2.44 -
    2.45 -#define SKB_NORMAL          0
    2.46 -#define SKB_ZERO_COPY       1
    2.47 -
    2.48 -#define NUM_NET_PAGES       9 // about 1Meg of buffers. (2^9)
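
If the commented-out net-page pool below were enabled, the free path would presumably dispatch on skb_type along these lines; this helper and its use of free_net_page() are an illustrative sketch, not code from this tree:

    /* Sketch only: release skb->data according to its type, assuming the
     * net-page pool (declared below, currently commented out) is in use. */
    static void skb_release_data_sketch(struct sk_buff *skb)
    {
            if (skb->skb_type == SKB_ZERO_COPY)
                    free_net_page(skb->net_page);  /* back to the RX free list */
            else                                   /* SKB_NORMAL */
                    kfree(skb->head);              /* ordinary kmalloc()ed data */
    }
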
    2.49 -
    2.50 -/*struct net_page_info {
    2.51 -        struct list_head list;
    2.52 -        unsigned long   virt_addr;
    2.53 -        unsigned long   ppte;
    2.54 -};
    2.55 -
    2.56 -extern char *net_page_chunk;
    2.57 -extern struct net_page_info *net_page_table;
    2.58 -extern struct list_head net_page_list;
    2.59 -extern spinlock_t net_page_list_lock;
    2.60 -extern unsigned int net_pages;
    2.61 -*/
    2.62 -/* End zero copy additions */
    2.63 -
    2.64 -#define HAVE_ALLOC_SKB		/* For the drivers to know */
    2.65 -#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */
    2.66 -#define SLAB_SKB 		/* Slabified skbuffs 	   */
    2.67 -
    2.68 -#define CHECKSUM_NONE 0
    2.69 -#define CHECKSUM_HW 1
    2.70 -#define CHECKSUM_UNNECESSARY 2
    2.71 -
    2.72 -#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
    2.73 -#define SKB_MAX_ORDER(X,ORDER)	(((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
    2.74 -#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X),0))
    2.75 -#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0,2))
    2.76 -
    2.77 -/* A. Checksumming of received packets by device.
    2.78 - *
    2.79 - *	NONE: device failed to checksum this packet.
    2.80 - *		skb->csum is undefined.
    2.81 - *
     2.82 - *	UNNECESSARY: device parsed the packet and verified the checksum.
     2.83 - *		skb->csum is undefined.
     2.84 - *	      It is a bad option, but, unfortunately, many vendors do this,
     2.85 - *	      apparently with the secret goal of selling you a new device
     2.86 - *	      when you add a new protocol to your host, e.g. IPv6. 8)
    2.87 - *
    2.88 - *	HW: the most generic way. Device supplied checksum of _all_
    2.89 - *	    the packet as seen by netif_rx in skb->csum.
    2.90 - *	    NOTE: Even if device supports only some protocols, but
    2.91 - *	    is able to produce some skb->csum, it MUST use HW,
    2.92 - *	    not UNNECESSARY.
    2.93 - *
    2.94 - * B. Checksumming on output.
    2.95 - *
    2.96 - *	NONE: skb is checksummed by protocol or csum is not required.
    2.97 - *
    2.98 - *	HW: device is required to csum packet as seen by hard_start_xmit
    2.99 - *	from skb->h.raw to the end and to record the checksum
   2.100 - *	at skb->h.raw+skb->csum.
   2.101 - *
   2.102 - *	Device must show its capabilities in dev->features, set
   2.103 - *	at device setup time.
    2.104 - *	NETIF_F_HW_CSUM	- a clever device that is able to checksum
    2.105 - *			  everything.
    2.106 - *	NETIF_F_NO_CSUM - loopback or reliable single hop media.
    2.107 - *	NETIF_F_IP_CSUM - a dumb device that is only able to csum
    2.108 - *			  TCP/UDP over IPv4. Sigh. Vendors like this
    2.109 - *			  way for unknown reasons. Though, see the comment
    2.110 - *			  above about CHECKSUM_UNNECESSARY. 8)
   2.111 - *
   2.112 - *	Any questions? No questions, good. 		--ANK
   2.113 - */
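
A minimal sketch of how a receive path would apply the rules above; hw_ok and hw_csum stand in for whatever the hardware actually reports:

    static void rx_set_checksum_sketch(struct sk_buff *skb, int hw_ok,
                                       unsigned int hw_csum)
    {
            if (hw_ok) {
                    /* device checksummed the whole packet as seen by netif_rx */
                    skb->ip_summed = CHECKSUM_HW;
                    skb->csum = hw_csum;
            } else {
                    /* leave verification to the protocol layers */
                    skb->ip_summed = CHECKSUM_NONE;
            }
    }
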
   2.114 -
   2.115 -#ifdef __i386__
   2.116 -#define NET_CALLER(arg) (*(((void**)&arg)-1))
   2.117 -#else
   2.118 -#define NET_CALLER(arg) __builtin_return_address(0)
   2.119 -#endif
   2.120 -
   2.121 -#ifdef CONFIG_NETFILTER
   2.122 -struct nf_conntrack {
   2.123 -	atomic_t use;
   2.124 -	void (*destroy)(struct nf_conntrack *);
   2.125 -};
   2.126 -
   2.127 -struct nf_ct_info {
   2.128 -	struct nf_conntrack *master;
   2.129 -};
   2.130 -#endif
   2.131 -
   2.132 -struct sk_buff_head {
   2.133 -	/* These two members must be first. */
   2.134 -	struct sk_buff	* next;
   2.135 -	struct sk_buff	* prev;
   2.136 -
   2.137 -	__u32		qlen;
   2.138 -	spinlock_t	lock;
   2.139 -};
   2.140 -
   2.141 -struct sk_buff;
   2.142 -
   2.143 -#define MAX_SKB_FRAGS 6
   2.144 -
   2.145 -typedef struct skb_frag_struct skb_frag_t;
   2.146 -
   2.147 -struct skb_frag_struct
   2.148 -{
   2.149 -	struct page *page;
   2.150 -	__u16 page_offset;
   2.151 -	__u16 size;
   2.152 -};
   2.153 -
   2.154 -/* This data is invariant across clones and lives at
   2.155 - * the end of the header data, ie. at skb->end.
   2.156 - */
   2.157 -struct skb_shared_info {
   2.158 -	atomic_t	dataref;
   2.159 -	unsigned int	nr_frags;
   2.160 -	struct sk_buff	*frag_list;
   2.161 -	skb_frag_t	frags[MAX_SKB_FRAGS];
   2.162 -};
   2.163 -
   2.164 -struct sk_buff {
   2.165 -	/* These two members must be first. */
   2.166 -	struct sk_buff	* next;			/* Next buffer in list 				*/
   2.167 -	struct sk_buff	* prev;			/* Previous buffer in list 			*/
   2.168 -
   2.169 -	struct sk_buff_head * list;		/* List we are on				*/
   2.170 -	struct sock	*sk;			/* Socket we are owned by 			*/
   2.171 -	struct timeval	stamp;			/* Time we arrived				*/
   2.172 -	struct net_device	*dev;		/* Device we arrived on/are leaving by		*/
   2.173 -
   2.174 -	/* Transport layer header */
   2.175 -	union
   2.176 -	{
   2.177 -		struct tcphdr	*th;
   2.178 -		struct udphdr	*uh;
   2.179 -		struct icmphdr	*icmph;
   2.180 -		struct igmphdr	*igmph;
   2.181 -		struct iphdr	*ipiph;
   2.182 -		struct spxhdr	*spxh;
   2.183 -		unsigned char	*raw;
   2.184 -	} h;
   2.185 -
   2.186 -	/* Network layer header */
   2.187 -	union
   2.188 -	{
   2.189 -		struct iphdr	*iph;
   2.190 -		struct ipv6hdr	*ipv6h;
   2.191 -		struct arphdr	*arph;
   2.192 -		struct ipxhdr	*ipxh;
   2.193 -		unsigned char	*raw;
   2.194 -	} nh;
   2.195 -  
   2.196 -	/* Link layer header */
   2.197 -	union 
   2.198 -	{	
   2.199 -	  	struct ethhdr	*ethernet;
   2.200 -	  	unsigned char 	*raw;
   2.201 -	} mac;
   2.202 -
   2.203 -	struct  dst_entry *dst;
   2.204 -
   2.205 -	/* 
   2.206 -	 * This is the control buffer. It is free to use for every
   2.207 -	 * layer. Please put your private variables there. If you
   2.208 -	 * want to keep them across layers you have to do a skb_clone()
   2.209 -	 * first. This is owned by whoever has the skb queued ATM.
   2.210 -	 */ 
   2.211 -	char		cb[48];	 
   2.212 -
   2.213 -	unsigned int 	len;			/* Length of actual data			*/
   2.214 - 	unsigned int 	data_len;
   2.215 -	unsigned int	csum;			/* Checksum 					*/
   2.216 -	unsigned char 	__unused,		/* Dead field, may be reused			*/
   2.217 -			cloned, 		/* head may be cloned (check refcnt to be sure). */
   2.218 -  			pkt_type,		/* Packet class					*/
   2.219 -  			ip_summed;		/* Driver fed us an IP checksum			*/
   2.220 -	__u32		priority;		/* Packet queueing priority			*/
   2.221 -	atomic_t	users;			/* User count - see datagram.c,tcp.c 		*/
   2.222 -	unsigned short	protocol;		/* Packet protocol from driver. 		*/
   2.223 -	unsigned short	security;		/* Security level of packet			*/
   2.224 -	unsigned int	truesize;		/* Buffer size 					*/
   2.225 -
   2.226 -	unsigned char	*head;			/* Head of buffer 				*/
   2.227 -	unsigned char	*data;			/* Data head pointer				*/
   2.228 -	unsigned char	*tail;			/* Tail pointer					*/
   2.229 -	unsigned char 	*end;			/* End pointer					*/
   2.230 -
   2.231 -	void 		(*destructor)(struct sk_buff *);	/* Destruct function		*/
   2.232 -#ifdef CONFIG_NETFILTER
   2.233 -	/* Can be used for communication between hooks. */
   2.234 -        unsigned long	nfmark;
   2.235 -	/* Cache info */
   2.236 -	__u32		nfcache;
   2.237 -	/* Associated connection, if any */
   2.238 -	struct nf_ct_info *nfct;
   2.239 -#ifdef CONFIG_NETFILTER_DEBUG
   2.240 -        unsigned int nf_debug;
   2.241 -#endif
   2.242 -#endif /*CONFIG_NETFILTER*/
   2.243 -
   2.244 -#if defined(CONFIG_HIPPI)
   2.245 -	union{
   2.246 -		__u32	ifield;
   2.247 -	} private;
   2.248 -#endif
   2.249 -
   2.250 -#ifdef CONFIG_NET_SCHED
   2.251 -       __u32           tc_index;                /* traffic control index */
   2.252 -#endif
   2.253 -       unsigned int     skb_type;                /* for zero copy handling.                      */
   2.254 -       struct net_page_info *net_page;
   2.255 -};
   2.256 -
   2.257 -#define SK_WMEM_MAX	65535
   2.258 -#define SK_RMEM_MAX	65535
   2.259 -
   2.260 -#ifdef __KERNEL__
   2.261 -/*
   2.262 - *	Handling routines are only of interest to the kernel
   2.263 - */
   2.264 -#include <linux/slab.h>
   2.265 -
   2.266 -#include <asm/system.h>
   2.267 -
   2.268 -extern void			__kfree_skb(struct sk_buff *skb);
   2.269 -extern struct sk_buff *		alloc_skb(unsigned int size, int priority);
   2.270 -extern struct sk_buff *         alloc_zc_skb(unsigned int size, int priority);
   2.271 -extern void			kfree_skbmem(struct sk_buff *skb);
   2.272 -extern struct sk_buff *		skb_clone(struct sk_buff *skb, int priority);
   2.273 -extern struct sk_buff *		skb_copy(const struct sk_buff *skb, int priority);
   2.274 -extern struct sk_buff *		pskb_copy(struct sk_buff *skb, int gfp_mask);
   2.275 -extern int			pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
   2.276 -extern struct sk_buff *		skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
   2.277 -extern struct sk_buff *		skb_copy_expand(const struct sk_buff *skb, 
   2.278 -						int newheadroom,
   2.279 -						int newtailroom,
   2.280 -						int priority);
   2.281 -#define dev_kfree_skb(a)	kfree_skb(a)
   2.282 -extern void	skb_over_panic(struct sk_buff *skb, int len, void *here);
   2.283 -extern void	skb_under_panic(struct sk_buff *skb, int len, void *here);
   2.284 -
   2.285 -/* Internal */
   2.286 -#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))
   2.287 -
   2.288 -/**
   2.289 - *	skb_queue_empty - check if a queue is empty
   2.290 - *	@list: queue head
   2.291 - *
   2.292 - *	Returns true if the queue is empty, false otherwise.
   2.293 - */
   2.294 - 
   2.295 -static inline int skb_queue_empty(struct sk_buff_head *list)
   2.296 -{
   2.297 -	return (list->next == (struct sk_buff *) list);
   2.298 -}
   2.299 -
   2.300 -/**
   2.301 - *	skb_get - reference buffer
   2.302 - *	@skb: buffer to reference
   2.303 - *
   2.304 - *	Makes another reference to a socket buffer and returns a pointer
   2.305 - *	to the buffer.
   2.306 - */
   2.307 - 
   2.308 -static inline struct sk_buff *skb_get(struct sk_buff *skb)
   2.309 -{
   2.310 -	atomic_inc(&skb->users);
   2.311 -	return skb;
   2.312 -}
   2.313 -
   2.314 -/*
    2.315 - * If users==1, we are the only owner and can avoid a redundant
    2.316 - * atomic change.
   2.317 - */
   2.318 - 
   2.319 -/**
   2.320 - *	kfree_skb - free an sk_buff
   2.321 - *	@skb: buffer to free
   2.322 - *
   2.323 - *	Drop a reference to the buffer and free it if the usage count has
   2.324 - *	hit zero.
   2.325 - */
   2.326 - 
   2.327 -static inline void kfree_skb(struct sk_buff *skb)
   2.328 -{
   2.329 -	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
   2.330 -		__kfree_skb(skb);
   2.331 -}
   2.332 -
   2.333 -/* Use this if you didn't touch the skb state [for fast switching] */
   2.334 -static inline void kfree_skb_fast(struct sk_buff *skb)
   2.335 -{
   2.336 -	if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
   2.337 -		kfree_skbmem(skb);	
   2.338 -}
   2.339 -
   2.340 -/**
   2.341 - *	skb_cloned - is the buffer a clone
   2.342 - *	@skb: buffer to check
   2.343 - *
   2.344 - *	Returns true if the buffer was generated with skb_clone() and is
   2.345 - *	one of multiple shared copies of the buffer. Cloned buffers are
   2.346 - *	shared data so must not be written to under normal circumstances.
   2.347 - */
   2.348 -
   2.349 -static inline int skb_cloned(struct sk_buff *skb)
   2.350 -{
   2.351 -	return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
   2.352 -}
   2.353 -
   2.354 -/**
   2.355 - *	skb_shared - is the buffer shared
   2.356 - *	@skb: buffer to check
   2.357 - *
   2.358 - *	Returns true if more than one person has a reference to this
   2.359 - *	buffer.
   2.360 - */
   2.361 - 
   2.362 -static inline int skb_shared(struct sk_buff *skb)
   2.363 -{
   2.364 -	return (atomic_read(&skb->users) != 1);
   2.365 -}
   2.366 -
   2.367 -/** 
   2.368 - *	skb_share_check - check if buffer is shared and if so clone it
   2.369 - *	@skb: buffer to check
   2.370 - *	@pri: priority for memory allocation
   2.371 - *	
   2.372 - *	If the buffer is shared the buffer is cloned and the old copy
   2.373 - *	drops a reference. A new clone with a single reference is returned.
    2.374 - *	If the buffer is not shared the original buffer is returned. When
    2.375 - *	called from interrupt context or with spinlocks held, @pri must
    2.376 - *	be %GFP_ATOMIC.
   2.377 - *
   2.378 - *	NULL is returned on a memory allocation failure.
   2.379 - */
   2.380 - 
   2.381 -static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
   2.382 -{
   2.383 -	if (skb_shared(skb)) {
   2.384 -		struct sk_buff *nskb;
   2.385 -		nskb = skb_clone(skb, pri);
   2.386 -		kfree_skb(skb);
   2.387 -		return nskb;
   2.388 -	}
   2.389 -	return skb;
   2.390 -}
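
For example, a handler that intends to modify the buffer would typically start like this (sketch):

    static int handler_sketch(struct sk_buff *skb)
    {
            /* clone if shared; on allocation failure our reference has
             * already been dropped, so just give up */
            skb = skb_share_check(skb, GFP_ATOMIC);
            if (skb == NULL)
                    return -ENOMEM;
            /* ... safe to modify skb fields and cb[] here ... */
            kfree_skb(skb);
            return 0;
    }
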
   2.391 -
   2.392 -
   2.393 -/*
   2.394 - *	Copy shared buffers into a new sk_buff. We effectively do COW on
   2.395 - *	packets to handle cases where we have a local reader and forward
   2.396 - *	and a couple of other messy ones. The normal one is tcpdumping
    2.397 - *	a packet that's being forwarded.
   2.398 - */
   2.399 - 
   2.400 -/**
   2.401 - *	skb_unshare - make a copy of a shared buffer
   2.402 - *	@skb: buffer to check
   2.403 - *	@pri: priority for memory allocation
   2.404 - *
   2.405 - *	If the socket buffer is a clone then this function creates a new
   2.406 - *	copy of the data, drops a reference count on the old copy and returns
   2.407 - *	the new copy with the reference count at 1. If the buffer is not a clone
    2.408 - *	the original buffer is returned. When called with a spinlock held or
    2.409 - *	from interrupt context, @pri must be %GFP_ATOMIC.
   2.410 - *
   2.411 - *	%NULL is returned on a memory allocation failure.
   2.412 - */
   2.413 - 
   2.414 -static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
   2.415 -{
   2.416 -	struct sk_buff *nskb;
   2.417 -	if(!skb_cloned(skb))
   2.418 -		return skb;
   2.419 -	nskb=skb_copy(skb, pri);
   2.420 -	kfree_skb(skb);		/* Free our shared copy */
   2.421 -	return nskb;
   2.422 -}
   2.423 -
   2.424 -/**
   2.425 - *	skb_peek
   2.426 - *	@list_: list to peek at
   2.427 - *
   2.428 - *	Peek an &sk_buff. Unlike most other operations you _MUST_
   2.429 - *	be careful with this one. A peek leaves the buffer on the
   2.430 - *	list and someone else may run off with it. You must hold
   2.431 - *	the appropriate locks or have a private queue to do this.
   2.432 - *
   2.433 - *	Returns %NULL for an empty list or a pointer to the head element.
   2.434 - *	The reference count is not incremented and the reference is therefore
   2.435 - *	volatile. Use with caution.
   2.436 - */
   2.437 - 
   2.438 -static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
   2.439 -{
   2.440 -	struct sk_buff *list = ((struct sk_buff *)list_)->next;
   2.441 -	if (list == (struct sk_buff *)list_)
   2.442 -		list = NULL;
   2.443 -	return list;
   2.444 -}
   2.445 -
   2.446 -/**
   2.447 - *	skb_peek_tail
   2.448 - *	@list_: list to peek at
   2.449 - *
   2.450 - *	Peek an &sk_buff. Unlike most other operations you _MUST_
   2.451 - *	be careful with this one. A peek leaves the buffer on the
   2.452 - *	list and someone else may run off with it. You must hold
   2.453 - *	the appropriate locks or have a private queue to do this.
   2.454 - *
   2.455 - *	Returns %NULL for an empty list or a pointer to the tail element.
   2.456 - *	The reference count is not incremented and the reference is therefore
   2.457 - *	volatile. Use with caution.
   2.458 - */
   2.459 -
   2.460 -static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
   2.461 -{
   2.462 -	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
   2.463 -	if (list == (struct sk_buff *)list_)
   2.464 -		list = NULL;
   2.465 -	return list;
   2.466 -}
   2.467 -
   2.468 -/**
   2.469 - *	skb_queue_len	- get queue length
   2.470 - *	@list_: list to measure
   2.471 - *
   2.472 - *	Return the length of an &sk_buff queue. 
   2.473 - */
   2.474 - 
   2.475 -static inline __u32 skb_queue_len(struct sk_buff_head *list_)
   2.476 -{
   2.477 -	return(list_->qlen);
   2.478 -}
   2.479 -
   2.480 -static inline void skb_queue_head_init(struct sk_buff_head *list)
   2.481 -{
   2.482 -	spin_lock_init(&list->lock);
   2.483 -	list->prev = (struct sk_buff *)list;
   2.484 -	list->next = (struct sk_buff *)list;
   2.485 -	list->qlen = 0;
   2.486 -}
   2.487 -
   2.488 -/*
   2.489 - *	Insert an sk_buff at the start of a list.
   2.490 - *
   2.491 - *	The "__skb_xxxx()" functions are the non-atomic ones that
   2.492 - *	can only be called with interrupts disabled.
   2.493 - */
   2.494 -
   2.495 -/**
   2.496 - *	__skb_queue_head - queue a buffer at the list head
   2.497 - *	@list: list to use
   2.498 - *	@newsk: buffer to queue
   2.499 - *
   2.500 - *	Queue a buffer at the start of a list. This function takes no locks
   2.501 - *	and you must therefore hold required locks before calling it.
   2.502 - *
   2.503 - *	A buffer cannot be placed on two lists at the same time.
   2.504 - */	
   2.505 - 
   2.506 -static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
   2.507 -{
   2.508 -	struct sk_buff *prev, *next;
   2.509 -
   2.510 -	newsk->list = list;
   2.511 -	list->qlen++;
   2.512 -	prev = (struct sk_buff *)list;
   2.513 -	next = prev->next;
   2.514 -	newsk->next = next;
   2.515 -	newsk->prev = prev;
   2.516 -	next->prev = newsk;
   2.517 -	prev->next = newsk;
   2.518 -}
   2.519 -
   2.520 -
   2.521 -/**
   2.522 - *	skb_queue_head - queue a buffer at the list head
   2.523 - *	@list: list to use
   2.524 - *	@newsk: buffer to queue
   2.525 - *
    2.526 - *	Queue a buffer at the start of the list. This function takes the
    2.527 - *	list lock and can be used safely with other locking &sk_buff
    2.528 - *	functions.
   2.529 - *
   2.530 - *	A buffer cannot be placed on two lists at the same time.
   2.531 - */	
   2.532 -
   2.533 -static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
   2.534 -{
   2.535 -	unsigned long flags;
   2.536 -
   2.537 -	spin_lock_irqsave(&list->lock, flags);
   2.538 -	__skb_queue_head(list, newsk);
   2.539 -	spin_unlock_irqrestore(&list->lock, flags);
   2.540 -}
   2.541 -
   2.542 -/**
   2.543 - *	__skb_queue_tail - queue a buffer at the list tail
   2.544 - *	@list: list to use
   2.545 - *	@newsk: buffer to queue
   2.546 - *
   2.547 - *	Queue a buffer at the end of a list. This function takes no locks
   2.548 - *	and you must therefore hold required locks before calling it.
   2.549 - *
   2.550 - *	A buffer cannot be placed on two lists at the same time.
   2.551 - */	
   2.552 - 
   2.553 -
   2.554 -static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
   2.555 -{
   2.556 -	struct sk_buff *prev, *next;
   2.557 -
   2.558 -	newsk->list = list;
   2.559 -	list->qlen++;
   2.560 -	next = (struct sk_buff *)list;
   2.561 -	prev = next->prev;
   2.562 -	newsk->next = next;
   2.563 -	newsk->prev = prev;
   2.564 -	next->prev = newsk;
   2.565 -	prev->next = newsk;
   2.566 -}
   2.567 -
   2.568 -/**
   2.569 - *	skb_queue_tail - queue a buffer at the list tail
   2.570 - *	@list: list to use
   2.571 - *	@newsk: buffer to queue
   2.572 - *
    2.573 - *	Queue a buffer at the tail of the list. This function takes the
    2.574 - *	list lock and can be used safely with other locking &sk_buff
    2.575 - *	functions.
   2.576 - *
   2.577 - *	A buffer cannot be placed on two lists at the same time.
   2.578 - */	
   2.579 -
   2.580 -static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
   2.581 -{
   2.582 -	unsigned long flags;
   2.583 -
   2.584 -	spin_lock_irqsave(&list->lock, flags);
   2.585 -	__skb_queue_tail(list, newsk);
   2.586 -	spin_unlock_irqrestore(&list->lock, flags);
   2.587 -}
   2.588 -
   2.589 -/**
   2.590 - *	__skb_dequeue - remove from the head of the queue
   2.591 - *	@list: list to dequeue from
   2.592 - *
   2.593 - *	Remove the head of the list. This function does not take any locks
   2.594 - *	so must be used with appropriate locks held only. The head item is
   2.595 - *	returned or %NULL if the list is empty.
   2.596 - */
   2.597 -
   2.598 -static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
   2.599 -{
   2.600 -	struct sk_buff *next, *prev, *result;
   2.601 -
   2.602 -	prev = (struct sk_buff *) list;
   2.603 -	next = prev->next;
   2.604 -	result = NULL;
   2.605 -	if (next != prev) {
   2.606 -		result = next;
   2.607 -		next = next->next;
   2.608 -		list->qlen--;
   2.609 -		next->prev = prev;
   2.610 -		prev->next = next;
   2.611 -		result->next = NULL;
   2.612 -		result->prev = NULL;
   2.613 -		result->list = NULL;
   2.614 -	}
   2.615 -	return result;
   2.616 -}
   2.617 -
   2.618 -/**
   2.619 - *	skb_dequeue - remove from the head of the queue
   2.620 - *	@list: list to dequeue from
   2.621 - *
   2.622 - *	Remove the head of the list. The list lock is taken so the function
   2.623 - *	may be used safely with other locking list functions. The head item is
   2.624 - *	returned or %NULL if the list is empty.
   2.625 - */
   2.626 -
   2.627 -static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
   2.628 -{
    2.629 -	unsigned long flags;
   2.630 -	struct sk_buff *result;
   2.631 -
   2.632 -	spin_lock_irqsave(&list->lock, flags);
   2.633 -	result = __skb_dequeue(list);
   2.634 -	spin_unlock_irqrestore(&list->lock, flags);
   2.635 -	return result;
   2.636 -}
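
Putting the primitives together, a typical locked producer/consumer pattern looks like this (sketch):

    static void queue_usage_sketch(void)
    {
            struct sk_buff_head q;
            struct sk_buff *skb;

            skb_queue_head_init(&q);
            skb = alloc_skb(128, GFP_ATOMIC);
            if (skb != NULL)
                    skb_queue_tail(&q, skb);         /* takes q.lock, IRQs off */
            while ((skb = skb_dequeue(&q)) != NULL)  /* NULL once empty */
                    kfree_skb(skb);
    }
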
   2.637 -
   2.638 -/*
   2.639 - *	Insert a packet on a list.
   2.640 - */
   2.641 -
   2.642 -static inline void __skb_insert(struct sk_buff *newsk,
   2.643 -	struct sk_buff * prev, struct sk_buff *next,
   2.644 -	struct sk_buff_head * list)
   2.645 -{
   2.646 -	newsk->next = next;
   2.647 -	newsk->prev = prev;
   2.648 -	next->prev = newsk;
   2.649 -	prev->next = newsk;
   2.650 -	newsk->list = list;
   2.651 -	list->qlen++;
   2.652 -}
   2.653 -
   2.654 -/**
   2.655 - *	skb_insert	-	insert a buffer
   2.656 - *	@old: buffer to insert before
   2.657 - *	@newsk: buffer to insert
   2.658 - *
   2.659 - *	Place a packet before a given packet in a list. The list locks are taken
    2.660 - *	and this function is atomic with respect to other list locked calls.
   2.661 - *	A buffer cannot be placed on two lists at the same time.
   2.662 - */
   2.663 -
   2.664 -static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
   2.665 -{
   2.666 -	unsigned long flags;
   2.667 -
   2.668 -	spin_lock_irqsave(&old->list->lock, flags);
   2.669 -	__skb_insert(newsk, old->prev, old, old->list);
   2.670 -	spin_unlock_irqrestore(&old->list->lock, flags);
   2.671 -}
   2.672 -
   2.673 -/*
   2.674 - *	Place a packet after a given packet in a list.
   2.675 - */
   2.676 -
   2.677 -static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
   2.678 -{
   2.679 -	__skb_insert(newsk, old, old->next, old->list);
   2.680 -}
   2.681 -
   2.682 -/**
   2.683 - *	skb_append	-	append a buffer
   2.684 - *	@old: buffer to insert after
   2.685 - *	@newsk: buffer to insert
   2.686 - *
   2.687 - *	Place a packet after a given packet in a list. The list locks are taken
   2.688 - *	and this function is atomic with respect to other list locked calls.
   2.689 - *	A buffer cannot be placed on two lists at the same time.
   2.690 - */
   2.691 -
   2.692 -
   2.693 -static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
   2.694 -{
   2.695 -	unsigned long flags;
   2.696 -
   2.697 -	spin_lock_irqsave(&old->list->lock, flags);
   2.698 -	__skb_append(old, newsk);
   2.699 -	spin_unlock_irqrestore(&old->list->lock, flags);
   2.700 -}
   2.701 -
   2.702 -/*
   2.703 - * remove sk_buff from list. _Must_ be called atomically, and with
   2.704 - * the list known..
   2.705 - */
   2.706 - 
   2.707 -static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
   2.708 -{
   2.709 -	struct sk_buff * next, * prev;
   2.710 -
   2.711 -	list->qlen--;
   2.712 -	next = skb->next;
   2.713 -	prev = skb->prev;
   2.714 -	skb->next = NULL;
   2.715 -	skb->prev = NULL;
   2.716 -	skb->list = NULL;
   2.717 -	next->prev = prev;
   2.718 -	prev->next = next;
   2.719 -}
   2.720 -
   2.721 -/**
   2.722 - *	skb_unlink	-	remove a buffer from a list
   2.723 - *	@skb: buffer to remove
   2.724 - *
    2.725 - *	Remove a packet from the list it is on. The list lock is taken
    2.726 - *	and this function is atomic with respect to other list locked calls.
   2.727 - *	
   2.728 - *	Works even without knowing the list it is sitting on, which can be 
   2.729 - *	handy at times. It also means that THE LIST MUST EXIST when you 
   2.730 - *	unlink. Thus a list must have its contents unlinked before it is
   2.731 - *	destroyed.
   2.732 - */
   2.733 -
   2.734 -static inline void skb_unlink(struct sk_buff *skb)
   2.735 -{
   2.736 -	struct sk_buff_head *list = skb->list;
   2.737 -
   2.738 -	if(list) {
   2.739 -		unsigned long flags;
   2.740 -
   2.741 -		spin_lock_irqsave(&list->lock, flags);
   2.742 -		if(skb->list == list)
   2.743 -			__skb_unlink(skb, skb->list);
   2.744 -		spin_unlock_irqrestore(&list->lock, flags);
   2.745 -	}
   2.746 -}
   2.747 -
   2.748 -/* XXX: more streamlined implementation */
   2.749 -
   2.750 -/**
   2.751 - *	__skb_dequeue_tail - remove from the tail of the queue
   2.752 - *	@list: list to dequeue from
   2.753 - *
   2.754 - *	Remove the tail of the list. This function does not take any locks
   2.755 - *	so must be used with appropriate locks held only. The tail item is
   2.756 - *	returned or %NULL if the list is empty.
   2.757 - */
   2.758 -
   2.759 -static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
   2.760 -{
   2.761 -	struct sk_buff *skb = skb_peek_tail(list); 
   2.762 -	if (skb)
   2.763 -		__skb_unlink(skb, list);
   2.764 -	return skb;
   2.765 -}
   2.766 -
   2.767 -/**
    2.768 - *	skb_dequeue_tail - remove from the tail of the queue
    2.769 - *	@list: list to dequeue from
    2.770 - *
    2.771 - *	Remove the tail of the list. The list lock is taken so the function
   2.772 - *	may be used safely with other locking list functions. The tail item is
   2.773 - *	returned or %NULL if the list is empty.
   2.774 - */
   2.775 -
   2.776 -static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
   2.777 -{
    2.778 -	unsigned long flags;
   2.779 -	struct sk_buff *result;
   2.780 -
   2.781 -	spin_lock_irqsave(&list->lock, flags);
   2.782 -	result = __skb_dequeue_tail(list);
   2.783 -	spin_unlock_irqrestore(&list->lock, flags);
   2.784 -	return result;
   2.785 -}
   2.786 -
   2.787 -static inline int skb_is_nonlinear(const struct sk_buff *skb)
   2.788 -{
   2.789 -	return skb->data_len;
   2.790 -}
   2.791 -
   2.792 -static inline int skb_headlen(const struct sk_buff *skb)
   2.793 -{
   2.794 -	return skb->len - skb->data_len;
   2.795 -}
   2.796 -
   2.797 -#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) BUG(); } while (0)
   2.798 -#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) BUG(); } while (0)
   2.799 -#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) BUG(); } while (0)
   2.800 -
   2.801 -/*
   2.802 - *	Add data to an sk_buff
   2.803 - */
   2.804 - 
   2.805 -static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
   2.806 -{
   2.807 -	unsigned char *tmp=skb->tail;
   2.808 -	SKB_LINEAR_ASSERT(skb);
   2.809 -	skb->tail+=len;
   2.810 -	skb->len+=len;
   2.811 -	return tmp;
   2.812 -}
   2.813 -
   2.814 -/**
   2.815 - *	skb_put - add data to a buffer
   2.816 - *	@skb: buffer to use 
   2.817 - *	@len: amount of data to add
   2.818 - *
   2.819 - *	This function extends the used data area of the buffer. If this would
   2.820 - *	exceed the total buffer size the kernel will panic. A pointer to the
   2.821 - *	first byte of the extra data is returned.
   2.822 - */
   2.823 - 
   2.824 -static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
   2.825 -{
   2.826 -	unsigned char *tmp=skb->tail;
   2.827 -	SKB_LINEAR_ASSERT(skb);
   2.828 -	skb->tail+=len;
   2.829 -	skb->len+=len;
   2.830 -	if(skb->tail>skb->end) {
   2.831 -		skb_over_panic(skb, len, current_text_addr());
   2.832 -	}
   2.833 -	return tmp;
   2.834 -}
   2.835 -
   2.836 -static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
   2.837 -{
   2.838 -	skb->data-=len;
   2.839 -	skb->len+=len;
   2.840 -	return skb->data;
   2.841 -}
   2.842 -
   2.843 -/**
   2.844 - *	skb_push - add data to the start of a buffer
   2.845 - *	@skb: buffer to use 
   2.846 - *	@len: amount of data to add
   2.847 - *
   2.848 - *	This function extends the used data area of the buffer at the buffer
   2.849 - *	start. If this would exceed the total buffer headroom the kernel will
   2.850 - *	panic. A pointer to the first byte of the extra data is returned.
   2.851 - */
   2.852 -
   2.853 -static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
   2.854 -{
   2.855 -	skb->data-=len;
   2.856 -	skb->len+=len;
   2.857 -	if(skb->data<skb->head) {
   2.858 -		skb_under_panic(skb, len, current_text_addr());
   2.859 -	}
   2.860 -	return skb->data;
   2.861 -}
   2.862 -
   2.863 -static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
   2.864 -{
   2.865 -	skb->len-=len;
   2.866 -	if (skb->len < skb->data_len)
   2.867 -		BUG();
   2.868 -	return 	skb->data+=len;
   2.869 -}
   2.870 -
   2.871 -/**
   2.872 - *	skb_pull - remove data from the start of a buffer
   2.873 - *	@skb: buffer to use 
   2.874 - *	@len: amount of data to remove
   2.875 - *
   2.876 - *	This function removes data from the start of a buffer, returning
   2.877 - *	the memory to the headroom. A pointer to the next data in the buffer
   2.878 - *	is returned. Once the data has been pulled future pushes will overwrite
   2.879 - *	the old data.
   2.880 - */
   2.881 -
   2.882 -static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
   2.883 -{	
   2.884 -	if (len > skb->len)
   2.885 -		return NULL;
   2.886 -	return __skb_pull(skb,len);
   2.887 -}
   2.888 -
   2.889 -extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
   2.890 -
   2.891 -static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
   2.892 -{
   2.893 -	if (len > skb_headlen(skb) &&
   2.894 -	    __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
   2.895 -		return NULL;
   2.896 -	skb->len -= len;
   2.897 -	return 	skb->data += len;
   2.898 -}
   2.899 -
   2.900 -static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
   2.901 -{	
   2.902 -	if (len > skb->len)
   2.903 -		return NULL;
   2.904 -	return __pskb_pull(skb,len);
   2.905 -}
   2.906 -
   2.907 -static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
   2.908 -{
   2.909 -	if (len <= skb_headlen(skb))
   2.910 -		return 1;
   2.911 -	if (len > skb->len)
   2.912 -		return 0;
   2.913 -	return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
   2.914 -}
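
The usual pattern is to guarantee a header is linear before dereferencing it; a sketch, assuming <linux/ip.h> is included:

    static int parse_ip_sketch(struct sk_buff *skb)
    {
            struct iphdr *iph;

            /* fail unless the first sizeof(*iph) bytes can be made linear */
            if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                    return -1;
            iph = (struct iphdr *)skb->data;
            skb->nh.iph = iph;
            return iph->protocol;
    }
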
   2.915 -
   2.916 -/**
   2.917 - *	skb_headroom - bytes at buffer head
   2.918 - *	@skb: buffer to check
   2.919 - *
   2.920 - *	Return the number of bytes of free space at the head of an &sk_buff.
   2.921 - */
   2.922 - 
   2.923 -static inline int skb_headroom(const struct sk_buff *skb)
   2.924 -{
   2.925 -	return skb->data-skb->head;
   2.926 -}
   2.927 -
   2.928 -/**
   2.929 - *	skb_tailroom - bytes at buffer end
   2.930 - *	@skb: buffer to check
   2.931 - *
   2.932 - *	Return the number of bytes of free space at the tail of an sk_buff
   2.933 - */
   2.934 -
   2.935 -static inline int skb_tailroom(const struct sk_buff *skb)
   2.936 -{
   2.937 -	return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
   2.938 -}
   2.939 -
   2.940 -/**
   2.941 - *	skb_reserve - adjust headroom
   2.942 - *	@skb: buffer to alter
   2.943 - *	@len: bytes to move
   2.944 - *
   2.945 - *	Increase the headroom of an empty &sk_buff by reducing the tail
   2.946 - *	room. This is only allowed for an empty buffer.
   2.947 - */
   2.948 -
   2.949 -static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
   2.950 -{
   2.951 -	skb->data+=len;
   2.952 -	skb->tail+=len;
   2.953 -}
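
These helpers only move the data/tail pointers; a transmit-side sketch (ETH_HLEN comes from <linux/if_ether.h>; the names are illustrative):

    static struct sk_buff *build_frame_sketch(const void *payload,
                                              unsigned int len)
    {
            struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_ATOMIC);

            if (skb == NULL)
                    return NULL;
            skb_reserve(skb, ETH_HLEN);               /* headroom for MAC header */
            memcpy(skb_put(skb, len), payload, len);  /* append payload at tail */
            skb_push(skb, ETH_HLEN);                  /* uncover header space */
            /* ... build the Ethernet header at skb->data ... */
            return skb;
    }
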
   2.954 -
   2.955 -extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
   2.956 -
   2.957 -static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
   2.958 -{
   2.959 -	if (!skb->data_len) {
   2.960 -		skb->len = len;
   2.961 -		skb->tail = skb->data+len;
   2.962 -	} else {
   2.963 -		___pskb_trim(skb, len, 0);
   2.964 -	}
   2.965 -}
   2.966 -
   2.967 -/**
   2.968 - *	skb_trim - remove end from a buffer
   2.969 - *	@skb: buffer to alter
   2.970 - *	@len: new length
   2.971 - *
   2.972 - *	Cut the length of a buffer down by removing data from the tail. If
   2.973 - *	the buffer is already under the length specified it is not modified.
   2.974 - */
   2.975 -
   2.976 -static inline void skb_trim(struct sk_buff *skb, unsigned int len)
   2.977 -{
   2.978 -	if (skb->len > len) {
   2.979 -		__skb_trim(skb, len);
   2.980 -	}
   2.981 -}
   2.982 -
   2.983 -
   2.984 -static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
   2.985 -{
   2.986 -	if (!skb->data_len) {
   2.987 -		skb->len = len;
   2.988 -		skb->tail = skb->data+len;
   2.989 -		return 0;
   2.990 -	} else {
   2.991 -		return ___pskb_trim(skb, len, 1);
   2.992 -	}
   2.993 -}
   2.994 -
   2.995 -static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
   2.996 -{
   2.997 -	if (len < skb->len)
   2.998 -		return __pskb_trim(skb, len);
   2.999 -	return 0;
  2.1000 -}
  2.1001 -
  2.1002 -/**
  2.1003 - *	skb_orphan - orphan a buffer
  2.1004 - *	@skb: buffer to orphan
  2.1005 - *
  2.1006 - *	If a buffer currently has an owner then we call the owner's
  2.1007 - *	destructor function and make the @skb unowned. The buffer continues
  2.1008 - *	to exist but is no longer charged to its former owner.
  2.1009 - */
  2.1010 -
  2.1011 -
  2.1012 -static inline void skb_orphan(struct sk_buff *skb)
  2.1013 -{
  2.1014 -	if (skb->destructor)
  2.1015 -		skb->destructor(skb);
  2.1016 -	skb->destructor = NULL;
  2.1017 -	skb->sk = NULL;
  2.1018 -}
  2.1019 -
  2.1020 -/**
   2.1021 - *	skb_queue_purge - empty a list
  2.1022 - *	@list: list to empty
  2.1023 - *
  2.1024 - *	Delete all buffers on an &sk_buff list. Each buffer is removed from
  2.1025 - *	the list and one reference dropped. This function takes the list
  2.1026 - *	lock and is atomic with respect to other list locking functions.
  2.1027 - */
  2.1028 -
  2.1029 -
  2.1030 -static inline void skb_queue_purge(struct sk_buff_head *list)
  2.1031 -{
  2.1032 -	struct sk_buff *skb;
  2.1033 -	while ((skb=skb_dequeue(list))!=NULL)
  2.1034 -		kfree_skb(skb);
  2.1035 -}
  2.1036 -
  2.1037 -/**
   2.1038 - *	__skb_queue_purge - empty a list
  2.1039 - *	@list: list to empty
  2.1040 - *
  2.1041 - *	Delete all buffers on an &sk_buff list. Each buffer is removed from
  2.1042 - *	the list and one reference dropped. This function does not take the
  2.1043 - *	list lock and the caller must hold the relevant locks to use it.
  2.1044 - */
  2.1045 -
  2.1046 -
  2.1047 -static inline void __skb_queue_purge(struct sk_buff_head *list)
  2.1048 -{
  2.1049 -	struct sk_buff *skb;
  2.1050 -	while ((skb=__skb_dequeue(list))!=NULL)
  2.1051 -		kfree_skb(skb);
  2.1052 -}
  2.1053 -
  2.1054 -/**
   2.1055 - *	__dev_alloc_skb - allocate an skbuff for receiving
  2.1056 - *	@length: length to allocate
  2.1057 - *	@gfp_mask: get_free_pages mask, passed to alloc_skb
  2.1058 - *
  2.1059 - *	Allocate a new &sk_buff and assign it a usage count of one. The
  2.1060 - *	buffer has unspecified headroom built in. Users should allocate
  2.1061 - *	the headroom they think they need without accounting for the
  2.1062 - *	built in space. The built in space is used for optimisations.
  2.1063 - *
   2.1064 - *	%NULL is returned if there is no free memory.
  2.1065 - */
  2.1066 - 
  2.1067 -static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
  2.1068 -					      int gfp_mask)
  2.1069 -{
  2.1070 -	struct sk_buff *skb;
  2.1071 -
  2.1072 -	skb = alloc_skb(length+16, gfp_mask);
  2.1073 -        //skb = alloc_zc_skb(length+16, gfp_mask);
  2.1074 -	if (skb)
  2.1075 -		skb_reserve(skb,16);
  2.1076 -	return skb;
  2.1077 -}
  2.1078 -
  2.1079 -/**
   2.1080 - *	dev_alloc_skb - allocate an skbuff for receiving
  2.1081 - *	@length: length to allocate
  2.1082 - *
  2.1083 - *	Allocate a new &sk_buff and assign it a usage count of one. The
  2.1084 - *	buffer has unspecified headroom built in. Users should allocate
  2.1085 - *	the headroom they think they need without accounting for the
  2.1086 - *	built in space. The built in space is used for optimisations.
  2.1087 - *
   2.1088 - *	%NULL is returned if there is no free memory. Although this function
  2.1089 - *	allocates memory it can be called from an interrupt.
  2.1090 - */
  2.1091 - 
  2.1092 -static inline struct sk_buff *dev_alloc_skb(unsigned int length)
  2.1093 -{
  2.1094 -	return __dev_alloc_skb(length, GFP_ATOMIC);
  2.1095 -}
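
On receive, drivers usually pair this with skb_reserve() so the IP header lands 16-byte aligned; a sketch (eth_type_trans() is from <linux/etherdevice.h>):

    static void rx_deliver_sketch(struct net_device *dev,
                                  const unsigned char *pkt, unsigned int len)
    {
            struct sk_buff *skb = dev_alloc_skb(len + 2);

            if (skb == NULL)
                    return;                      /* drop under memory pressure */
            skb_reserve(skb, 2);                 /* align the IP header */
            memcpy(skb_put(skb, len), pkt, len);
            skb->dev = dev;
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);                       /* hand off to the stack */
    }
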
  2.1096 -
  2.1097 -/**
  2.1098 - *	skb_cow - copy header of skb when it is required
  2.1099 - *	@skb: buffer to cow
  2.1100 - *	@headroom: needed headroom
  2.1101 - *
  2.1102 - *	If the skb passed lacks sufficient headroom or its data part
  2.1103 - *	is shared, data is reallocated. If reallocation fails, an error
  2.1104 - *	is returned and original skb is not changed.
  2.1105 - *
  2.1106 - *	The result is skb with writable area skb->head...skb->tail
  2.1107 - *	and at least @headroom of space at head.
  2.1108 - */
  2.1109 -
  2.1110 -static inline int
  2.1111 -skb_cow(struct sk_buff *skb, unsigned int headroom)
  2.1112 -{
  2.1113 -	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
  2.1114 -
  2.1115 -	if (delta < 0)
  2.1116 -		delta = 0;
  2.1117 -
  2.1118 -	if (delta || skb_cloned(skb))
  2.1119 -		return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
  2.1120 -	return 0;
  2.1121 -}
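
A forwarding path that rewrites a header calls this first; a sketch, assuming <linux/ip.h> (a real forwarder would also update the IP checksum):

    static int mangle_header_sketch(struct sk_buff *skb)
    {
            /* get a private, writable header with 16 bytes of headroom */
            if (skb_cow(skb, 16))
                    return -ENOMEM;              /* original skb unchanged */
            skb->nh.iph->ttl--;                  /* now safe: data not shared */
            return 0;
    }
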
  2.1122 -
  2.1123 -/**
  2.1124 - *	skb_linearize - convert paged skb to linear one
   2.1125 - *	@skb: buffer to linearize
  2.1126 - *	@gfp: allocation mode
  2.1127 - *
  2.1128 - *	If there is no free memory -ENOMEM is returned, otherwise zero
  2.1129 - *	is returned and the old skb data released.  */
  2.1130 -int skb_linearize(struct sk_buff *skb, int gfp);
  2.1131 -
  2.1132 -static inline void *kmap_skb_frag(const skb_frag_t *frag)
  2.1133 -{
  2.1134 -#ifdef CONFIG_HIGHMEM
  2.1135 -	if (in_irq())
  2.1136 -		BUG();
  2.1137 -
  2.1138 -	local_bh_disable();
  2.1139 -#endif
  2.1140 -	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
  2.1141 -}
  2.1142 -
  2.1143 -static inline void kunmap_skb_frag(void *vaddr)
  2.1144 -{
  2.1145 -	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
  2.1146 -#ifdef CONFIG_HIGHMEM
  2.1147 -	local_bh_enable();
  2.1148 -#endif
  2.1149 -}
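
These wrappers bracket any direct access to paged data; a sketch that copies one fragment out of an skb:

    static void copy_frag_sketch(const struct sk_buff *skb, int i, void *to)
    {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
            void *vaddr = kmap_skb_frag(frag);   /* disables BHs on HIGHMEM */

            memcpy(to, (char *)vaddr + frag->page_offset, frag->size);
            kunmap_skb_frag(vaddr);
    }
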
  2.1150 -
  2.1151 -#define skb_queue_walk(queue, skb) \
  2.1152 -		for (skb = (queue)->next;			\
  2.1153 -		     (skb != (struct sk_buff *)(queue));	\
  2.1154 -		     skb=skb->next)
  2.1155 -
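
The walker takes no locks of its own, so the caller holds the queue lock and must not unlink buffers inside the loop; a sketch:

    static unsigned int queue_bytes_sketch(struct sk_buff_head *q)
    {
            struct sk_buff *skb;
            unsigned int bytes = 0;
            unsigned long flags;

            spin_lock_irqsave(&q->lock, flags);
            skb_queue_walk(q, skb)
                    bytes += skb->len;
            spin_unlock_irqrestore(&q->lock, flags);
            return bytes;
    }
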
  2.1156 -
  2.1157 -extern struct sk_buff *		skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
  2.1158 -extern unsigned int		datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
  2.1159 -extern int			skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
  2.1160 -extern int			skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
  2.1161 -extern int			skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
  2.1162 -extern int			skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
  2.1163 -extern void			skb_free_datagram(struct sock * sk, struct sk_buff *skb);
  2.1164 -
  2.1165 -extern unsigned int		skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
  2.1166 -extern int			skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
  2.1167 -extern unsigned int		skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
  2.1168 -extern void			skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
  2.1169 -
  2.1170 -extern void skb_init(void);
  2.1171 -extern void skb_add_mtu(int mtu);
  2.1172 -
  2.1173 -#ifdef CONFIG_NETFILTER
  2.1174 -static inline void
  2.1175 -nf_conntrack_put(struct nf_ct_info *nfct)
  2.1176 -{
  2.1177 -	if (nfct && atomic_dec_and_test(&nfct->master->use))
  2.1178 -		nfct->master->destroy(nfct->master);
  2.1179 -}
  2.1180 -static inline void
  2.1181 -nf_conntrack_get(struct nf_ct_info *nfct)
  2.1182 -{
  2.1183 -	if (nfct)
  2.1184 -		atomic_inc(&nfct->master->use);
  2.1185 -}
  2.1186 -#endif
  2.1187 -
  2.1188 -#endif	/* __KERNEL__ */
  2.1189 -#endif	/* _LINUX_SKBUFF_H */
     3.1 --- a/xenolinux-2.4.16-sparse/net/core/skbuff.c	Fri Feb 14 16:43:07 2003 +0000
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,1374 +0,0 @@
     3.4 -/*
     3.5 - *	Routines having to do with the 'struct sk_buff' memory handlers.
     3.6 - *
     3.7 - *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
     3.8 - *			Florian La Roche <rzsfl@rz.uni-sb.de>
     3.9 - *
    3.10 - *	Version:	$Id: skbuff.c,v 1.89 2001/08/06 13:25:02 davem Exp $
    3.11 - *
    3.12 - *	Fixes:	
    3.13 - *		Alan Cox	:	Fixed the worst of the load balancer bugs.
    3.14 - *		Dave Platt	:	Interrupt stacking fix.
    3.15 - *	Richard Kooijman	:	Timestamp fixes.
    3.16 - *		Alan Cox	:	Changed buffer format.
    3.17 - *		Alan Cox	:	destructor hook for AF_UNIX etc.
    3.18 - *		Linus Torvalds	:	Better skb_clone.
    3.19 - *		Alan Cox	:	Added skb_copy.
    3.20 - *		Alan Cox	:	Added all the changed routines Linus
    3.21 - *					only put in the headers
    3.22 - *		Ray VanTassle	:	Fixed --skb->lock in free
    3.23 - *		Alan Cox	:	skb_copy copy arp field
    3.24 - *		Andi Kleen	:	slabified it.
    3.25 - *
    3.26 - *	NOTE:
    3.27 - *		The __skb_ routines should be called with interrupts 
    3.28 - *	disabled, or you better be *real* sure that the operation is atomic 
    3.29 - *	with respect to whatever list is being frobbed (e.g. via lock_sock()
    3.30 - *	or via disabling bottom half handlers, etc).
    3.31 - *
    3.32 - *	This program is free software; you can redistribute it and/or
    3.33 - *	modify it under the terms of the GNU General Public License
    3.34 - *	as published by the Free Software Foundation; either version
    3.35 - *	2 of the License, or (at your option) any later version.
    3.36 - */
    3.37 -
    3.38 -/*
    3.39 - *	The functions in this file will not compile correctly with gcc 2.4.x
    3.40 - */
    3.41 -
    3.42 -#include <linux/config.h>
    3.43 -#include <linux/types.h>
    3.44 -#include <linux/kernel.h>
    3.45 -#include <linux/sched.h>
    3.46 -#include <linux/mm.h>
    3.47 -#include <linux/interrupt.h>
    3.48 -#include <linux/in.h>
    3.49 -#include <linux/inet.h>
    3.50 -#include <linux/slab.h>
    3.51 -#include <linux/netdevice.h>
    3.52 -#include <linux/string.h>
    3.53 -#include <linux/skbuff.h>
    3.54 -#include <linux/cache.h>
    3.55 -#include <linux/init.h>
    3.56 -#include <linux/highmem.h>
    3.57 -#include <linux/spinlock.h>
    3.58 -
    3.59 -#include <net/ip.h>
    3.60 -#include <net/protocol.h>
    3.61 -#include <net/dst.h>
    3.62 -#include <net/tcp.h>
    3.63 -#include <net/udp.h>
    3.64 -#include <net/sock.h>
    3.65 -#include <asm/io.h>
    3.66 -#include <asm/uaccess.h>
    3.67 -#include <asm/system.h>
    3.68 -
    3.69 -/* zc globals: */
    3.70 -/*
    3.71 -char *net_page_chunk;
    3.72 -struct net_page_info *net_page_table;
    3.73 -struct list_head net_page_list;
    3.74 -spinlock_t net_page_list_lock = SPIN_LOCK_UNLOCKED;
    3.75 -unsigned int net_pages;
    3.76 -*/
    3.77 -
    3.78 -
    3.79 -int sysctl_hot_list_len = 128;
    3.80 -
    3.81 -static kmem_cache_t *skbuff_head_cache;
    3.82 -
    3.83 -static union {
    3.84 -	struct sk_buff_head	list;
    3.85 -	char			pad[SMP_CACHE_BYTES];
    3.86 -} skb_head_pool[NR_CPUS];
    3.87 -
    3.88 -/*
    3.89 - *	Keep out-of-line to prevent kernel bloat.
    3.90 - *	__builtin_return_address is not used because it is not always
    3.91 - *	reliable. 
    3.92 - */
    3.93 -
    3.94 -/**
    3.95 - *	skb_over_panic	- 	private function
    3.96 - *	@skb: buffer
    3.97 - *	@sz: size
    3.98 - *	@here: address
    3.99 - *
   3.100 - *	Out of line support code for skb_put(). Not user callable.
   3.101 - */
   3.102 - 
   3.103 -void skb_over_panic(struct sk_buff *skb, int sz, void *here)
   3.104 -{
    3.105 -	printk("skput:over: %p:%d put:%d dev:%s\n",
   3.106 -		here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
   3.107 -	BUG();
   3.108 -}
   3.109 -
   3.110 -/**
   3.111 - *	skb_under_panic	- 	private function
   3.112 - *	@skb: buffer
   3.113 - *	@sz: size
   3.114 - *	@here: address
   3.115 - *
   3.116 - *	Out of line support code for skb_push(). Not user callable.
   3.117 - */
   3.118 - 
   3.119 -
   3.120 -void skb_under_panic(struct sk_buff *skb, int sz, void *here)
   3.121 -{
    3.122 -	printk("skput:under: %p:%d put:%d dev:%s\n",
    3.123 -	       here, skb->len, sz, skb->dev ? skb->dev->name : "<NULL>");
   3.124 -	BUG();
   3.125 -}
   3.126 -
   3.127 -static __inline__ struct sk_buff *skb_head_from_pool(void)
   3.128 -{
   3.129 -	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
   3.130 -
   3.131 -	if (skb_queue_len(list)) {
   3.132 -		struct sk_buff *skb;
   3.133 -		unsigned long flags;
   3.134 -
   3.135 -		local_irq_save(flags);
   3.136 -		skb = __skb_dequeue(list);
   3.137 -		local_irq_restore(flags);
   3.138 -		return skb;
   3.139 -	}
   3.140 -	return NULL;
   3.141 -}
   3.142 -
   3.143 -static __inline__ void skb_head_to_pool(struct sk_buff *skb)
   3.144 -{
   3.145 -	struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
   3.146 -
   3.147 -	if (skb_queue_len(list) < sysctl_hot_list_len) {
   3.148 -		unsigned long flags;
   3.149 -
   3.150 -		local_irq_save(flags);
   3.151 -		__skb_queue_head(list, skb);
   3.152 -		local_irq_restore(flags);
   3.153 -
   3.154 -		return;
   3.155 -	}
   3.156 -	kmem_cache_free(skbuff_head_cache, skb);
   3.157 -}
   3.158 -
   3.159 -
   3.160 -/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
   3.161 - *	'private' fields and also do memory statistics to find all the
   3.162 - *	[BEEP] leaks.
   3.163 - * 
   3.164 - */
   3.165 -
   3.166 -/**
   3.167 - *	alloc_skb	-	allocate a network buffer
   3.168 - *	@size: size to allocate
   3.169 - *	@gfp_mask: allocation mask
   3.170 - *
   3.171 - *	Allocate a new &sk_buff. The returned buffer has no headroom and a
   3.172 - *	tail room of size bytes. The object has a reference count of one.
    3.173 - *	Returns the buffer on success and %NULL on failure.
   3.174 - *
   3.175 - *	Buffers may only be allocated from interrupts using a @gfp_mask of
   3.176 - *	%GFP_ATOMIC.
   3.177 - */
   3.178 - 
   3.179 -struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
   3.180 -{
   3.181 -	struct sk_buff *skb;
   3.182 -	u8 *data;
   3.183 -
   3.184 -	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
   3.185 -		static int count = 0;
   3.186 -		if (++count < 5) {
   3.187 -			printk(KERN_ERR "alloc_skb called nonatomically "
   3.188 -			       "from interrupt %p\n", NET_CALLER(size));
   3.189 - 			BUG();
   3.190 -		}
   3.191 -		gfp_mask &= ~__GFP_WAIT;
   3.192 -	}
   3.193 -
   3.194 -	/* Get the HEAD */
   3.195 -	skb = skb_head_from_pool();
   3.196 -	if (skb == NULL) {
   3.197 -		skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
   3.198 -		if (skb == NULL)
   3.199 -			goto nohead;
   3.200 -	}
   3.201 -
   3.202 -	/* Get the DATA. Size must match skb_add_mtu(). */
   3.203 -	size = SKB_DATA_ALIGN(size);
   3.204 -	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
   3.205 -	if (data == NULL)
   3.206 -		goto nodata;
   3.207 -
   3.208 -	/* XXX: does not include slab overhead */ 
   3.209 -	skb->truesize = size + sizeof(struct sk_buff);
   3.210 -
   3.211 -	/* Load the data pointers. */
   3.212 -	skb->head = data;
   3.213 -	skb->data = data;
   3.214 -	skb->tail = data;
   3.215 -	skb->end = data + size;
   3.216 -
   3.217 -	/* Set up other state */
   3.218 -	skb->len = 0;
   3.219 -	skb->cloned = 0;
   3.220 -	skb->data_len = 0;
   3.221 -        skb->skb_type = SKB_NORMAL;
   3.222 -
   3.223 -	atomic_set(&skb->users, 1); 
   3.224 -	atomic_set(&(skb_shinfo(skb)->dataref), 1);
   3.225 -	skb_shinfo(skb)->nr_frags = 0;
   3.226 -	skb_shinfo(skb)->frag_list = NULL;
   3.227 -	return skb;
   3.228 -
   3.229 -nodata:
   3.230 -	skb_head_to_pool(skb);
   3.231 -nohead:
   3.232 -	return NULL;
   3.233 -}
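
For reference, the pointer layout alloc_skb() leaves behind (illustrative diagram, not part of the original source):

    /*
     *   head == data == tail                                end
     *     |<------- SKB_DATA_ALIGN(size) bytes ------------->|<- skb_shared_info ->|
     *
     * skb_reserve()/skb_put() later carve headroom and data out of this
     * region; skb_shinfo(skb) always lives at skb->end.
     */
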
   3.234 -
   3.235 -/* begin zc code additions: */
   3.236 -/*
   3.237 -void init_net_pages(unsigned long order_pages)
   3.238 -{
   3.239 -        int i;
   3.240 -        struct net_page_info *np;
   3.241 -        pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
   3.242 -        unsigned long nr_pages = 1 << order_pages;
   3.243 -        
   3.244 -        net_page_chunk = (char *)__get_free_pages(GFP_KERNEL, order_pages);
   3.245 -        net_page_table = kmalloc(nr_pages * sizeof(struct net_page_info), GFP_KERNEL);
   3.246 -
   3.247 -        INIT_LIST_HEAD(&net_page_list);
   3.248 -
   3.249 -        for (i = 0; i < nr_pages; i++) 
   3.250 -        {
   3.251 -                np = net_page_table + i;
   3.252 -                np->virt_addr = (unsigned long)net_page_chunk + (i * PAGE_SIZE);
   3.253 -
   3.254 -                // now fill the pte pointer:
   3.255 -                //np->ppte = 0xdeadbeef;
   3.256 -                //pgd = pgd_offset_k(np->virt_addr);
   3.257 -                //if (pgd_none(*pgd) || pgd_bad(*pgd)) BUG();
   3.258 -
   3.259 -                //if (pmd_none(*pmd)) BUG(); 
   3.260 -                //if (pmd_bad(*pmd)) BUG();
   3.261 -
   3.262 -                //ptep = pte_offset(pmd, np->virt_addr);
   3.263 -                //np->ppte = phys_to_machine(virt_to_phys(ptep));
   3.264 -                
   3.265 -                list_add_tail(&np->list, &net_page_list);
   3.266 -        }
   3.267 -        net_pages = nr_pages;
   3.268 -        
   3.269 -
   3.270 -}
   3.271 -
   3.272 -struct net_page_info *get_net_page(void)
   3.273 -{
   3.274 -
   3.275 -    struct list_head *list_ptr;
   3.276 -    struct net_page_info *np;
   3.277 -    unsigned long flags;
   3.278 -
   3.279 -    if (!net_pages) 
   3.280 -    {
   3.281 -            return NULL;
   3.282 -    }
   3.283 -    spin_lock_irqsave(&net_page_list_lock, flags);
   3.284 -    
   3.285 -    list_ptr = net_page_list.next;
   3.286 -    np = list_entry(list_ptr, struct net_page_info, list);
   3.287 -    list_del(&np->list);
   3.288 -    net_pages--;
   3.289 -    
   3.290 -    spin_unlock_irqrestore(&net_page_list_lock, flags);
   3.291 -    
   3.292 -    return np;
   3.293 -}
   3.294 -
   3.295 -void free_net_page(struct net_page_info *np)
   3.296 -{
   3.297 -    unsigned long flags;
   3.298 -  
   3.299 -    if (np == NULL) return;
   3.300 -    
   3.301 -    spin_lock_irqsave(&net_page_list_lock, flags);
   3.302 -    
   3.303 -    list_add(&np->list, &net_page_list);
   3.304 -    net_pages++;
   3.305 -
   3.306 -    spin_unlock_irqrestore(&net_page_list_lock, flags);
   3.307 -
   3.308 -}
   3.309 -*/
   3.310 -struct sk_buff *alloc_zc_skb(unsigned int size,int gfp_mask)
   3.311 -{
   3.312 -	struct sk_buff *skb;
   3.313 -	u8 *data;
   3.314 -
   3.315 -	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
   3.316 -		static int count = 0;
   3.317 -		if (++count < 5) {
   3.318 -			printk(KERN_ERR "alloc_skb called nonatomically "
   3.319 -			       "from interrupt %p\n", NET_CALLER(size));
   3.320 - 			BUG();
   3.321 -		}
   3.322 -		gfp_mask &= ~__GFP_WAIT;
   3.323 -	}
   3.324 -
   3.325 -	/* Get the HEAD */
   3.326 -	skb = skb_head_from_pool();
   3.327 -	if (skb == NULL) {
   3.328 -		skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask & ~__GFP_DMA);
   3.329 -		if (skb == NULL)
   3.330 -			goto nohead;
   3.331 -	}
   3.332 -
   3.333 -	/* Get the DATA. Size must match skb_add_mtu(). */
    3.334 -	size = SKB_DATA_ALIGN(size);
    3.335 -	/* The shared info block lives at skb->end, so data and shinfo
    3.336 -	 * must both fit in the single page allocated below. */
    3.337 -	if (size + sizeof(struct skb_shared_info) > PAGE_SIZE) {
    3.338 -		printk(KERN_WARNING "alloc_zc_skb called with oversized request.\n");
    3.339 -		size = PAGE_SIZE - sizeof(struct skb_shared_info);
    3.339 -	}
   3.340 -	/*skb->net_page = get_net_page();
   3.341 -        if (skb->net_page == NULL)
   3.342 -        {
   3.343 -                goto nodata;
   3.344 -        }
   3.345 -        data = (u8 *)skb->net_page->virt_addr;*/
    3.346 -	data = (u8 *)__get_free_page(gfp_mask);
   3.347 -	if (data == NULL)
   3.348 -		goto nodata;
   3.349 -	/* XXX: does not include slab overhead */ 
   3.350 -	skb->truesize = size + sizeof(struct sk_buff);
   3.351 -
   3.352 -	/* Load the data pointers. */
   3.353 -	skb->head = data;
   3.354 -	skb->data = data;
   3.355 -	skb->tail = data;
   3.356 -	skb->end = data + size;
   3.357 -
   3.358 -	/* Set up other state */
   3.359 -	skb->len = 0;
   3.360 -	skb->cloned = 0;
   3.361 -	skb->data_len = 0;
    3.362 -	skb->skb_type = SKB_ZERO_COPY;
   3.363 -
   3.364 -	atomic_set(&skb->users, 1); 
   3.365 -	atomic_set(&(skb_shinfo(skb)->dataref), 1);
   3.366 -	skb_shinfo(skb)->nr_frags = 0;
   3.367 -	skb_shinfo(skb)->frag_list = NULL;
   3.368 -	return skb;
   3.369 -
   3.370 -nodata:
   3.371 -	skb_head_to_pool(skb);
   3.372 -nohead:
   3.373 -	return NULL;
   3.374 -}
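A hedged usage sketch (hypothetical driver code, not part of this file): a virtual-NIC RX path would allocate one zero-copy skb per incoming frame, with the shared info block living at the end of the same page:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *demo_rx_alloc(struct net_device *dev)
{
	struct sk_buff *skb = alloc_zc_skb(ETH_FRAME_LEN, GFP_ATOMIC);

	if (skb == NULL)
		return NULL;		/* out of memory: drop the frame */

	skb->dev = dev;
	skb_reserve(skb, 2);		/* conventional IP-header alignment */
	return skb;
}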
   3.375 -
   3.376 -/* end zc code additions: */
   3.377 -
   3.378 -/*
    3.379 - *	Slab constructor for an sk_buff head.
    3.380 - */
   3.381 -static inline void skb_headerinit(void *p, kmem_cache_t *cache, 
   3.382 -				  unsigned long flags)
   3.383 -{
   3.384 -	struct sk_buff *skb = p;
   3.385 -
   3.386 -	skb->next = NULL;
   3.387 -	skb->prev = NULL;
   3.388 -	skb->list = NULL;
   3.389 -	skb->sk = NULL;
   3.390 -	skb->stamp.tv_sec=0;	/* No idea about time */
   3.391 -	skb->dev = NULL;
   3.392 -	skb->dst = NULL;
   3.393 -	memset(skb->cb, 0, sizeof(skb->cb));
   3.394 -	skb->pkt_type = PACKET_HOST;	/* Default type */
   3.395 -	skb->ip_summed = 0;
   3.396 -	skb->priority = 0;
   3.397 -	skb->security = 0;	/* By default packets are insecure */
   3.398 -	skb->destructor = NULL;
   3.399 -
   3.400 -#ifdef CONFIG_NETFILTER
   3.401 -	skb->nfmark = skb->nfcache = 0;
   3.402 -	skb->nfct = NULL;
   3.403 -#ifdef CONFIG_NETFILTER_DEBUG
   3.404 -	skb->nf_debug = 0;
   3.405 -#endif
   3.406 -#endif
   3.407 -#ifdef CONFIG_NET_SCHED
   3.408 -	skb->tc_index = 0;
   3.409 -#endif
   3.410 -}
   3.411 -
   3.412 -static void skb_drop_fraglist(struct sk_buff *skb)
   3.413 -{
   3.414 -	struct sk_buff *list = skb_shinfo(skb)->frag_list;
   3.415 -
   3.416 -	skb_shinfo(skb)->frag_list = NULL;
   3.417 -
   3.418 -	do {
   3.419 -		struct sk_buff *this = list;
   3.420 -		list = list->next;
   3.421 -		kfree_skb(this);
   3.422 -	} while (list);
   3.423 -}
   3.424 -
   3.425 -static void skb_clone_fraglist(struct sk_buff *skb)
   3.426 -{
   3.427 -	struct sk_buff *list;
   3.428 -
   3.429 -	for (list = skb_shinfo(skb)->frag_list; list; list=list->next)
   3.430 -		skb_get(list);
   3.431 -}
   3.432 -
   3.433 -static void skb_release_data(struct sk_buff *skb)
   3.434 -{
    3.435 -	if (!skb->cloned ||
   3.436 -	    atomic_dec_and_test(&(skb_shinfo(skb)->dataref))) {
   3.437 -		if (skb_shinfo(skb)->nr_frags) {
   3.438 -			int i;
    3.439 -			printk(KERN_DEBUG "there were %u frags!\n",
    3.440 -			       skb_shinfo(skb)->nr_frags);
    3.441 -			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
    3.442 -				put_page(skb_shinfo(skb)->frags[i].page);
    3.443 -			}
   3.444 -		}
   3.445 -
   3.446 -		if (skb_shinfo(skb)->frag_list)
   3.447 -			skb_drop_fraglist(skb);
   3.448 -
    3.449 -		if (skb->skb_type == SKB_NORMAL) {
    3.450 -			kfree(skb->head);
    3.451 -		} else {
    3.452 -			/* SKB_ZERO_COPY: the data page came from
    3.453 -			 * __get_free_page() in alloc_zc_skb(), so it
    3.454 -			 * must go back to the page allocator. */
    3.455 -			free_page((unsigned long)skb->head);
    3.456 -		}
   3.457 -	}
   3.458 -
   3.459 -}
   3.460 -
   3.461 -/*
    3.462 - *	Free an skbuff's memory without cleaning its state.
   3.463 - */
   3.464 -void kfree_skbmem(struct sk_buff *skb)
   3.465 -{
   3.466 -	skb_release_data(skb);
   3.467 -	skb_head_to_pool(skb);
   3.468 -}
   3.469 -
   3.470 -/**
   3.471 - *	__kfree_skb - private function 
   3.472 - *	@skb: buffer
   3.473 - *
   3.474 - *	Free an sk_buff. Release anything attached to the buffer. 
   3.475 - *	Clean the state. This is an internal helper function. Users should
    3.476 - *	always call kfree_skb.
   3.477 - */
   3.478 -
   3.479 -void __kfree_skb(struct sk_buff *skb)
   3.480 -{
   3.481 -	if (skb->list) {
   3.482 -	 	printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
   3.483 -		       "on a list (from %p).\n", NET_CALLER(skb));
   3.484 -		BUG();
   3.485 -	}
   3.486 -
   3.487 -	dst_release(skb->dst);
   3.488 -	if(skb->destructor) {
   3.489 -		if (in_irq()) {
   3.490 -			printk(KERN_WARNING "Warning: kfree_skb on hard IRQ %p\n",
   3.491 -				NET_CALLER(skb));
   3.492 -		}
   3.493 -		skb->destructor(skb);
   3.494 -	}
   3.495 -#ifdef CONFIG_NETFILTER
   3.496 -	nf_conntrack_put(skb->nfct);
   3.497 -#endif
   3.498 -	skb_headerinit(skb, NULL, 0);  /* clean state */
   3.499 -	kfree_skbmem(skb);
   3.500 -}
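A sketch of the reference-count contract (hypothetical caller): kfree_skb(), an inline in skbuff.h, only falls through to __kfree_skb() when the last reference goes away:

static void demo_refcount(struct sk_buff *skb)
{
	struct sk_buff *held = skb_get(skb);	/* users: 1 -> 2 */

	kfree_skb(held);	/* users: 2 -> 1, buffer survives */
	kfree_skb(skb);		/* users: 1 -> 0, __kfree_skb() runs */
}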
   3.501 -
   3.502 -/**
   3.503 - *	skb_clone	-	duplicate an sk_buff
   3.504 - *	@skb: buffer to clone
   3.505 - *	@gfp_mask: allocation priority
   3.506 - *
   3.507 - *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
   3.508 - *	copies share the same packet data but not structure. The new
    3.509 - *	buffer has a reference count of 1. If the allocation fails, the
    3.510 - *	function returns %NULL; otherwise the new buffer is returned.
   3.511 - *	
    3.512 - *	If this function is called from an interrupt, @gfp_mask must be
   3.513 - *	%GFP_ATOMIC.
   3.514 - */
   3.515 -
   3.516 -struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
   3.517 -{
   3.518 -	struct sk_buff *n;
   3.519 -
   3.520 -	n = skb_head_from_pool();
   3.521 -	if (!n) {
   3.522 -		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
   3.523 -		if (!n)
   3.524 -			return NULL;
   3.525 -	}
   3.526 -
   3.527 -#define C(x) n->x = skb->x
   3.528 -
   3.529 -	n->next = n->prev = NULL;
   3.530 -	n->list = NULL;
   3.531 -	n->sk = NULL;
   3.532 -	C(stamp);
   3.533 -	C(dev);
   3.534 -	C(h);
   3.535 -	C(nh);
   3.536 -	C(mac);
   3.537 -	C(dst);
   3.538 -	dst_clone(n->dst);
   3.539 -	memcpy(n->cb, skb->cb, sizeof(skb->cb));
   3.540 -	C(len);
   3.541 -	C(data_len);
   3.542 -	C(csum);
   3.543 -	n->cloned = 1;
   3.544 -	C(pkt_type);
   3.545 -	C(ip_summed);
   3.546 -	C(priority);
   3.547 -	atomic_set(&n->users, 1);
   3.548 -	C(protocol);
   3.549 -	C(security);
   3.550 -	C(truesize);
   3.551 -	C(head);
   3.552 -	C(data);
   3.553 -	C(tail);
   3.554 -	C(end);
   3.555 -	n->destructor = NULL;
   3.556 -#ifdef CONFIG_NETFILTER
   3.557 -	C(nfmark);
   3.558 -	C(nfcache);
   3.559 -	C(nfct);
   3.560 -#ifdef CONFIG_NETFILTER_DEBUG
   3.561 -	C(nf_debug);
   3.562 -#endif
   3.563 -#endif /*CONFIG_NETFILTER*/
   3.564 -#if defined(CONFIG_HIPPI)
   3.565 -	C(private);
   3.566 -#endif
   3.567 -#ifdef CONFIG_NET_SCHED
   3.568 -	C(tc_index);
   3.569 -#endif
   3.570 -        C(skb_type);
   3.571 -        //C(net_page);
   3.572 -	atomic_inc(&(skb_shinfo(skb)->dataref));
   3.573 -	skb->cloned = 1;
   3.574 -#ifdef CONFIG_NETFILTER
   3.575 -	nf_conntrack_get(skb->nfct);
   3.576 -#endif
   3.577 -	return n;
   3.578 -}
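Illustrative sketch (hypothetical caller): a clone shares the single data area with the original, so it suits read-only paths; writers need skb_copy():

static int demo_clone(struct sk_buff *skb)
{
	struct sk_buff *n = skb_clone(skb, GFP_ATOMIC);

	if (n == NULL)
		return -ENOMEM;

	/* n->data == skb->data here: same bytes, two sk_buff headers. */
	kfree_skb(n);
	return 0;
}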
   3.579 -
   3.580 -static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
   3.581 -{
   3.582 -	/*
   3.583 -	 *	Shift between the two data areas in bytes
   3.584 -	 */
   3.585 -	unsigned long offset = new->data - old->data;
   3.586 -
   3.587 -	new->list=NULL;
   3.588 -	new->sk=NULL;
   3.589 -	new->dev=old->dev;
   3.590 -	new->priority=old->priority;
   3.591 -	new->protocol=old->protocol;
   3.592 -	new->dst=dst_clone(old->dst);
   3.593 -	new->h.raw=old->h.raw+offset;
   3.594 -	new->nh.raw=old->nh.raw+offset;
   3.595 -	new->mac.raw=old->mac.raw+offset;
   3.596 -	memcpy(new->cb, old->cb, sizeof(old->cb));
   3.597 -	atomic_set(&new->users, 1);
   3.598 -	new->pkt_type=old->pkt_type;
   3.599 -	new->stamp=old->stamp;
   3.600 -	new->destructor = NULL;
   3.601 -	new->security=old->security;
   3.602 -#ifdef CONFIG_NETFILTER
   3.603 -	new->nfmark=old->nfmark;
   3.604 -	new->nfcache=old->nfcache;
   3.605 -	new->nfct=old->nfct;
   3.606 -	nf_conntrack_get(new->nfct);
   3.607 -#ifdef CONFIG_NETFILTER_DEBUG
   3.608 -	new->nf_debug=old->nf_debug;
   3.609 -#endif
   3.610 -#endif
   3.611 -#ifdef CONFIG_NET_SCHED
   3.612 -	new->tc_index = old->tc_index;
   3.613 -#endif
   3.614 -}
   3.615 -
   3.616 -/**
   3.617 - *	skb_copy	-	create private copy of an sk_buff
   3.618 - *	@skb: buffer to copy
   3.619 - *	@gfp_mask: allocation priority
   3.620 - *
   3.621 - *	Make a copy of both an &sk_buff and its data. This is used when the
   3.622 - *	caller wishes to modify the data and needs a private copy of the 
   3.623 - *	data to alter. Returns %NULL on failure or the pointer to the buffer
   3.624 - *	on success. The returned buffer has a reference count of 1.
   3.625 - *
    3.626 - *	As a by-product, this function converts a non-linear &sk_buff into
    3.627 - *	a linear one, so the &sk_buff becomes completely private and the
    3.628 - *	caller is allowed to modify all the data of the returned buffer.
    3.629 - *	This means the function is not recommended for use when only the
    3.630 - *	header is going to be modified. Use pskb_copy() instead.
   3.631 - */
   3.632 - 
   3.633 -struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
   3.634 -{
   3.635 -	struct sk_buff *n;
   3.636 -	int headerlen = skb->data-skb->head;
   3.637 -
   3.638 -	/*
   3.639 -	 *	Allocate the copy buffer
   3.640 -	 */
   3.641 -	n=alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
   3.642 -	if(n==NULL)
   3.643 -		return NULL;
   3.644 -
   3.645 -	/* Set the data pointer */
   3.646 -	skb_reserve(n,headerlen);
   3.647 -	/* Set the tail pointer and length */
   3.648 -	skb_put(n,skb->len);
   3.649 -	n->csum = skb->csum;
   3.650 -	n->ip_summed = skb->ip_summed;
   3.651 -
   3.652 -	if (skb_copy_bits(skb, -headerlen, n->head, headerlen+skb->len))
   3.653 -		BUG();
   3.654 -
   3.655 -	copy_skb_header(n, skb);
   3.656 -
   3.657 -	return n;
   3.658 -}
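The complementary sketch (hypothetical caller): skb_copy() is the right call when the payload itself will be written:

static struct sk_buff *demo_private_copy(struct sk_buff *skb)
{
	struct sk_buff *n = skb_copy(skb, GFP_ATOMIC);

	if (n != NULL && n->len > 0)
		n->data[0] ^= 0xff;	/* safe: data is no longer shared */
	return n;
}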
   3.659 -
   3.660 -/* Keep head the same: replace data */
   3.661 -int skb_linearize(struct sk_buff *skb, int gfp_mask)
   3.662 -{
   3.663 -	unsigned int size;
   3.664 -	u8 *data;
   3.665 -	long offset;
   3.666 -	int headerlen = skb->data - skb->head;
   3.667 -	int expand = (skb->tail+skb->data_len) - skb->end;
   3.668 -
   3.669 -	if (skb_shared(skb))
   3.670 -		BUG();
   3.671 -
   3.672 -	if (expand <= 0)
   3.673 -		expand = 0;
   3.674 -
   3.675 -	size = (skb->end - skb->head + expand);
   3.676 -	size = SKB_DATA_ALIGN(size);
   3.677 -	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
   3.678 -	if (data == NULL)
   3.679 -		return -ENOMEM;
   3.680 -
   3.681 -	/* Copy entire thing */
   3.682 -	if (skb_copy_bits(skb, -headerlen, data, headerlen+skb->len))
   3.683 -		BUG();
   3.684 -
   3.685 -	/* Offset between the two in bytes */
   3.686 -	offset = data - skb->head;
   3.687 -
   3.688 -	/* Free old data. */
   3.689 -	skb_release_data(skb);
    3.690 -	skb->skb_type = SKB_NORMAL;	/* the new head below is kmalloc()ed */
    3.691 -	skb->head = data;
   3.692 -	skb->end  = data + size;
   3.693 -
   3.694 -	/* Set up new pointers */
   3.695 -	skb->h.raw += offset;
   3.696 -	skb->nh.raw += offset;
   3.697 -	skb->mac.raw += offset;
   3.698 -	skb->tail += offset;
   3.699 -	skb->data += offset;
   3.700 -
   3.701 -	/* Set up shinfo */
   3.702 -	atomic_set(&(skb_shinfo(skb)->dataref), 1);
   3.703 -	skb_shinfo(skb)->nr_frags = 0;
   3.704 -	skb_shinfo(skb)->frag_list = NULL;
   3.705 -
   3.706 -	/* We are no longer a clone, even if we were. */
   3.707 -	skb->cloned = 0;
   3.708 -
   3.709 -	skb->tail += skb->data_len;
   3.710 -	skb->data_len = 0;
   3.711 -	return 0;
   3.712 -}
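Sketch of the usual calling pattern (hypothetical helper), using the skb_is_nonlinear() test from skbuff.h; note that skb_linearize() BUG()s on a shared skb:

static int demo_flatten(struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb) && skb_linearize(skb, GFP_ATOMIC) != 0)
		return -ENOMEM;

	/* All skb->len bytes are now contiguous at skb->data. */
	return 0;
}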
   3.713 -
   3.714 -
   3.715 -/**
   3.716 - *	pskb_copy	-	create copy of an sk_buff with private head.
   3.717 - *	@skb: buffer to copy
   3.718 - *	@gfp_mask: allocation priority
   3.719 - *
    3.720 - *	Make a copy of both an &sk_buff and part of its data, located
    3.721 - *	in its header. Fragmented data remains shared. This is used when
    3.722 - *	the caller wishes to modify only the header of an &sk_buff and
    3.723 - *	needs a private copy of the header to alter. Returns %NULL on failure
   3.724 - *	or the pointer to the buffer on success.
   3.725 - *	The returned buffer has a reference count of 1.
   3.726 - */
   3.727 -
   3.728 -struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
   3.729 -{
   3.730 -	struct sk_buff *n;
   3.731 -
   3.732 -	/*
   3.733 -	 *	Allocate the copy buffer
   3.734 -	 */
   3.735 -	n=alloc_skb(skb->end - skb->head, gfp_mask);
   3.736 -	if(n==NULL)
   3.737 -		return NULL;
   3.738 -
   3.739 -	/* Set the data pointer */
   3.740 -	skb_reserve(n,skb->data-skb->head);
   3.741 -	/* Set the tail pointer and length */
   3.742 -	skb_put(n,skb_headlen(skb));
   3.743 -	/* Copy the bytes */
   3.744 -	memcpy(n->data, skb->data, n->len);
   3.745 -	n->csum = skb->csum;
   3.746 -	n->ip_summed = skb->ip_summed;
   3.747 -
   3.748 -	n->data_len = skb->data_len;
   3.749 -	n->len = skb->len;
   3.750 -
   3.751 -	if (skb_shinfo(skb)->nr_frags) {
   3.752 -		int i;
   3.753 -
   3.754 -		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
   3.755 -			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
   3.756 -			get_page(skb_shinfo(n)->frags[i].page);
   3.757 -		}
   3.758 -		skb_shinfo(n)->nr_frags = i;
   3.759 -	}
   3.760 -
   3.761 -	if (skb_shinfo(skb)->frag_list) {
   3.762 -		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
   3.763 -		skb_clone_fraglist(n);
   3.764 -	}
   3.765 -
   3.766 -	copy_skb_header(n, skb);
   3.767 -
   3.768 -	return n;
   3.769 -}
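A sketch of the header-rewrite case that pskb_copy() exists for (hypothetical caller): only the linear header is duplicated, the page fragments stay shared:

static struct sk_buff *demo_header_rewrite(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	if (n != NULL && skb_headlen(n) > 0)
		n->data[0] = 0;		/* header private; frag pages shared */
	return n;
}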
   3.770 -
   3.771 -/**
   3.772 - *	pskb_expand_head - reallocate header of &sk_buff
   3.773 - *	@skb: buffer to reallocate
   3.774 - *	@nhead: room to add at head
   3.775 - *	@ntail: room to add at tail
   3.776 - *	@gfp_mask: allocation priority
   3.777 - *
    3.778 - *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
    3.779 - *	the header of the skb. The &sk_buff itself is not changed and MUST
    3.780 - *	have a reference count of 1. Returns zero on success or a negative
    3.781 - *	error code if expansion failed; in that case the &sk_buff is unchanged.
   3.782 - *
   3.783 - *	All the pointers pointing into skb header may change and must be
   3.784 - *	reloaded after call to this function.
   3.785 - */
   3.786 -
   3.787 -int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
   3.788 -{
   3.789 -	int i;
   3.790 -	u8 *data;
   3.791 -	int size = nhead + (skb->end - skb->head) + ntail;
   3.792 -	long off;
   3.793 -
   3.794 -	if (skb_shared(skb))
   3.795 -		BUG();
   3.796 -
   3.797 -	size = SKB_DATA_ALIGN(size);
   3.798 -
   3.799 -	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
   3.800 -	if (data == NULL)
   3.801 -		goto nodata;
   3.802 -
    3.803 -	/* Copy only real data... and, alas, the header. This should be
    3.804 -	 * optimized for the case when the header is empty. */
   3.805 -	memcpy(data+nhead, skb->head, skb->tail-skb->head);
   3.806 -	memcpy(data+size, skb->end, sizeof(struct skb_shared_info));
   3.807 -
   3.808 -	for (i=0; i<skb_shinfo(skb)->nr_frags; i++)
   3.809 -		get_page(skb_shinfo(skb)->frags[i].page);
   3.810 -
   3.811 -	if (skb_shinfo(skb)->frag_list)
   3.812 -		skb_clone_fraglist(skb);
   3.813 -
   3.814 -	skb_release_data(skb);
   3.815 -
   3.816 -	off = (data+nhead) - skb->head;
    3.817 -	skb->skb_type = SKB_NORMAL;	/* the new head is kmalloc()ed */
    3.818 -	skb->head = data;
   3.819 -	skb->end  = data+size;
   3.820 -
   3.821 -	skb->data += off;
   3.822 -	skb->tail += off;
   3.823 -	skb->mac.raw += off;
   3.824 -	skb->h.raw += off;
   3.825 -	skb->nh.raw += off;
   3.826 -	skb->cloned = 0;
   3.827 -	atomic_set(&skb_shinfo(skb)->dataref, 1);
   3.828 -	return 0;
   3.829 -
   3.830 -nodata:
   3.831 -	return -ENOMEM;
   3.832 -}
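The reload-all-pointers rule in a sketch (hypothetical caller): anything cached before the call dangles afterwards:

static int demo_grow_head(struct sk_buff *skb)
{
	u8 *stale = skb->data;

	if (pskb_expand_head(skb, 16, 0, GFP_ATOMIC) != 0)
		return -ENOMEM;

	/* 'stale' now points into freed memory; re-read skb->data,
	 * skb->h.raw, skb->nh.raw and friends instead. */
	(void)stale;
	return 0;
}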
   3.833 -
   3.834 -/* Make private copy of skb with writable head and some headroom */
   3.835 -
   3.836 -struct sk_buff *
   3.837 -skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
   3.838 -{
   3.839 -	struct sk_buff *skb2;
   3.840 -	int delta = headroom - skb_headroom(skb);
   3.841 -
   3.842 -	if (delta <= 0)
   3.843 -		return pskb_copy(skb, GFP_ATOMIC);
   3.844 -
   3.845 -	skb2 = skb_clone(skb, GFP_ATOMIC);
   3.846 -	if (skb2 == NULL ||
   3.847 -	    !pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
   3.848 -		return skb2;
   3.849 -
   3.850 -	kfree_skb(skb2);
   3.851 -	return NULL;
   3.852 -}
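The typical encapsulation pattern this helper serves, as a sketch; the outer header length is made up:

#define DEMO_OUTER_HLEN 20		/* hypothetical outer header */

static struct sk_buff *demo_encap(struct sk_buff *skb)
{
	if (skb_headroom(skb) < DEMO_OUTER_HLEN || skb_cloned(skb)) {
		struct sk_buff *nskb =
			skb_realloc_headroom(skb, DEMO_OUTER_HLEN);

		kfree_skb(skb);
		if (nskb == NULL)
			return NULL;
		skb = nskb;
	}
	skb_push(skb, DEMO_OUTER_HLEN);	/* headroom now guaranteed */
	return skb;
}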
   3.853 -
   3.854 -
   3.855 -/**
   3.856 - *	skb_copy_expand	-	copy and expand sk_buff
   3.857 - *	@skb: buffer to copy
   3.858 - *	@newheadroom: new free bytes at head
   3.859 - *	@newtailroom: new free bytes at tail
   3.860 - *	@gfp_mask: allocation priority
   3.861 - *
   3.862 - *	Make a copy of both an &sk_buff and its data and while doing so 
   3.863 - *	allocate additional space.
   3.864 - *
   3.865 - *	This is used when the caller wishes to modify the data and needs a 
   3.866 - *	private copy of the data to alter as well as more space for new fields.
   3.867 - *	Returns %NULL on failure or the pointer to the buffer
   3.868 - *	on success. The returned buffer has a reference count of 1.
   3.869 - *
   3.870 - *	You must pass %GFP_ATOMIC as the allocation priority if this function
   3.871 - *	is called from an interrupt.
   3.872 - */
   3.873 - 
   3.874 -
   3.875 -struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
   3.876 -				int newheadroom,
   3.877 -				int newtailroom,
   3.878 -				int gfp_mask)
   3.879 -{
   3.880 -	struct sk_buff *n;
   3.881 -
   3.882 -	/*
   3.883 -	 *	Allocate the copy buffer
   3.884 -	 */
   3.885 - 	 
   3.886 -	n=alloc_skb(newheadroom + skb->len + newtailroom,
   3.887 -		    gfp_mask);
   3.888 -	if(n==NULL)
   3.889 -		return NULL;
   3.890 -
   3.891 -	skb_reserve(n,newheadroom);
   3.892 -
   3.893 -	/* Set the tail pointer and length */
   3.894 -	skb_put(n,skb->len);
   3.895 -
   3.896 -	/* Copy the data only. */
   3.897 -	if (skb_copy_bits(skb, 0, n->data, skb->len))
   3.898 -		BUG();
   3.899 -
   3.900 -	copy_skb_header(n, skb);
   3.901 -	return n;
   3.902 -}
   3.903 -
    3.904 -/* Trims skb to length len. It can change skb pointers if "realloc" is 1.
    3.905 - * If realloc==0 and trimming is impossible without changing the data,
    3.906 - * it is a BUG().
    3.907 - */
   3.908 -
   3.909 -int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
   3.910 -{
   3.911 -	int offset = skb_headlen(skb);
   3.912 -	int nfrags = skb_shinfo(skb)->nr_frags;
   3.913 -	int i;
   3.914 -
   3.915 -	for (i=0; i<nfrags; i++) {
   3.916 -		int end = offset + skb_shinfo(skb)->frags[i].size;
   3.917 -		if (end > len) {
   3.918 -			if (skb_cloned(skb)) {
   3.919 -				if (!realloc)
   3.920 -					BUG();
   3.921 -				if (!pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
   3.922 -					return -ENOMEM;
   3.923 -			}
   3.924 -			if (len <= offset) {
   3.925 -				put_page(skb_shinfo(skb)->frags[i].page);
   3.926 -				skb_shinfo(skb)->nr_frags--;
   3.927 -			} else {
   3.928 -				skb_shinfo(skb)->frags[i].size = len-offset;
   3.929 -			}
   3.930 -		}
   3.931 -		offset = end;
   3.932 -	}
   3.933 -
   3.934 -	if (offset < len) {
   3.935 -		skb->data_len -= skb->len - len;
   3.936 -		skb->len = len;
   3.937 -	} else {
   3.938 -		if (len <= skb_headlen(skb)) {
   3.939 -			skb->len = len;
   3.940 -			skb->data_len = 0;
   3.941 -			skb->tail = skb->data + len;
   3.942 -			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
   3.943 -				skb_drop_fraglist(skb);
   3.944 -		} else {
   3.945 -			skb->data_len -= skb->len - len;
   3.946 -			skb->len = len;
   3.947 -		}
   3.948 -	}
   3.949 -
   3.950 -	return 0;
   3.951 -}
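Ordinary code reaches this through the pskb_trim() inline in skbuff.h, which passes realloc=1 and handles purely linear skbs without calling here at all; a sketch (hypothetical helper) of the common trim-trailing-padding case:

static int demo_trim_padding(struct sk_buff *skb, unsigned int real_len)
{
	if (skb->len > real_len)
		return pskb_trim(skb, real_len);
	return 0;
}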
   3.952 -
   3.953 -/**
   3.954 - *	__pskb_pull_tail - advance tail of skb header 
   3.955 - *	@skb: buffer to reallocate
   3.956 - *	@delta: number of bytes to advance tail
   3.957 - *
    3.958 - *	This function makes sense only on a fragmented &sk_buff:
    3.959 - *	it expands the header, moving its tail forward and copying the
    3.960 - *	necessary data from the fragmented part.
   3.961 - *
   3.962 - *	&sk_buff MUST have reference count of 1.
   3.963 - *
    3.964 - *	Returns %NULL (and the &sk_buff is unchanged) if the pull failed,
    3.965 - *	or the value of the new tail of the skb on success.
   3.966 - *
   3.967 - *	All the pointers pointing into skb header may change and must be
   3.968 - *	reloaded after call to this function.
   3.969 - */
   3.970 -
    3.971 -/* Moves the tail of the skb head forward, copying data from the
    3.972 - * fragmented part when necessary.
   3.973 - * 1. It may fail due to malloc failure.
   3.974 - * 2. It may change skb pointers.
   3.975 - *
   3.976 - * It is pretty complicated. Luckily, it is called only in exceptional cases.
   3.977 - */
   3.978 -unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta)
   3.979 -{
   3.980 -	int i, k, eat;
   3.981 -
    3.982 -	/* If the skb does not have enough free space at the tail, get a
    3.983 -	 * new buffer plus 128 bytes for future expansion. If there is
    3.984 -	 * enough room at the tail, reallocate without expansion only if
    3.985 -	 * the skb is cloned. */
   3.986 -	eat = (skb->tail+delta) - skb->end;
   3.987 -
   3.988 -	if (eat > 0 || skb_cloned(skb)) {
   3.989 -		if (pskb_expand_head(skb, 0, eat>0 ? eat+128 : 0, GFP_ATOMIC))
   3.990 -			return NULL;
   3.991 -	}
   3.992 -
   3.993 -	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
   3.994 -		BUG();
   3.995 -
    3.996 -	/* Optimization: no fragments, so there is no reason to
    3.997 -	 * pre-estimate the size of the pulled pages. Superb.
    3.998 -	 */
   3.999 -	if (skb_shinfo(skb)->frag_list == NULL)
  3.1000 -		goto pull_pages;
  3.1001 -
  3.1002 -	/* Estimate size of pulled pages. */
  3.1003 -	eat = delta;
  3.1004 -	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
  3.1005 -		if (skb_shinfo(skb)->frags[i].size >= eat)
  3.1006 -			goto pull_pages;
  3.1007 -		eat -= skb_shinfo(skb)->frags[i].size;
  3.1008 -	}
  3.1009 -
   3.1010 -	/* If we need to update the frag list, we are in trouble.
   3.1011 -	 * Certainly, it is possible to add an offset to the skb data,
   3.1012 -	 * but taking into account that pulling is expected to
   3.1013 -	 * be a very rare operation, it is worth fighting against
   3.1014 -	 * further bloating of the skb head and crucifying ourselves
   3.1015 -	 * here instead. Pure masochism, indeed. 8)8)
   3.1016 -	 */
  3.1017 -	if (eat) {
  3.1018 -		struct sk_buff *list = skb_shinfo(skb)->frag_list;
  3.1019 -		struct sk_buff *clone = NULL;
  3.1020 -		struct sk_buff *insp = NULL;
  3.1021 -
  3.1022 -		do {
  3.1023 -			if (list == NULL)
  3.1024 -				BUG();
  3.1025 -
  3.1026 -			if (list->len <= eat) {
  3.1027 -				/* Eaten as whole. */
  3.1028 -				eat -= list->len;
  3.1029 -				list = list->next;
  3.1030 -				insp = list;
  3.1031 -			} else {
  3.1032 -				/* Eaten partially. */
  3.1033 -
  3.1034 -				if (skb_shared(list)) {
  3.1035 -					/* Sucks! We need to fork list. :-( */
  3.1036 -					clone = skb_clone(list, GFP_ATOMIC);
  3.1037 -					if (clone == NULL)
  3.1038 -						return NULL;
  3.1039 -					insp = list->next;
  3.1040 -					list = clone;
  3.1041 -				} else {
  3.1042 -					/* This may be pulled without
  3.1043 -					 * problems. */
  3.1044 -					insp = list;
  3.1045 -				}
  3.1046 -				if (pskb_pull(list, eat) == NULL) {
  3.1047 -					if (clone)
  3.1048 -						kfree_skb(clone);
  3.1049 -					return NULL;
  3.1050 -				}
  3.1051 -				break;
  3.1052 -			}
  3.1053 -		} while (eat);
  3.1054 -
  3.1055 -		/* Free pulled out fragments. */
  3.1056 -		while ((list = skb_shinfo(skb)->frag_list) != insp) {
  3.1057 -			skb_shinfo(skb)->frag_list = list->next;
  3.1058 -			kfree_skb(list);
  3.1059 -		}
  3.1060 -		/* And insert new clone at head. */
  3.1061 -		if (clone) {
  3.1062 -			clone->next = list;
  3.1063 -			skb_shinfo(skb)->frag_list = clone;
  3.1064 -		}
  3.1065 -	}
  3.1066 -	/* Success! Now we may commit changes to skb data. */
  3.1067 -
  3.1068 -pull_pages:
  3.1069 -	eat = delta;
  3.1070 -	k = 0;
  3.1071 -	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
  3.1072 -		if (skb_shinfo(skb)->frags[i].size <= eat) {
  3.1073 -			put_page(skb_shinfo(skb)->frags[i].page);
  3.1074 -			eat -= skb_shinfo(skb)->frags[i].size;
  3.1075 -		} else {
  3.1076 -			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
  3.1077 -			if (eat) {
  3.1078 -				skb_shinfo(skb)->frags[k].page_offset += eat;
  3.1079 -				skb_shinfo(skb)->frags[k].size -= eat;
  3.1080 -				eat = 0;
  3.1081 -			}
  3.1082 -			k++;
  3.1083 -		}
  3.1084 -	}
  3.1085 -	skb_shinfo(skb)->nr_frags = k;
  3.1086 -
  3.1087 -	skb->tail += delta;
  3.1088 -	skb->data_len -= delta;
  3.1089 -
  3.1090 -	return skb->tail;
  3.1091 -}
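Protocol code usually arrives here via the pskb_may_pull() inline from skbuff.h, which guarantees the first bytes are linear before they are dereferenced; a sketch (hypothetical parser):

#include <linux/ip.h>

static int demo_parse_ip(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;		/* runt packet or failed pull */

	skb->nh.raw = skb->data;	/* IP header is now contiguous */
	return 0;
}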
  3.1092 -
  3.1093 -/* Copy some data bits from skb to kernel buffer. */
  3.1094 -
  3.1095 -int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
  3.1096 -{
  3.1097 -	int i, copy;
  3.1098 -	int start = skb->len - skb->data_len;
  3.1099 -
  3.1100 -	if (offset > (int)skb->len-len)
  3.1101 -		goto fault;
  3.1102 -
  3.1103 -	/* Copy header. */
  3.1104 -	if ((copy = start-offset) > 0) {
  3.1105 -		if (copy > len)
  3.1106 -			copy = len;
  3.1107 -		memcpy(to, skb->data + offset, copy);
  3.1108 -		if ((len -= copy) == 0)
  3.1109 -			return 0;
  3.1110 -		offset += copy;
  3.1111 -		to += copy;
  3.1112 -	}
  3.1113 -
  3.1114 -	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  3.1115 -		int end;
  3.1116 -
  3.1117 -		BUG_TRAP(start <= offset+len);
  3.1118 -
  3.1119 -		end = start + skb_shinfo(skb)->frags[i].size;
  3.1120 -		if ((copy = end-offset) > 0) {
  3.1121 -			u8 *vaddr;
  3.1122 -
  3.1123 -			if (copy > len)
  3.1124 -				copy = len;
  3.1125 -
  3.1126 -			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
  3.1127 -			memcpy(to, vaddr+skb_shinfo(skb)->frags[i].page_offset+
  3.1128 -			       offset-start, copy);
  3.1129 -			kunmap_skb_frag(vaddr);
  3.1130 -
  3.1131 -			if ((len -= copy) == 0)
  3.1132 -				return 0;
  3.1133 -			offset += copy;
  3.1134 -			to += copy;
  3.1135 -		}
  3.1136 -		start = end;
  3.1137 -	}
  3.1138 -
  3.1139 -	if (skb_shinfo(skb)->frag_list) {
  3.1140 -		struct sk_buff *list;
  3.1141 -
  3.1142 -		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
  3.1143 -			int end;
  3.1144 -
  3.1145 -			BUG_TRAP(start <= offset+len);
  3.1146 -
  3.1147 -			end = start + list->len;
  3.1148 -			if ((copy = end-offset) > 0) {
  3.1149 -				if (copy > len)
  3.1150 -					copy = len;
  3.1151 -				if (skb_copy_bits(list, offset-start, to, copy))
  3.1152 -					goto fault;
  3.1153 -				if ((len -= copy) == 0)
  3.1154 -					return 0;
  3.1155 -				offset += copy;
  3.1156 -				to += copy;
  3.1157 -			}
  3.1158 -			start = end;
  3.1159 -		}
  3.1160 -	}
  3.1161 -	if (len == 0)
  3.1162 -		return 0;
  3.1163 -
  3.1164 -fault:
  3.1165 -	return -EFAULT;
  3.1166 -}
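A sketch (hypothetical helper): gather bytes from a possibly fragmented skb into a flat buffer, crossing frags and the frag_list transparently:

static int demo_peek_bytes(const struct sk_buff *skb, void *buf, int len)
{
	if (len < 0 || (int)skb->len < len)
		return -EINVAL;
	return skb_copy_bits(skb, 0, buf, len);	/* 0 or -EFAULT */
}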
  3.1167 -
  3.1168 -/* Checksum skb data. */
  3.1169 -
  3.1170 -unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum)
  3.1171 -{
  3.1172 -	int i, copy;
  3.1173 -	int start = skb->len - skb->data_len;
  3.1174 -	int pos = 0;
  3.1175 -
  3.1176 -	/* Checksum header. */
  3.1177 -	if ((copy = start-offset) > 0) {
  3.1178 -		if (copy > len)
  3.1179 -			copy = len;
  3.1180 -		csum = csum_partial(skb->data+offset, copy, csum);
  3.1181 -		if ((len -= copy) == 0)
  3.1182 -			return csum;
  3.1183 -		offset += copy;
  3.1184 -		pos = copy;
  3.1185 -	}
  3.1186 -
  3.1187 -	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
  3.1188 -		int end;
  3.1189 -
  3.1190 -		BUG_TRAP(start <= offset+len);
  3.1191 -
  3.1192 -		end = start + skb_shinfo(skb)->frags[i].size;
  3.1193 -		if ((copy = end-offset) > 0) {
  3.1194 -			unsigned int csum2;
  3.1195 -			u8 *vaddr;
  3.1196 -			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  3.1197 -
  3.1198 -			if (copy > len)
  3.1199 -				copy = len;
  3.1200 -			vaddr = kmap_skb_frag(frag);
  3.1201 -			csum2 = csum_partial(vaddr + frag->page_offset +
  3.1202 -					     offset-start, copy, 0);
  3.1203 -			kunmap_skb_frag(vaddr);
  3.1204 -			csum = csum_block_add(csum, csum2, pos);
  3.1205 -			if (!(len -= copy))
  3.1206 -				return csum;
  3.1207 -			offset += copy;
  3.1208 -			pos += copy;
  3.1209 -		}
  3.1210 -		start = end;
  3.1211 -	}
  3.1212 -
  3.1213 -	if (skb_shinfo(skb)->frag_list) {
  3.1214 -		struct sk_buff *list;
  3.1215 -
  3.1216 -		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
  3.1217 -			int end;
  3.1218 -
  3.1219 -			BUG_TRAP(start <= offset+len);
  3.1220 -
  3.1221 -			end = start + list->len;
  3.1222 -			if ((copy = end-offset) > 0) {
  3.1223 -				unsigned int csum2;
  3.1224 -				if (copy > len)
  3.1225 -					copy = len;
  3.1226 -				csum2 = skb_checksum(list, offset-start, copy, 0);
  3.1227 -				csum = csum_block_add(csum, csum2, pos);
  3.1228 -				if ((len -= copy) == 0)
  3.1229 -					return csum;
  3.1230 -				offset += copy;
  3.1231 -				pos += copy;
  3.1232 -			}
  3.1233 -			start = end;
  3.1234 -		}
  3.1235 -	}
  3.1236 -	if (len == 0)
  3.1237 -		return csum;
  3.1238 -
  3.1239 -	BUG();
  3.1240 -	return csum;
  3.1241 -}
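A sketch (hypothetical helper) folding a whole-packet checksum to the 16-bit ones-complement form used in IP:

#include <net/checksum.h>

static unsigned short demo_full_csum(const struct sk_buff *skb)
{
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}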
  3.1242 -
  3.1243 -/* Both of above in one bottle. */
  3.1244 -
  3.1245 -unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum)
  3.1246 -{
  3.1247 -	int i, copy;
  3.1248 -	int start = skb->len - skb->data_len;
  3.1249 -	int pos = 0;
  3.1250 -
  3.1251 -	/* Copy header. */
  3.1252 -	if ((copy = start-offset) > 0) {
  3.1253 -		if (copy > len)
  3.1254 -			copy = len;
  3.1255 -		csum = csum_partial_copy_nocheck(skb->data+offset, to, copy, csum);
  3.1256 -		if ((len -= copy) == 0)
  3.1257 -			return csum;
  3.1258 -		offset += copy;
  3.1259 -		to += copy;
  3.1260 -		pos = copy;
  3.1261 -	}
  3.1262 -
  3.1263 -	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
  3.1264 -		int end;
  3.1265 -
  3.1266 -		BUG_TRAP(start <= offset+len);
  3.1267 -
  3.1268 -		end = start + skb_shinfo(skb)->frags[i].size;
  3.1269 -		if ((copy = end-offset) > 0) {
  3.1270 -			unsigned int csum2;
  3.1271 -			u8 *vaddr;
  3.1272 -			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  3.1273 -
  3.1274 -			if (copy > len)
  3.1275 -				copy = len;
  3.1276 -			vaddr = kmap_skb_frag(frag);
  3.1277 -			csum2 = csum_partial_copy_nocheck(vaddr + frag->page_offset +
  3.1278 -						      offset-start, to, copy, 0);
  3.1279 -			kunmap_skb_frag(vaddr);
  3.1280 -			csum = csum_block_add(csum, csum2, pos);
  3.1281 -			if (!(len -= copy))
  3.1282 -				return csum;
  3.1283 -			offset += copy;
  3.1284 -			to += copy;
  3.1285 -			pos += copy;
  3.1286 -		}
  3.1287 -		start = end;
  3.1288 -	}
  3.1289 -
  3.1290 -	if (skb_shinfo(skb)->frag_list) {
  3.1291 -		struct sk_buff *list;
  3.1292 -
  3.1293 -		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
  3.1294 -			unsigned int csum2;
  3.1295 -			int end;
  3.1296 -
  3.1297 -			BUG_TRAP(start <= offset+len);
  3.1298 -
  3.1299 -			end = start + list->len;
  3.1300 -			if ((copy = end-offset) > 0) {
  3.1301 -				if (copy > len)
  3.1302 -					copy = len;
  3.1303 -				csum2 = skb_copy_and_csum_bits(list, offset-start, to, copy, 0);
  3.1304 -				csum = csum_block_add(csum, csum2, pos);
  3.1305 -				if ((len -= copy) == 0)
  3.1306 -					return csum;
  3.1307 -				offset += copy;
  3.1308 -				to += copy;
  3.1309 -				pos += copy;
  3.1310 -			}
  3.1311 -			start = end;
  3.1312 -		}
  3.1313 -	}
  3.1314 -	if (len == 0)
  3.1315 -		return csum;
  3.1316 -
  3.1317 -	BUG();
  3.1318 -	return csum;
  3.1319 -}
  3.1320 -
  3.1321 -void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
  3.1322 -{
  3.1323 -	unsigned int csum;
  3.1324 -	long csstart;
  3.1325 -
  3.1326 -	if (skb->ip_summed == CHECKSUM_HW)
  3.1327 -		csstart = skb->h.raw - skb->data;
  3.1328 -	else
  3.1329 -		csstart = skb->len - skb->data_len;
  3.1330 -
  3.1331 -	if (csstart > skb->len - skb->data_len)
  3.1332 -		BUG();
  3.1333 -
  3.1334 -	memcpy(to, skb->data, csstart);
  3.1335 -
  3.1336 -	csum = 0;
  3.1337 -	if (csstart != skb->len)
  3.1338 -		csum = skb_copy_and_csum_bits(skb, csstart, to+csstart,
  3.1339 -				skb->len-csstart, 0);
  3.1340 -
  3.1341 -	if (skb->ip_summed == CHECKSUM_HW) {
  3.1342 -		long csstuff = csstart + skb->csum;
  3.1343 -
  3.1344 -		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
  3.1345 -	}
  3.1346 -}
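A sketch of the intended consumer (hypothetical programmed-I/O driver): copy the frame into the device buffer while finishing a deferred CHECKSUM_HW checksum:

static void demo_pio_xmit(struct sk_buff *skb, u8 *tx_buf)
{
	skb_copy_and_csum_dev(skb, tx_buf);
	/* tx_buf now holds skb->len bytes with the checksum filled in. */
}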
  3.1347 -
  3.1348 -#if 0
  3.1349 -/* 
  3.1350 - * 	Tune the memory allocator for a new MTU size.
  3.1351 - */
  3.1352 -void skb_add_mtu(int mtu)
  3.1353 -{
  3.1354 -	/* Must match allocation in alloc_skb */
  3.1355 -	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
  3.1356 -
  3.1357 -	kmem_add_cache_size(mtu);
  3.1358 -}
  3.1359 -#endif
  3.1360 -
  3.1361 -void __init skb_init(void)
  3.1362 -{
  3.1363 -	int i;
  3.1364 -
  3.1365 -	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
  3.1366 -					      sizeof(struct sk_buff),
  3.1367 -					      0,
  3.1368 -					      SLAB_HWCACHE_ALIGN,
  3.1369 -					      skb_headerinit, NULL);
  3.1370 -	if (!skbuff_head_cache)
  3.1371 -		panic("cannot create skbuff cache");
  3.1372 -
  3.1373 -        //init_net_pages(NUM_NET_PAGES);
  3.1374 -
  3.1375 -	for (i=0; i<NR_CPUS; i++)
  3.1376 -		skb_queue_head_init(&skb_head_pool[i].list);
  3.1377 -}