ia64/linux-2.6.18-xen.hg

view drivers/net/ppp_generic.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
line source
1 /*
2 * Generic PPP layer for Linux.
3 *
4 * Copyright 1999-2002 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * The generic PPP layer handles the PPP network interfaces, the
12 * /dev/ppp device, packet and VJ compression, and multilink.
13 * It talks to PPP `channels' via the interface defined in
14 * include/linux/ppp_channel.h. Channels provide the basic means for
15 * sending and receiving PPP frames on some kind of communications
16 * channel.
17 *
18 * Part of the code in this driver was inspired by the old async-only
19 * PPP driver, written by Michael Callahan and Al Longyear, and
20 * subsequently hacked by Paul Mackerras.
21 *
22 * ==FILEVERSION 20041108==
23 */
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/kmod.h>
28 #include <linux/init.h>
29 #include <linux/list.h>
30 #include <linux/netdevice.h>
31 #include <linux/poll.h>
32 #include <linux/ppp_defs.h>
33 #include <linux/filter.h>
34 #include <linux/if_ppp.h>
35 #include <linux/ppp_channel.h>
36 #include <linux/ppp-comp.h>
37 #include <linux/skbuff.h>
38 #include <linux/rtnetlink.h>
39 #include <linux/if_arp.h>
40 #include <linux/ip.h>
41 #include <linux/tcp.h>
42 #include <linux/spinlock.h>
43 #include <linux/smp_lock.h>
44 #include <linux/rwsem.h>
45 #include <linux/stddef.h>
46 #include <linux/device.h>
47 #include <linux/mutex.h>
48 #include <net/slhc_vj.h>
49 #include <asm/atomic.h>
51 #define PPP_VERSION "2.4.2"
53 /*
54 * Network protocols we support.
55 */
56 #define NP_IP 0 /* Internet Protocol V4 */
57 #define NP_IPV6 1 /* Internet Protocol V6 */
58 #define NP_IPX 2 /* IPX protocol */
59 #define NP_AT 3 /* Appletalk protocol */
60 #define NP_MPLS_UC 4 /* MPLS unicast */
61 #define NP_MPLS_MC 5 /* MPLS multicast */
62 #define NUM_NP 6 /* Number of NPs. */
64 #define MPHDRLEN 6 /* multilink protocol header length */
65 #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
66 #define MIN_FRAG_SIZE 64
68 /*
69 * An instance of /dev/ppp can be associated with either a ppp
70 * interface unit or a ppp channel. In both cases, file->private_data
71 * points to one of these.
72 */
73 struct ppp_file {
74 enum {
75 INTERFACE=1, CHANNEL
76 } kind;
77 struct sk_buff_head xq; /* pppd transmit queue */
78 struct sk_buff_head rq; /* receive queue for pppd */
79 wait_queue_head_t rwait; /* for poll on reading /dev/ppp */
80 atomic_t refcnt; /* # refs (incl /dev/ppp attached) */
81 int hdrlen; /* space to leave for headers */
82 int index; /* interface unit / channel number */
83 int dead; /* unit/channel has been shut down */
84 };
86 #define PF_TO_X(pf, X) ((X *)((char *)(pf) - offsetof(X, file)))
88 #define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
89 #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
91 #define ROUNDUP(n, x) (((n) + (x) - 1) / (x))
93 /*
94 * Data structure describing one ppp unit.
95 * A ppp unit corresponds to a ppp network interface device
96 * and represents a multilink bundle.
97 * It can have 0 or more ppp channels connected to it.
98 */
99 struct ppp {
100 struct ppp_file file; /* stuff for read/write/poll 0 */
101 struct file *owner; /* file that owns this unit 48 */
102 struct list_head channels; /* list of attached channels 4c */
103 int n_channels; /* how many channels are attached 54 */
104 spinlock_t rlock; /* lock for receive side 58 */
105 spinlock_t wlock; /* lock for transmit side 5c */
106 int mru; /* max receive unit 60 */
107 unsigned int flags; /* control bits 64 */
108 unsigned int xstate; /* transmit state bits 68 */
109 unsigned int rstate; /* receive state bits 6c */
110 int debug; /* debug flags 70 */
111 struct slcompress *vj; /* state for VJ header compression */
112 enum NPmode npmode[NUM_NP]; /* what to do with each net proto 78 */
113 struct sk_buff *xmit_pending; /* a packet ready to go out 88 */
114 struct compressor *xcomp; /* transmit packet compressor 8c */
115 void *xc_state; /* its internal state 90 */
116 struct compressor *rcomp; /* receive decompressor 94 */
117 void *rc_state; /* its internal state 98 */
118 unsigned long last_xmit; /* jiffies when last pkt sent 9c */
119 unsigned long last_recv; /* jiffies when last pkt rcvd a0 */
120 struct net_device *dev; /* network interface device a4 */
121 #ifdef CONFIG_PPP_MULTILINK
122 int nxchan; /* next channel to send something on */
123 u32 nxseq; /* next sequence number to send */
124 int mrru; /* MP: max reconst. receive unit */
125 u32 nextseq; /* MP: seq no of next packet */
126 u32 minseq; /* MP: min of most recent seqnos */
127 struct sk_buff_head mrq; /* MP: receive reconstruction queue */
128 #endif /* CONFIG_PPP_MULTILINK */
129 struct net_device_stats stats; /* statistics */
130 #ifdef CONFIG_PPP_FILTER
131 struct sock_filter *pass_filter; /* filter for packets to pass */
132 struct sock_filter *active_filter;/* filter for pkts to reset idle */
133 unsigned pass_len, active_len;
134 #endif /* CONFIG_PPP_FILTER */
135 };
137 /*
138 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
139 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
140 * SC_MUST_COMP
141 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
142 * Bits in xstate: SC_COMP_RUN
143 */
144 #define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
145 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
146 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
148 /*
149 * Private data structure for each channel.
150 * This includes the data structure used for multilink.
151 */
152 struct channel {
153 struct ppp_file file; /* stuff for read/write/poll */
154 struct list_head list; /* link in all/new_channels list */
155 struct ppp_channel *chan; /* public channel data structure */
156 struct rw_semaphore chan_sem; /* protects `chan' during chan ioctl */
157 spinlock_t downl; /* protects `chan', file.xq dequeue */
158 struct ppp *ppp; /* ppp unit we're connected to */
159 struct list_head clist; /* link in list of channels per unit */
160 rwlock_t upl; /* protects `ppp' */
161 #ifdef CONFIG_PPP_MULTILINK
162 u8 avail; /* flag used in multilink stuff */
163 u8 had_frag; /* >= 1 fragments have been sent */
164 u32 lastseq; /* MP: last sequence # received */
165 #endif /* CONFIG_PPP_MULTILINK */
166 };
168 /*
169 * SMP locking issues:
170 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
171 * list and the ppp.n_channels field, you need to take both locks
172 * before you modify them.
173 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
174 * channel.downl.
175 */
177 /*
178 * A cardmap represents a mapping from unsigned integers to pointers,
179 * and provides a fast "find lowest unused number" operation.
180 * It uses a broad (32-way) tree with a bitmap at each level.
181 * It is designed to be space-efficient for small numbers of entries
182 * and time-efficient for large numbers of entries.
183 */
184 #define CARDMAP_ORDER 5
185 #define CARDMAP_WIDTH (1U << CARDMAP_ORDER)
186 #define CARDMAP_MASK (CARDMAP_WIDTH - 1)
188 struct cardmap {
189 int shift;
190 unsigned long inuse;
191 struct cardmap *parent;
192 void *ptr[CARDMAP_WIDTH];
193 };
194 static void *cardmap_get(struct cardmap *map, unsigned int nr);
195 static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
196 static unsigned int cardmap_find_first_free(struct cardmap *map);
197 static void cardmap_destroy(struct cardmap **map);
199 /*
200 * all_ppp_mutex protects the all_ppp_units mapping.
201 * It also ensures that finding a ppp unit in the all_ppp_units map
202 * and updating its file.refcnt field is atomic.
203 */
204 static DEFINE_MUTEX(all_ppp_mutex);
205 static struct cardmap *all_ppp_units;
206 static atomic_t ppp_unit_count = ATOMIC_INIT(0);
208 /*
209 * all_channels_lock protects all_channels and last_channel_index,
210 * and the atomicity of find a channel and updating its file.refcnt
211 * field.
212 */
213 static DEFINE_SPINLOCK(all_channels_lock);
214 static LIST_HEAD(all_channels);
215 static LIST_HEAD(new_channels);
216 static int last_channel_index;
217 static atomic_t channel_count = ATOMIC_INIT(0);
219 /* Get the PPP protocol number from a skb */
220 #define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
222 /* We limit the length of ppp->file.rq to this (arbitrary) value */
223 #define PPP_MAX_RQLEN 32
225 /*
226 * Maximum number of multilink fragments queued up.
227 * This has to be large enough to cope with the maximum latency of
228 * the slowest channel relative to the others. Strictly it should
229 * depend on the number of channels and their characteristics.
230 */
231 #define PPP_MP_MAX_QLEN 128
233 /* Multilink header bits. */
234 #define B 0x80 /* this fragment begins a packet */
235 #define E 0x40 /* this fragment ends a packet */
237 /* Compare multilink sequence numbers (assumed to be 32 bits wide) */
238 #define seq_before(a, b) ((s32)((a) - (b)) < 0)
239 #define seq_after(a, b) ((s32)((a) - (b)) > 0)
241 /* Prototypes. */
242 static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
243 unsigned int cmd, unsigned long arg);
244 static void ppp_xmit_process(struct ppp *ppp);
245 static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
246 static void ppp_push(struct ppp *ppp);
247 static void ppp_channel_push(struct channel *pch);
248 static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
249 struct channel *pch);
250 static void ppp_receive_error(struct ppp *ppp);
251 static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
252 static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
253 struct sk_buff *skb);
254 #ifdef CONFIG_PPP_MULTILINK
255 static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
256 struct channel *pch);
257 static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
258 static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
259 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
260 #endif /* CONFIG_PPP_MULTILINK */
261 static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
262 static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
263 static void ppp_ccp_closed(struct ppp *ppp);
264 static struct compressor *find_compressor(int type);
265 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
266 static struct ppp *ppp_create_interface(int unit, int *retp);
267 static void init_ppp_file(struct ppp_file *pf, int kind);
268 static void ppp_shutdown_interface(struct ppp *ppp);
269 static void ppp_destroy_interface(struct ppp *ppp);
270 static struct ppp *ppp_find_unit(int unit);
271 static struct channel *ppp_find_channel(int unit);
272 static int ppp_connect_channel(struct channel *pch, int unit);
273 static int ppp_disconnect_channel(struct channel *pch);
274 static void ppp_destroy_channel(struct channel *pch);
276 static struct class *ppp_class;
278 /* Translates a PPP protocol number to a NP index (NP == network protocol) */
279 static inline int proto_to_npindex(int proto)
280 {
281 switch (proto) {
282 case PPP_IP:
283 return NP_IP;
284 case PPP_IPV6:
285 return NP_IPV6;
286 case PPP_IPX:
287 return NP_IPX;
288 case PPP_AT:
289 return NP_AT;
290 case PPP_MPLS_UC:
291 return NP_MPLS_UC;
292 case PPP_MPLS_MC:
293 return NP_MPLS_MC;
294 }
295 return -EINVAL;
296 }
298 /* Translates an NP index into a PPP protocol number */
299 static const int npindex_to_proto[NUM_NP] = {
300 PPP_IP,
301 PPP_IPV6,
302 PPP_IPX,
303 PPP_AT,
304 PPP_MPLS_UC,
305 PPP_MPLS_MC,
306 };
308 /* Translates an ethertype into an NP index */
309 static inline int ethertype_to_npindex(int ethertype)
310 {
311 switch (ethertype) {
312 case ETH_P_IP:
313 return NP_IP;
314 case ETH_P_IPV6:
315 return NP_IPV6;
316 case ETH_P_IPX:
317 return NP_IPX;
318 case ETH_P_PPPTALK:
319 case ETH_P_ATALK:
320 return NP_AT;
321 case ETH_P_MPLS_UC:
322 return NP_MPLS_UC;
323 case ETH_P_MPLS_MC:
324 return NP_MPLS_MC;
325 }
326 return -1;
327 }
329 /* Translates an NP index into an ethertype */
330 static const int npindex_to_ethertype[NUM_NP] = {
331 ETH_P_IP,
332 ETH_P_IPV6,
333 ETH_P_IPX,
334 ETH_P_PPPTALK,
335 ETH_P_MPLS_UC,
336 ETH_P_MPLS_MC,
337 };
339 /*
340 * Locking shorthand.
341 */
342 #define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock)
343 #define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock)
344 #define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock)
345 #define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock)
346 #define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \
347 ppp_recv_lock(ppp); } while (0)
348 #define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \
349 ppp_xmit_unlock(ppp); } while (0)
351 /*
352 * /dev/ppp device routines.
353 * The /dev/ppp device is used by pppd to control the ppp unit.
354 * It supports the read, write, ioctl and poll functions.
355 * Open instances of /dev/ppp can be in one of three states:
356 * unattached, attached to a ppp unit, or attached to a ppp channel.
357 */
358 static int ppp_open(struct inode *inode, struct file *file)
359 {
360 /*
361 * This could (should?) be enforced by the permissions on /dev/ppp.
362 */
363 if (!capable(CAP_NET_ADMIN))
364 return -EPERM;
365 return 0;
366 }
368 static int ppp_release(struct inode *inode, struct file *file)
369 {
370 struct ppp_file *pf = file->private_data;
371 struct ppp *ppp;
373 if (pf != 0) {
374 file->private_data = NULL;
375 if (pf->kind == INTERFACE) {
376 ppp = PF_TO_PPP(pf);
377 if (file == ppp->owner)
378 ppp_shutdown_interface(ppp);
379 }
380 if (atomic_dec_and_test(&pf->refcnt)) {
381 switch (pf->kind) {
382 case INTERFACE:
383 ppp_destroy_interface(PF_TO_PPP(pf));
384 break;
385 case CHANNEL:
386 ppp_destroy_channel(PF_TO_CHANNEL(pf));
387 break;
388 }
389 }
390 }
391 return 0;
392 }
394 static ssize_t ppp_read(struct file *file, char __user *buf,
395 size_t count, loff_t *ppos)
396 {
397 struct ppp_file *pf = file->private_data;
398 DECLARE_WAITQUEUE(wait, current);
399 ssize_t ret;
400 struct sk_buff *skb = NULL;
402 ret = count;
404 if (pf == 0)
405 return -ENXIO;
406 add_wait_queue(&pf->rwait, &wait);
407 for (;;) {
408 set_current_state(TASK_INTERRUPTIBLE);
409 skb = skb_dequeue(&pf->rq);
410 if (skb)
411 break;
412 ret = 0;
413 if (pf->dead)
414 break;
415 if (pf->kind == INTERFACE) {
416 /*
417 * Return 0 (EOF) on an interface that has no
418 * channels connected, unless it is looping
419 * network traffic (demand mode).
420 */
421 struct ppp *ppp = PF_TO_PPP(pf);
422 if (ppp->n_channels == 0
423 && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
424 break;
425 }
426 ret = -EAGAIN;
427 if (file->f_flags & O_NONBLOCK)
428 break;
429 ret = -ERESTARTSYS;
430 if (signal_pending(current))
431 break;
432 schedule();
433 }
434 set_current_state(TASK_RUNNING);
435 remove_wait_queue(&pf->rwait, &wait);
437 if (skb == 0)
438 goto out;
440 ret = -EOVERFLOW;
441 if (skb->len > count)
442 goto outf;
443 ret = -EFAULT;
444 if (copy_to_user(buf, skb->data, skb->len))
445 goto outf;
446 ret = skb->len;
448 outf:
449 kfree_skb(skb);
450 out:
451 return ret;
452 }
454 static ssize_t ppp_write(struct file *file, const char __user *buf,
455 size_t count, loff_t *ppos)
456 {
457 struct ppp_file *pf = file->private_data;
458 struct sk_buff *skb;
459 ssize_t ret;
461 if (pf == 0)
462 return -ENXIO;
463 ret = -ENOMEM;
464 skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
465 if (skb == 0)
466 goto out;
467 skb_reserve(skb, pf->hdrlen);
468 ret = -EFAULT;
469 if (copy_from_user(skb_put(skb, count), buf, count)) {
470 kfree_skb(skb);
471 goto out;
472 }
474 skb_queue_tail(&pf->xq, skb);
476 switch (pf->kind) {
477 case INTERFACE:
478 ppp_xmit_process(PF_TO_PPP(pf));
479 break;
480 case CHANNEL:
481 ppp_channel_push(PF_TO_CHANNEL(pf));
482 break;
483 }
485 ret = count;
487 out:
488 return ret;
489 }
491 /* No kernel lock - fine */
492 static unsigned int ppp_poll(struct file *file, poll_table *wait)
493 {
494 struct ppp_file *pf = file->private_data;
495 unsigned int mask;
497 if (pf == 0)
498 return 0;
499 poll_wait(file, &pf->rwait, wait);
500 mask = POLLOUT | POLLWRNORM;
501 if (skb_peek(&pf->rq) != 0)
502 mask |= POLLIN | POLLRDNORM;
503 if (pf->dead)
504 mask |= POLLHUP;
505 else if (pf->kind == INTERFACE) {
506 /* see comment in ppp_read */
507 struct ppp *ppp = PF_TO_PPP(pf);
508 if (ppp->n_channels == 0
509 && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
510 mask |= POLLIN | POLLRDNORM;
511 }
513 return mask;
514 }
516 #ifdef CONFIG_PPP_FILTER
517 static int get_filter(void __user *arg, struct sock_filter **p)
518 {
519 struct sock_fprog uprog;
520 struct sock_filter *code = NULL;
521 int len, err;
523 if (copy_from_user(&uprog, arg, sizeof(uprog)))
524 return -EFAULT;
526 if (!uprog.len) {
527 *p = NULL;
528 return 0;
529 }
531 len = uprog.len * sizeof(struct sock_filter);
532 code = kmalloc(len, GFP_KERNEL);
533 if (code == NULL)
534 return -ENOMEM;
536 if (copy_from_user(code, uprog.filter, len)) {
537 kfree(code);
538 return -EFAULT;
539 }
541 err = sk_chk_filter(code, uprog.len);
542 if (err) {
543 kfree(code);
544 return err;
545 }
547 *p = code;
548 return uprog.len;
549 }
550 #endif /* CONFIG_PPP_FILTER */
552 static int ppp_ioctl(struct inode *inode, struct file *file,
553 unsigned int cmd, unsigned long arg)
554 {
555 struct ppp_file *pf = file->private_data;
556 struct ppp *ppp;
557 int err = -EFAULT, val, val2, i;
558 struct ppp_idle idle;
559 struct npioctl npi;
560 int unit, cflags;
561 struct slcompress *vj;
562 void __user *argp = (void __user *)arg;
563 int __user *p = argp;
565 if (pf == 0)
566 return ppp_unattached_ioctl(pf, file, cmd, arg);
568 if (cmd == PPPIOCDETACH) {
569 /*
570 * We have to be careful here... if the file descriptor
571 * has been dup'd, we could have another process in the
572 * middle of a poll using the same file *, so we had
573 * better not free the interface data structures -
574 * instead we fail the ioctl. Even in this case, we
575 * shut down the interface if we are the owner of it.
576 * Actually, we should get rid of PPPIOCDETACH, userland
577 * (i.e. pppd) could achieve the same effect by closing
578 * this fd and reopening /dev/ppp.
579 */
580 err = -EINVAL;
581 if (pf->kind == INTERFACE) {
582 ppp = PF_TO_PPP(pf);
583 if (file == ppp->owner)
584 ppp_shutdown_interface(ppp);
585 }
586 if (atomic_read(&file->f_count) <= 2) {
587 ppp_release(inode, file);
588 err = 0;
589 } else
590 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n",
591 atomic_read(&file->f_count));
592 return err;
593 }
595 if (pf->kind == CHANNEL) {
596 struct channel *pch = PF_TO_CHANNEL(pf);
597 struct ppp_channel *chan;
599 switch (cmd) {
600 case PPPIOCCONNECT:
601 if (get_user(unit, p))
602 break;
603 err = ppp_connect_channel(pch, unit);
604 break;
606 case PPPIOCDISCONN:
607 err = ppp_disconnect_channel(pch);
608 break;
610 default:
611 down_read(&pch->chan_sem);
612 chan = pch->chan;
613 err = -ENOTTY;
614 if (chan && chan->ops->ioctl)
615 err = chan->ops->ioctl(chan, cmd, arg);
616 up_read(&pch->chan_sem);
617 }
618 return err;
619 }
621 if (pf->kind != INTERFACE) {
622 /* can't happen */
623 printk(KERN_ERR "PPP: not interface or channel??\n");
624 return -EINVAL;
625 }
627 ppp = PF_TO_PPP(pf);
628 switch (cmd) {
629 case PPPIOCSMRU:
630 if (get_user(val, p))
631 break;
632 ppp->mru = val;
633 err = 0;
634 break;
636 case PPPIOCSFLAGS:
637 if (get_user(val, p))
638 break;
639 ppp_lock(ppp);
640 cflags = ppp->flags & ~val;
641 ppp->flags = val & SC_FLAG_BITS;
642 ppp_unlock(ppp);
643 if (cflags & SC_CCP_OPEN)
644 ppp_ccp_closed(ppp);
645 err = 0;
646 break;
648 case PPPIOCGFLAGS:
649 val = ppp->flags | ppp->xstate | ppp->rstate;
650 if (put_user(val, p))
651 break;
652 err = 0;
653 break;
655 case PPPIOCSCOMPRESS:
656 err = ppp_set_compress(ppp, arg);
657 break;
659 case PPPIOCGUNIT:
660 if (put_user(ppp->file.index, p))
661 break;
662 err = 0;
663 break;
665 case PPPIOCSDEBUG:
666 if (get_user(val, p))
667 break;
668 ppp->debug = val;
669 err = 0;
670 break;
672 case PPPIOCGDEBUG:
673 if (put_user(ppp->debug, p))
674 break;
675 err = 0;
676 break;
678 case PPPIOCGIDLE:
679 idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
680 idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
681 if (copy_to_user(argp, &idle, sizeof(idle)))
682 break;
683 err = 0;
684 break;
686 case PPPIOCSMAXCID:
687 if (get_user(val, p))
688 break;
689 val2 = 15;
690 if ((val >> 16) != 0) {
691 val2 = val >> 16;
692 val &= 0xffff;
693 }
694 vj = slhc_init(val2+1, val+1);
695 if (vj == 0) {
696 printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
697 err = -ENOMEM;
698 break;
699 }
700 ppp_lock(ppp);
701 if (ppp->vj != 0)
702 slhc_free(ppp->vj);
703 ppp->vj = vj;
704 ppp_unlock(ppp);
705 err = 0;
706 break;
708 case PPPIOCGNPMODE:
709 case PPPIOCSNPMODE:
710 if (copy_from_user(&npi, argp, sizeof(npi)))
711 break;
712 err = proto_to_npindex(npi.protocol);
713 if (err < 0)
714 break;
715 i = err;
716 if (cmd == PPPIOCGNPMODE) {
717 err = -EFAULT;
718 npi.mode = ppp->npmode[i];
719 if (copy_to_user(argp, &npi, sizeof(npi)))
720 break;
721 } else {
722 ppp->npmode[i] = npi.mode;
723 /* we may be able to transmit more packets now (??) */
724 netif_wake_queue(ppp->dev);
725 }
726 err = 0;
727 break;
729 #ifdef CONFIG_PPP_FILTER
730 case PPPIOCSPASS:
731 {
732 struct sock_filter *code;
733 err = get_filter(argp, &code);
734 if (err >= 0) {
735 ppp_lock(ppp);
736 kfree(ppp->pass_filter);
737 ppp->pass_filter = code;
738 ppp->pass_len = err;
739 ppp_unlock(ppp);
740 err = 0;
741 }
742 break;
743 }
744 case PPPIOCSACTIVE:
745 {
746 struct sock_filter *code;
747 err = get_filter(argp, &code);
748 if (err >= 0) {
749 ppp_lock(ppp);
750 kfree(ppp->active_filter);
751 ppp->active_filter = code;
752 ppp->active_len = err;
753 ppp_unlock(ppp);
754 err = 0;
755 }
756 break;
757 }
758 #endif /* CONFIG_PPP_FILTER */
760 #ifdef CONFIG_PPP_MULTILINK
761 case PPPIOCSMRRU:
762 if (get_user(val, p))
763 break;
764 ppp_recv_lock(ppp);
765 ppp->mrru = val;
766 ppp_recv_unlock(ppp);
767 err = 0;
768 break;
769 #endif /* CONFIG_PPP_MULTILINK */
771 default:
772 err = -ENOTTY;
773 }
775 return err;
776 }
778 static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
779 unsigned int cmd, unsigned long arg)
780 {
781 int unit, err = -EFAULT;
782 struct ppp *ppp;
783 struct channel *chan;
784 int __user *p = (int __user *)arg;
786 switch (cmd) {
787 case PPPIOCNEWUNIT:
788 /* Create a new ppp unit */
789 if (get_user(unit, p))
790 break;
791 ppp = ppp_create_interface(unit, &err);
792 if (ppp == 0)
793 break;
794 file->private_data = &ppp->file;
795 ppp->owner = file;
796 err = -EFAULT;
797 if (put_user(ppp->file.index, p))
798 break;
799 err = 0;
800 break;
802 case PPPIOCATTACH:
803 /* Attach to an existing ppp unit */
804 if (get_user(unit, p))
805 break;
806 mutex_lock(&all_ppp_mutex);
807 err = -ENXIO;
808 ppp = ppp_find_unit(unit);
809 if (ppp != 0) {
810 atomic_inc(&ppp->file.refcnt);
811 file->private_data = &ppp->file;
812 err = 0;
813 }
814 mutex_unlock(&all_ppp_mutex);
815 break;
817 case PPPIOCATTCHAN:
818 if (get_user(unit, p))
819 break;
820 spin_lock_bh(&all_channels_lock);
821 err = -ENXIO;
822 chan = ppp_find_channel(unit);
823 if (chan != 0) {
824 atomic_inc(&chan->file.refcnt);
825 file->private_data = &chan->file;
826 err = 0;
827 }
828 spin_unlock_bh(&all_channels_lock);
829 break;
831 default:
832 err = -ENOTTY;
833 }
834 return err;
835 }
837 static struct file_operations ppp_device_fops = {
838 .owner = THIS_MODULE,
839 .read = ppp_read,
840 .write = ppp_write,
841 .poll = ppp_poll,
842 .ioctl = ppp_ioctl,
843 .open = ppp_open,
844 .release = ppp_release
845 };
847 #define PPP_MAJOR 108
849 /* Called at boot time if ppp is compiled into the kernel,
850 or at module load time (from init_module) if compiled as a module. */
851 static int __init ppp_init(void)
852 {
853 int err;
855 printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
856 err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
857 if (!err) {
858 ppp_class = class_create(THIS_MODULE, "ppp");
859 if (IS_ERR(ppp_class)) {
860 err = PTR_ERR(ppp_class);
861 goto out_chrdev;
862 }
863 class_device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
864 }
866 out:
867 if (err)
868 printk(KERN_ERR "failed to register PPP device (%d)\n", err);
869 return err;
871 out_chrdev:
872 unregister_chrdev(PPP_MAJOR, "ppp");
873 goto out;
874 }
876 /*
877 * Network interface unit routines.
878 */
879 static int
880 ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
881 {
882 struct ppp *ppp = (struct ppp *) dev->priv;
883 int npi, proto;
884 unsigned char *pp;
886 npi = ethertype_to_npindex(ntohs(skb->protocol));
887 if (npi < 0)
888 goto outf;
890 /* Drop, accept or reject the packet */
891 switch (ppp->npmode[npi]) {
892 case NPMODE_PASS:
893 break;
894 case NPMODE_QUEUE:
895 /* it would be nice to have a way to tell the network
896 system to queue this one up for later. */
897 goto outf;
898 case NPMODE_DROP:
899 case NPMODE_ERROR:
900 goto outf;
901 }
903 /* Put the 2-byte PPP protocol number on the front,
904 making sure there is room for the address and control fields. */
905 if (skb_headroom(skb) < PPP_HDRLEN) {
906 struct sk_buff *ns;
908 ns = alloc_skb(skb->len + dev->hard_header_len, GFP_ATOMIC);
909 if (ns == 0)
910 goto outf;
911 skb_reserve(ns, dev->hard_header_len);
912 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
913 kfree_skb(skb);
914 skb = ns;
915 }
916 pp = skb_push(skb, 2);
917 proto = npindex_to_proto[npi];
918 pp[0] = proto >> 8;
919 pp[1] = proto;
921 netif_stop_queue(dev);
922 skb_queue_tail(&ppp->file.xq, skb);
923 ppp_xmit_process(ppp);
924 return 0;
926 outf:
927 kfree_skb(skb);
928 ++ppp->stats.tx_dropped;
929 return 0;
930 }
932 static struct net_device_stats *
933 ppp_net_stats(struct net_device *dev)
934 {
935 struct ppp *ppp = (struct ppp *) dev->priv;
937 return &ppp->stats;
938 }
940 static int
941 ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
942 {
943 struct ppp *ppp = dev->priv;
944 int err = -EFAULT;
945 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
946 struct ppp_stats stats;
947 struct ppp_comp_stats cstats;
948 char *vers;
950 switch (cmd) {
951 case SIOCGPPPSTATS:
952 ppp_get_stats(ppp, &stats);
953 if (copy_to_user(addr, &stats, sizeof(stats)))
954 break;
955 err = 0;
956 break;
958 case SIOCGPPPCSTATS:
959 memset(&cstats, 0, sizeof(cstats));
960 if (ppp->xc_state != 0)
961 ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
962 if (ppp->rc_state != 0)
963 ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
964 if (copy_to_user(addr, &cstats, sizeof(cstats)))
965 break;
966 err = 0;
967 break;
969 case SIOCGPPPVER:
970 vers = PPP_VERSION;
971 if (copy_to_user(addr, vers, strlen(vers) + 1))
972 break;
973 err = 0;
974 break;
976 default:
977 err = -EINVAL;
978 }
980 return err;
981 }
983 static void ppp_setup(struct net_device *dev)
984 {
985 dev->hard_header_len = PPP_HDRLEN;
986 dev->mtu = PPP_MTU;
987 dev->addr_len = 0;
988 dev->tx_queue_len = 3;
989 dev->type = ARPHRD_PPP;
990 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
991 }
993 /*
994 * Transmit-side routines.
995 */
997 /*
998 * Called to do any work queued up on the transmit side
999 * that can now be done.
1000 */
1001 static void
1002 ppp_xmit_process(struct ppp *ppp)
1004 struct sk_buff *skb;
1006 ppp_xmit_lock(ppp);
1007 if (ppp->dev != 0) {
1008 ppp_push(ppp);
1009 while (ppp->xmit_pending == 0
1010 && (skb = skb_dequeue(&ppp->file.xq)) != 0)
1011 ppp_send_frame(ppp, skb);
1012 /* If there's no work left to do, tell the core net
1013 code that we can accept some more. */
1014 if (ppp->xmit_pending == 0 && skb_peek(&ppp->file.xq) == 0)
1015 netif_wake_queue(ppp->dev);
1017 ppp_xmit_unlock(ppp);
1020 static inline struct sk_buff *
1021 pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
1023 struct sk_buff *new_skb;
1024 int len;
1025 int new_skb_size = ppp->dev->mtu +
1026 ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
1027 int compressor_skb_size = ppp->dev->mtu +
1028 ppp->xcomp->comp_extra + PPP_HDRLEN;
1029 new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
1030 if (!new_skb) {
1031 if (net_ratelimit())
1032 printk(KERN_ERR "PPP: no memory (comp pkt)\n");
1033 return NULL;
1035 if (ppp->dev->hard_header_len > PPP_HDRLEN)
1036 skb_reserve(new_skb,
1037 ppp->dev->hard_header_len - PPP_HDRLEN);
1039 /* compressor still expects A/C bytes in hdr */
1040 len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
1041 new_skb->data, skb->len + 2,
1042 compressor_skb_size);
1043 if (len > 0 && (ppp->flags & SC_CCP_UP)) {
1044 kfree_skb(skb);
1045 skb = new_skb;
1046 skb_put(skb, len);
1047 skb_pull(skb, 2); /* pull off A/C bytes */
1048 } else if (len == 0) {
1049 /* didn't compress, or CCP not up yet */
1050 kfree_skb(new_skb);
1051 new_skb = skb;
1052 } else {
1053 /*
1054 * (len < 0)
1055 * MPPE requires that we do not send unencrypted
1056 * frames. The compressor will return -1 if we
1057 * should drop the frame. We cannot simply test
1058 * the compress_proto because MPPE and MPPC share
1059 * the same number.
1060 */
1061 if (net_ratelimit())
1062 printk(KERN_ERR "ppp: compressor dropped pkt\n");
1063 kfree_skb(skb);
1064 kfree_skb(new_skb);
1065 new_skb = NULL;
1067 return new_skb;
1070 /*
1071 * Compress and send a frame.
1072 * The caller should have locked the xmit path,
1073 * and xmit_pending should be 0.
1074 */
1075 static void
1076 ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1078 int proto = PPP_PROTO(skb);
1079 struct sk_buff *new_skb;
1080 int len;
1081 unsigned char *cp;
1083 if (proto < 0x8000) {
1084 #ifdef CONFIG_PPP_FILTER
1085 /* check if we should pass this packet */
1086 /* the filter instructions are constructed assuming
1087 a four-byte PPP header on each packet */
1088 *skb_push(skb, 2) = 1;
1089 if (ppp->pass_filter
1090 && sk_run_filter(skb, ppp->pass_filter,
1091 ppp->pass_len) == 0) {
1092 if (ppp->debug & 1)
1093 printk(KERN_DEBUG "PPP: outbound frame not passed\n");
1094 kfree_skb(skb);
1095 return;
1097 /* if this packet passes the active filter, record the time */
1098 if (!(ppp->active_filter
1099 && sk_run_filter(skb, ppp->active_filter,
1100 ppp->active_len) == 0))
1101 ppp->last_xmit = jiffies;
1102 skb_pull(skb, 2);
1103 #else
1104 /* for data packets, record the time */
1105 ppp->last_xmit = jiffies;
1106 #endif /* CONFIG_PPP_FILTER */
1109 ++ppp->stats.tx_packets;
1110 ppp->stats.tx_bytes += skb->len - 2;
1112 switch (proto) {
1113 case PPP_IP:
1114 if (ppp->vj == 0 || (ppp->flags & SC_COMP_TCP) == 0)
1115 break;
1116 /* try to do VJ TCP header compression */
1117 new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
1118 GFP_ATOMIC);
1119 if (new_skb == 0) {
1120 printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
1121 goto drop;
1123 skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
1124 cp = skb->data + 2;
1125 len = slhc_compress(ppp->vj, cp, skb->len - 2,
1126 new_skb->data + 2, &cp,
1127 !(ppp->flags & SC_NO_TCP_CCID));
1128 if (cp == skb->data + 2) {
1129 /* didn't compress */
1130 kfree_skb(new_skb);
1131 } else {
1132 if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
1133 proto = PPP_VJC_COMP;
1134 cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
1135 } else {
1136 proto = PPP_VJC_UNCOMP;
1137 cp[0] = skb->data[2];
1139 kfree_skb(skb);
1140 skb = new_skb;
1141 cp = skb_put(skb, len + 2);
1142 cp[0] = 0;
1143 cp[1] = proto;
1145 break;
1147 case PPP_CCP:
1148 /* peek at outbound CCP frames */
1149 ppp_ccp_peek(ppp, skb, 0);
1150 break;
1153 /* try to do packet compression */
1154 if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0
1155 && proto != PPP_LCP && proto != PPP_CCP) {
1156 if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
1157 if (net_ratelimit())
1158 printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
1159 goto drop;
1161 skb = pad_compress_skb(ppp, skb);
1162 if (!skb)
1163 goto drop;
1166 /*
1167 * If we are waiting for traffic (demand dialling),
1168 * queue it up for pppd to receive.
1169 */
1170 if (ppp->flags & SC_LOOP_TRAFFIC) {
1171 if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
1172 goto drop;
1173 skb_queue_tail(&ppp->file.rq, skb);
1174 wake_up_interruptible(&ppp->file.rwait);
1175 return;
1178 ppp->xmit_pending = skb;
1179 ppp_push(ppp);
1180 return;
1182 drop:
1183 if (skb)
1184 kfree_skb(skb);
1185 ++ppp->stats.tx_errors;
1188 /*
1189 * Try to send the frame in xmit_pending.
1190 * The caller should have the xmit path locked.
1191 */
1192 static void
1193 ppp_push(struct ppp *ppp)
1195 struct list_head *list;
1196 struct channel *pch;
1197 struct sk_buff *skb = ppp->xmit_pending;
1199 if (skb == 0)
1200 return;
1202 list = &ppp->channels;
1203 if (list_empty(list)) {
1204 /* nowhere to send the packet, just drop it */
1205 ppp->xmit_pending = NULL;
1206 kfree_skb(skb);
1207 return;
1210 if ((ppp->flags & SC_MULTILINK) == 0) {
1211 /* not doing multilink: send it down the first channel */
1212 list = list->next;
1213 pch = list_entry(list, struct channel, clist);
1215 spin_lock_bh(&pch->downl);
1216 if (pch->chan) {
1217 if (pch->chan->ops->start_xmit(pch->chan, skb))
1218 ppp->xmit_pending = NULL;
1219 } else {
1220 /* channel got unregistered */
1221 kfree_skb(skb);
1222 ppp->xmit_pending = NULL;
1224 spin_unlock_bh(&pch->downl);
1225 return;
1228 #ifdef CONFIG_PPP_MULTILINK
1229 /* Multilink: fragment the packet over as many links
1230 as can take the packet at the moment. */
1231 if (!ppp_mp_explode(ppp, skb))
1232 return;
1233 #endif /* CONFIG_PPP_MULTILINK */
1235 ppp->xmit_pending = NULL;
1236 kfree_skb(skb);
1239 #ifdef CONFIG_PPP_MULTILINK
1240 /*
1241 * Divide a packet to be transmitted into fragments and
1242 * send them out the individual links.
1243 */
1244 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1246 int len, fragsize;
1247 int i, bits, hdrlen, mtu;
1248 int flen;
1249 int navail, nfree;
1250 int nbigger;
1251 unsigned char *p, *q;
1252 struct list_head *list;
1253 struct channel *pch;
1254 struct sk_buff *frag;
1255 struct ppp_channel *chan;
1257 nfree = 0; /* # channels which have no packet already queued */
1258 navail = 0; /* total # of usable channels (not deregistered) */
1259 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1260 i = 0;
1261 list_for_each_entry(pch, &ppp->channels, clist) {
1262 navail += pch->avail = (pch->chan != NULL);
1263 if (pch->avail) {
1264 if (skb_queue_empty(&pch->file.xq) ||
1265 !pch->had_frag) {
1266 pch->avail = 2;
1267 ++nfree;
1269 if (!pch->had_frag && i < ppp->nxchan)
1270 ppp->nxchan = i;
1272 ++i;
1275 /*
1276 * Don't start sending this packet unless at least half of
1277 * the channels are free. This gives much better TCP
1278 * performance if we have a lot of channels.
1279 */
1280 if (nfree == 0 || nfree < navail / 2)
1281 return 0; /* can't take now, leave it in xmit_pending */
1283 /* Do protocol field compression (XXX this should be optional) */
1284 p = skb->data;
1285 len = skb->len;
1286 if (*p == 0) {
1287 ++p;
1288 --len;
1291 /*
1292 * Decide on fragment size.
1293 * We create a fragment for each free channel regardless of
1294 * how small they are (i.e. even 0 length) in order to minimize
1295 * the time that it will take to detect when a channel drops
1296 * a fragment.
1297 */
1298 fragsize = len;
1299 if (nfree > 1)
1300 fragsize = ROUNDUP(fragsize, nfree);
1301 /* nbigger channels get fragsize bytes, the rest get fragsize-1,
1302 except if nbigger==0, then they all get fragsize. */
1303 nbigger = len % nfree;
1305 /* skip to the channel after the one we last used
1306 and start at that one */
1307 list = &ppp->channels;
1308 for (i = 0; i < ppp->nxchan; ++i) {
1309 list = list->next;
1310 if (list == &ppp->channels) {
1311 i = 0;
1312 break;
1316 /* create a fragment for each channel */
1317 bits = B;
1318 while (nfree > 0 || len > 0) {
1319 list = list->next;
1320 if (list == &ppp->channels) {
1321 i = 0;
1322 continue;
1324 pch = list_entry(list, struct channel, clist);
1325 ++i;
1326 if (!pch->avail)
1327 continue;
1329 /*
1330 * Skip this channel if it has a fragment pending already and
1331 * we haven't given a fragment to all of the free channels.
1332 */
1333 if (pch->avail == 1) {
1334 if (nfree > 0)
1335 continue;
1336 } else {
1337 --nfree;
1338 pch->avail = 1;
1341 /* check the channel's mtu and whether it is still attached. */
1342 spin_lock_bh(&pch->downl);
1343 if (pch->chan == NULL) {
1344 /* can't use this channel, it's being deregistered */
1345 spin_unlock_bh(&pch->downl);
1346 pch->avail = 0;
1347 if (--navail == 0)
1348 break;
1349 continue;
1352 /*
1353 * Create a fragment for this channel of
1354 * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
1355 * If mtu+2-hdrlen < 4, that is a ridiculously small
1356 * MTU, so we use mtu = 2 + hdrlen.
1357 */
1358 if (fragsize > len)
1359 fragsize = len;
1360 flen = fragsize;
1361 mtu = pch->chan->mtu + 2 - hdrlen;
1362 if (mtu < 4)
1363 mtu = 4;
1364 if (flen > mtu)
1365 flen = mtu;
1366 if (flen == len && nfree == 0)
1367 bits |= E;
1368 frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
1369 if (frag == 0)
1370 goto noskb;
1371 q = skb_put(frag, flen + hdrlen);
1373 /* make the MP header */
1374 q[0] = PPP_MP >> 8;
1375 q[1] = PPP_MP;
1376 if (ppp->flags & SC_MP_XSHORTSEQ) {
1377 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1378 q[3] = ppp->nxseq;
1379 } else {
1380 q[2] = bits;
1381 q[3] = ppp->nxseq >> 16;
1382 q[4] = ppp->nxseq >> 8;
1383 q[5] = ppp->nxseq;
1386 /*
1387 * Copy the data in.
1388 * Unfortunately there is a bug in older versions of
1389 * the Linux PPP multilink reconstruction code where it
1390 * drops 0-length fragments. Therefore we make sure the
1391 * fragment has at least one byte of data. Any bytes
1392 * we add in this situation will end up as padding on the
1393 * end of the reconstructed packet.
1394 */
1395 if (flen == 0)
1396 *skb_put(frag, 1) = 0;
1397 else
1398 memcpy(q + hdrlen, p, flen);
1400 /* try to send it down the channel */
1401 chan = pch->chan;
1402 if (!skb_queue_empty(&pch->file.xq) ||
1403 !chan->ops->start_xmit(chan, frag))
1404 skb_queue_tail(&pch->file.xq, frag);
1405 pch->had_frag = 1;
1406 p += flen;
1407 len -= flen;
1408 ++ppp->nxseq;
1409 bits = 0;
1410 spin_unlock_bh(&pch->downl);
1412 if (--nbigger == 0 && fragsize > 0)
1413 --fragsize;
1415 ppp->nxchan = i;
1417 return 1;
1419 noskb:
1420 spin_unlock_bh(&pch->downl);
1421 if (ppp->debug & 1)
1422 printk(KERN_ERR "PPP: no memory (fragment)\n");
1423 ++ppp->stats.tx_errors;
1424 ++ppp->nxseq;
1425 return 1; /* abandon the frame */
1427 #endif /* CONFIG_PPP_MULTILINK */
1429 /*
1430 * Try to send data out on a channel.
1431 */
1432 static void
1433 ppp_channel_push(struct channel *pch)
1435 struct sk_buff *skb;
1436 struct ppp *ppp;
1438 spin_lock_bh(&pch->downl);
1439 if (pch->chan != 0) {
1440 while (!skb_queue_empty(&pch->file.xq)) {
1441 skb = skb_dequeue(&pch->file.xq);
1442 if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
1443 /* put the packet back and try again later */
1444 skb_queue_head(&pch->file.xq, skb);
1445 break;
1448 } else {
1449 /* channel got deregistered */
1450 skb_queue_purge(&pch->file.xq);
1452 spin_unlock_bh(&pch->downl);
1453 /* see if there is anything from the attached unit to be sent */
1454 if (skb_queue_empty(&pch->file.xq)) {
1455 read_lock_bh(&pch->upl);
1456 ppp = pch->ppp;
1457 if (ppp != 0)
1458 ppp_xmit_process(ppp);
1459 read_unlock_bh(&pch->upl);
1463 /*
1464 * Receive-side routines.
1465 */
1467 /* misuse a few fields of the skb for MP reconstruction */
1468 #define sequence priority
1469 #define BEbits cb[0]
1471 static inline void
1472 ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1474 ppp_recv_lock(ppp);
1475 /* ppp->dev == 0 means interface is closing down */
1476 if (ppp->dev != 0)
1477 ppp_receive_frame(ppp, skb, pch);
1478 else
1479 kfree_skb(skb);
1480 ppp_recv_unlock(ppp);
1483 void
1484 ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1486 struct channel *pch = chan->ppp;
1487 int proto;
1489 if (pch == 0 || skb->len == 0) {
1490 kfree_skb(skb);
1491 return;
1494 proto = PPP_PROTO(skb);
1495 read_lock_bh(&pch->upl);
1496 if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) {
1497 /* put it on the channel queue */
1498 skb_queue_tail(&pch->file.rq, skb);
1499 /* drop old frames if queue too long */
1500 while (pch->file.rq.qlen > PPP_MAX_RQLEN
1501 && (skb = skb_dequeue(&pch->file.rq)) != 0)
1502 kfree_skb(skb);
1503 wake_up_interruptible(&pch->file.rwait);
1504 } else {
1505 ppp_do_recv(pch->ppp, skb, pch);
1507 read_unlock_bh(&pch->upl);
1510 /* Put a 0-length skb in the receive queue as an error indication */
1511 void
1512 ppp_input_error(struct ppp_channel *chan, int code)
1514 struct channel *pch = chan->ppp;
1515 struct sk_buff *skb;
1517 if (pch == 0)
1518 return;
1520 read_lock_bh(&pch->upl);
1521 if (pch->ppp != 0) {
1522 skb = alloc_skb(0, GFP_ATOMIC);
1523 if (skb != 0) {
1524 skb->len = 0; /* probably unnecessary */
1525 skb->cb[0] = code;
1526 ppp_do_recv(pch->ppp, skb, pch);
1529 read_unlock_bh(&pch->upl);
1532 /*
1533 * We come in here to process a received frame.
1534 * The receive side of the ppp unit is locked.
1535 */
1536 static void
1537 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1539 if (skb->len >= 2) {
1540 #ifdef CONFIG_PPP_MULTILINK
1541 /* XXX do channel-level decompression here */
1542 if (PPP_PROTO(skb) == PPP_MP)
1543 ppp_receive_mp_frame(ppp, skb, pch);
1544 else
1545 #endif /* CONFIG_PPP_MULTILINK */
1546 ppp_receive_nonmp_frame(ppp, skb);
1547 return;
1550 if (skb->len > 0)
1551 /* note: a 0-length skb is used as an error indication */
1552 ++ppp->stats.rx_length_errors;
1554 kfree_skb(skb);
1555 ppp_receive_error(ppp);
1558 static void
1559 ppp_receive_error(struct ppp *ppp)
1561 ++ppp->stats.rx_errors;
1562 if (ppp->vj != 0)
1563 slhc_toss(ppp->vj);
1566 static void
1567 ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1569 struct sk_buff *ns;
1570 int proto, len, npi;
1572 /*
1573 * Decompress the frame, if compressed.
1574 * Note that some decompressors need to see uncompressed frames
1575 * that come in as well as compressed frames.
1576 */
1577 if (ppp->rc_state != 0 && (ppp->rstate & SC_DECOMP_RUN)
1578 && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
1579 skb = ppp_decompress_frame(ppp, skb);
1581 if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
1582 goto err;
1584 proto = PPP_PROTO(skb);
1585 switch (proto) {
1586 case PPP_VJC_COMP:
1587 /* decompress VJ compressed packets */
1588 if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
1589 goto err;
1591 if (skb_tailroom(skb) < 124) {
1592 /* copy to a new sk_buff with more tailroom */
1593 ns = dev_alloc_skb(skb->len + 128);
1594 if (ns == 0) {
1595 printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
1596 goto err;
1598 skb_reserve(ns, 2);
1599 skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
1600 kfree_skb(skb);
1601 skb = ns;
1603 else
1604 skb->ip_summed = CHECKSUM_NONE;
1606 len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
1607 if (len <= 0) {
1608 printk(KERN_DEBUG "PPP: VJ decompression error\n");
1609 goto err;
1611 len += 2;
1612 if (len > skb->len)
1613 skb_put(skb, len - skb->len);
1614 else if (len < skb->len)
1615 skb_trim(skb, len);
1616 proto = PPP_IP;
1617 break;
1619 case PPP_VJC_UNCOMP:
1620 if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
1621 goto err;
1623 /* Until we fix the decompressor need to make sure
1624 * data portion is linear.
1625 */
1626 if (!pskb_may_pull(skb, skb->len))
1627 goto err;
1629 if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
1630 printk(KERN_ERR "PPP: VJ uncompressed error\n");
1631 goto err;
1633 proto = PPP_IP;
1634 break;
1636 case PPP_CCP:
1637 ppp_ccp_peek(ppp, skb, 1);
1638 break;
1641 ++ppp->stats.rx_packets;
1642 ppp->stats.rx_bytes += skb->len - 2;
1644 npi = proto_to_npindex(proto);
1645 if (npi < 0) {
1646 /* control or unknown frame - pass it to pppd */
1647 skb_queue_tail(&ppp->file.rq, skb);
1648 /* limit queue length by dropping old frames */
1649 while (ppp->file.rq.qlen > PPP_MAX_RQLEN
1650 && (skb = skb_dequeue(&ppp->file.rq)) != 0)
1651 kfree_skb(skb);
1652 /* wake up any process polling or blocking on read */
1653 wake_up_interruptible(&ppp->file.rwait);
1655 } else {
1656 /* network protocol frame - give it to the kernel */
1658 #ifdef CONFIG_PPP_FILTER
1659 /* check if the packet passes the pass and active filters */
1660 /* the filter instructions are constructed assuming
1661 a four-byte PPP header on each packet */
1662 *skb_push(skb, 2) = 0;
1663 if (ppp->pass_filter
1664 && sk_run_filter(skb, ppp->pass_filter,
1665 ppp->pass_len) == 0) {
1666 if (ppp->debug & 1)
1667 printk(KERN_DEBUG "PPP: inbound frame not passed\n");
1668 kfree_skb(skb);
1669 return;
1671 if (!(ppp->active_filter
1672 && sk_run_filter(skb, ppp->active_filter,
1673 ppp->active_len) == 0))
1674 ppp->last_recv = jiffies;
1675 skb_pull(skb, 2);
1676 #else
1677 ppp->last_recv = jiffies;
1678 #endif /* CONFIG_PPP_FILTER */
1680 if ((ppp->dev->flags & IFF_UP) == 0
1681 || ppp->npmode[npi] != NPMODE_PASS) {
1682 kfree_skb(skb);
1683 } else {
1684 /* chop off protocol */
1685 skb_pull_rcsum(skb, 2);
1686 skb->dev = ppp->dev;
1687 skb->protocol = htons(npindex_to_ethertype[npi]);
1688 skb->mac.raw = skb->data;
1689 netif_rx(skb);
1690 ppp->dev->last_rx = jiffies;
1693 return;
1695 err:
1696 kfree_skb(skb);
1697 ppp_receive_error(ppp);
1700 static struct sk_buff *
1701 ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
1703 int proto = PPP_PROTO(skb);
1704 struct sk_buff *ns;
1705 int len;
1707 /* Until we fix all the decompressor's need to make sure
1708 * data portion is linear.
1709 */
1710 if (!pskb_may_pull(skb, skb->len))
1711 goto err;
1713 if (proto == PPP_COMP) {
1714 ns = dev_alloc_skb(ppp->mru + PPP_HDRLEN);
1715 if (ns == 0) {
1716 printk(KERN_ERR "ppp_decompress_frame: no memory\n");
1717 goto err;
1719 /* the decompressor still expects the A/C bytes in the hdr */
1720 len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
1721 skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN);
1722 if (len < 0) {
1723 /* Pass the compressed frame to pppd as an
1724 error indication. */
1725 if (len == DECOMP_FATALERROR)
1726 ppp->rstate |= SC_DC_FERROR;
1727 kfree_skb(ns);
1728 goto err;
1731 kfree_skb(skb);
1732 skb = ns;
1733 skb_put(skb, len);
1734 skb_pull(skb, 2); /* pull off the A/C bytes */
1736 } else {
1737 /* Uncompressed frame - pass to decompressor so it
1738 can update its dictionary if necessary. */
1739 if (ppp->rcomp->incomp)
1740 ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
1741 skb->len + 2);
1744 return skb;
1746 err:
1747 ppp->rstate |= SC_DC_ERROR;
1748 ppp_receive_error(ppp);
1749 return skb;
1752 #ifdef CONFIG_PPP_MULTILINK
1753 /*
1754 * Receive a multilink frame.
1755 * We put it on the reconstruction queue and then pull off
1756 * as many completed frames as we can.
1757 */
1758 static void
1759 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1761 u32 mask, seq;
1762 struct channel *ch;
1763 int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1765 if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0)
1766 goto err; /* no good, throw it away */
1768 /* Decode sequence number and begin/end bits */
1769 if (ppp->flags & SC_MP_SHORTSEQ) {
1770 seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
1771 mask = 0xfff;
1772 } else {
1773 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
1774 mask = 0xffffff;
1776 skb->BEbits = skb->data[2];
1777 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */
1779 /*
1780 * Do protocol ID decompression on the first fragment of each packet.
1781 */
1782 if ((skb->BEbits & B) && (skb->data[0] & 1))
1783 *skb_push(skb, 1) = 0;
1785 /*
1786 * Expand sequence number to 32 bits, making it as close
1787 * as possible to ppp->minseq.
1788 */
1789 seq |= ppp->minseq & ~mask;
1790 if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
1791 seq += mask + 1;
1792 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
1793 seq -= mask + 1; /* should never happen */
1794 skb->sequence = seq;
1795 pch->lastseq = seq;
1797 /*
1798 * If this packet comes before the next one we were expecting,
1799 * drop it.
1800 */
1801 if (seq_before(seq, ppp->nextseq)) {
1802 kfree_skb(skb);
1803 ++ppp->stats.rx_dropped;
1804 ppp_receive_error(ppp);
1805 return;
1808 /*
1809 * Reevaluate minseq, the minimum over all channels of the
1810 * last sequence number received on each channel. Because of
1811 * the increasing sequence number rule, we know that any fragment
1812 * before `minseq' which hasn't arrived is never going to arrive.
1813 * The list of channels can't change because we have the receive
1814 * side of the ppp unit locked.
1815 */
1816 list_for_each_entry(ch, &ppp->channels, clist) {
1817 if (seq_before(ch->lastseq, seq))
1818 seq = ch->lastseq;
1820 if (seq_before(ppp->minseq, seq))
1821 ppp->minseq = seq;
1823 /* Put the fragment on the reconstruction queue */
1824 ppp_mp_insert(ppp, skb);
1826 /* If the queue is getting long, don't wait any longer for packets
1827 before the start of the queue. */
1828 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN
1829 && seq_before(ppp->minseq, ppp->mrq.next->sequence))
1830 ppp->minseq = ppp->mrq.next->sequence;
1832 /* Pull completed packets off the queue and receive them. */
1833 while ((skb = ppp_mp_reconstruct(ppp)) != 0)
1834 ppp_receive_nonmp_frame(ppp, skb);
1836 return;
1838 err:
1839 kfree_skb(skb);
1840 ppp_receive_error(ppp);
1843 /*
1844 * Insert a fragment on the MP reconstruction queue.
1845 * The queue is ordered by increasing sequence number.
1846 */
1847 static void
1848 ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
1850 struct sk_buff *p;
1851 struct sk_buff_head *list = &ppp->mrq;
1852 u32 seq = skb->sequence;
1854 /* N.B. we don't need to lock the list lock because we have the
1855 ppp unit receive-side lock. */
1856 for (p = list->next; p != (struct sk_buff *)list; p = p->next)
1857 if (seq_before(seq, p->sequence))
1858 break;
1859 __skb_insert(skb, p->prev, p, list);
1862 /*
1863 * Reconstruct a packet from the MP fragment queue.
1864 * We go through increasing sequence numbers until we find a
1865 * complete packet, or we get to the sequence number for a fragment
1866 * which hasn't arrived but might still do so.
1867 */
1868 struct sk_buff *
1869 ppp_mp_reconstruct(struct ppp *ppp)
1871 u32 seq = ppp->nextseq;
1872 u32 minseq = ppp->minseq;
1873 struct sk_buff_head *list = &ppp->mrq;
1874 struct sk_buff *p, *next;
1875 struct sk_buff *head, *tail;
1876 struct sk_buff *skb = NULL;
1877 int lost = 0, len = 0;
1879 if (ppp->mrru == 0) /* do nothing until mrru is set */
1880 return NULL;
1881 head = list->next;
1882 tail = NULL;
1883 for (p = head; p != (struct sk_buff *) list; p = next) {
1884 next = p->next;
1885 if (seq_before(p->sequence, seq)) {
1886 /* this can't happen, anyway ignore the skb */
1887 printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
1888 p->sequence, seq);
1889 head = next;
1890 continue;
1892 if (p->sequence != seq) {
1893 /* Fragment `seq' is missing. If it is after
1894 minseq, it might arrive later, so stop here. */
1895 if (seq_after(seq, minseq))
1896 break;
1897 /* Fragment `seq' is lost, keep going. */
1898 lost = 1;
1899 seq = seq_before(minseq, p->sequence)?
1900 minseq + 1: p->sequence;
1901 next = p;
1902 continue;
1905 /*
1906 * At this point we know that all the fragments from
1907 * ppp->nextseq to seq are either present or lost.
1908 * Also, there are no complete packets in the queue
1909 * that have no missing fragments and end before this
1910 * fragment.
1911 */
1913 /* B bit set indicates this fragment starts a packet */
1914 if (p->BEbits & B) {
1915 head = p;
1916 lost = 0;
1917 len = 0;
1920 len += p->len;
1922 /* Got a complete packet yet? */
1923 if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) {
1924 if (len > ppp->mrru + 2) {
1925 ++ppp->stats.rx_length_errors;
1926 printk(KERN_DEBUG "PPP: reconstructed packet"
1927 " is too long (%d)\n", len);
1928 } else if (p == head) {
1929 /* fragment is complete packet - reuse skb */
1930 tail = p;
1931 skb = skb_get(p);
1932 break;
1933 } else if ((skb = dev_alloc_skb(len)) == NULL) {
1934 ++ppp->stats.rx_missed_errors;
1935 printk(KERN_DEBUG "PPP: no memory for "
1936 "reconstructed packet");
1937 } else {
1938 tail = p;
1939 break;
1941 ppp->nextseq = seq + 1;
1944 /*
1945 * If this is the ending fragment of a packet,
1946 * and we haven't found a complete valid packet yet,
1947 * we can discard up to and including this fragment.
1948 */
1949 if (p->BEbits & E)
1950 head = next;
1952 ++seq;
1955 /* If we have a complete packet, copy it all into one skb. */
1956 if (tail != NULL) {
1957 /* If we have discarded any fragments,
1958 signal a receive error. */
1959 if (head->sequence != ppp->nextseq) {
1960 if (ppp->debug & 1)
1961 printk(KERN_DEBUG " missed pkts %u..%u\n",
1962 ppp->nextseq, head->sequence-1);
1963 ++ppp->stats.rx_dropped;
1964 ppp_receive_error(ppp);
1967 if (head != tail)
1968 /* copy to a single skb */
1969 for (p = head; p != tail->next; p = p->next)
1970 skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
1971 ppp->nextseq = tail->sequence + 1;
1972 head = tail->next;
1975 /* Discard all the skbuffs that we have copied the data out of
1976 or that we can't use. */
1977 while ((p = list->next) != head) {
1978 __skb_unlink(p, list);
1979 kfree_skb(p);
1982 return skb;
1984 #endif /* CONFIG_PPP_MULTILINK */
1986 /*
1987 * Channel interface.
1988 */
1990 /*
1991 * Create a new, unattached ppp channel.
1992 */
1993 int
1994 ppp_register_channel(struct ppp_channel *chan)
1996 struct channel *pch;
1998 pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1999 if (pch == 0)
2000 return -ENOMEM;
2001 pch->ppp = NULL;
2002 pch->chan = chan;
2003 chan->ppp = pch;
2004 init_ppp_file(&pch->file, CHANNEL);
2005 pch->file.hdrlen = chan->hdrlen;
2006 #ifdef CONFIG_PPP_MULTILINK
2007 pch->lastseq = -1;
2008 #endif /* CONFIG_PPP_MULTILINK */
2009 init_rwsem(&pch->chan_sem);
2010 spin_lock_init(&pch->downl);
2011 rwlock_init(&pch->upl);
2012 spin_lock_bh(&all_channels_lock);
2013 pch->file.index = ++last_channel_index;
2014 list_add(&pch->list, &new_channels);
2015 atomic_inc(&channel_count);
2016 spin_unlock_bh(&all_channels_lock);
2017 return 0;
2020 /*
2021 * Return the index of a channel.
2022 */
2023 int ppp_channel_index(struct ppp_channel *chan)
2025 struct channel *pch = chan->ppp;
2027 if (pch != 0)
2028 return pch->file.index;
2029 return -1;
2032 /*
2033 * Return the PPP unit number to which a channel is connected.
2034 */
2035 int ppp_unit_number(struct ppp_channel *chan)
2037 struct channel *pch = chan->ppp;
2038 int unit = -1;
2040 if (pch != 0) {
2041 read_lock_bh(&pch->upl);
2042 if (pch->ppp != 0)
2043 unit = pch->ppp->file.index;
2044 read_unlock_bh(&pch->upl);
2046 return unit;
2049 /*
2050 * Disconnect a channel from the generic layer.
2051 * This must be called in process context.
2052 */
2053 void
2054 ppp_unregister_channel(struct ppp_channel *chan)
2056 struct channel *pch = chan->ppp;
2058 if (pch == 0)
2059 return; /* should never happen */
2060 chan->ppp = NULL;
2062 /*
2063 * This ensures that we have returned from any calls into the
2064 * the channel's start_xmit or ioctl routine before we proceed.
2065 */
2066 down_write(&pch->chan_sem);
2067 spin_lock_bh(&pch->downl);
2068 pch->chan = NULL;
2069 spin_unlock_bh(&pch->downl);
2070 up_write(&pch->chan_sem);
2071 ppp_disconnect_channel(pch);
2072 spin_lock_bh(&all_channels_lock);
2073 list_del(&pch->list);
2074 spin_unlock_bh(&all_channels_lock);
2075 pch->file.dead = 1;
2076 wake_up_interruptible(&pch->file.rwait);
2077 if (atomic_dec_and_test(&pch->file.refcnt))
2078 ppp_destroy_channel(pch);
2081 /*
2082 * Callback from a channel when it can accept more to transmit.
2083 * This should be called at BH/softirq level, not interrupt level.
2084 */
2085 void
2086 ppp_output_wakeup(struct ppp_channel *chan)
2088 struct channel *pch = chan->ppp;
2090 if (pch == 0)
2091 return;
2092 ppp_channel_push(pch);
2095 /*
2096 * Compression control.
2097 */
2099 /* Process the PPPIOCSCOMPRESS ioctl. */
2100 static int
2101 ppp_set_compress(struct ppp *ppp, unsigned long arg)
2103 int err;
2104 struct compressor *cp, *ocomp;
2105 struct ppp_option_data data;
2106 void *state, *ostate;
2107 unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2109 err = -EFAULT;
2110 if (copy_from_user(&data, (void __user *) arg, sizeof(data))
2111 || (data.length <= CCP_MAX_OPTION_LENGTH
2112 && copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
2113 goto out;
2114 err = -EINVAL;
2115 if (data.length > CCP_MAX_OPTION_LENGTH
2116 || ccp_option[1] < 2 || ccp_option[1] > data.length)
2117 goto out;
2119 cp = find_compressor(ccp_option[0]);
2120 #ifdef CONFIG_KMOD
2121 if (cp == 0) {
2122 request_module("ppp-compress-%d", ccp_option[0]);
2123 cp = find_compressor(ccp_option[0]);
2125 #endif /* CONFIG_KMOD */
2126 if (cp == 0)
2127 goto out;
2129 err = -ENOBUFS;
2130 if (data.transmit) {
2131 state = cp->comp_alloc(ccp_option, data.length);
2132 if (state != 0) {
2133 ppp_xmit_lock(ppp);
2134 ppp->xstate &= ~SC_COMP_RUN;
2135 ocomp = ppp->xcomp;
2136 ostate = ppp->xc_state;
2137 ppp->xcomp = cp;
2138 ppp->xc_state = state;
2139 ppp_xmit_unlock(ppp);
2140 if (ostate != 0) {
2141 ocomp->comp_free(ostate);
2142 module_put(ocomp->owner);
2144 err = 0;
2145 } else
2146 module_put(cp->owner);
2148 } else {
2149 state = cp->decomp_alloc(ccp_option, data.length);
2150 if (state != 0) {
2151 ppp_recv_lock(ppp);
2152 ppp->rstate &= ~SC_DECOMP_RUN;
2153 ocomp = ppp->rcomp;
2154 ostate = ppp->rc_state;
2155 ppp->rcomp = cp;
2156 ppp->rc_state = state;
2157 ppp_recv_unlock(ppp);
2158 if (ostate != 0) {
2159 ocomp->decomp_free(ostate);
2160 module_put(ocomp->owner);
2162 err = 0;
2163 } else
2164 module_put(cp->owner);
2167 out:
2168 return err;
2171 /*
2172 * Look at a CCP packet and update our state accordingly.
2173 * We assume the caller has the xmit or recv path locked.
2174 */
2175 static void
2176 ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2178 unsigned char *dp;
2179 int len;
2181 if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2182 return; /* no header */
2183 dp = skb->data + 2;
2185 switch (CCP_CODE(dp)) {
2186 case CCP_CONFREQ:
2188 /* A ConfReq starts negotiation of compression
2189 * in one direction of transmission,
2190 * and hence brings it down...but which way?
2192 * Remember:
2193 * A ConfReq indicates what the sender would like to receive
2194 */
2195 if(inbound)
2196 /* He is proposing what I should send */
2197 ppp->xstate &= ~SC_COMP_RUN;
2198 else
2199 /* I am proposing to what he should send */
2200 ppp->rstate &= ~SC_DECOMP_RUN;
2202 break;
2204 case CCP_TERMREQ:
2205 case CCP_TERMACK:
2206 /*
2207 * CCP is going down, both directions of transmission
2208 */
2209 ppp->rstate &= ~SC_DECOMP_RUN;
2210 ppp->xstate &= ~SC_COMP_RUN;
2211 break;
2213 case CCP_CONFACK:
2214 if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2215 break;
2216 len = CCP_LENGTH(dp);
2217 if (!pskb_may_pull(skb, len + 2))
2218 return; /* too short */
2219 dp += CCP_HDRLEN;
2220 len -= CCP_HDRLEN;
2221 if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2222 break;
2223 if (inbound) {
2224 /* we will start receiving compressed packets */
2225 if (ppp->rc_state == 0)
2226 break;
2227 if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2228 ppp->file.index, 0, ppp->mru, ppp->debug)) {
2229 ppp->rstate |= SC_DECOMP_RUN;
2230 ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2232 } else {
2233 /* we will soon start sending compressed packets */
2234 if (ppp->xc_state == 0)
2235 break;
2236 if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2237 ppp->file.index, 0, ppp->debug))
2238 ppp->xstate |= SC_COMP_RUN;
2240 break;
2242 case CCP_RESETACK:
2243 /* reset the [de]compressor */
2244 if ((ppp->flags & SC_CCP_UP) == 0)
2245 break;
2246 if (inbound) {
2247 if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2248 ppp->rcomp->decomp_reset(ppp->rc_state);
2249 ppp->rstate &= ~SC_DC_ERROR;
2251 } else {
2252 if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2253 ppp->xcomp->comp_reset(ppp->xc_state);
2255 break;
2259 /* Free up compression resources. */
2260 static void
2261 ppp_ccp_closed(struct ppp *ppp)
2263 void *xstate, *rstate;
2264 struct compressor *xcomp, *rcomp;
2266 ppp_lock(ppp);
2267 ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2268 ppp->xstate = 0;
2269 xcomp = ppp->xcomp;
2270 xstate = ppp->xc_state;
2271 ppp->xc_state = NULL;
2272 ppp->rstate = 0;
2273 rcomp = ppp->rcomp;
2274 rstate = ppp->rc_state;
2275 ppp->rc_state = NULL;
2276 ppp_unlock(ppp);
2278 if (xstate) {
2279 xcomp->comp_free(xstate);
2280 module_put(xcomp->owner);
2282 if (rstate) {
2283 rcomp->decomp_free(rstate);
2284 module_put(rcomp->owner);
2288 /* List of compressors. */
2289 static LIST_HEAD(compressor_list);
2290 static DEFINE_SPINLOCK(compressor_list_lock);
2292 struct compressor_entry {
2293 struct list_head list;
2294 struct compressor *comp;
2295 };
2297 static struct compressor_entry *
2298 find_comp_entry(int proto)
2300 struct compressor_entry *ce;
2302 list_for_each_entry(ce, &compressor_list, list) {
2303 if (ce->comp->compress_proto == proto)
2304 return ce;
2306 return NULL;
2309 /* Register a compressor */
2310 int
2311 ppp_register_compressor(struct compressor *cp)
2313 struct compressor_entry *ce;
2314 int ret;
2315 spin_lock(&compressor_list_lock);
2316 ret = -EEXIST;
2317 if (find_comp_entry(cp->compress_proto) != 0)
2318 goto out;
2319 ret = -ENOMEM;
2320 ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
2321 if (ce == 0)
2322 goto out;
2323 ret = 0;
2324 ce->comp = cp;
2325 list_add(&ce->list, &compressor_list);
2326 out:
2327 spin_unlock(&compressor_list_lock);
2328 return ret;
2331 /* Unregister a compressor */
2332 void
2333 ppp_unregister_compressor(struct compressor *cp)
2335 struct compressor_entry *ce;
2337 spin_lock(&compressor_list_lock);
2338 ce = find_comp_entry(cp->compress_proto);
2339 if (ce != 0 && ce->comp == cp) {
2340 list_del(&ce->list);
2341 kfree(ce);
2343 spin_unlock(&compressor_list_lock);
2346 /* Find a compressor. */
2347 static struct compressor *
2348 find_compressor(int type)
2350 struct compressor_entry *ce;
2351 struct compressor *cp = NULL;
2353 spin_lock(&compressor_list_lock);
2354 ce = find_comp_entry(type);
2355 if (ce != 0) {
2356 cp = ce->comp;
2357 if (!try_module_get(cp->owner))
2358 cp = NULL;
2360 spin_unlock(&compressor_list_lock);
2361 return cp;
2364 /*
2365 * Miscelleneous stuff.
2366 */
2368 static void
2369 ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2371 struct slcompress *vj = ppp->vj;
2373 memset(st, 0, sizeof(*st));
2374 st->p.ppp_ipackets = ppp->stats.rx_packets;
2375 st->p.ppp_ierrors = ppp->stats.rx_errors;
2376 st->p.ppp_ibytes = ppp->stats.rx_bytes;
2377 st->p.ppp_opackets = ppp->stats.tx_packets;
2378 st->p.ppp_oerrors = ppp->stats.tx_errors;
2379 st->p.ppp_obytes = ppp->stats.tx_bytes;
2380 if (vj == 0)
2381 return;
2382 st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
2383 st->vj.vjs_compressed = vj->sls_o_compressed;
2384 st->vj.vjs_searches = vj->sls_o_searches;
2385 st->vj.vjs_misses = vj->sls_o_misses;
2386 st->vj.vjs_errorin = vj->sls_i_error;
2387 st->vj.vjs_tossed = vj->sls_i_tossed;
2388 st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
2389 st->vj.vjs_compressedin = vj->sls_i_compressed;
2392 /*
2393 * Stuff for handling the lists of ppp units and channels
2394 * and for initialization.
2395 */
2397 /*
2398 * Create a new ppp interface unit. Fails if it can't allocate memory
2399 * or if there is already a unit with the requested number.
2400 * unit == -1 means allocate a new number.
2401 */
2402 static struct ppp *
2403 ppp_create_interface(int unit, int *retp)
2405 struct ppp *ppp;
2406 struct net_device *dev = NULL;
2407 int ret = -ENOMEM;
2408 int i;
2410 ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
2411 if (!ppp)
2412 goto out;
2413 dev = alloc_netdev(0, "", ppp_setup);
2414 if (!dev)
2415 goto out1;
2417 ppp->mru = PPP_MRU;
2418 init_ppp_file(&ppp->file, INTERFACE);
2419 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
2420 for (i = 0; i < NUM_NP; ++i)
2421 ppp->npmode[i] = NPMODE_PASS;
2422 INIT_LIST_HEAD(&ppp->channels);
2423 spin_lock_init(&ppp->rlock);
2424 spin_lock_init(&ppp->wlock);
2425 #ifdef CONFIG_PPP_MULTILINK
2426 ppp->minseq = -1;
2427 skb_queue_head_init(&ppp->mrq);
2428 #endif /* CONFIG_PPP_MULTILINK */
2429 ppp->dev = dev;
2430 dev->priv = ppp;
2432 dev->hard_start_xmit = ppp_start_xmit;
2433 dev->get_stats = ppp_net_stats;
2434 dev->do_ioctl = ppp_net_ioctl;
2436 ret = -EEXIST;
2437 mutex_lock(&all_ppp_mutex);
2438 if (unit < 0)
2439 unit = cardmap_find_first_free(all_ppp_units);
2440 else if (cardmap_get(all_ppp_units, unit) != NULL)
2441 goto out2; /* unit already exists */
2443 /* Initialize the new ppp unit */
2444 ppp->file.index = unit;
2445 sprintf(dev->name, "ppp%d", unit);
2447 ret = register_netdev(dev);
2448 if (ret != 0) {
2449 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
2450 dev->name, ret);
2451 goto out2;
2454 atomic_inc(&ppp_unit_count);
2455 ret = cardmap_set(&all_ppp_units, unit, ppp);
2456 if (ret != 0)
2457 goto out3;
2459 mutex_unlock(&all_ppp_mutex);
2460 *retp = 0;
2461 return ppp;
2463 out3:
2464 atomic_dec(&ppp_unit_count);
2465 out2:
2466 mutex_unlock(&all_ppp_mutex);
2467 free_netdev(dev);
2468 out1:
2469 kfree(ppp);
2470 out:
2471 *retp = ret;
2472 return NULL;
2475 /*
2476 * Initialize a ppp_file structure.
2477 */
2478 static void
2479 init_ppp_file(struct ppp_file *pf, int kind)
2481 pf->kind = kind;
2482 skb_queue_head_init(&pf->xq);
2483 skb_queue_head_init(&pf->rq);
2484 atomic_set(&pf->refcnt, 1);
2485 init_waitqueue_head(&pf->rwait);
2488 /*
2489 * Take down a ppp interface unit - called when the owning file
2490 * (the one that created the unit) is closed or detached.
2491 */
2492 static void ppp_shutdown_interface(struct ppp *ppp)
2494 struct net_device *dev;
2496 mutex_lock(&all_ppp_mutex);
2497 ppp_lock(ppp);
2498 dev = ppp->dev;
2499 ppp->dev = NULL;
2500 ppp_unlock(ppp);
2501 /* This will call dev_close() for us. */
2502 if (dev) {
2503 unregister_netdev(dev);
2504 free_netdev(dev);
2506 cardmap_set(&all_ppp_units, ppp->file.index, NULL);
2507 ppp->file.dead = 1;
2508 ppp->owner = NULL;
2509 wake_up_interruptible(&ppp->file.rwait);
2510 mutex_unlock(&all_ppp_mutex);
2513 /*
2514 * Free the memory used by a ppp unit. This is only called once
2515 * there are no channels connected to the unit and no file structs
2516 * that reference the unit.
2517 */
2518 static void ppp_destroy_interface(struct ppp *ppp)
2520 atomic_dec(&ppp_unit_count);
2522 if (!ppp->file.dead || ppp->n_channels) {
2523 /* "can't happen" */
2524 printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
2525 "n_channels=%d !\n", ppp, ppp->file.dead,
2526 ppp->n_channels);
2527 return;
2530 ppp_ccp_closed(ppp);
2531 if (ppp->vj) {
2532 slhc_free(ppp->vj);
2533 ppp->vj = NULL;
2535 skb_queue_purge(&ppp->file.xq);
2536 skb_queue_purge(&ppp->file.rq);
2537 #ifdef CONFIG_PPP_MULTILINK
2538 skb_queue_purge(&ppp->mrq);
2539 #endif /* CONFIG_PPP_MULTILINK */
2540 #ifdef CONFIG_PPP_FILTER
2541 kfree(ppp->pass_filter);
2542 ppp->pass_filter = NULL;
2543 kfree(ppp->active_filter);
2544 ppp->active_filter = NULL;
2545 #endif /* CONFIG_PPP_FILTER */
2547 kfree(ppp);
2550 /*
2551 * Locate an existing ppp unit.
2552 * The caller should have locked the all_ppp_mutex.
2553 */
2554 static struct ppp *
2555 ppp_find_unit(int unit)
2557 return cardmap_get(all_ppp_units, unit);
2560 /*
2561 * Locate an existing ppp channel.
2562 * The caller should have locked the all_channels_lock.
2563 * First we look in the new_channels list, then in the
2564 * all_channels list. If found in the new_channels list,
2565 * we move it to the all_channels list. This is for speed
2566 * when we have a lot of channels in use.
2567 */
2568 static struct channel *
2569 ppp_find_channel(int unit)
2571 struct channel *pch;
2573 list_for_each_entry(pch, &new_channels, list) {
2574 if (pch->file.index == unit) {
2575 list_move(&pch->list, &all_channels);
2576 return pch;
2579 list_for_each_entry(pch, &all_channels, list) {
2580 if (pch->file.index == unit)
2581 return pch;
2583 return NULL;
2586 /*
2587 * Connect a PPP channel to a PPP interface unit.
2588 */
2589 static int
2590 ppp_connect_channel(struct channel *pch, int unit)
2592 struct ppp *ppp;
2593 int ret = -ENXIO;
2594 int hdrlen;
2596 mutex_lock(&all_ppp_mutex);
2597 ppp = ppp_find_unit(unit);
2598 if (ppp == 0)
2599 goto out;
2600 write_lock_bh(&pch->upl);
2601 ret = -EINVAL;
2602 if (pch->ppp != 0)
2603 goto outl;
2605 ppp_lock(ppp);
2606 if (pch->file.hdrlen > ppp->file.hdrlen)
2607 ppp->file.hdrlen = pch->file.hdrlen;
2608 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
2609 if (ppp->dev && hdrlen > ppp->dev->hard_header_len)
2610 ppp->dev->hard_header_len = hdrlen;
2611 list_add_tail(&pch->clist, &ppp->channels);
2612 ++ppp->n_channels;
2613 pch->ppp = ppp;
2614 atomic_inc(&ppp->file.refcnt);
2615 ppp_unlock(ppp);
2616 ret = 0;
2618 outl:
2619 write_unlock_bh(&pch->upl);
2620 out:
2621 mutex_unlock(&all_ppp_mutex);
2622 return ret;
2625 /*
2626 * Disconnect a channel from its ppp unit.
2627 */
2628 static int
2629 ppp_disconnect_channel(struct channel *pch)
2631 struct ppp *ppp;
2632 int err = -EINVAL;
2634 write_lock_bh(&pch->upl);
2635 ppp = pch->ppp;
2636 pch->ppp = NULL;
2637 write_unlock_bh(&pch->upl);
2638 if (ppp != 0) {
2639 /* remove it from the ppp unit's list */
2640 ppp_lock(ppp);
2641 list_del(&pch->clist);
2642 if (--ppp->n_channels == 0)
2643 wake_up_interruptible(&ppp->file.rwait);
2644 ppp_unlock(ppp);
2645 if (atomic_dec_and_test(&ppp->file.refcnt))
2646 ppp_destroy_interface(ppp);
2647 err = 0;
2649 return err;
2652 /*
2653 * Free up the resources used by a ppp channel.
2654 */
2655 static void ppp_destroy_channel(struct channel *pch)
2657 atomic_dec(&channel_count);
2659 if (!pch->file.dead) {
2660 /* "can't happen" */
2661 printk(KERN_ERR "ppp: destroying undead channel %p !\n",
2662 pch);
2663 return;
2665 skb_queue_purge(&pch->file.xq);
2666 skb_queue_purge(&pch->file.rq);
2667 kfree(pch);
2670 static void __exit ppp_cleanup(void)
2672 /* should never happen */
2673 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2674 printk(KERN_ERR "PPP: removing module but units remain!\n");
2675 cardmap_destroy(&all_ppp_units);
2676 if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
2677 printk(KERN_ERR "PPP: failed to unregister PPP device\n");
2678 class_device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2679 class_destroy(ppp_class);
2682 /*
2683 * Cardmap implementation.
2684 */
2685 static void *cardmap_get(struct cardmap *map, unsigned int nr)
2687 struct cardmap *p;
2688 int i;
2690 for (p = map; p != NULL; ) {
2691 if ((i = nr >> p->shift) >= CARDMAP_WIDTH)
2692 return NULL;
2693 if (p->shift == 0)
2694 return p->ptr[i];
2695 nr &= ~(CARDMAP_MASK << p->shift);
2696 p = p->ptr[i];
2698 return NULL;
2701 static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2703 struct cardmap *p;
2704 int i;
2706 p = *pmap;
2707 if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
2708 do {
2709 /* need a new top level */
2710 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
2711 if (!np)
2712 goto enomem;
2713 np->ptr[0] = p;
2714 if (p != NULL) {
2715 np->shift = p->shift + CARDMAP_ORDER;
2716 p->parent = np;
2717 } else
2718 np->shift = 0;
2719 p = np;
2720 } while ((nr >> p->shift) >= CARDMAP_WIDTH);
2721 *pmap = p;
2723 while (p->shift > 0) {
2724 i = (nr >> p->shift) & CARDMAP_MASK;
2725 if (p->ptr[i] == NULL) {
2726 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
2727 if (!np)
2728 goto enomem;
2729 np->shift = p->shift - CARDMAP_ORDER;
2730 np->parent = p;
2731 p->ptr[i] = np;
2733 if (ptr == NULL)
2734 clear_bit(i, &p->inuse);
2735 p = p->ptr[i];
2737 i = nr & CARDMAP_MASK;
2738 p->ptr[i] = ptr;
2739 if (ptr != NULL)
2740 set_bit(i, &p->inuse);
2741 else
2742 clear_bit(i, &p->inuse);
2743 return 0;
2744 enomem:
2745 return -ENOMEM;
2748 static unsigned int cardmap_find_first_free(struct cardmap *map)
2750 struct cardmap *p;
2751 unsigned int nr = 0;
2752 int i;
2754 if ((p = map) == NULL)
2755 return 0;
2756 for (;;) {
2757 i = find_first_zero_bit(&p->inuse, CARDMAP_WIDTH);
2758 if (i >= CARDMAP_WIDTH) {
2759 if (p->parent == NULL)
2760 return CARDMAP_WIDTH << p->shift;
2761 p = p->parent;
2762 i = (nr >> p->shift) & CARDMAP_MASK;
2763 set_bit(i, &p->inuse);
2764 continue;
2766 nr = (nr & (~CARDMAP_MASK << p->shift)) | (i << p->shift);
2767 if (p->shift == 0 || p->ptr[i] == NULL)
2768 return nr;
2769 p = p->ptr[i];
2773 static void cardmap_destroy(struct cardmap **pmap)
2775 struct cardmap *p, *np;
2776 int i;
2778 for (p = *pmap; p != NULL; p = np) {
2779 if (p->shift != 0) {
2780 for (i = 0; i < CARDMAP_WIDTH; ++i)
2781 if (p->ptr[i] != NULL)
2782 break;
2783 if (i < CARDMAP_WIDTH) {
2784 np = p->ptr[i];
2785 p->ptr[i] = NULL;
2786 continue;
2789 np = p->parent;
2790 kfree(p);
2792 *pmap = NULL;
2795 /* Module/initialization stuff */
2797 module_init(ppp_init);
2798 module_exit(ppp_cleanup);
2800 EXPORT_SYMBOL(ppp_register_channel);
2801 EXPORT_SYMBOL(ppp_unregister_channel);
2802 EXPORT_SYMBOL(ppp_channel_index);
2803 EXPORT_SYMBOL(ppp_unit_number);
2804 EXPORT_SYMBOL(ppp_input);
2805 EXPORT_SYMBOL(ppp_input_error);
2806 EXPORT_SYMBOL(ppp_output_wakeup);
2807 EXPORT_SYMBOL(ppp_register_compressor);
2808 EXPORT_SYMBOL(ppp_unregister_compressor);
2809 MODULE_LICENSE("GPL");
2810 MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR);
2811 MODULE_ALIAS("/dev/ppp");