ia64/linux-2.6.18-xen.hg: view of drivers/net/ppp_synctty.c @ 897:329ea0ccb344

/*
 * PPP synchronous tty channel driver for Linux.
 *
 * This is a ppp channel driver that can be used with tty device drivers
 * that are frame oriented, such as synchronous HDLC devices.
 *
 * Complete PPP frames without encoding/decoding are exchanged between
 * the channel driver and the device driver.
 *
 * The async map IOCTL codes are implemented to keep the user mode
 * applications happy if they call them.  Synchronous PPP does not use
 * the async maps.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * Also touched by the grubby hands of Paul Fulghum paulkf@microgate.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over sync serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20040616==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>

#define PPP_VERSION "2.4.2"

/* Structure for storing local state. */
struct syncppp {
        struct tty_struct *tty;
        unsigned int flags;
        unsigned int rbits;
        int mru;
        spinlock_t xmit_lock;
        spinlock_t recv_lock;
        unsigned long xmit_flags;
        u32 xaccm[8];
        u32 raccm;
        unsigned int bytes_sent;
        unsigned int bytes_rcvd;

        struct sk_buff *tpkt;
        unsigned long last_xmit;

        struct sk_buff_head rqueue;

        struct tasklet_struct tsk;

        atomic_t refcnt;
        struct semaphore dead_sem;
        struct ppp_channel chan;        /* interface to generic ppp layer */
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP     0
#define XMIT_FULL       1

/* Bits in rbits */
#define SC_RCV_BITS     (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

#define PPPSYNC_MAX_RQLEN       32      /* arbitrary */

/*
 * Prototypes.
 */
static struct sk_buff* ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *);
static int ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
                          unsigned long arg);
static void ppp_sync_process(unsigned long arg);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
                           char *flags, int count);
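
/*
 * The table below fills struct ppp_channel_ops positionally: the first
 * entry is the transmit hook the generic PPP layer calls with a frame
 * to send, the second is the channel-level ioctl handler.
 */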
static struct ppp_channel_ops sync_ops = {
        ppp_sync_send,
        ppp_sync_ioctl
};

/*
 * Utility procedures to print a buffer in hex/ascii
 */
static void
ppp_print_hex (register __u8 * out, const __u8 * in, int count)
{
        register __u8 next_ch;
        static const char hex[] = "0123456789ABCDEF";

        while (count-- > 0) {
                next_ch = *in++;
                *out++ = hex[(next_ch >> 4) & 0x0F];
                *out++ = hex[next_ch & 0x0F];
                ++out;
        }
}
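
/*
 * Each input byte occupies three output columns: two hex digits plus
 * one column that is skipped; ppp_print_buffer() pre-fills the line
 * with spaces, so the skipped column becomes the separator.
 */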
static void
ppp_print_char (register __u8 * out, const __u8 * in, int count)
{
        register __u8 next_ch;

        while (count-- > 0) {
                next_ch = *in++;

                if (next_ch < 0x20 || next_ch > 0x7e)
                        *out++ = '.';
                else {
                        *out++ = next_ch;
                        if (next_ch == '%')     /* printk/syslogd has a bug !! */
                                *out++ = '%';
                }
        }
        *out = '\0';
}

static void
ppp_print_buffer (const char *name, const __u8 *buf, int count)
{
        __u8 line[44];

        if (name != NULL)
                printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);

        while (count > 8) {
                memset (line, 32, 44);
                ppp_print_hex (line, buf, 8);
                ppp_print_char (&line[8 * 3], buf, 8);
                printk(KERN_DEBUG "%s\n", line);
                count -= 8;
                buf += 8;
        }

        if (count > 0) {
                memset (line, 32, 44);
                ppp_print_hex (line, buf, count);
                ppp_print_char (&line[8 * 3], buf, count);
                printk(KERN_DEBUG "%s\n", line);
        }
}

/*
 * Routines implementing the synchronous PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_synctty_receive while another
 * calls ppp_synctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_synctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: Fixed in tty_io nowadays.
 */
static DEFINE_RWLOCK(disc_data_lock);
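
/*
 * sp_get() takes a reference on the channel state under
 * disc_data_lock; every successful sp_get() must be balanced by an
 * sp_put().  When the final reference is dropped, sp_put() raises
 * dead_sem so that ppp_sync_close() can finish tearing down.
 */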
static struct syncppp *sp_get(struct tty_struct *tty)
{
        struct syncppp *ap;

        read_lock(&disc_data_lock);
        ap = tty->disc_data;
        if (ap != NULL)
                atomic_inc(&ap->refcnt);
        read_unlock(&disc_data_lock);
        return ap;
}

static void sp_put(struct syncppp *ap)
{
        if (atomic_dec_and_test(&ap->refcnt))
                up(&ap->dead_sem);
}

/*
 * Called when a tty is put into sync-PPP line discipline.
 */
static int
ppp_sync_open(struct tty_struct *tty)
{
        struct syncppp *ap;
        int err;

        ap = kmalloc(sizeof(*ap), GFP_KERNEL);
        err = -ENOMEM;
        if (ap == 0)
                goto out;

        /* initialize the syncppp structure */
        memset(ap, 0, sizeof(*ap));
        ap->tty = tty;
        ap->mru = PPP_MRU;
        spin_lock_init(&ap->xmit_lock);
        spin_lock_init(&ap->recv_lock);
        ap->xaccm[0] = ~0U;
        ap->xaccm[3] = 0x60000000U;
        ap->raccm = ~0U;

        skb_queue_head_init(&ap->rqueue);
        tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);

        atomic_set(&ap->refcnt, 1);
        init_MUTEX_LOCKED(&ap->dead_sem);
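
        /*
         * dead_sem starts out locked: ppp_sync_close() sleeps on it
         * until the last sp_put() drops the final reference and ups
         * the semaphore.
         */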
        ap->chan.private = ap;
        ap->chan.ops = &sync_ops;
        ap->chan.mtu = PPP_MRU;
        ap->chan.hdrlen = 2;    /* for A/C bytes */
        err = ppp_register_channel(&ap->chan);
        if (err)
                goto out_free;

        tty->disc_data = ap;
        tty->receive_room = 65536;
        return 0;

out_free:
        kfree(ap);
out:
        return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_synctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the syncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_sync_close(struct tty_struct *tty)
{
        struct syncppp *ap;

        write_lock_irq(&disc_data_lock);
        ap = tty->disc_data;
        tty->disc_data = NULL;
        write_unlock_irq(&disc_data_lock);
        if (ap == 0)
                return;

        /*
         * We have now ensured that nobody can start using ap from now
         * on, but we have to wait for all existing users to finish.
         * Note that ppp_unregister_channel ensures that no calls to
         * our channel ops (i.e. ppp_sync_send/ioctl) are in progress
         * by the time it returns.
         */
        if (!atomic_dec_and_test(&ap->refcnt))
                down(&ap->dead_sem);
        tasklet_kill(&ap->tsk);

        ppp_unregister_channel(&ap->chan);
        skb_queue_purge(&ap->rqueue);
        if (ap->tpkt != 0)
                kfree_skb(ap->tpkt);
        kfree(ap);
}

/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.
 */
static int ppp_sync_hangup(struct tty_struct *tty)
{
        ppp_sync_close(tty);
        return 0;
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_sync_read(struct tty_struct *tty, struct file *file,
              unsigned char __user *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_sync_write(struct tty_struct *tty, struct file *file,
               const unsigned char *buf, size_t count)
{
        return -EAGAIN;
}

static int
ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
                  unsigned int cmd, unsigned long arg)
{
        struct syncppp *ap = sp_get(tty);
        int __user *p = (int __user *)arg;
        int err, val;

        if (ap == 0)
                return -ENXIO;
        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGCHAN:
                err = -ENXIO;
                if (ap == 0)
                        break;
                err = -EFAULT;
                if (put_user(ppp_channel_index(&ap->chan), p))
                        break;
                err = 0;
                break;

        case PPPIOCGUNIT:
                err = -ENXIO;
                if (ap == 0)
                        break;
                err = -EFAULT;
                if (put_user(ppp_unit_number(&ap->chan), p))
                        break;
                err = 0;
                break;

        case TCGETS:
        case TCGETA:
                err = n_tty_ioctl(tty, file, cmd, arg);
                break;

        case TCFLSH:
                /* flush our buffers and the serial port's buffer */
                if (arg == TCIOFLUSH || arg == TCOFLUSH)
                        ppp_sync_flush_output(ap);
                err = n_tty_ioctl(tty, file, cmd, arg);
                break;
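
        /*
         * No data is ever readable from the tty itself (see
         * ppp_sync_read() above), so FIONREAD always reports zero.
         */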
        case FIONREAD:
                val = 0;
                if (put_user(val, p))
                        break;
                err = 0;
                break;

        default:
                err = -ENOIOCTLCMD;
        }

        sp_put(ap);
        return err;
}

/* No kernel lock - fine */
static unsigned int
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
        return 0;
}

/*
 * This can now be called from hard interrupt level as well
 * as soft interrupt level or mainline.
 */
static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
                 char *cflags, int count)
{
        struct syncppp *ap = sp_get(tty);
        unsigned long flags;

        if (ap == 0)
                return;
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_sync_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
        if (!skb_queue_empty(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        sp_put(ap);
        if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
            && tty->driver->unthrottle)
                tty->driver->unthrottle(tty);
}

static void
ppp_sync_wakeup(struct tty_struct *tty)
{
        struct syncppp *ap = sp_get(tty);

        clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        if (ap == 0)
                return;
        set_bit(XMIT_WAKEUP, &ap->xmit_flags);
        tasklet_schedule(&ap->tsk);
        sp_put(ap);
}

static struct tty_ldisc ppp_sync_ldisc = {
        .owner  = THIS_MODULE,
        .magic  = TTY_LDISC_MAGIC,
        .name   = "pppsync",
        .open   = ppp_sync_open,
        .close  = ppp_sync_close,
        .hangup = ppp_sync_hangup,
        .read   = ppp_sync_read,
        .write  = ppp_sync_write,
        .ioctl  = ppp_synctty_ioctl,
        .poll   = ppp_sync_poll,
        .receive_buf = ppp_sync_receive,
        .write_wakeup = ppp_sync_wakeup,
};

static int __init
ppp_sync_init(void)
{
        int err;

        err = tty_register_ldisc(N_SYNC_PPP, &ppp_sync_ldisc);
        if (err != 0)
                printk(KERN_ERR "PPP_sync: error %d registering line disc.\n",
                       err);
        return err;
}

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
        struct syncppp *ap = chan->private;
        int err, val;
        u32 accm[8];
        void __user *argp = (void __user *)arg;
        u32 __user *p = argp;

        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGFLAGS:
                val = ap->flags | ap->rbits;
                if (put_user(val, (int __user *) argp))
                        break;
                err = 0;
                break;
        case PPPIOCSFLAGS:
                if (get_user(val, (int __user *) argp))
                        break;
                ap->flags = val & ~SC_RCV_BITS;
                spin_lock_irq(&ap->recv_lock);
                ap->rbits = val & SC_RCV_BITS;
                spin_unlock_irq(&ap->recv_lock);
                err = 0;
                break;
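
        /*
         * The async character maps below are stored but never used for
         * framing on a sync line; they are implemented only so that
         * user-mode applications such as pppd see their ioctls succeed
         * (see the comment at the top of this file).
         */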
        case PPPIOCGASYNCMAP:
                if (put_user(ap->xaccm[0], p))
                        break;
                err = 0;
                break;
        case PPPIOCSASYNCMAP:
                if (get_user(ap->xaccm[0], p))
                        break;
                err = 0;
                break;

        case PPPIOCGRASYNCMAP:
                if (put_user(ap->raccm, p))
                        break;
                err = 0;
                break;
        case PPPIOCSRASYNCMAP:
                if (get_user(ap->raccm, p))
                        break;
                err = 0;
                break;

        case PPPIOCGXASYNCMAP:
                if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
                        break;
                err = 0;
                break;
        case PPPIOCSXASYNCMAP:
                if (copy_from_user(accm, argp, sizeof(accm)))
                        break;
                accm[2] &= ~0x40000000U;        /* can't escape 0x5e */
                accm[3] |= 0x60000000U;         /* must escape 0x7d, 0x7e */
                memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
                err = 0;
                break;

        case PPPIOCGMRU:
                if (put_user(ap->mru, (int __user *) argp))
                        break;
                err = 0;
                break;
        case PPPIOCSMRU:
                if (get_user(val, (int __user *) argp))
                        break;
                if (val < PPP_MRU)
                        val = PPP_MRU;
                ap->mru = val;
                err = 0;
                break;

        default:
                err = -ENOTTY;
        }
        return err;
}

/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_sync_process(unsigned long arg)
{
        struct syncppp *ap = (struct syncppp *) arg;
        struct sk_buff *skb;

        /* process received packets */
        while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
                if (skb->len == 0) {
                        /* zero length buffers indicate error */
                        ppp_input_error(&ap->chan, 0);
                        kfree_skb(skb);
                }
                else
                        ppp_input(&ap->chan, skb);
        }

        /* try to push more stuff out */
        if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_sync_push(ap))
                ppp_output_wakeup(&ap->chan);
}

/*
 * Procedures for encapsulation and framing.
 */
static struct sk_buff *
ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
{
        int proto;
        unsigned char *data;
        int islcp;

        data = skb->data;
        proto = (data[0] << 8) + data[1];

        /* LCP packets with codes between 1 (configure-request)
         * and 7 (code-reject) must be sent as though no options
         * have been negotiated.
         */
        islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

        /* compress protocol field if option enabled */
        if (data[0] == 0 && (ap->flags & SC_COMP_PROT) && !islcp)
                skb_pull(skb, 1);

        /* prepend address/control fields if necessary */
        if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
                if (skb_headroom(skb) < 2) {
                        struct sk_buff *npkt = dev_alloc_skb(skb->len + 2);
                        if (npkt == NULL) {
                                kfree_skb(skb);
                                return NULL;
                        }
                        skb_reserve(npkt, 2);
                        memcpy(skb_put(npkt, skb->len), skb->data, skb->len);
                        kfree_skb(skb);
                        skb = npkt;
                }
                skb_push(skb, 2);
                skb->data[0] = PPP_ALLSTATIONS;
                skb->data[1] = PPP_UI;
        }

        ap->last_xmit = jiffies;

        if (skb && ap->flags & SC_LOG_OUTPKT)
                ppp_print_buffer ("send buffer", skb->data, skb->len);

        return skb;
}

/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over a sync tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_sync_send(struct ppp_channel *chan, struct sk_buff *skb)
{
        struct syncppp *ap = chan->private;

        ppp_sync_push(ap);

        if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
                return 0;       /* already full */
        skb = ppp_sync_txmunge(ap, skb);
        if (skb != NULL)
                ap->tpkt = skb;
        else
                clear_bit(XMIT_FULL, &ap->xmit_flags);

        ppp_sync_push(ap);
        return 1;
}

/*
 * Push as much data as possible out to the tty.
 */
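/*
 * ppp_sync_push takes xmit_lock with spin_trylock_bh, so it can be
 * entered from both ppp_sync_send() and the tasklet without spinning;
 * after dropping the lock it re-tests XMIT_WAKEUP (set by the tty
 * write_wakeup callback) so a wakeup that arrives just as the lock is
 * released is not lost.
 */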
static int
ppp_sync_push(struct syncppp *ap)
{
        int sent, done = 0;
        struct tty_struct *tty = ap->tty;
        int tty_stuffed = 0;

        if (!spin_trylock_bh(&ap->xmit_lock))
                return 0;
        for (;;) {
                if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
                        tty_stuffed = 0;
                if (!tty_stuffed && ap->tpkt != 0) {
                        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
                        sent = tty->driver->write(tty, ap->tpkt->data, ap->tpkt->len);
                        if (sent < 0)
                                goto flush;     /* error, e.g. loss of CD */
                        if (sent < ap->tpkt->len) {
                                tty_stuffed = 1;
                        } else {
                                kfree_skb(ap->tpkt);
                                ap->tpkt = NULL;
                                clear_bit(XMIT_FULL, &ap->xmit_flags);
                                done = 1;
                        }
                        continue;
                }
                /* haven't made any progress */
                spin_unlock_bh(&ap->xmit_lock);
                if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
                      || (!tty_stuffed && ap->tpkt != 0)))
                        break;
                if (!spin_trylock_bh(&ap->xmit_lock))
                        break;
        }
        return done;

flush:
        if (ap->tpkt != 0) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        spin_unlock_bh(&ap->xmit_lock);
        return done;
}

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl.
 */
static void
ppp_sync_flush_output(struct syncppp *ap)
{
        int done = 0;

        spin_lock_bh(&ap->xmit_lock);
        if (ap->tpkt != NULL) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        spin_unlock_bh(&ap->xmit_lock);
        if (done)
                ppp_output_wakeup(&ap->chan);
}

/*
 * Receive-side routines.
 */

/* called when the tty driver has data for us.
 *
 * Data is frame oriented: each call to ppp_sync_input is considered
 * a whole frame.  If the 1st flag byte is non-zero then the whole
 * frame is considered to be in error and is tossed.
 */
static void
ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
               char *flags, int count)
{
        struct sk_buff *skb;
        unsigned char *p;

        if (count == 0)
                return;

        if (ap->flags & SC_LOG_INPKT)
                ppp_print_buffer ("receive buffer", buf, count);

        /* stuff the chars in the skb */
        if ((skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2)) == 0) {
                printk(KERN_ERR "PPPsync: no memory (input pkt)\n");
                goto err;
        }

        /* Try to get the payload 4-byte aligned */
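        /*
         * With no address/control bytes present, reserving two bytes
         * (plus one more when the first byte is odd, i.e. a compressed
         * protocol field that is re-expanded below) leaves the
         * network-layer header 4-byte aligned once the protocol field
         * has been dealt with.
         */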
        if (buf[0] != PPP_ALLSTATIONS)
                skb_reserve(skb, 2 + (buf[0] & 1));

        if (flags != 0 && *flags) {
                /* error flag set, ignore frame */
                goto err;
        } else if (count > skb_tailroom(skb)) {
                /* packet overflowed MRU */
                goto err;
        }

        p = skb_put(skb, count);
        memcpy(p, buf, count);

        /* strip address/control field if present */
        p = skb->data;
        if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
                /* chop off address/control */
                if (skb->len < 3)
                        goto err;
                p = skb_pull(skb, 2);
        }

        /* decompress protocol field if compressed */
        if (p[0] & 1) {
                /* protocol is compressed */
                skb_push(skb, 1)[0] = 0;
        } else if (skb->len < 2)
                goto err;

        /* queue the frame to be processed */
        skb_queue_tail(&ap->rqueue, skb);
        return;

err:
        /* queue zero length packet as error indication */
        if (skb || (skb = dev_alloc_skb(0))) {
                skb_trim(skb, 0);
                skb_queue_tail(&ap->rqueue, skb);
        }
}

static void __exit
ppp_sync_cleanup(void)
{
        if (tty_unregister_ldisc(N_SYNC_PPP) != 0)
                printk(KERN_ERR "failed to unregister Sync PPP line discipline\n");
}

module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);