ia64/linux-2.6.18-xen.hg
drivers/serial/jsm/jsm_tty.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, creating temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we only partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author   Keir Fraser <keir.fraser@citrix.com>
date     Fri Jun 05 14:01:20 2009 +0100
parents  831230e53067
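The retry behaviour described in the changeset can be pictured with a minimal sketch. This is not the code from the patch; the counters and helpers below (try_increase_reservation(), schedule_retry(), current_pages, target_pages) are stand-ins invented purely for illustration:

/*
 * Illustrative sketch only, not the changeset itself.  A short or failed
 * increase no longer records a "hard limit": whatever pages were granted
 * are kept, and the operation is simply re-armed on a timer, just as is
 * already done when decreasing the reservation.
 */
static unsigned long current_pages;     /* pages currently held by the guest */
static unsigned long target_pages;      /* target requested by the toolstack */

static long try_increase_reservation(unsigned long nr); /* pages granted, may be < nr */
static void schedule_retry(void);       /* re-run the balloon worker after a delay */

static void balloon_up_step(void)
{
        unsigned long want = target_pages - current_pages;
        long got = try_increase_reservation(want);

        if (got > 0)
                current_pages += got;   /* keep a partial grant rather than returning it */

        if (current_pages < target_pages)
                schedule_retry();       /* host memory may free up as other guests balloon down */
}

The point of this shape is that a short grant is treated as progress rather than as a terminal condition, so transient host memory pressure no longer strands the guest below its target.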
/************************************************************************
 * Copyright 2003 Digi International (www.digi.com)
 *
 * Copyright (C) 2004 IBM Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * Contact Information:
 * Scott H Kilau <Scott_Kilau@digi.com>
 * Ananda Venkatarman <mansarov@us.ibm.com>
 * Modifications:
 * 01/19/06: changed jsm_input routine to use the dynamically allocated
 *           tty_buffer changes. Contributors: Scott Kilau and Ananda V.
 ***********************************************************************/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/delay.h> /* For udelay */
#include <linux/pci.h>

#include "jsm.h"

static void jsm_carrier(struct jsm_channel *ch);

static inline int jsm_get_mstat(struct jsm_channel *ch)
{
        unsigned char mstat;
        unsigned result;

        jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "start\n");

        mstat = (ch->ch_mostat | ch->ch_mistat);

        result = 0;

        if (mstat & UART_MCR_DTR)
                result |= TIOCM_DTR;
        if (mstat & UART_MCR_RTS)
                result |= TIOCM_RTS;
        if (mstat & UART_MSR_CTS)
                result |= TIOCM_CTS;
        if (mstat & UART_MSR_DSR)
                result |= TIOCM_DSR;
        if (mstat & UART_MSR_RI)
                result |= TIOCM_RI;
        if (mstat & UART_MSR_DCD)
                result |= TIOCM_CD;

        jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
        return result;
}

static unsigned int jsm_tty_tx_empty(struct uart_port *port)
{
        return TIOCSER_TEMT;
}
/*
 * Return modem signals to ld.
 */
static unsigned int jsm_tty_get_mctrl(struct uart_port *port)
{
        int result;
        struct jsm_channel *channel = (struct jsm_channel *)port;

        jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "start\n");

        result = jsm_get_mstat(channel);

        if (result < 0)
                return -ENXIO;

        jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");

        return result;
}

/*
 * jsm_set_modem_info()
 *
 * Set modem signals, called by ld.
 */
static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct jsm_channel *channel = (struct jsm_channel *)port;

        jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "start\n");

        if (mctrl & TIOCM_RTS)
                channel->ch_mostat |= UART_MCR_RTS;
        else
                channel->ch_mostat &= ~UART_MCR_RTS;

        if (mctrl & TIOCM_DTR)
                channel->ch_mostat |= UART_MCR_DTR;
        else
                channel->ch_mostat &= ~UART_MCR_DTR;

        channel->ch_bd->bd_ops->assert_modem_signals(channel);

        jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");
        udelay(10);
}

static void jsm_tty_start_tx(struct uart_port *port)
{
        struct jsm_channel *channel = (struct jsm_channel *)port;

        jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "start\n");

        channel->ch_flags &= ~(CH_STOP);
        jsm_tty_write(port);

        jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_stop_tx(struct uart_port *port)
{
        struct jsm_channel *channel = (struct jsm_channel *)port;

        jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "start\n");

        channel->ch_flags |= (CH_STOP);

        jsm_printk(IOCTL, INFO, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_send_xchar(struct uart_port *port, char ch)
{
        unsigned long lock_flags;
        struct jsm_channel *channel = (struct jsm_channel *)port;
        struct termios *termios;

        spin_lock_irqsave(&port->lock, lock_flags);
        termios = port->info->tty->termios;
        if (ch == termios->c_cc[VSTART])
                channel->ch_bd->bd_ops->send_start_character(channel);

        if (ch == termios->c_cc[VSTOP])
                channel->ch_bd->bd_ops->send_stop_character(channel);
        spin_unlock_irqrestore(&port->lock, lock_flags);
}

static void jsm_tty_stop_rx(struct uart_port *port)
{
        struct jsm_channel *channel = (struct jsm_channel *)port;

        channel->ch_bd->bd_ops->disable_receiver(channel);
}

static void jsm_tty_break(struct uart_port *port, int break_state)
{
        unsigned long lock_flags;
        struct jsm_channel *channel = (struct jsm_channel *)port;

        spin_lock_irqsave(&port->lock, lock_flags);
        if (break_state == -1)
                channel->ch_bd->bd_ops->send_break(channel);
        else
                channel->ch_bd->bd_ops->clear_break(channel, 0);

        spin_unlock_irqrestore(&port->lock, lock_flags);
}
static int jsm_tty_open(struct uart_port *port)
{
        struct jsm_board *brd;
        int rc = 0;
        struct jsm_channel *channel = (struct jsm_channel *)port;
        struct termios *termios;

        /* Get board pointer from our array of majors we have allocated */
        brd = channel->ch_bd;

        /*
         * Allocate channel buffers for read/write/error.
         * Set flag, so we don't get trounced on.
         */
        channel->ch_flags |= (CH_OPENING);

        /* Drop locks, as malloc with GFP_KERNEL can sleep */

        if (!channel->ch_rqueue) {
                channel->ch_rqueue = (u8 *) kmalloc(RQUEUESIZE, GFP_KERNEL);
                if (!channel->ch_rqueue) {
                        jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
                                "unable to allocate read queue buf");
                        return -ENOMEM;
                }
                memset(channel->ch_rqueue, 0, RQUEUESIZE);
        }
        if (!channel->ch_equeue) {
                channel->ch_equeue = (u8 *) kmalloc(EQUEUESIZE, GFP_KERNEL);
                if (!channel->ch_equeue) {
                        jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
                                "unable to allocate error queue buf");
                        return -ENOMEM;
                }
                memset(channel->ch_equeue, 0, EQUEUESIZE);
        }
        if (!channel->ch_wqueue) {
                channel->ch_wqueue = (u8 *) kmalloc(WQUEUESIZE, GFP_KERNEL);
                if (!channel->ch_wqueue) {
                        jsm_printk(INIT, ERR, &channel->ch_bd->pci_dev,
                                "unable to allocate write queue buf");
                        return -ENOMEM;
                }
                memset(channel->ch_wqueue, 0, WQUEUESIZE);
        }

        channel->ch_flags &= ~(CH_OPENING);
        /*
         * Initialize if neither terminal is open.
         */
        jsm_printk(OPEN, INFO, &channel->ch_bd->pci_dev,
                "jsm_open: initializing channel in open...\n");

        /*
         * Flush input queues.
         */
        channel->ch_r_head = channel->ch_r_tail = 0;
        channel->ch_e_head = channel->ch_e_tail = 0;
        channel->ch_w_head = channel->ch_w_tail = 0;

        brd->bd_ops->flush_uart_write(channel);
        brd->bd_ops->flush_uart_read(channel);

        channel->ch_flags = 0;
        channel->ch_cached_lsr = 0;
        channel->ch_stops_sent = 0;

        termios = port->info->tty->termios;
        channel->ch_c_cflag = termios->c_cflag;
        channel->ch_c_iflag = termios->c_iflag;
        channel->ch_c_oflag = termios->c_oflag;
        channel->ch_c_lflag = termios->c_lflag;
        channel->ch_startc = termios->c_cc[VSTART];
        channel->ch_stopc = termios->c_cc[VSTOP];

        /* Tell UART to init itself */
        brd->bd_ops->uart_init(channel);

        /*
         * Run param in case we changed anything
         */
        brd->bd_ops->param(channel);

        jsm_carrier(channel);

        channel->ch_open_count++;

        jsm_printk(OPEN, INFO, &channel->ch_bd->pci_dev, "finish\n");
        return rc;
}
static void jsm_tty_close(struct uart_port *port)
{
        struct jsm_board *bd;
        struct termios *ts;
        struct jsm_channel *channel = (struct jsm_channel *)port;

        jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev, "start\n");

        bd = channel->ch_bd;
        ts = channel->uart_port.info->tty->termios;

        channel->ch_flags &= ~(CH_STOPI);

        channel->ch_open_count--;

        /*
         * If we have HUPCL set, lower DTR and RTS
         */
        if (channel->ch_c_cflag & HUPCL) {
                jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev,
                        "Close. HUPCL set, dropping DTR/RTS\n");

                /* Drop RTS/DTR */
                channel->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
                bd->bd_ops->assert_modem_signals(channel);
        }

        channel->ch_old_baud = 0;

        /* Turn off UART interrupts for this port */
        channel->ch_bd->bd_ops->uart_off(channel);

        jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_set_termios(struct uart_port *port,
                                struct termios *termios,
                                struct termios *old_termios)
{
        unsigned long lock_flags;
        struct jsm_channel *channel = (struct jsm_channel *)port;

        spin_lock_irqsave(&port->lock, lock_flags);
        channel->ch_c_cflag = termios->c_cflag;
        channel->ch_c_iflag = termios->c_iflag;
        channel->ch_c_oflag = termios->c_oflag;
        channel->ch_c_lflag = termios->c_lflag;
        channel->ch_startc = termios->c_cc[VSTART];
        channel->ch_stopc = termios->c_cc[VSTOP];

        channel->ch_bd->bd_ops->param(channel);
        jsm_carrier(channel);
        spin_unlock_irqrestore(&port->lock, lock_flags);
}

static const char *jsm_tty_type(struct uart_port *port)
{
        return "jsm";
}

static void jsm_tty_release_port(struct uart_port *port)
{
}

static int jsm_tty_request_port(struct uart_port *port)
{
        return 0;
}

static void jsm_config_port(struct uart_port *port, int flags)
{
        port->type = PORT_JSM;
}

static struct uart_ops jsm_ops = {
        .tx_empty       = jsm_tty_tx_empty,
        .set_mctrl      = jsm_tty_set_mctrl,
        .get_mctrl      = jsm_tty_get_mctrl,
        .stop_tx        = jsm_tty_stop_tx,
        .start_tx       = jsm_tty_start_tx,
        .send_xchar     = jsm_tty_send_xchar,
        .stop_rx        = jsm_tty_stop_rx,
        .break_ctl      = jsm_tty_break,
        .startup        = jsm_tty_open,
        .shutdown       = jsm_tty_close,
        .set_termios    = jsm_tty_set_termios,
        .type           = jsm_tty_type,
        .release_port   = jsm_tty_release_port,
        .request_port   = jsm_tty_request_port,
        .config_port    = jsm_config_port,
};

/*
 * jsm_tty_init()
 *
 * Init the tty subsystem. Called once per board after board has been
 * downloaded and init'ed.
 */
int jsm_tty_init(struct jsm_board *brd)
{
        int i;
        void __iomem *vaddr;
        struct jsm_channel *ch;

        if (!brd)
                return -ENXIO;

        jsm_printk(INIT, INFO, &brd->pci_dev, "start\n");

        /*
         * Initialize board structure elements.
         */

        brd->nasync = brd->maxports;

        /*
         * Allocate channel memory that might not have been allocated
         * when the driver was first loaded.
         */
        for (i = 0; i < brd->nasync; i++) {
                if (!brd->channels[i]) {

                        /*
                         * Okay to malloc with GFP_KERNEL, we are not at
                         * interrupt context, and there are no locks held.
                         */
                        brd->channels[i] = kmalloc(sizeof(struct jsm_channel), GFP_KERNEL);
                        if (!brd->channels[i]) {
                                jsm_printk(CORE, ERR, &brd->pci_dev,
                                        "%s:%d Unable to allocate memory for channel struct\n",
                                        __FILE__, __LINE__);
                                /* Skip this channel; later loops ignore unallocated entries */
                                continue;
                        }
                        memset(brd->channels[i], 0, sizeof(struct jsm_channel));
                }
        }

        ch = brd->channels[0];
        vaddr = brd->re_map_membase;

        /* Set up channel variables */
        for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

                if (!brd->channels[i])
                        continue;

                spin_lock_init(&ch->ch_lock);

                if (brd->bd_uart_offset == 0x200)
                        ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);

                ch->ch_bd = brd;
                ch->ch_portnum = i;

                /* .25 second delay */
                ch->ch_close_delay = 250;

                init_waitqueue_head(&ch->ch_flags_wait);
        }

        jsm_printk(INIT, INFO, &brd->pci_dev, "finish\n");
        return 0;
}
int jsm_uart_port_init(struct jsm_board *brd)
{
        int i;
        struct jsm_channel *ch;

        if (!brd)
                return -ENXIO;

        jsm_printk(INIT, INFO, &brd->pci_dev, "start\n");

        /*
         * Initialize board structure elements.
         */

        brd->nasync = brd->maxports;

        /* Set up channel variables */
        for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

                if (!brd->channels[i])
                        continue;

                brd->channels[i]->uart_port.irq = brd->irq;
                brd->channels[i]->uart_port.type = PORT_JSM;
                brd->channels[i]->uart_port.iotype = UPIO_MEM;
                brd->channels[i]->uart_port.membase = brd->re_map_membase;
                brd->channels[i]->uart_port.fifosize = 16;
                brd->channels[i]->uart_port.ops = &jsm_ops;
                brd->channels[i]->uart_port.line = brd->channels[i]->ch_portnum + brd->boardnum * 2;
                if (uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port))
                        printk(KERN_INFO "Added device failed\n");
                else
                        printk(KERN_INFO "Added device \n");
        }

        jsm_printk(INIT, INFO, &brd->pci_dev, "finish\n");
        return 0;
}
int jsm_remove_uart_port(struct jsm_board *brd)
{
        int i;
        struct jsm_channel *ch;

        if (!brd)
                return -ENXIO;

        jsm_printk(INIT, INFO, &brd->pci_dev, "start\n");

        /*
         * Initialize board structure elements.
         */

        brd->nasync = brd->maxports;

        /* Set up channel variables */
        for (i = 0; i < brd->nasync; i++) {

                if (!brd->channels[i])
                        continue;

                ch = brd->channels[i];

                uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
        }

        jsm_printk(INIT, INFO, &brd->pci_dev, "finish\n");
        return 0;
}
void jsm_input(struct jsm_channel *ch)
{
        struct jsm_board *bd;
        struct tty_struct *tp;
        struct tty_ldisc *ld;
        u32 rmask;
        u16 head;
        u16 tail;
        int data_len;
        unsigned long lock_flags;
        int flip_len = 0;
        int len = 0;
        int n = 0;
        int s = 0;
        int i = 0;

        if (!ch)
                return;

        jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

        tp = ch->uart_port.info->tty;

        bd = ch->ch_bd;
        if (!bd)
                return;

        spin_lock_irqsave(&ch->ch_lock, lock_flags);

        /*
         * Figure the number of characters in the buffer.
         * Exit immediately if none.
         */

        rmask = RQUEUEMASK;

        head = ch->ch_r_head & rmask;
        tail = ch->ch_r_tail & rmask;

        data_len = (head - tail) & rmask;
        if (data_len == 0) {
                spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
                return;
        }

        jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start\n");

        /*
         * If the device is not open, or CREAD is off, flush
         * input data and return immediately.
         */
        if (!tp ||
                !(tp->termios->c_cflag & CREAD)) {

                jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
                        "input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum);
                ch->ch_r_head = tail;

                /* Force queue flow control to be released, if needed */
                jsm_check_queue_flow_control(ch);

                spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
                return;
        }

        /*
         * If we are throttled, simply don't read any data.
         */
        if (ch->ch_flags & CH_STOPI) {
                spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
                jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
                        "Port %d throttled, not reading any data. head: %x tail: %x\n",
                        ch->ch_portnum, head, tail);
                return;
        }

        jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "start 2\n");

        /*
         * If the rxbuf is empty and we are not throttled, put as much
         * as we can directly into the linux TTY buffer.
         */
        flip_len = TTY_FLIPBUF_SIZE;

        len = min(data_len, flip_len);
        len = min(len, (N_TTY_BUF_SIZE - 1) - tp->read_cnt);
        ld = tty_ldisc_ref(tp);

        /*
         * If we were unable to get a reference to the ld,
         * don't flush our buffer, and act like the ld doesn't
         * have any space to put the data right now.
         */
        if (!ld) {
                len = 0;
        } else {
                /*
                 * If ld doesn't have a pointer to a receive_buf function,
                 * flush the data, then act like the ld doesn't have any
                 * space to put the data right now.
                 */
                if (!ld->receive_buf) {
                        ch->ch_r_head = ch->ch_r_tail;
                        len = 0;
                }
        }

        if (len <= 0) {
                spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
                jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "jsm_input 1\n");
                if (ld)
                        tty_ldisc_deref(ld);
                return;
        }

        len = tty_buffer_request_room(tp, len);
        n = len;

        /*
         * n now contains the most amount of data we can copy,
         * bounded either by the flip buffer size or the amount
         * of data the card actually has pending...
         */
        while (n) {
                s = ((head >= tail) ? head : RQUEUESIZE) - tail;
                s = min(s, n);

                if (s <= 0)
                        break;

                /*
                 * If conditions are such that ld needs to see all
                 * UART errors, we will have to walk each character
                 * and error byte and send them to the buffer one at
                 * a time.
                 */

                if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
                        for (i = 0; i < s; i++) {
                                /*
                                 * Give the Linux ld the flags in the
                                 * format it likes.
                                 */
                                if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
                                        tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_BREAK);
                                else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
                                        tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_PARITY);
                                else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
                                        tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_FRAME);
                                else
                                        tty_insert_flip_char(tp, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
                        }
                } else {
                        tty_insert_flip_string(tp, ch->ch_rqueue + tail, s);
                }
                tail += s;
                n -= s;
                /* Flip queue if needed */
                tail &= rmask;
        }

        ch->ch_r_tail = tail & rmask;
        ch->ch_e_tail = tail & rmask;
        jsm_check_queue_flow_control(ch);
        spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

        /* Tell the tty layer it's okay to "eat" the data now */
        tty_flip_buffer_push(tp);

        if (ld)
                tty_ldisc_deref(ld);

        jsm_printk(IOCTL, INFO, &ch->ch_bd->pci_dev, "finish\n");
}
static void jsm_carrier(struct jsm_channel *ch)
{
        struct jsm_board *bd;

        int virt_carrier = 0;
        int phys_carrier = 0;

        if (!ch)
                return;

        jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev, "start\n");

        bd = ch->ch_bd;

        if (!bd)
                return;

        if (ch->ch_mistat & UART_MSR_DCD) {
                jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
                        "mistat: %x D_CD: %x\n", ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD);
                phys_carrier = 1;
        }

        if (ch->ch_c_cflag & CLOCAL)
                virt_carrier = 1;

        jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
                "DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier);

        /*
         * Test for a VIRTUAL carrier transition to HIGH.
         */
        if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {

                /*
                 * When carrier rises, wake any threads waiting
                 * for carrier in the open routine.
                 */
                jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
                        "carrier: virt DCD rose\n");

                if (waitqueue_active(&(ch->ch_flags_wait)))
                        wake_up_interruptible(&ch->ch_flags_wait);
        }

        /*
         * Test for a PHYSICAL carrier transition to HIGH.
         */
        if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {

                /*
                 * When carrier rises, wake any threads waiting
                 * for carrier in the open routine.
                 */
                jsm_printk(CARR, INFO, &ch->ch_bd->pci_dev,
                        "carrier: physical DCD rose\n");

                if (waitqueue_active(&(ch->ch_flags_wait)))
                        wake_up_interruptible(&ch->ch_flags_wait);
        }

        /*
         * Test for a PHYSICAL transition to low, so long as we aren't
         * currently ignoring physical transitions (which is what "virtual
         * carrier" indicates).
         *
         * The transition of the virtual carrier to low really doesn't
         * matter... it really only means "ignore carrier state", not
         * "make pretend that carrier is there".
         */
        if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0)
                        && (phys_carrier == 0)) {
                /*
                 * When carrier drops:
                 *
                 * Drop carrier on all open units.
                 *
                 * Flush queues, waking up any task waiting in the
                 * line discipline.
                 *
                 * Send a hangup to the control terminal.
                 *
                 * Enable all select calls.
                 */
                if (waitqueue_active(&(ch->ch_flags_wait)))
                        wake_up_interruptible(&ch->ch_flags_wait);
        }

        /*
         * Make sure that our cached values reflect the current reality.
         */
        if (virt_carrier == 1)
                ch->ch_flags |= CH_FCAR;
        else
                ch->ch_flags &= ~CH_FCAR;

        if (phys_carrier == 1)
                ch->ch_flags |= CH_CD;
        else
                ch->ch_flags &= ~CH_CD;
}
void jsm_check_queue_flow_control(struct jsm_channel *ch)
{
        struct board_ops *bd_ops = ch->ch_bd->bd_ops;
        int qleft = 0;

        /* Store how much space we have left in the queue */
        if ((qleft = ch->ch_r_tail - ch->ch_r_head - 1) < 0)
                qleft += RQUEUEMASK + 1;

        /*
         * Check to see if we should enforce flow control on our queue because
         * the ld (or user) isn't reading data out of our queue fast enough.
         *
         * NOTE: This is done based on what the current flow control of the
         * port is set for.
         *
         * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
         *      This will cause the UART's FIFO to back up, and force
         *      the RTS signal to be dropped.
         * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
         *      the other side, in hopes it will stop sending data to us.
         * 3) NONE - Nothing we can do. We will simply drop any extra data
         *      that gets sent into us when the queue fills up.
         */
        if (qleft < 256) {
                /* HWFLOW */
                if (ch->ch_c_cflag & CRTSCTS) {
                        if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
                                bd_ops->disable_receiver(ch);
                                ch->ch_flags |= (CH_RECEIVER_OFF);
                                jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
                                        "Internal queue hit hilevel mark (%d)! Turning off interrupts.\n",
                                        qleft);
                        }
                }
                /* SWFLOW */
                else if (ch->ch_c_iflag & IXOFF) {
                        if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
                                bd_ops->send_stop_character(ch);
                                ch->ch_stops_sent++;
                                jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
                                        "Sending stop char! Times sent: %x\n", ch->ch_stops_sent);
                        }
                }
        }

        /*
         * Check to see if we should release flow control because
         * the ld (or user) finally read enough data out of our queue.
         *
         * NOTE: This is done based on what the current flow control of the
         * port is set for.
         *
         * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
         *      This will cause the UART's FIFO to raise RTS back up,
         *      which will allow the other side to start sending data again.
         * 2) SWFLOW (IXOFF) - Send a start character to
         *      the other side, so it will start sending data to us again.
         * 3) NONE - Do nothing. Since we didn't do anything to turn off the
         *      other side, we don't need to do anything now.
         */
        if (qleft > (RQUEUESIZE / 2)) {
                /* HWFLOW */
                if (ch->ch_c_cflag & CRTSCTS) {
                        if (ch->ch_flags & CH_RECEIVER_OFF) {
                                bd_ops->enable_receiver(ch);
                                ch->ch_flags &= ~(CH_RECEIVER_OFF);
                                jsm_printk(READ, INFO, &ch->ch_bd->pci_dev,
                                        "Internal queue hit lowlevel mark (%d)! Turning on interrupts.\n",
                                        qleft);
                        }
                }
                /* SWFLOW */
                else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
                        ch->ch_stops_sent = 0;
                        bd_ops->send_start_character(ch);
                        jsm_printk(READ, INFO, &ch->ch_bd->pci_dev, "Sending start char!\n");
                }
        }
}
/*
 * jsm_tty_write()
 *
 * Take data from the user or kernel and send it out to the FEP.
 * In here exists all the Transparent Print magic as well.
 */
int jsm_tty_write(struct uart_port *port)
{
        int bufcount = 0, n = 0;
        int data_count = 0, data_count1 = 0;
        u16 head;
        u16 tail;
        u16 tmask;
        u32 remain;
        int temp_tail = port->info->xmit.tail;
        struct jsm_channel *channel = (struct jsm_channel *)port;

        tmask = WQUEUEMASK;
        head = (channel->ch_w_head) & tmask;
        tail = (channel->ch_w_tail) & tmask;

        if ((bufcount = tail - head - 1) < 0)
                bufcount += WQUEUESIZE;

        n = bufcount;

        n = min(n, 56);
        remain = WQUEUESIZE - head;

        /* If the data would run past the end of the circular write queue,
         * fill up to the end first. */
        data_count = 0;
        if (n >= remain) {
                n -= remain;
                while ((port->info->xmit.head != temp_tail) &&
                                (data_count < remain)) {
                        channel->ch_wqueue[head++] =
                                port->info->xmit.buf[temp_tail];

                        temp_tail++;
                        temp_tail &= (UART_XMIT_SIZE - 1);
                        data_count++;
                }
                if (data_count == remain) head = 0;
        }

        /* Copy whatever remains (starting at the front of the queue if we
         * wrapped above). */
        data_count1 = 0;
        if (n > 0) {
                remain = n;
                while ((port->info->xmit.head != temp_tail) &&
                                (data_count1 < remain)) {
                        channel->ch_wqueue[head++] =
                                port->info->xmit.buf[temp_tail];

                        temp_tail++;
                        temp_tail &= (UART_XMIT_SIZE - 1);
                        data_count1++;
                }
        }

        port->info->xmit.tail = temp_tail;

        data_count += data_count1;
        if (data_count) {
                head &= tmask;
                channel->ch_w_head = head;
        }

        if (data_count) {
                channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
        }

        return data_count;
}