ia64/linux-2.6.18-xen.hg

view drivers/serial/mpsc.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
1 /*
2 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240,
3 * GT64260, MV64340, MV64360, GT96100, ... ).
4 *
5 * Author: Mark A. Greer <mgreer@mvista.com>
6 *
7 * Based on an old MPSC driver that was in the linuxppc tree. It appears to
8 * have been created by Chris Zankel (formerly of MontaVista) but there
9 * is no proper Copyright so I'm not sure. Apparently, parts were also
10 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c
11 * by Russell King.
12 *
13 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
14 * the terms of the GNU General Public License version 2. This program
15 * is licensed "as is" without any warranty of any kind, whether express
16 * or implied.
17 */
18 /*
19 * The MPSC interface is much like a typical network controller's interface.
20 * That is, you set up separate rings of descriptors for transmitting and
22 * receiving data. There is also a pool of buffers (one buffer per
22 * descriptor) that incoming data are dma'd into or outgoing data are dma'd
23 * out of.
24 *
25 * The MPSC requires two other controllers to be able to work. The Baud Rate
26 * Generator (BRG) provides a clock at programmable frequencies which determines
27 * the baud rate. The Serial DMA Controller (SDMA) takes incoming data from the
28 * MPSC and DMA's it into memory or DMA's outgoing data and passes it to the
29 * MPSC. It is actually the SDMA interrupt that the driver uses to keep the
30 * transmit and receive "engines" going (i.e., indicate data has been
31 * transmitted or received).
32 *
33 * NOTES:
34 *
35 * 1) Some chips have an erratum where several regs cannot be
36 * read. To work around that, we keep a local copy of those regs in
37 * 'mpsc_port_info'.
38 *
39 * 2) Some chips have an erratum where the ctlr will hang when the SDMA ctlr
40 * accesses system mem with coherency enabled. For that reason, the driver
41 * assumes that coherency for that ctlr has been disabled. This means
42 * that when in a cache coherent system, the driver has to manually manage
43 * the data cache on the areas that it touches because the dma_* macros are
44 * basically no-ops.
45 *
46 * 3) There is an erratum (on PPC) where you can't use the instruction to do
47 * a DMA_TO_DEVICE/cache clean so DMA_BIDIRECTIONAL/flushes are used in places
48 * where a DMA_TO_DEVICE/clean would have [otherwise] sufficed.
49 *
50 * 4) AFAICT, hardware flow control isn't supported by the controller --MAG.
51 */
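The two comment blocks above describe the producer/consumer convention used by most descriptor-ring hardware: software hands a descriptor (and its buffer) to the SDMA by setting the ownership bit, and the SDMA clears that bit once it has filled or drained the buffer. The stand-alone sketch below only illustrates that convention; it is not part of the driver, uses hypothetical host-order fields (the real mpsc_rx_desc/mpsc_tx_desc defined later are big-endian and live in DMA memory), and the DESC_OWNED_HW flag is a stand-in for SDMA_DESC_CMDSTAT_O.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_ENTRIES  4            /* power of 2, like MPSC_RXR_ENTRIES */
#define DESC_OWNED_HW (1u << 31)   /* stand-in for SDMA_DESC_CMDSTAT_O  */

struct demo_desc {                 /* hypothetical host-order descriptor */
	uint16_t bytecnt;          /* bytes the "hardware" wrote         */
	uint32_t cmdstat;          /* ownership + status bits            */
	char     buf[16];          /* one buffer per descriptor          */
};

static struct demo_desc ring[RING_ENTRIES];
static unsigned posn;              /* analogue of pi->rxr_posn           */

/* Drain every descriptor the hardware has handed back to software. */
static void demo_drain_ring(void)
{
	while (!(ring[posn].cmdstat & DESC_OWNED_HW)) {
		printf("entry %u: %.*s\n", posn,
		       ring[posn].bytecnt, ring[posn].buf);

		/* Recycle the entry: give it back to the hardware. */
		ring[posn].bytecnt = 0;
		ring[posn].cmdstat = DESC_OWNED_HW;
		posn = (posn + 1) & (RING_ENTRIES - 1);
	}
}

int main(void)
{
	unsigned i;

	for (i = 0; i < RING_ENTRIES; i++)
		ring[i].cmdstat = DESC_OWNED_HW;

	/* Pretend the SDMA completed entry 0 with five bytes of data. */
	memcpy(ring[0].buf, "hello", 5);
	ring[0].bytecnt = 5;
	ring[0].cmdstat = 0;

	demo_drain_ring();
	return 0;
}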
54 #if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
55 #define SUPPORT_SYSRQ
56 #endif
58 #include <linux/module.h>
59 #include <linux/moduleparam.h>
60 #include <linux/tty.h>
61 #include <linux/tty_flip.h>
62 #include <linux/ioport.h>
63 #include <linux/init.h>
64 #include <linux/console.h>
65 #include <linux/sysrq.h>
66 #include <linux/serial.h>
67 #include <linux/serial_core.h>
68 #include <linux/delay.h>
69 #include <linux/device.h>
70 #include <linux/dma-mapping.h>
71 #include <linux/mv643xx.h>
72 #include <linux/platform_device.h>
74 #include <asm/io.h>
75 #include <asm/irq.h>
77 #if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
78 #define SUPPORT_SYSRQ
79 #endif
81 #define MPSC_NUM_CTLRS 2
83 /*
84 * Descriptors and buffers must be cache line aligned.
85 * Buffer lengths must be a multiple of the cache line size.
86 * The number of Tx & Rx descriptors must be a power of 2.
87 */
88 #define MPSC_RXR_ENTRIES 32
89 #define MPSC_RXRE_SIZE dma_get_cache_alignment()
90 #define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE)
91 #define MPSC_RXBE_SIZE dma_get_cache_alignment()
92 #define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE)
94 #define MPSC_TXR_ENTRIES 32
95 #define MPSC_TXRE_SIZE dma_get_cache_alignment()
96 #define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE)
97 #define MPSC_TXBE_SIZE dma_get_cache_alignment()
98 #define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE)
100 #define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + \
101 MPSC_TXR_SIZE + MPSC_TXB_SIZE + \
102 dma_get_cache_alignment() /* for alignment */)
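As a worked example only (the actual value of dma_get_cache_alignment() is architecture dependent): with a 32-byte cache line each ring holds 32 descriptors of 32 bytes plus 32 buffers of 32 bytes, so MPSC_RXR_SIZE, MPSC_RXB_SIZE, MPSC_TXR_SIZE and MPSC_TXB_SIZE are 1024 bytes each and MPSC_DMA_ALLOC_SIZE comes to 4 * 1024 + 32 = 4128 bytes per port.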
104 /* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */
105 struct mpsc_rx_desc {
106 u16 bufsize;
107 u16 bytecnt;
108 u32 cmdstat;
109 u32 link;
110 u32 buf_ptr;
111 } __attribute((packed));
113 struct mpsc_tx_desc {
114 u16 bytecnt;
115 u16 shadow;
116 u32 cmdstat;
117 u32 link;
118 u32 buf_ptr;
119 } __attribute((packed));
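The "entry size is <= cacheline size" assumption noted above is never checked by the code itself. A minimal sketch of how it could be asserted at init time (purely illustrative, not in the original driver) would be a pair of runtime checks, e.g. early in mpsc_make_ready():

	BUG_ON(sizeof(struct mpsc_rx_desc) > dma_get_cache_alignment());
	BUG_ON(sizeof(struct mpsc_tx_desc) > dma_get_cache_alignment());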
121 /*
122 * Some regs that have the erratum that you can't read them are shared
123 * between the two MPSC controllers. This struct contains those shared regs.
124 */
125 struct mpsc_shared_regs {
126 phys_addr_t mpsc_routing_base_p;
127 phys_addr_t sdma_intr_base_p;
129 void __iomem *mpsc_routing_base;
130 void __iomem *sdma_intr_base;
132 u32 MPSC_MRR_m;
133 u32 MPSC_RCRR_m;
134 u32 MPSC_TCRR_m;
135 u32 SDMA_INTR_CAUSE_m;
136 u32 SDMA_INTR_MASK_m;
137 };
139 /* The main driver data structure */
140 struct mpsc_port_info {
141 struct uart_port port; /* Overlay uart_port structure */
143 /* Internal driver state for this ctlr */
144 u8 ready;
145 u8 rcv_data;
146 tcflag_t c_iflag; /* save termios->c_iflag */
147 tcflag_t c_cflag; /* save termios->c_cflag */
149 /* Info passed in from platform */
150 u8 mirror_regs; /* Need to mirror regs? */
151 u8 cache_mgmt; /* Need manual cache mgmt? */
152 u8 brg_can_tune; /* BRG has baud tuning? */
153 u32 brg_clk_src;
154 u16 mpsc_max_idle;
155 int default_baud;
156 int default_bits;
157 int default_parity;
158 int default_flow;
160 /* Physical addresses of various blocks of registers (from platform) */
161 phys_addr_t mpsc_base_p;
162 phys_addr_t sdma_base_p;
163 phys_addr_t brg_base_p;
165 /* Virtual addresses of various blocks of registers (from platform) */
166 void __iomem *mpsc_base;
167 void __iomem *sdma_base;
168 void __iomem *brg_base;
170 /* Descriptor ring and buffer allocations */
171 void *dma_region;
172 dma_addr_t dma_region_p;
174 dma_addr_t rxr; /* Rx descriptor ring */
175 dma_addr_t rxr_p; /* Phys addr of rxr */
176 u8 *rxb; /* Rx Ring I/O buf */
177 u8 *rxb_p; /* Phys addr of rxb */
178 u32 rxr_posn; /* First desc w/ Rx data */
180 dma_addr_t txr; /* Tx descriptor ring */
181 dma_addr_t txr_p; /* Phys addr of txr */
182 u8 *txb; /* Tx Ring I/O buf */
183 u8 *txb_p; /* Phys addr of txb */
184 int txr_head; /* Where new data goes */
185 int txr_tail; /* Where sent data comes off */
187 /* Mirrored values of regs we can't read (if 'mirror_regs' set) */
188 u32 MPSC_MPCR_m;
189 u32 MPSC_CHR_1_m;
190 u32 MPSC_CHR_2_m;
191 u32 MPSC_CHR_10_m;
192 u32 BRG_BCR_m;
193 struct mpsc_shared_regs *shared_regs;
194 };
196 /* Hooks to platform-specific code */
197 int mpsc_platform_register_driver(void);
198 void mpsc_platform_unregister_driver(void);
200 /* Hooks back in to mpsc common to be called by platform-specific code */
201 struct mpsc_port_info *mpsc_device_probe(int index);
202 struct mpsc_port_info *mpsc_device_remove(int index);
204 /* Main MPSC Configuration Register Offsets */
205 #define MPSC_MMCRL 0x0000
206 #define MPSC_MMCRH 0x0004
207 #define MPSC_MPCR 0x0008
208 #define MPSC_CHR_1 0x000c
209 #define MPSC_CHR_2 0x0010
210 #define MPSC_CHR_3 0x0014
211 #define MPSC_CHR_4 0x0018
212 #define MPSC_CHR_5 0x001c
213 #define MPSC_CHR_6 0x0020
214 #define MPSC_CHR_7 0x0024
215 #define MPSC_CHR_8 0x0028
216 #define MPSC_CHR_9 0x002c
217 #define MPSC_CHR_10 0x0030
218 #define MPSC_CHR_11 0x0034
220 #define MPSC_MPCR_FRZ (1 << 9)
221 #define MPSC_MPCR_CL_5 0
222 #define MPSC_MPCR_CL_6 1
223 #define MPSC_MPCR_CL_7 2
224 #define MPSC_MPCR_CL_8 3
225 #define MPSC_MPCR_SBL_1 0
226 #define MPSC_MPCR_SBL_2 1
228 #define MPSC_CHR_2_TEV (1<<1)
229 #define MPSC_CHR_2_TA (1<<7)
230 #define MPSC_CHR_2_TTCS (1<<9)
231 #define MPSC_CHR_2_REV (1<<17)
232 #define MPSC_CHR_2_RA (1<<23)
233 #define MPSC_CHR_2_CRD (1<<25)
234 #define MPSC_CHR_2_EH (1<<31)
235 #define MPSC_CHR_2_PAR_ODD 0
236 #define MPSC_CHR_2_PAR_SPACE 1
237 #define MPSC_CHR_2_PAR_EVEN 2
238 #define MPSC_CHR_2_PAR_MARK 3
240 /* MPSC Signal Routing */
241 #define MPSC_MRR 0x0000
242 #define MPSC_RCRR 0x0004
243 #define MPSC_TCRR 0x0008
245 /* Serial DMA Controller Interface Registers */
246 #define SDMA_SDC 0x0000
247 #define SDMA_SDCM 0x0008
248 #define SDMA_RX_DESC 0x0800
249 #define SDMA_RX_BUF_PTR 0x0808
250 #define SDMA_SCRDP 0x0810
251 #define SDMA_TX_DESC 0x0c00
252 #define SDMA_SCTDP 0x0c10
253 #define SDMA_SFTDP 0x0c14
255 #define SDMA_DESC_CMDSTAT_PE (1<<0)
256 #define SDMA_DESC_CMDSTAT_CDL (1<<1)
257 #define SDMA_DESC_CMDSTAT_FR (1<<3)
258 #define SDMA_DESC_CMDSTAT_OR (1<<6)
259 #define SDMA_DESC_CMDSTAT_BR (1<<9)
260 #define SDMA_DESC_CMDSTAT_MI (1<<10)
261 #define SDMA_DESC_CMDSTAT_A (1<<11)
262 #define SDMA_DESC_CMDSTAT_AM (1<<12)
263 #define SDMA_DESC_CMDSTAT_CT (1<<13)
264 #define SDMA_DESC_CMDSTAT_C (1<<14)
265 #define SDMA_DESC_CMDSTAT_ES (1<<15)
266 #define SDMA_DESC_CMDSTAT_L (1<<16)
267 #define SDMA_DESC_CMDSTAT_F (1<<17)
268 #define SDMA_DESC_CMDSTAT_P (1<<18)
269 #define SDMA_DESC_CMDSTAT_EI (1<<23)
270 #define SDMA_DESC_CMDSTAT_O (1<<31)
272 #define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O | \
273 SDMA_DESC_CMDSTAT_EI)
275 #define SDMA_SDC_RFT (1<<0)
276 #define SDMA_SDC_SFM (1<<1)
277 #define SDMA_SDC_BLMR (1<<6)
278 #define SDMA_SDC_BLMT (1<<7)
279 #define SDMA_SDC_POVR (1<<8)
280 #define SDMA_SDC_RIFB (1<<9)
282 #define SDMA_SDCM_ERD (1<<7)
283 #define SDMA_SDCM_AR (1<<15)
284 #define SDMA_SDCM_STD (1<<16)
285 #define SDMA_SDCM_TXD (1<<23)
286 #define SDMA_SDCM_AT (1<<31)
288 #define SDMA_0_CAUSE_RXBUF (1<<0)
289 #define SDMA_0_CAUSE_RXERR (1<<1)
290 #define SDMA_0_CAUSE_TXBUF (1<<2)
291 #define SDMA_0_CAUSE_TXEND (1<<3)
292 #define SDMA_1_CAUSE_RXBUF (1<<8)
293 #define SDMA_1_CAUSE_RXERR (1<<9)
294 #define SDMA_1_CAUSE_TXBUF (1<<10)
295 #define SDMA_1_CAUSE_TXEND (1<<11)
297 #define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR | \
298 SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR)
299 #define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND | \
300 SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND)
302 /* SDMA Interrupt registers */
303 #define SDMA_INTR_CAUSE 0x0000
304 #define SDMA_INTR_MASK 0x0080
306 /* Baud Rate Generator Interface Registers */
307 #define BRG_BCR 0x0000
308 #define BRG_BTR 0x0004
310 /*
311 * Define how this driver is known to the outside (we've been assigned a
312 * range on the "Low-density serial ports" major).
313 */
314 #define MPSC_MAJOR 204
315 #define MPSC_MINOR_START 44
316 #define MPSC_DRIVER_NAME "MPSC"
317 #define MPSC_DEV_NAME "ttyMM"
318 #define MPSC_VERSION "1.00"
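Concretely, with the values above the first controller shows up as character device 204:44 (/dev/ttyMM0) and the second as 204:45 (/dev/ttyMM1) once the mpsc_reg uart_driver below is registered with the serial core.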
320 static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS];
321 static struct mpsc_shared_regs mpsc_shared_regs;
322 static struct uart_driver mpsc_reg;
324 static void mpsc_start_rx(struct mpsc_port_info *pi);
325 static void mpsc_free_ring_mem(struct mpsc_port_info *pi);
326 static void mpsc_release_port(struct uart_port *port);
327 /*
328 ******************************************************************************
329 *
330 * Baud Rate Generator Routines (BRG)
331 *
332 ******************************************************************************
333 */
334 static void
335 mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src)
336 {
337 u32 v;
339 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
340 v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18);
342 if (pi->brg_can_tune)
343 v &= ~(1 << 25);
345 if (pi->mirror_regs)
346 pi->BRG_BCR_m = v;
347 writel(v, pi->brg_base + BRG_BCR);
349 writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000,
350 pi->brg_base + BRG_BTR);
351 return;
352 }
354 static void
355 mpsc_brg_enable(struct mpsc_port_info *pi)
356 {
357 u32 v;
359 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
360 v |= (1 << 16);
362 if (pi->mirror_regs)
363 pi->BRG_BCR_m = v;
364 writel(v, pi->brg_base + BRG_BCR);
365 return;
366 }
368 static void
369 mpsc_brg_disable(struct mpsc_port_info *pi)
370 {
371 u32 v;
373 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
374 v &= ~(1 << 16);
376 if (pi->mirror_regs)
377 pi->BRG_BCR_m = v;
378 writel(v, pi->brg_base + BRG_BCR);
379 return;
380 }
382 static inline void
383 mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud)
384 {
385 /*
386 * To set the baud, we adjust the CDV field in the BRG_BCR reg.
387 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1.
388 * However, the input clock is divided by 16 in the MPSC b/c of how
389 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our
390 * calculation by 16 to account for that. So the real calculation
391 * that accounts for the way the mpsc is set up is:
392 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1.
393 */
394 u32 cdv = (pi->port.uartclk / (baud << 5)) - 1;
395 u32 v;
397 mpsc_brg_disable(pi);
398 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR);
399 v = (v & 0xffff0000) | (cdv & 0xffff);
401 if (pi->mirror_regs)
402 pi->BRG_BCR_m = v;
403 writel(v, pi->brg_base + BRG_BCR);
404 mpsc_brg_enable(pi);
406 return;
407 }
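A worked example of the calculation above, using an assumed (illustrative) 100 MHz BRG input clock: for baud = 115200, CDV = 100000000 / (115200 << 5) - 1 = 26, and the resulting rate is 100000000 / ((26 + 1) * 2 * 16) ≈ 115741 baud, about 0.5% above the requested rate.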
409 /*
410 ******************************************************************************
411 *
412 * Serial DMA Routines (SDMA)
413 *
414 ******************************************************************************
415 */
417 static void
418 mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size)
419 {
420 u32 v;
422 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n",
423 pi->port.line, burst_size);
425 burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */
427 if (burst_size < 2)
428 v = 0x0; /* 1 64-bit word */
429 else if (burst_size < 4)
430 v = 0x1; /* 2 64-bit words */
431 else if (burst_size < 8)
432 v = 0x2; /* 4 64-bit words */
433 else
434 v = 0x3; /* 8 64-bit words */
436 writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12),
437 pi->sdma_base + SDMA_SDC);
438 return;
439 }
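For example, mpsc_sdma_init() below passes dma_get_cache_alignment() as the burst size; assuming (for illustration only) a 32-byte cache line, burst_size >> 3 is 4, which selects v = 0x2, i.e. the SDMA bursts four 64-bit words (32 bytes) at a time.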
441 static void
442 mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size)
443 {
444 pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line,
445 burst_size);
447 writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f,
448 pi->sdma_base + SDMA_SDC);
449 mpsc_sdma_burstsize(pi, burst_size);
450 return;
451 }
453 static inline u32
454 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask)
455 {
456 u32 old, v;
458 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask);
460 old = v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
461 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
463 mask &= 0xf;
464 if (pi->port.line)
465 mask <<= 8;
466 v &= ~mask;
468 if (pi->mirror_regs)
469 pi->shared_regs->SDMA_INTR_MASK_m = v;
470 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
472 if (pi->port.line)
473 old >>= 8;
474 return old & 0xf;
475 }
477 static inline void
478 mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask)
479 {
480 u32 v;
482 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask);
484 v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m :
485 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
487 mask &= 0xf;
488 if (pi->port.line)
489 mask <<= 8;
490 v |= mask;
492 if (pi->mirror_regs)
493 pi->shared_regs->SDMA_INTR_MASK_m = v;
494 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK);
495 return;
496 }
498 static inline void
499 mpsc_sdma_intr_ack(struct mpsc_port_info *pi)
500 {
501 pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line);
503 if (pi->mirror_regs)
504 pi->shared_regs->SDMA_INTR_CAUSE_m = 0;
505 writel(0, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE);
506 return;
507 }
509 static inline void
510 mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi, struct mpsc_rx_desc *rxre_p)
511 {
512 pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n",
513 pi->port.line, (u32) rxre_p);
515 writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP);
516 return;
517 }
519 static inline void
520 mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi, struct mpsc_tx_desc *txre_p)
521 {
522 writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP);
523 writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP);
524 return;
525 }
527 static inline void
528 mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val)
529 {
530 u32 v;
532 v = readl(pi->sdma_base + SDMA_SDCM);
533 if (val)
534 v |= val;
535 else
536 v = 0;
537 wmb();
538 writel(v, pi->sdma_base + SDMA_SDCM);
539 wmb();
540 return;
541 }
543 static inline uint
544 mpsc_sdma_tx_active(struct mpsc_port_info *pi)
545 {
546 return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD;
547 }
549 static inline void
550 mpsc_sdma_start_tx(struct mpsc_port_info *pi)
551 {
552 struct mpsc_tx_desc *txre, *txre_p;
554 /* If tx isn't running & there's a desc ready to go, start it */
555 if (!mpsc_sdma_tx_active(pi)) {
556 txre = (struct mpsc_tx_desc *)(pi->txr +
557 (pi->txr_tail * MPSC_TXRE_SIZE));
558 dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
559 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
560 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
561 invalidate_dcache_range((ulong)txre,
562 (ulong)txre + MPSC_TXRE_SIZE);
563 #endif
565 if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) {
566 txre_p = (struct mpsc_tx_desc *)(pi->txr_p +
567 (pi->txr_tail *
568 MPSC_TXRE_SIZE));
570 mpsc_sdma_set_tx_ring(pi, txre_p);
571 mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD);
572 }
573 }
575 return;
576 }
578 static inline void
579 mpsc_sdma_stop(struct mpsc_port_info *pi)
580 {
581 pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line);
583 /* Abort any SDMA transfers */
584 mpsc_sdma_cmd(pi, 0);
585 mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT);
587 /* Clear the SDMA current and first TX and RX pointers */
588 mpsc_sdma_set_tx_ring(pi, NULL);
589 mpsc_sdma_set_rx_ring(pi, NULL);
591 /* Disable interrupts */
592 mpsc_sdma_intr_mask(pi, 0xf);
593 mpsc_sdma_intr_ack(pi);
595 return;
596 }
598 /*
599 ******************************************************************************
600 *
601 * Multi-Protocol Serial Controller Routines (MPSC)
602 *
603 ******************************************************************************
604 */
606 static void
607 mpsc_hw_init(struct mpsc_port_info *pi)
608 {
609 u32 v;
611 pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line);
613 /* Set up clock routing */
614 if (pi->mirror_regs) {
615 v = pi->shared_regs->MPSC_MRR_m;
616 v &= ~0x1c7;
617 pi->shared_regs->MPSC_MRR_m = v;
618 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
620 v = pi->shared_regs->MPSC_RCRR_m;
621 v = (v & ~0xf0f) | 0x100;
622 pi->shared_regs->MPSC_RCRR_m = v;
623 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
625 v = pi->shared_regs->MPSC_TCRR_m;
626 v = (v & ~0xf0f) | 0x100;
627 pi->shared_regs->MPSC_TCRR_m = v;
628 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
629 }
630 else {
631 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR);
632 v &= ~0x1c7;
633 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR);
635 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
636 v = (v & ~0xf0f) | 0x100;
637 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR);
639 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
640 v = (v & ~0xf0f) | 0x100;
641 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR);
642 }
644 /* Put MPSC in UART mode & enable Tx/Rx engines */
645 writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL);
647 /* No preamble, 16x divider, low-latency, */
648 writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
650 if (pi->mirror_regs) {
651 pi->MPSC_CHR_1_m = 0;
652 pi->MPSC_CHR_2_m = 0;
653 }
654 writel(0, pi->mpsc_base + MPSC_CHR_1);
655 writel(0, pi->mpsc_base + MPSC_CHR_2);
656 writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3);
657 writel(0, pi->mpsc_base + MPSC_CHR_4);
658 writel(0, pi->mpsc_base + MPSC_CHR_5);
659 writel(0, pi->mpsc_base + MPSC_CHR_6);
660 writel(0, pi->mpsc_base + MPSC_CHR_7);
661 writel(0, pi->mpsc_base + MPSC_CHR_8);
662 writel(0, pi->mpsc_base + MPSC_CHR_9);
663 writel(0, pi->mpsc_base + MPSC_CHR_10);
665 return;
666 }
668 static inline void
669 mpsc_enter_hunt(struct mpsc_port_info *pi)
670 {
671 pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line);
673 if (pi->mirror_regs) {
674 writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH,
675 pi->mpsc_base + MPSC_CHR_2);
676 /* Erratum prevents reading CHR_2 so just delay for a while */
677 udelay(100);
678 }
679 else {
680 writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH,
681 pi->mpsc_base + MPSC_CHR_2);
683 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH)
684 udelay(10);
685 }
687 return;
688 }
690 static inline void
691 mpsc_freeze(struct mpsc_port_info *pi)
692 {
693 u32 v;
695 pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line);
697 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
698 readl(pi->mpsc_base + MPSC_MPCR);
699 v |= MPSC_MPCR_FRZ;
701 if (pi->mirror_regs)
702 pi->MPSC_MPCR_m = v;
703 writel(v, pi->mpsc_base + MPSC_MPCR);
704 return;
705 }
707 static inline void
708 mpsc_unfreeze(struct mpsc_port_info *pi)
709 {
710 u32 v;
712 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
713 readl(pi->mpsc_base + MPSC_MPCR);
714 v &= ~MPSC_MPCR_FRZ;
716 if (pi->mirror_regs)
717 pi->MPSC_MPCR_m = v;
718 writel(v, pi->mpsc_base + MPSC_MPCR);
720 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line);
721 return;
722 }
724 static inline void
725 mpsc_set_char_length(struct mpsc_port_info *pi, u32 len)
726 {
727 u32 v;
729 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len);
731 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
732 readl(pi->mpsc_base + MPSC_MPCR);
733 v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12);
735 if (pi->mirror_regs)
736 pi->MPSC_MPCR_m = v;
737 writel(v, pi->mpsc_base + MPSC_MPCR);
738 return;
739 }
741 static inline void
742 mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len)
743 {
744 u32 v;
746 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n",
747 pi->port.line, len);
749 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m :
750 readl(pi->mpsc_base + MPSC_MPCR);
752 v = (v & ~(1 << 14)) | ((len & 0x1) << 14);
754 if (pi->mirror_regs)
755 pi->MPSC_MPCR_m = v;
756 writel(v, pi->mpsc_base + MPSC_MPCR);
757 return;
758 }
760 static inline void
761 mpsc_set_parity(struct mpsc_port_info *pi, u32 p)
762 {
763 u32 v;
765 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p);
767 v = (pi->mirror_regs) ? pi->MPSC_CHR_2_m :
768 readl(pi->mpsc_base + MPSC_CHR_2);
770 p &= 0x3;
771 v = (v & ~0xc000c) | (p << 18) | (p << 2);
773 if (pi->mirror_regs)
774 pi->MPSC_CHR_2_m = v;
775 writel(v, pi->mpsc_base + MPSC_CHR_2);
776 return;
777 }
779 /*
780 ******************************************************************************
781 *
782 * Driver Init Routines
783 *
784 ******************************************************************************
785 */
787 static void
788 mpsc_init_hw(struct mpsc_port_info *pi)
789 {
790 pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line);
792 mpsc_brg_init(pi, pi->brg_clk_src);
793 mpsc_brg_enable(pi);
794 mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */
795 mpsc_sdma_stop(pi);
796 mpsc_hw_init(pi);
798 return;
799 }
801 static int
802 mpsc_alloc_ring_mem(struct mpsc_port_info *pi)
803 {
804 int rc = 0;
806 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n",
807 pi->port.line);
809 if (!pi->dma_region) {
810 if (!dma_supported(pi->port.dev, 0xffffffff)) {
811 printk(KERN_ERR "MPSC: Inadequate DMA support\n");
812 rc = -ENXIO;
813 }
814 else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev,
815 MPSC_DMA_ALLOC_SIZE, &pi->dma_region_p, GFP_KERNEL))
816 == NULL) {
818 printk(KERN_ERR "MPSC: Can't alloc Desc region\n");
819 rc = -ENOMEM;
820 }
821 }
823 return rc;
824 }
826 static void
827 mpsc_free_ring_mem(struct mpsc_port_info *pi)
828 {
829 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line);
831 if (pi->dma_region) {
832 dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE,
833 pi->dma_region, pi->dma_region_p);
834 pi->dma_region = NULL;
835 pi->dma_region_p = (dma_addr_t) NULL;
836 }
838 return;
839 }
841 static void
842 mpsc_init_rings(struct mpsc_port_info *pi)
843 {
844 struct mpsc_rx_desc *rxre;
845 struct mpsc_tx_desc *txre;
846 dma_addr_t dp, dp_p;
847 u8 *bp, *bp_p;
848 int i;
850 pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line);
852 BUG_ON(pi->dma_region == NULL);
854 memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE);
856 /*
857 * Descriptors & buffers are multiples of cacheline size and must be
858 * cacheline aligned.
859 */
860 dp = ALIGN((u32) pi->dma_region, dma_get_cache_alignment());
861 dp_p = ALIGN((u32) pi->dma_region_p, dma_get_cache_alignment());
863 /*
864 * Partition dma region into rx ring descriptor, rx buffers,
865 * tx ring descriptors, and tx buffers.
866 */
867 pi->rxr = dp;
868 pi->rxr_p = dp_p;
869 dp += MPSC_RXR_SIZE;
870 dp_p += MPSC_RXR_SIZE;
872 pi->rxb = (u8 *) dp;
873 pi->rxb_p = (u8 *) dp_p;
874 dp += MPSC_RXB_SIZE;
875 dp_p += MPSC_RXB_SIZE;
877 pi->rxr_posn = 0;
879 pi->txr = dp;
880 pi->txr_p = dp_p;
881 dp += MPSC_TXR_SIZE;
882 dp_p += MPSC_TXR_SIZE;
884 pi->txb = (u8 *) dp;
885 pi->txb_p = (u8 *) dp_p;
887 pi->txr_head = 0;
888 pi->txr_tail = 0;
890 /* Init rx ring descriptors */
891 dp = pi->rxr;
892 dp_p = pi->rxr_p;
893 bp = pi->rxb;
894 bp_p = pi->rxb_p;
896 for (i = 0; i < MPSC_RXR_ENTRIES; i++) {
897 rxre = (struct mpsc_rx_desc *)dp;
899 rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE);
900 rxre->bytecnt = cpu_to_be16(0);
901 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
902 SDMA_DESC_CMDSTAT_EI |
903 SDMA_DESC_CMDSTAT_F |
904 SDMA_DESC_CMDSTAT_L);
905 rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE);
906 rxre->buf_ptr = cpu_to_be32(bp_p);
908 dp += MPSC_RXRE_SIZE;
909 dp_p += MPSC_RXRE_SIZE;
910 bp += MPSC_RXBE_SIZE;
911 bp_p += MPSC_RXBE_SIZE;
912 }
913 rxre->link = cpu_to_be32(pi->rxr_p); /* Wrap last back to first */
915 /* Init tx ring descriptors */
916 dp = pi->txr;
917 dp_p = pi->txr_p;
918 bp = pi->txb;
919 bp_p = pi->txb_p;
921 for (i = 0; i < MPSC_TXR_ENTRIES; i++) {
922 txre = (struct mpsc_tx_desc *)dp;
924 txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE);
925 txre->buf_ptr = cpu_to_be32(bp_p);
927 dp += MPSC_TXRE_SIZE;
928 dp_p += MPSC_TXRE_SIZE;
929 bp += MPSC_TXBE_SIZE;
930 bp_p += MPSC_TXBE_SIZE;
931 }
932 txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */
934 dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
935 DMA_BIDIRECTIONAL);
936 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
937 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
938 flush_dcache_range((ulong)pi->dma_region,
939 (ulong)pi->dma_region + MPSC_DMA_ALLOC_SIZE);
940 #endif
942 return;
943 }
945 static void
946 mpsc_uninit_rings(struct mpsc_port_info *pi)
947 {
948 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line);
950 BUG_ON(pi->dma_region == NULL);
952 pi->rxr = 0;
953 pi->rxr_p = 0;
954 pi->rxb = NULL;
955 pi->rxb_p = NULL;
956 pi->rxr_posn = 0;
958 pi->txr = 0;
959 pi->txr_p = 0;
960 pi->txb = NULL;
961 pi->txb_p = NULL;
962 pi->txr_head = 0;
963 pi->txr_tail = 0;
965 return;
966 }
968 static int
969 mpsc_make_ready(struct mpsc_port_info *pi)
970 {
971 int rc;
973 pr_debug("mpsc_make_ready[%d]: Making ctlr ready\n", pi->port.line);
975 if (!pi->ready) {
976 mpsc_init_hw(pi);
977 if ((rc = mpsc_alloc_ring_mem(pi)))
978 return rc;
979 mpsc_init_rings(pi);
980 pi->ready = 1;
981 }
983 return 0;
984 }
986 /*
987 ******************************************************************************
988 *
989 * Interrupt Handling Routines
990 *
991 ******************************************************************************
992 */
994 static inline int
995 mpsc_rx_intr(struct mpsc_port_info *pi, struct pt_regs *regs)
996 {
997 struct mpsc_rx_desc *rxre;
998 struct tty_struct *tty = pi->port.info->tty;
999 u32 cmdstat, bytes_in, i;
1000 int rc = 0;
1001 u8 *bp;
1002 char flag = TTY_NORMAL;
1004 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line);
1006 rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
1008 dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
1009 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1010 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1011 invalidate_dcache_range((ulong)rxre,
1012 (ulong)rxre + MPSC_RXRE_SIZE);
1013 #endif
1015 /*
1016 * Loop through Rx descriptors handling ones that have been completed.
1017 */
1018 while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) & SDMA_DESC_CMDSTAT_O)){
1019 bytes_in = be16_to_cpu(rxre->bytecnt);
1021 /* Following use of tty struct directly is deprecated */
1022 if (unlikely(tty_buffer_request_room(tty, bytes_in) < bytes_in)) {
1023 if (tty->low_latency)
1024 tty_flip_buffer_push(tty);
1025 /*
1026 * If this failed then we will throw away the bytes
1027 * but must do so to clear interrupts.
1028 */
1031 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
1032 dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
1033 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1034 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1035 invalidate_dcache_range((ulong)bp,
1036 (ulong)bp + MPSC_RXBE_SIZE);
1037 #endif
1039 /*
1040 * Other than for parity error, the manual provides little
1041 * info on what data will be in a frame flagged by any of
1042 * these errors. For parity error, it is the last byte in
1043 * the buffer that had the error. As for the rest, I guess
1044 * we'll assume there is no data in the buffer.
1045 * If there is...it gets lost.
1046 */
1047 if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
1048 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) {
1050 pi->port.icount.rx++;
1052 if (cmdstat & SDMA_DESC_CMDSTAT_BR) { /* Break */
1053 pi->port.icount.brk++;
1055 if (uart_handle_break(&pi->port))
1056 goto next_frame;
1058 else if (cmdstat & SDMA_DESC_CMDSTAT_FR)/* Framing */
1059 pi->port.icount.frame++;
1060 else if (cmdstat & SDMA_DESC_CMDSTAT_OR) /* Overrun */
1061 pi->port.icount.overrun++;
1063 cmdstat &= pi->port.read_status_mask;
1065 if (cmdstat & SDMA_DESC_CMDSTAT_BR)
1066 flag = TTY_BREAK;
1067 else if (cmdstat & SDMA_DESC_CMDSTAT_FR)
1068 flag = TTY_FRAME;
1069 else if (cmdstat & SDMA_DESC_CMDSTAT_OR)
1070 flag = TTY_OVERRUN;
1071 else if (cmdstat & SDMA_DESC_CMDSTAT_PE)
1072 flag = TTY_PARITY;
1075 if (uart_handle_sysrq_char(&pi->port, *bp, regs)) {
1076 bp++;
1077 bytes_in--;
1078 goto next_frame;
1081 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR |
1082 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) &&
1083 !(cmdstat & pi->port.ignore_status_mask))
1085 tty_insert_flip_char(tty, *bp, flag);
1086 else {
1087 for (i=0; i<bytes_in; i++)
1088 tty_insert_flip_char(tty, *bp++, TTY_NORMAL);
1090 pi->port.icount.rx += bytes_in;
1093 next_frame:
1094 rxre->bytecnt = cpu_to_be16(0);
1095 wmb();
1096 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O |
1097 SDMA_DESC_CMDSTAT_EI |
1098 SDMA_DESC_CMDSTAT_F |
1099 SDMA_DESC_CMDSTAT_L);
1100 wmb();
1101 dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
1102 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1103 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1104 flush_dcache_range((ulong)rxre,
1105 (ulong)rxre + MPSC_RXRE_SIZE);
1106 #endif
1108 /* Advance to next descriptor */
1109 pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
1110 rxre = (struct mpsc_rx_desc *)(pi->rxr +
1111 (pi->rxr_posn * MPSC_RXRE_SIZE));
1112 dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
1113 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1114 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1115 invalidate_dcache_range((ulong)rxre,
1116 (ulong)rxre + MPSC_RXRE_SIZE);
1117 #endif
1119 rc = 1;
1122 /* Restart rx engine, if it's stopped */
1123 if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
1124 mpsc_start_rx(pi);
1126 tty_flip_buffer_push(tty);
1127 return rc;
1130 static inline void
1131 mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
1133 struct mpsc_tx_desc *txre;
1135 txre = (struct mpsc_tx_desc *)(pi->txr +
1136 (pi->txr_head * MPSC_TXRE_SIZE));
1138 txre->bytecnt = cpu_to_be16(count);
1139 txre->shadow = txre->bytecnt;
1140 wmb(); /* ensure cmdstat is last field updated */
1141 txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F |
1142 SDMA_DESC_CMDSTAT_L | ((intr) ?
1143 SDMA_DESC_CMDSTAT_EI
1144 : 0));
1145 wmb();
1146 dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
1147 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1148 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1149 flush_dcache_range((ulong)txre,
1150 (ulong)txre + MPSC_TXRE_SIZE);
1151 #endif
1153 return;
1156 static inline void
1157 mpsc_copy_tx_data(struct mpsc_port_info *pi)
1159 struct circ_buf *xmit = &pi->port.info->xmit;
1160 u8 *bp;
1161 u32 i;
1163 /* Make sure the desc ring isn't full */
1164 while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES) <
1165 (MPSC_TXR_ENTRIES - 1)) {
1166 if (pi->port.x_char) {
1167 /*
1168 * Ideally, we should use the TCS field in
1169 * CHR_1 to put the x_char out immediately but
1170 * errata prevents us from being able to read
1171 * CHR_2 to know that it's safe to write to
1172 * CHR_1. Instead, just put it in-band with
1173 * all the other Tx data.
1174 */
1175 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
1176 *bp = pi->port.x_char;
1177 pi->port.x_char = 0;
1178 i = 1;
1180 else if (!uart_circ_empty(xmit) && !uart_tx_stopped(&pi->port)){
1181 i = min((u32) MPSC_TXBE_SIZE,
1182 (u32) uart_circ_chars_pending(xmit));
1183 i = min(i, (u32) CIRC_CNT_TO_END(xmit->head, xmit->tail,
1184 UART_XMIT_SIZE));
1185 bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
1186 memcpy(bp, &xmit->buf[xmit->tail], i);
1187 xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);
1189 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
1190 uart_write_wakeup(&pi->port);
1192 else /* All tx data copied into ring bufs */
1193 return;
1195 dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
1196 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1197 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1198 flush_dcache_range((ulong)bp,
1199 (ulong)bp + MPSC_TXBE_SIZE);
1200 #endif
1201 mpsc_setup_tx_desc(pi, i, 1);
1203 /* Advance to next descriptor */
1204 pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
1207 return;
1210 static inline int
1211 mpsc_tx_intr(struct mpsc_port_info *pi)
1213 struct mpsc_tx_desc *txre;
1214 int rc = 0;
1216 if (!mpsc_sdma_tx_active(pi)) {
1217 txre = (struct mpsc_tx_desc *)(pi->txr +
1218 (pi->txr_tail * MPSC_TXRE_SIZE));
1220 dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
1221 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1222 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1223 invalidate_dcache_range((ulong)txre,
1224 (ulong)txre + MPSC_TXRE_SIZE);
1225 #endif
1227 while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
1228 rc = 1;
1229 pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
1230 pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);
1232 /* If no more data to tx, fall out of loop */
1233 if (pi->txr_head == pi->txr_tail)
1234 break;
1236 txre = (struct mpsc_tx_desc *)(pi->txr +
1237 (pi->txr_tail * MPSC_TXRE_SIZE));
1238 dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
1239 DMA_FROM_DEVICE);
1240 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1241 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1242 invalidate_dcache_range((ulong)txre,
1243 (ulong)txre + MPSC_TXRE_SIZE);
1244 #endif
1247 mpsc_copy_tx_data(pi);
1248 mpsc_sdma_start_tx(pi); /* start next desc if ready */
1251 return rc;
1254 /*
1255 * This is the driver's interrupt handler. To avoid a race, we first clear
1256 * the interrupt, then handle any completed Rx/Tx descriptors. When done
1257 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
1258 */
1259 static irqreturn_t
1260 mpsc_sdma_intr(int irq, void *dev_id, struct pt_regs *regs)
1262 struct mpsc_port_info *pi = dev_id;
1263 ulong iflags;
1264 int rc = IRQ_NONE;
1266 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);
1268 spin_lock_irqsave(&pi->port.lock, iflags);
1269 mpsc_sdma_intr_ack(pi);
1270 if (mpsc_rx_intr(pi, regs))
1271 rc = IRQ_HANDLED;
1272 if (mpsc_tx_intr(pi))
1273 rc = IRQ_HANDLED;
1274 spin_unlock_irqrestore(&pi->port.lock, iflags);
1276 pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
1277 return rc;
1280 /*
1281 ******************************************************************************
1283 * serial_core.c Interface routines
1285 ******************************************************************************
1286 */
1287 static uint
1288 mpsc_tx_empty(struct uart_port *port)
1290 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1291 ulong iflags;
1292 uint rc;
1294 spin_lock_irqsave(&pi->port.lock, iflags);
1295 rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
1296 spin_unlock_irqrestore(&pi->port.lock, iflags);
1298 return rc;
1301 static void
1302 mpsc_set_mctrl(struct uart_port *port, uint mctrl)
1304 /* Have no way to set modem control lines AFAICT */
1305 return;
1308 static uint
1309 mpsc_get_mctrl(struct uart_port *port)
1311 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1312 u32 mflags, status;
1314 status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m :
1315 readl(pi->mpsc_base + MPSC_CHR_10);
1317 mflags = 0;
1318 if (status & 0x1)
1319 mflags |= TIOCM_CTS;
1320 if (status & 0x2)
1321 mflags |= TIOCM_CAR;
1323 return mflags | TIOCM_DSR; /* No way to tell if DSR asserted */
1326 static void
1327 mpsc_stop_tx(struct uart_port *port)
1329 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1331 pr_debug("mpsc_stop_tx[%d]\n", port->line);
1333 mpsc_freeze(pi);
1334 return;
1337 static void
1338 mpsc_start_tx(struct uart_port *port)
1340 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1342 mpsc_unfreeze(pi);
1343 mpsc_copy_tx_data(pi);
1344 mpsc_sdma_start_tx(pi);
1346 pr_debug("mpsc_start_tx[%d]\n", port->line);
1347 return;
1350 static void
1351 mpsc_start_rx(struct mpsc_port_info *pi)
1353 pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);
1355 /* Issue a Receive Abort to clear any receive errors */
1356 writel(MPSC_CHR_2_RA, pi->mpsc_base + MPSC_CHR_2);
1357 if (pi->rcv_data) {
1358 mpsc_enter_hunt(pi);
1359 mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
1361 return;
1364 static void
1365 mpsc_stop_rx(struct uart_port *port)
1367 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1369 pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);
1371 mpsc_sdma_cmd(pi, SDMA_SDCM_AR);
1372 return;
1375 static void
1376 mpsc_enable_ms(struct uart_port *port)
1378 return; /* Not supported */
1381 static void
1382 mpsc_break_ctl(struct uart_port *port, int ctl)
1384 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1385 ulong flags;
1386 u32 v;
1388 v = ctl ? 0x00ff0000 : 0;
1390 spin_lock_irqsave(&pi->port.lock, flags);
1391 if (pi->mirror_regs)
1392 pi->MPSC_CHR_1_m = v;
1393 writel(v, pi->mpsc_base + MPSC_CHR_1);
1394 spin_unlock_irqrestore(&pi->port.lock, flags);
1396 return;
1399 static int
1400 mpsc_startup(struct uart_port *port)
1402 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1403 u32 flag = 0;
1404 int rc;
1406 pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n",
1407 port->line, pi->port.irq);
1409 if ((rc = mpsc_make_ready(pi)) == 0) {
1410 /* Setup IRQ handler */
1411 mpsc_sdma_intr_ack(pi);
1413 /* If irq's are shared, need to set flag */
1414 if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq)
1415 flag = IRQF_SHARED;
1417 if (request_irq(pi->port.irq, mpsc_sdma_intr, flag,
1418 "mpsc-sdma", pi))
1419 printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n",
1420 pi->port.irq);
1422 mpsc_sdma_intr_unmask(pi, 0xf);
1423 mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p +
1424 (pi->rxr_posn * MPSC_RXRE_SIZE)));
1427 return rc;
1430 static void
1431 mpsc_shutdown(struct uart_port *port)
1433 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1435 pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line);
1437 mpsc_sdma_stop(pi);
1438 free_irq(pi->port.irq, pi);
1439 return;
1442 static void
1443 mpsc_set_termios(struct uart_port *port, struct termios *termios,
1444 struct termios *old)
1446 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1447 u32 baud;
1448 ulong flags;
1449 u32 chr_bits, stop_bits, par;
1451 pi->c_iflag = termios->c_iflag;
1452 pi->c_cflag = termios->c_cflag;
1454 switch (termios->c_cflag & CSIZE) {
1455 case CS5:
1456 chr_bits = MPSC_MPCR_CL_5;
1457 break;
1458 case CS6:
1459 chr_bits = MPSC_MPCR_CL_6;
1460 break;
1461 case CS7:
1462 chr_bits = MPSC_MPCR_CL_7;
1463 break;
1464 case CS8:
1465 default:
1466 chr_bits = MPSC_MPCR_CL_8;
1467 break;
1470 if (termios->c_cflag & CSTOPB)
1471 stop_bits = MPSC_MPCR_SBL_2;
1472 else
1473 stop_bits = MPSC_MPCR_SBL_1;
1475 par = MPSC_CHR_2_PAR_EVEN;
1476 if (termios->c_cflag & PARENB)
1477 if (termios->c_cflag & PARODD)
1478 par = MPSC_CHR_2_PAR_ODD;
1479 #ifdef CMSPAR
1480 if (termios->c_cflag & CMSPAR) {
1481 if (termios->c_cflag & PARODD)
1482 par = MPSC_CHR_2_PAR_MARK;
1483 else
1484 par = MPSC_CHR_2_PAR_SPACE;
1486 #endif
1488 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk);
1490 spin_lock_irqsave(&pi->port.lock, flags);
1492 uart_update_timeout(port, termios->c_cflag, baud);
1494 mpsc_set_char_length(pi, chr_bits);
1495 mpsc_set_stop_bit_length(pi, stop_bits);
1496 mpsc_set_parity(pi, par);
1497 mpsc_set_baudrate(pi, baud);
1499 /* Characters/events to read */
1500 pi->rcv_data = 1;
1501 pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR;
1503 if (termios->c_iflag & INPCK)
1504 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE |
1505 SDMA_DESC_CMDSTAT_FR;
1507 if (termios->c_iflag & (BRKINT | PARMRK))
1508 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
1510 /* Characters/events to ignore */
1511 pi->port.ignore_status_mask = 0;
1513 if (termios->c_iflag & IGNPAR)
1514 pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE |
1515 SDMA_DESC_CMDSTAT_FR;
1517 if (termios->c_iflag & IGNBRK) {
1518 pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR;
1520 if (termios->c_iflag & IGNPAR)
1521 pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR;
1524 /* Ignore all chars if CREAD not set */
1525 if (!(termios->c_cflag & CREAD))
1526 pi->rcv_data = 0;
1527 else
1528 mpsc_start_rx(pi);
1530 spin_unlock_irqrestore(&pi->port.lock, flags);
1531 return;
1534 static const char *
1535 mpsc_type(struct uart_port *port)
1537 pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME);
1538 return MPSC_DRIVER_NAME;
1541 static int
1542 mpsc_request_port(struct uart_port *port)
1544 /* Should make chip/platform specific call */
1545 return 0;
1548 static void
1549 mpsc_release_port(struct uart_port *port)
1551 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1553 if (pi->ready) {
1554 mpsc_uninit_rings(pi);
1555 mpsc_free_ring_mem(pi);
1556 pi->ready = 0;
1559 return;
1562 static void
1563 mpsc_config_port(struct uart_port *port, int flags)
1565 return;
1568 static int
1569 mpsc_verify_port(struct uart_port *port, struct serial_struct *ser)
1571 struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
1572 int rc = 0;
1574 pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line);
1576 if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC)
1577 rc = -EINVAL;
1578 else if (pi->port.irq != ser->irq)
1579 rc = -EINVAL;
1580 else if (ser->io_type != SERIAL_IO_MEM)
1581 rc = -EINVAL;
1582 else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */
1583 rc = -EINVAL;
1584 else if ((void *)pi->port.mapbase != ser->iomem_base)
1585 rc = -EINVAL;
1586 else if (pi->port.iobase != ser->port)
1587 rc = -EINVAL;
1588 else if (ser->hub6 != 0)
1589 rc = -EINVAL;
1591 return rc;
1594 static struct uart_ops mpsc_pops = {
1595 .tx_empty = mpsc_tx_empty,
1596 .set_mctrl = mpsc_set_mctrl,
1597 .get_mctrl = mpsc_get_mctrl,
1598 .stop_tx = mpsc_stop_tx,
1599 .start_tx = mpsc_start_tx,
1600 .stop_rx = mpsc_stop_rx,
1601 .enable_ms = mpsc_enable_ms,
1602 .break_ctl = mpsc_break_ctl,
1603 .startup = mpsc_startup,
1604 .shutdown = mpsc_shutdown,
1605 .set_termios = mpsc_set_termios,
1606 .type = mpsc_type,
1607 .release_port = mpsc_release_port,
1608 .request_port = mpsc_request_port,
1609 .config_port = mpsc_config_port,
1610 .verify_port = mpsc_verify_port,
1611 };
1613 /*
1614 ******************************************************************************
1616 * Console Interface Routines
1618 ******************************************************************************
1619 */
1621 #ifdef CONFIG_SERIAL_MPSC_CONSOLE
1622 static void
1623 mpsc_console_write(struct console *co, const char *s, uint count)
1625 struct mpsc_port_info *pi = &mpsc_ports[co->index];
1626 u8 *bp, *dp, add_cr = 0;
1627 int i;
1629 while (mpsc_sdma_tx_active(pi))
1630 udelay(100);
1632 while (count > 0) {
1633 bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
1635 for (i = 0; i < MPSC_TXBE_SIZE; i++) {
1636 if (count == 0)
1637 break;
1639 if (add_cr) {
1640 *(dp++) = '\r';
1641 add_cr = 0;
1643 else {
1644 *(dp++) = *s;
1646 if (*(s++) == '\n') { /* add '\r' after '\n' */
1647 add_cr = 1;
1648 count++;
1652 count--;
1655 dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
1656 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
1657 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
1658 flush_dcache_range((ulong)bp,
1659 (ulong)bp + MPSC_TXBE_SIZE);
1660 #endif
1661 mpsc_setup_tx_desc(pi, i, 0);
1662 pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
1663 mpsc_sdma_start_tx(pi);
1665 while (mpsc_sdma_tx_active(pi))
1666 udelay(100);
1668 pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1);
1671 return;
1674 static int __init
1675 mpsc_console_setup(struct console *co, char *options)
1677 struct mpsc_port_info *pi;
1678 int baud, bits, parity, flow;
1680 pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options);
1682 if (co->index >= MPSC_NUM_CTLRS)
1683 co->index = 0;
1685 pi = &mpsc_ports[co->index];
1687 baud = pi->default_baud;
1688 bits = pi->default_bits;
1689 parity = pi->default_parity;
1690 flow = pi->default_flow;
1692 if (!pi->port.ops)
1693 return -ENODEV;
1695 spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */
1697 if (options)
1698 uart_parse_options(options, &baud, &parity, &bits, &flow);
1700 return uart_set_options(&pi->port, co, baud, parity, bits, flow);
1703 static struct console mpsc_console = {
1704 .name = MPSC_DEV_NAME,
1705 .write = mpsc_console_write,
1706 .device = uart_console_device,
1707 .setup = mpsc_console_setup,
1708 .flags = CON_PRINTBUFFER,
1709 .index = -1,
1710 .data = &mpsc_reg,
1711 };
1713 static int __init
1714 mpsc_late_console_init(void)
1716 pr_debug("mpsc_late_console_init: Enter\n");
1718 if (!(mpsc_console.flags & CON_ENABLED))
1719 register_console(&mpsc_console);
1720 return 0;
1723 late_initcall(mpsc_late_console_init);
1725 #define MPSC_CONSOLE &mpsc_console
1726 #else
1727 #define MPSC_CONSOLE NULL
1728 #endif
1729 /*
1730 ******************************************************************************
1732 * Dummy Platform Driver to extract & map shared register regions
1734 ******************************************************************************
1735 */
1736 static void
1737 mpsc_resource_err(char *s)
1739 printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
1740 return;
1743 static int
1744 mpsc_shared_map_regs(struct platform_device *pd)
1746 struct resource *r;
1748 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1749 MPSC_ROUTING_BASE_ORDER)) && request_mem_region(r->start,
1750 MPSC_ROUTING_REG_BLOCK_SIZE, "mpsc_routing_regs")) {
1752 mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
1753 MPSC_ROUTING_REG_BLOCK_SIZE);
1754 mpsc_shared_regs.mpsc_routing_base_p = r->start;
1756 else {
1757 mpsc_resource_err("MPSC routing base");
1758 return -ENOMEM;
1761 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1762 MPSC_SDMA_INTR_BASE_ORDER)) && request_mem_region(r->start,
1763 MPSC_SDMA_INTR_REG_BLOCK_SIZE, "sdma_intr_regs")) {
1765 mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
1766 MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1767 mpsc_shared_regs.sdma_intr_base_p = r->start;
1769 else {
1770 iounmap(mpsc_shared_regs.mpsc_routing_base);
1771 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1772 MPSC_ROUTING_REG_BLOCK_SIZE);
1773 mpsc_resource_err("SDMA intr base");
1774 return -ENOMEM;
1777 return 0;
1780 static void
1781 mpsc_shared_unmap_regs(void)
1783 if (!mpsc_shared_regs.mpsc_routing_base) {
1784 iounmap(mpsc_shared_regs.mpsc_routing_base);
1785 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
1786 MPSC_ROUTING_REG_BLOCK_SIZE);
1788 if (!mpsc_shared_regs.sdma_intr_base) {
1789 iounmap(mpsc_shared_regs.sdma_intr_base);
1790 release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
1791 MPSC_SDMA_INTR_REG_BLOCK_SIZE);
1794 mpsc_shared_regs.mpsc_routing_base = NULL;
1795 mpsc_shared_regs.sdma_intr_base = NULL;
1797 mpsc_shared_regs.mpsc_routing_base_p = 0;
1798 mpsc_shared_regs.sdma_intr_base_p = 0;
1800 return;
1803 static int
1804 mpsc_shared_drv_probe(struct platform_device *dev)
1806 struct mpsc_shared_pdata *pdata;
1807 int rc = -ENODEV;
1809 if (dev->id == 0) {
1810 if (!(rc = mpsc_shared_map_regs(dev))) {
1811 pdata = (struct mpsc_shared_pdata *)dev->dev.platform_data;
1813 mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
1814 mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
1815 mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
1816 mpsc_shared_regs.SDMA_INTR_CAUSE_m =
1817 pdata->intr_cause_val;
1818 mpsc_shared_regs.SDMA_INTR_MASK_m =
1819 pdata->intr_mask_val;
1821 rc = 0;
1825 return rc;
1828 static int
1829 mpsc_shared_drv_remove(struct platform_device *dev)
1831 int rc = -ENODEV;
1833 if (dev->id == 0) {
1834 mpsc_shared_unmap_regs();
1835 mpsc_shared_regs.MPSC_MRR_m = 0;
1836 mpsc_shared_regs.MPSC_RCRR_m = 0;
1837 mpsc_shared_regs.MPSC_TCRR_m = 0;
1838 mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
1839 mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
1840 rc = 0;
1843 return rc;
1846 static struct platform_driver mpsc_shared_driver = {
1847 .probe = mpsc_shared_drv_probe,
1848 .remove = mpsc_shared_drv_remove,
1849 .driver = {
1850 .name = MPSC_SHARED_NAME,
1851 },
1852 };
1854 /*
1855 ******************************************************************************
1857 * Driver Interface Routines
1859 ******************************************************************************
1860 */
1861 static struct uart_driver mpsc_reg = {
1862 .owner = THIS_MODULE,
1863 .driver_name = MPSC_DRIVER_NAME,
1864 .dev_name = MPSC_DEV_NAME,
1865 .major = MPSC_MAJOR,
1866 .minor = MPSC_MINOR_START,
1867 .nr = MPSC_NUM_CTLRS,
1868 .cons = MPSC_CONSOLE,
1869 };
1871 static int
1872 mpsc_drv_map_regs(struct mpsc_port_info *pi, struct platform_device *pd)
1874 struct resource *r;
1876 if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER)) &&
1877 request_mem_region(r->start, MPSC_REG_BLOCK_SIZE, "mpsc_regs")){
1879 pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE);
1880 pi->mpsc_base_p = r->start;
1882 else {
1883 mpsc_resource_err("MPSC base");
1884 return -ENOMEM;
1887 if ((r = platform_get_resource(pd, IORESOURCE_MEM,
1888 MPSC_SDMA_BASE_ORDER)) && request_mem_region(r->start,
1889 MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) {
1891 pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE);
1892 pi->sdma_base_p = r->start;
1894 else {
1895 mpsc_resource_err("SDMA base");
1896 return -ENOMEM;
1899 if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER))
1900 && request_mem_region(r->start, MPSC_BRG_REG_BLOCK_SIZE,
1901 "brg_regs")) {
1903 pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE);
1904 pi->brg_base_p = r->start;
1906 else {
1907 mpsc_resource_err("BRG base");
1908 return -ENOMEM;
1911 return 0;
1914 static void
1915 mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
1917 if (!pi->mpsc_base) {
1918 iounmap(pi->mpsc_base);
1919 release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
1921 if (!pi->sdma_base) {
1922 iounmap(pi->sdma_base);
1923 release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
1925 if (!pi->brg_base) {
1926 iounmap(pi->brg_base);
1927 release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
1930 pi->mpsc_base = NULL;
1931 pi->sdma_base = NULL;
1932 pi->brg_base = NULL;
1934 pi->mpsc_base_p = 0;
1935 pi->sdma_base_p = 0;
1936 pi->brg_base_p = 0;
1938 return;
1941 static void
1942 mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
1943 struct platform_device *pd, int num)
1945 struct mpsc_pdata *pdata;
1947 pdata = (struct mpsc_pdata *)pd->dev.platform_data;
1949 pi->port.uartclk = pdata->brg_clk_freq;
1950 pi->port.iotype = UPIO_MEM;
1951 pi->port.line = num;
1952 pi->port.type = PORT_MPSC;
1953 pi->port.fifosize = MPSC_TXBE_SIZE;
1954 pi->port.membase = pi->mpsc_base;
1955 pi->port.mapbase = (ulong)pi->mpsc_base;
1956 pi->port.ops = &mpsc_pops;
1958 pi->mirror_regs = pdata->mirror_regs;
1959 pi->cache_mgmt = pdata->cache_mgmt;
1960 pi->brg_can_tune = pdata->brg_can_tune;
1961 pi->brg_clk_src = pdata->brg_clk_src;
1962 pi->mpsc_max_idle = pdata->max_idle;
1963 pi->default_baud = pdata->default_baud;
1964 pi->default_bits = pdata->default_bits;
1965 pi->default_parity = pdata->default_parity;
1966 pi->default_flow = pdata->default_flow;
1968 /* Initial values of mirrored regs */
1969 pi->MPSC_CHR_1_m = pdata->chr_1_val;
1970 pi->MPSC_CHR_2_m = pdata->chr_2_val;
1971 pi->MPSC_CHR_10_m = pdata->chr_10_val;
1972 pi->MPSC_MPCR_m = pdata->mpcr_val;
1973 pi->BRG_BCR_m = pdata->bcr_val;
1975 pi->shared_regs = &mpsc_shared_regs;
1977 pi->port.irq = platform_get_irq(pd, 0);
1979 return;
1982 static int
1983 mpsc_drv_probe(struct platform_device *dev)
1985 struct mpsc_port_info *pi;
1986 int rc = -ENODEV;
1988 pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);
1990 if (dev->id < MPSC_NUM_CTLRS) {
1991 pi = &mpsc_ports[dev->id];
1993 if (!(rc = mpsc_drv_map_regs(pi, dev))) {
1994 mpsc_drv_get_platform_data(pi, dev, dev->id);
1996 if (!(rc = mpsc_make_ready(pi)))
1997 if (!(rc = uart_add_one_port(&mpsc_reg,
1998 &pi->port)))
1999 rc = 0;
2000 else {
2001 mpsc_release_port(
2002 (struct uart_port *)pi);
2003 mpsc_drv_unmap_regs(pi);
2005 else
2006 mpsc_drv_unmap_regs(pi);
2010 return rc;
2013 static int
2014 mpsc_drv_remove(struct platform_device *dev)
2016 pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id);
2018 if (dev->id < MPSC_NUM_CTLRS) {
2019 uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port);
2020 mpsc_release_port((struct uart_port *)&mpsc_ports[dev->id].port);
2021 mpsc_drv_unmap_regs(&mpsc_ports[dev->id]);
2022 return 0;
2024 else
2025 return -ENODEV;
2028 static struct platform_driver mpsc_driver = {
2029 .probe = mpsc_drv_probe,
2030 .remove = mpsc_drv_remove,
2031 .driver = {
2032 .name = MPSC_CTLR_NAME,
2033 },
2034 };
2036 static int __init
2037 mpsc_drv_init(void)
2039 int rc;
2041 printk(KERN_INFO "Serial: MPSC driver $Revision: 1.00 $\n");
2043 memset(mpsc_ports, 0, sizeof(mpsc_ports));
2044 memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
2046 if (!(rc = uart_register_driver(&mpsc_reg))) {
2047 if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
2048 if ((rc = platform_driver_register(&mpsc_driver))) {
2049 platform_driver_unregister(&mpsc_shared_driver);
2050 uart_unregister_driver(&mpsc_reg);
2053 else
2054 uart_unregister_driver(&mpsc_reg);
2057 return rc;
2061 static void __exit
2062 mpsc_drv_exit(void)
2064 platform_driver_unregister(&mpsc_driver);
2065 platform_driver_unregister(&mpsc_shared_driver);
2066 uart_unregister_driver(&mpsc_reg);
2067 memset(mpsc_ports, 0, sizeof(mpsc_ports));
2068 memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
2069 return;
2072 module_init(mpsc_drv_init);
2073 module_exit(mpsc_drv_exit);
2075 MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
2076 MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver $Revision: 1.00 $");
2077 MODULE_VERSION(MPSC_VERSION);
2078 MODULE_LICENSE("GPL");
2079 MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR);