ia64/linux-2.6.18-xen.hg

view drivers/atm/fore200e.h @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
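
The retry behaviour described in the commit message can be pictured with a
short sketch. This is an editor's illustration only: apart from mod_timer(),
jiffies and HZ, every identifier below (current_pages, target_pages,
increase_reservation, decrease_reservation, balloon_timer) is a placeholder,
not necessarily the driver's real symbol.

    /* sketch of the retry logic described above -- illustrative names only */
    static void balloon_retry_sketch(void)
    {
        long credit = target_pages - current_pages;   /* pages still wanted */

        if (credit > 0) {
            /* the hypervisor may grant fewer pages than requested */
            long got = increase_reservation(credit);
            current_pages += got;                     /* keep a partial success */
        } else if (credit < 0) {
            decrease_reservation(-credit);
        }

        /* no hard limit: if the target was not reached, try again later */
        if (current_pages != target_pages)
            mod_timer(&balloon_timer, jiffies + HZ);
    }
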
line source
/* $Id: fore200e.h,v 1.4 2000/04/14 10:10:34 davem Exp $ */
#ifndef _FORE200E_H
#define _FORE200E_H

#ifdef __KERNEL__

/* rx buffer sizes */

#define SMALL_BUFFER_SIZE 384 /* size of small buffers (multiple of 48 (PCA) and 64 (SBA) bytes) */
#define LARGE_BUFFER_SIZE 4032 /* size of large buffers (multiple of 48 (PCA) and 64 (SBA) bytes) */

#define RBD_BLK_SIZE 32 /* nbr of supplied rx buffers per rbd */

#define MAX_PDU_SIZE 65535 /* maximum PDU size supported by AALs */

#define BUFFER_S1_SIZE SMALL_BUFFER_SIZE /* size of small buffers, scheme 1 */
#define BUFFER_L1_SIZE LARGE_BUFFER_SIZE /* size of large buffers, scheme 1 */

#define BUFFER_S2_SIZE SMALL_BUFFER_SIZE /* size of small buffers, scheme 2 */
#define BUFFER_L2_SIZE LARGE_BUFFER_SIZE /* size of large buffers, scheme 2 */

#define BUFFER_S1_NBR (RBD_BLK_SIZE * 6)
#define BUFFER_L1_NBR (RBD_BLK_SIZE * 4)

#define BUFFER_S2_NBR (RBD_BLK_SIZE * 6)
#define BUFFER_L2_NBR (RBD_BLK_SIZE * 4)

#define QUEUE_SIZE_CMD 16 /* command queue capacity */
#define QUEUE_SIZE_RX 64 /* receive queue capacity */
#define QUEUE_SIZE_TX 256 /* transmit queue capacity */
#define QUEUE_SIZE_BS 32 /* buffer supply queue capacity */

#define FORE200E_VPI_BITS 0
#define FORE200E_VCI_BITS 10
#define NBR_CONNECT (1 << (FORE200E_VPI_BITS + FORE200E_VCI_BITS)) /* number of connections */

#define TSD_FIXED 2
#define TSD_EXTENSION 0
#define TSD_NBR (TSD_FIXED + TSD_EXTENSION)

/* the cp starts putting a received PDU into one *small* buffer,
   then it uses a number of *large* buffers for the trailing data.
   we compute here the total number of receive segment descriptors
   required to hold the largest possible PDU */

#define RSD_REQUIRED (((MAX_PDU_SIZE - SMALL_BUFFER_SIZE + LARGE_BUFFER_SIZE) / LARGE_BUFFER_SIZE) + 1)

#define RSD_FIXED 3

/* RSD_REQUIRED receive segment descriptors are enough to describe a max-sized PDU,
   but we have to keep the size of the receive PDU descriptor multiple of 32 bytes,
   so we add one extra RSD to RSD_EXTENSION
   (WARNING: THIS MAY CHANGE IF BUFFER SIZES ARE MODIFIED) */

#define RSD_EXTENSION ((RSD_REQUIRED - RSD_FIXED) + 1)
#define RSD_NBR (RSD_FIXED + RSD_EXTENSION)
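
/* Worked example (editor's note, using the sizes defined above):
   RSD_REQUIRED = ((65535 - 384 + 4032) / 4032) + 1 = (69183 / 4032) + 1 = 17 + 1 = 18,
   hence RSD_EXTENSION = (18 - 3) + 1 = 16 and RSD_NBR = 3 + 16 = 19, which makes the
   receive PDU descriptor defined below 4 + 4 + 19 * 8 = 160 bytes, i.e. 5 * 32 bytes. */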
#define FORE200E_DEV(d) ((struct fore200e*)((d)->dev_data))
#define FORE200E_VCC(d) ((struct fore200e_vcc*)((d)->dev_data))

/* bitfields endian games */

#if defined(__LITTLE_ENDIAN_BITFIELD)
#define BITFIELD2(b1, b2) b1; b2;
#define BITFIELD3(b1, b2, b3) b1; b2; b3;
#define BITFIELD4(b1, b2, b3, b4) b1; b2; b3; b4;
#define BITFIELD5(b1, b2, b3, b4, b5) b1; b2; b3; b4; b5;
#define BITFIELD6(b1, b2, b3, b4, b5, b6) b1; b2; b3; b4; b5; b6;
#elif defined(__BIG_ENDIAN_BITFIELD)
#define BITFIELD2(b1, b2) b2; b1;
#define BITFIELD3(b1, b2, b3) b3; b2; b1;
#define BITFIELD4(b1, b2, b3, b4) b4; b3; b2; b1;
#define BITFIELD5(b1, b2, b3, b4, b5) b5; b4; b3; b2; b1;
#define BITFIELD6(b1, b2, b3, b4, b5, b6) b6; b5; b4; b3; b2; b1;
#else
#error unknown bitfield endianness
#endif
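
/* Illustrative expansion (editor's note): a call such as
   BITFIELD2(u32 a : 8, u32 b : 24) declares "u32 a : 8; u32 b : 24;" when
   __LITTLE_ENDIAN_BITFIELD is defined and "u32 b : 24; u32 a : 8;" when
   __BIG_ENDIAN_BITFIELD is defined, so each field ends up on the same bits
   of the underlying 32 bit word on either kind of host. */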
/* ATM cell header (minus HEC byte) */

typedef struct atm_header {
    BITFIELD5(
        u32 clp : 1, /* cell loss priority */
        u32 plt : 3, /* payload type */
        u32 vci : 16, /* virtual channel identifier */
        u32 vpi : 8, /* virtual path identifier */
        u32 gfc : 4 /* generic flow control */
    )
} atm_header_t;

/* ATM adaptation layer id */

typedef enum fore200e_aal {
    FORE200E_AAL0 = 0,
    FORE200E_AAL34 = 4,
    FORE200E_AAL5 = 5,
} fore200e_aal_t;

/* transmit PDU descriptor specification */

typedef struct tpd_spec {
    BITFIELD4(
        u32 length : 16, /* total PDU length */
        u32 nseg : 8, /* number of transmit segments */
        enum fore200e_aal aal : 4, /* adaptation layer */
        u32 intr : 4 /* interrupt requested */
    )
} tpd_spec_t;

/* transmit PDU rate control */

typedef struct tpd_rate
{
    BITFIELD2(
        u32 idle_cells : 16, /* number of idle cells to insert */
        u32 data_cells : 16 /* number of data cells to transmit */
    )
} tpd_rate_t;

/* transmit segment descriptor */

typedef struct tsd {
    u32 buffer; /* transmit buffer DMA address */
    u32 length; /* number of bytes in buffer */
} tsd_t;

/* transmit PDU descriptor */

typedef struct tpd {
    struct atm_header atm_header; /* ATM header minus HEC byte */
    struct tpd_spec spec; /* tpd specification */
    struct tpd_rate rate; /* tpd rate control */
    u32 pad; /* reserved */
    struct tsd tsd[ TSD_NBR ]; /* transmit segment descriptors */
} tpd_t;

/* receive segment descriptor */

typedef struct rsd {
    u32 handle; /* host supplied receive buffer handle */
    u32 length; /* number of bytes in buffer */
} rsd_t;

/* receive PDU descriptor */

typedef struct rpd {
    struct atm_header atm_header; /* ATM header minus HEC byte */
    u32 nseg; /* number of receive segments */
    struct rsd rsd[ RSD_NBR ]; /* receive segment descriptors */
} rpd_t;

/* buffer scheme */

typedef enum buffer_scheme {
    BUFFER_SCHEME_ONE,
    BUFFER_SCHEME_TWO,
    BUFFER_SCHEME_NBR /* always last */
} buffer_scheme_t;

/* buffer magnitude */

typedef enum buffer_magn {
    BUFFER_MAGN_SMALL,
    BUFFER_MAGN_LARGE,
    BUFFER_MAGN_NBR /* always last */
} buffer_magn_t;

/* receive buffer descriptor */

typedef struct rbd {
    u32 handle; /* host supplied handle */
    u32 buffer_haddr; /* host DMA address of host buffer */
} rbd_t;

/* receive buffer descriptor block */

typedef struct rbd_block {
    struct rbd rbd[ RBD_BLK_SIZE ]; /* receive buffer descriptor */
} rbd_block_t;

/* tpd DMA address */

typedef struct tpd_haddr {
    BITFIELD3(
        u32 size : 4, /* tpd size expressed in 32 byte blocks */
        u32 pad : 1, /* reserved */
        u32 haddr : 27 /* tpd DMA addr aligned on 32 byte boundary */
    )
} tpd_haddr_t;

#define TPD_HADDR_SHIFT 5 /* addr aligned on 32 byte boundary */
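
/* Illustrative packing (editor's note, hypothetical values): with TSD_NBR == 2
   the tpd defined above is exactly 32 bytes, so a tpd at DMA address 0x12340080
   would presumably be advertised to the cp as size = 1 (one 32 byte block) and
   haddr = 0x12340080 >> TPD_HADDR_SHIFT = 0x0091a004, the five low-order
   address bits being implied by the 32 byte alignment. */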
/* cp resident transmit queue entry */

typedef struct cp_txq_entry {
    struct tpd_haddr tpd_haddr; /* host DMA address of tpd */
    u32 status_haddr; /* host DMA address of completion status */
} cp_txq_entry_t;

/* cp resident receive queue entry */

typedef struct cp_rxq_entry {
    u32 rpd_haddr; /* host DMA address of rpd */
    u32 status_haddr; /* host DMA address of completion status */
} cp_rxq_entry_t;

/* cp resident buffer supply queue entry */

typedef struct cp_bsq_entry {
    u32 rbd_block_haddr; /* host DMA address of rbd block */
    u32 status_haddr; /* host DMA address of completion status */
} cp_bsq_entry_t;

/* completion status */

typedef volatile enum status {
    STATUS_PENDING = (1<<0), /* initial status (written by host) */
    STATUS_COMPLETE = (1<<1), /* completion status (written by cp) */
    STATUS_FREE = (1<<2), /* initial status (written by host) */
    STATUS_ERROR = (1<<3) /* completion status (written by cp) */
} status_t;

/* cp operation code */

typedef enum opcode {
    OPCODE_INITIALIZE = 1, /* initialize board */
    OPCODE_ACTIVATE_VCIN, /* activate incoming VCI */
    OPCODE_ACTIVATE_VCOUT, /* activate outgoing VCI */
    OPCODE_DEACTIVATE_VCIN, /* deactivate incoming VCI */
    OPCODE_DEACTIVATE_VCOUT, /* deactivate outgoing VCI */
    OPCODE_GET_STATS, /* get board statistics */
    OPCODE_SET_OC3, /* set OC-3 registers */
    OPCODE_GET_OC3, /* get OC-3 registers */
    OPCODE_RESET_STATS, /* reset board statistics */
    OPCODE_GET_PROM, /* get expansion PROM data (PCI specific) */
    OPCODE_SET_VPI_BITS, /* set x bits of those decoded by the
                            firmware to be low order bits from
                            the VPI field of the ATM cell header */
    OPCODE_REQUEST_INTR = (1<<7) /* request interrupt */
} opcode_t;

/* virtual path / virtual channel identifiers */

typedef struct vpvc {
    BITFIELD3(
        u32 vci : 16, /* virtual channel identifier */
        u32 vpi : 8, /* virtual path identifier */
        u32 pad : 8 /* reserved */
    )
} vpvc_t;

/* activate VC command opcode */

typedef struct activate_opcode {
    BITFIELD4(
        enum opcode opcode : 8, /* cp opcode */
        enum fore200e_aal aal : 8, /* adaptation layer */
        enum buffer_scheme scheme : 8, /* buffer scheme */
        u32 pad : 8 /* reserved */
    )
} activate_opcode_t;

/* activate VC command block */

typedef struct activate_block {
    struct activate_opcode opcode; /* activate VC command opcode */
    struct vpvc vpvc; /* VPI/VCI */
    u32 mtu; /* for AAL0 only */
} activate_block_t;
/* deactivate VC command opcode */

typedef struct deactivate_opcode {
    BITFIELD2(
        enum opcode opcode : 8, /* cp opcode */
        u32 pad : 24 /* reserved */
    )
} deactivate_opcode_t;

/* deactivate VC command block */

typedef struct deactivate_block {
    struct deactivate_opcode opcode; /* deactivate VC command opcode */
    struct vpvc vpvc; /* VPI/VCI */
} deactivate_block_t;

/* OC-3 registers */

typedef struct oc3_regs {
    u32 reg[ 128 ]; /* see the PMC Sierra PC5346 S/UNI-155-Lite
                       Saturn User Network Interface documentation
                       for a description of the OC-3 chip registers */
} oc3_regs_t;

/* set/get OC-3 regs command opcode */

typedef struct oc3_opcode {
    BITFIELD4(
        enum opcode opcode : 8, /* cp opcode */
        u32 reg : 8, /* register index */
        u32 value : 8, /* register value */
        u32 mask : 8 /* register mask that specifies which
                        bits of the register value field
                        are significant */
    )
} oc3_opcode_t;

/* set/get OC-3 regs command block */

typedef struct oc3_block {
    struct oc3_opcode opcode; /* set/get OC-3 regs command opcode */
    u32 regs_haddr; /* host DMA address of OC-3 regs buffer */
} oc3_block_t;

/* physical encoding statistics */

typedef struct stats_phy {
    u32 crc_header_errors; /* cells received with bad header CRC */
    u32 framing_errors; /* cells received with bad framing */
    u32 pad[ 2 ]; /* i960 padding */
} stats_phy_t;

/* OC-3 statistics */

typedef struct stats_oc3 {
    u32 section_bip8_errors; /* section 8 bit interleaved parity */
    u32 path_bip8_errors; /* path 8 bit interleaved parity */
    u32 line_bip24_errors; /* line 24 bit interleaved parity */
    u32 line_febe_errors; /* line far end block errors */
    u32 path_febe_errors; /* path far end block errors */
    u32 corr_hcs_errors; /* correctable header check sequence */
    u32 ucorr_hcs_errors; /* uncorrectable header check sequence */
    u32 pad[ 1 ]; /* i960 padding */
} stats_oc3_t;

/* ATM statistics */

typedef struct stats_atm {
    u32 cells_transmitted; /* cells transmitted */
    u32 cells_received; /* cells received */
    u32 vpi_bad_range; /* cell drops: VPI out of range */
    u32 vpi_no_conn; /* cell drops: no connection for VPI */
    u32 vci_bad_range; /* cell drops: VCI out of range */
    u32 vci_no_conn; /* cell drops: no connection for VCI */
    u32 pad[ 2 ]; /* i960 padding */
} stats_atm_t;

/* AAL0 statistics */

typedef struct stats_aal0 {
    u32 cells_transmitted; /* cells transmitted */
    u32 cells_received; /* cells received */
    u32 cells_dropped; /* cells dropped */
    u32 pad[ 1 ]; /* i960 padding */
} stats_aal0_t;

/* AAL3/4 statistics */

typedef struct stats_aal34 {
    u32 cells_transmitted; /* cells transmitted from segmented PDUs */
    u32 cells_received; /* cells reassembled into PDUs */
    u32 cells_crc_errors; /* payload CRC error count */
    u32 cells_protocol_errors; /* SAR or CS layer protocol errors */
    u32 cells_dropped; /* cells dropped: partial reassembly */
    u32 cspdus_transmitted; /* CS PDUs transmitted */
    u32 cspdus_received; /* CS PDUs received */
    u32 cspdus_protocol_errors; /* CS layer protocol errors */
    u32 cspdus_dropped; /* reassembled PDUs dropped (in cells) */
    u32 pad[ 3 ]; /* i960 padding */
} stats_aal34_t;

/* AAL5 statistics */

typedef struct stats_aal5 {
    u32 cells_transmitted; /* cells transmitted from segmented SDUs */
    u32 cells_received; /* cells reassembled into SDUs */
    u32 cells_dropped; /* reassembled PDUs dropped (in cells) */
    u32 congestion_experienced; /* CRC error and length wrong */
    u32 cspdus_transmitted; /* CS PDUs transmitted */
    u32 cspdus_received; /* CS PDUs received */
    u32 cspdus_crc_errors; /* CS PDUs CRC errors */
    u32 cspdus_protocol_errors; /* CS layer protocol errors */
    u32 cspdus_dropped; /* reassembled PDUs dropped */
    u32 pad[ 3 ]; /* i960 padding */
} stats_aal5_t;
/* auxiliary statistics */

typedef struct stats_aux {
    u32 small_b1_failed; /* receive BD allocation failures */
    u32 large_b1_failed; /* receive BD allocation failures */
    u32 small_b2_failed; /* receive BD allocation failures */
    u32 large_b2_failed; /* receive BD allocation failures */
    u32 rpd_alloc_failed; /* receive PDU allocation failures */
    u32 receive_carrier; /* no carrier = 0, carrier = 1 */
    u32 pad[ 2 ]; /* i960 padding */
} stats_aux_t;

/* whole statistics buffer */

typedef struct stats {
    struct stats_phy phy; /* physical encoding statistics */
    struct stats_oc3 oc3; /* OC-3 statistics */
    struct stats_atm atm; /* ATM statistics */
    struct stats_aal0 aal0; /* AAL0 statistics */
    struct stats_aal34 aal34; /* AAL3/4 statistics */
    struct stats_aal5 aal5; /* AAL5 statistics */
    struct stats_aux aux; /* auxiliary statistics */
} stats_t;

/* get statistics command opcode */

typedef struct stats_opcode {
    BITFIELD2(
        enum opcode opcode : 8, /* cp opcode */
        u32 pad : 24 /* reserved */
    )
} stats_opcode_t;

/* get statistics command block */

typedef struct stats_block {
    struct stats_opcode opcode; /* get statistics command opcode */
    u32 stats_haddr; /* host DMA address of stats buffer */
} stats_block_t;

/* expansion PROM data (PCI specific) */

typedef struct prom_data {
    u32 hw_revision; /* hardware revision */
    u32 serial_number; /* board serial number */
    u8 mac_addr[ 8 ]; /* board MAC address */
} prom_data_t;

/* get expansion PROM data command opcode */

typedef struct prom_opcode {
    BITFIELD2(
        enum opcode opcode : 8, /* cp opcode */
        u32 pad : 24 /* reserved */
    )
} prom_opcode_t;

/* get expansion PROM data command block */

typedef struct prom_block {
    struct prom_opcode opcode; /* get PROM data command opcode */
    u32 prom_haddr; /* host DMA address of PROM buffer */
} prom_block_t;

/* cp command */

typedef union cmd {
    enum opcode opcode; /* operation code */
    struct activate_block activate_block; /* activate VC */
    struct deactivate_block deactivate_block; /* deactivate VC */
    struct stats_block stats_block; /* get statistics */
    struct prom_block prom_block; /* get expansion PROM data */
    struct oc3_block oc3_block; /* get/set OC-3 registers */
    u32 pad[ 4 ]; /* i960 padding */
} cmd_t;

/* cp resident command queue */

typedef struct cp_cmdq_entry {
    union cmd cmd; /* command */
    u32 status_haddr; /* host DMA address of completion status */
    u32 pad[ 3 ]; /* i960 padding */
} cp_cmdq_entry_t;

/* host resident transmit queue entry */

typedef struct host_txq_entry {
    struct cp_txq_entry __iomem *cp_entry; /* addr of cp resident tx queue entry */
    enum status* status; /* addr of host resident status */
    struct tpd* tpd; /* addr of transmit PDU descriptor */
    u32 tpd_dma; /* DMA address of tpd */
    struct sk_buff* skb; /* related skb */
    void* data; /* copy of misaligned data */
    unsigned long incarn; /* vc_map incarnation when submitted for tx */
    struct fore200e_vc_map* vc_map;
} host_txq_entry_t;

/* host resident receive queue entry */

typedef struct host_rxq_entry {
    struct cp_rxq_entry __iomem *cp_entry; /* addr of cp resident rx queue entry */
    enum status* status; /* addr of host resident status */
    struct rpd* rpd; /* addr of receive PDU descriptor */
    u32 rpd_dma; /* DMA address of rpd */
} host_rxq_entry_t;

/* host resident buffer supply queue entry */

typedef struct host_bsq_entry {
    struct cp_bsq_entry __iomem *cp_entry; /* addr of cp resident buffer supply queue entry */
    enum status* status; /* addr of host resident status */
    struct rbd_block* rbd_block; /* addr of receive buffer descriptor block */
    u32 rbd_block_dma; /* DMA address of rbd block */
} host_bsq_entry_t;

/* host resident command queue entry */

typedef struct host_cmdq_entry {
    struct cp_cmdq_entry __iomem *cp_entry; /* addr of cp resident cmd queue entry */
    enum status *status; /* addr of host resident status */
} host_cmdq_entry_t;

/* chunk of memory */

typedef struct chunk {
    void* alloc_addr; /* base address of allocated chunk */
    void* align_addr; /* base address of aligned chunk */
    dma_addr_t dma_addr; /* DMA address of aligned chunk */
    int direction; /* direction of DMA mapping */
    u32 alloc_size; /* length of allocated chunk */
    u32 align_size; /* length of aligned chunk */
} chunk_t;

#define dma_size align_size /* DMA usable size */

/* host resident receive buffer */

typedef struct buffer {
    struct buffer* next; /* next receive buffer */
    enum buffer_scheme scheme; /* buffer scheme */
    enum buffer_magn magn; /* buffer magnitude */
    struct chunk data; /* data buffer */
#ifdef FORE200E_BSQ_DEBUG
    unsigned long index; /* buffer # in queue */
    int supplied; /* 'buffer supplied' flag */
#endif
} buffer_t;

#if (BITS_PER_LONG == 32)
#define FORE200E_BUF2HDL(buffer) ((u32)(buffer))
#define FORE200E_HDL2BUF(handle) ((struct buffer*)(handle))
#else /* deal with 64 bit pointers */
#define FORE200E_BUF2HDL(buffer) ((u32)((u64)(buffer)))
#define FORE200E_HDL2BUF(handle) ((struct buffer*)(((u64)(handle)) | PAGE_OFFSET))
#endif
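
/* Illustrative round trip (editor's note): the cp only stores a 32 bit handle,
   so on 64 bit hosts FORE200E_BUF2HDL() keeps just the low 32 bits of the
   buffer pointer and FORE200E_HDL2BUF() rebuilds the kernel virtual address by
   OR-ing PAGE_OFFSET back in; FORE200E_HDL2BUF(FORE200E_BUF2HDL(buf)) == buf
   is therefore expected to hold only for buffers in the direct kernel mapping. */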
/* host resident command queue */

typedef struct host_cmdq {
    struct host_cmdq_entry host_entry[ QUEUE_SIZE_CMD ]; /* host resident cmd queue entries */
    int head; /* head of cmd queue */
    struct chunk status; /* array of completion status */
} host_cmdq_t;

/* host resident transmit queue */

typedef struct host_txq {
    struct host_txq_entry host_entry[ QUEUE_SIZE_TX ]; /* host resident tx queue entries */
    int head; /* head of tx queue */
    int tail; /* tail of tx queue */
    struct chunk tpd; /* array of tpds */
    struct chunk status; /* array of completion status */
    int txing; /* number of pending PDUs in tx queue */
} host_txq_t;

/* host resident receive queue */

typedef struct host_rxq {
    struct host_rxq_entry host_entry[ QUEUE_SIZE_RX ]; /* host resident rx queue entries */
    int head; /* head of rx queue */
    struct chunk rpd; /* array of rpds */
    struct chunk status; /* array of completion status */
} host_rxq_t;

/* host resident buffer supply queues */

typedef struct host_bsq {
    struct host_bsq_entry host_entry[ QUEUE_SIZE_BS ]; /* host resident buffer supply queue entries */
    int head; /* head of buffer supply queue */
    struct chunk rbd_block; /* array of rbds */
    struct chunk status; /* array of completion status */
    struct buffer* buffer; /* array of rx buffers */
    struct buffer* freebuf; /* list of free rx buffers */
    volatile int freebuf_count; /* count of free rx buffers */
} host_bsq_t;

/* header of the firmware image */

typedef struct fw_header {
    u32 magic; /* magic number */
    u32 version; /* firmware version id */
    u32 load_offset; /* fw load offset in board memory */
    u32 start_offset; /* fw execution start address in board memory */
} fw_header_t;

#define FW_HEADER_MAGIC 0x65726f66 /* 'fore' */

/* receive buffer supply queues scheme specification */

typedef struct bs_spec {
    u32 queue_length; /* queue capacity */
    u32 buffer_size; /* host buffer size */
    u32 pool_size; /* number of rbds */
    u32 supply_blksize; /* num of rbds in I/O block (multiple
                           of 4 between 4 and 124 inclusive) */
} bs_spec_t;

/* initialization command block (one-time command, not in cmd queue) */

typedef struct init_block {
    enum opcode opcode; /* initialize command */
    enum status status; /* related status word */
    u32 receive_threshold; /* not used */
    u32 num_connect; /* ATM connections */
    u32 cmd_queue_len; /* length of command queue */
    u32 tx_queue_len; /* length of transmit queue */
    u32 rx_queue_len; /* length of receive queue */
    u32 rsd_extension; /* number of extra 32 byte blocks */
    u32 tsd_extension; /* number of extra 32 byte blocks */
    u32 conless_vpvc; /* not used */
    u32 pad[ 2 ]; /* force quad alignment */
    struct bs_spec bs_spec[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ]; /* buffer supply queues spec */
} init_block_t;

typedef enum media_type {
    MEDIA_TYPE_CAT5_UTP = 0x06, /* unshielded twisted pair */
    MEDIA_TYPE_MM_OC3_ST = 0x16, /* multimode fiber ST */
    MEDIA_TYPE_MM_OC3_SC = 0x26, /* multimode fiber SC */
    MEDIA_TYPE_SM_OC3_ST = 0x36, /* single-mode fiber ST */
    MEDIA_TYPE_SM_OC3_SC = 0x46 /* single-mode fiber SC */
} media_type_t;

#define FORE200E_MEDIA_INDEX(media_type) ((media_type)>>4)
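
/* Illustrative use (editor's note): every media_type value above carries 6 in
   its low nibble, so FORE200E_MEDIA_INDEX() keeps only the high nibble, e.g.
   FORE200E_MEDIA_INDEX(MEDIA_TYPE_MM_OC3_SC) == 0x26 >> 4 == 2, yielding a
   compact 0..4 index suitable for a small lookup table of media names. */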
/* cp resident queues */

typedef struct cp_queues {
    u32 cp_cmdq; /* command queue */
    u32 cp_txq; /* transmit queue */
    u32 cp_rxq; /* receive queue */
    u32 cp_bsq[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ]; /* buffer supply queues */
    u32 imask; /* 1 enables cp to host interrupts */
    u32 istat; /* 1 for interrupt posted */
    u32 heap_base; /* offset from beginning of RAM */
    u32 heap_size; /* space available for queues */
    u32 hlogger; /* non zero for host logging */
    u32 heartbeat; /* cp heartbeat */
    u32 fw_release; /* firmware version */
    u32 mon960_release; /* i960 monitor version */
    u32 tq_plen; /* transmit throughput measurements */
    /* make sure the init block remains on a quad word boundary */
    struct init_block init; /* one time cmd, not in cmd queue */
    enum media_type media_type; /* media type id */
    u32 oc3_revision; /* OC-3 revision number */
} cp_queues_t;

/* boot status */

typedef enum boot_status {
    BSTAT_COLD_START = (u32) 0xc01dc01d, /* cold start */
    BSTAT_SELFTEST_OK = (u32) 0x02201958, /* self-test ok */
    BSTAT_SELFTEST_FAIL = (u32) 0xadbadbad, /* self-test failed */
    BSTAT_CP_RUNNING = (u32) 0xce11feed, /* cp is running */
    BSTAT_MON_TOO_BIG = (u32) 0x10aded00 /* i960 monitor is too big */
} boot_status_t;

/* software UART */

typedef struct soft_uart {
    u32 send; /* write register */
    u32 recv; /* read register */
} soft_uart_t;

#define FORE200E_CP_MONITOR_UART_FREE 0x00000000
#define FORE200E_CP_MONITOR_UART_AVAIL 0x01000000

/* i960 monitor */

typedef struct cp_monitor {
    struct soft_uart soft_uart; /* software UART */
    enum boot_status bstat; /* boot status */
    u32 app_base; /* application base offset */
    u32 mon_version; /* i960 monitor version */
} cp_monitor_t;

/* device state */

typedef enum fore200e_state {
    FORE200E_STATE_BLANK, /* initial state */
    FORE200E_STATE_REGISTER, /* device registered */
    FORE200E_STATE_CONFIGURE, /* bus interface configured */
    FORE200E_STATE_MAP, /* board space mapped in host memory */
    FORE200E_STATE_RESET, /* board reset */
    FORE200E_STATE_LOAD_FW, /* firmware loaded */
    FORE200E_STATE_START_FW, /* firmware started */
    FORE200E_STATE_INITIALIZE, /* initialize command successful */
    FORE200E_STATE_INIT_CMDQ, /* command queue initialized */
    FORE200E_STATE_INIT_TXQ, /* transmit queue initialized */
    FORE200E_STATE_INIT_RXQ, /* receive queue initialized */
    FORE200E_STATE_INIT_BSQ, /* buffer supply queue initialized */
    FORE200E_STATE_ALLOC_BUF, /* receive buffers allocated */
    FORE200E_STATE_IRQ, /* host interrupt requested */
    FORE200E_STATE_COMPLETE /* initialization completed */
} fore200e_state;

/* PCA-200E registers */

typedef struct fore200e_pca_regs {
    volatile u32 __iomem * hcr; /* address of host control register */
    volatile u32 __iomem * imr; /* address of host interrupt mask register */
    volatile u32 __iomem * psr; /* address of PCI specific register */
} fore200e_pca_regs_t;

/* SBA-200E registers */

typedef struct fore200e_sba_regs {
    volatile u32 __iomem *hcr; /* address of host control register */
    volatile u32 __iomem *bsr; /* address of burst transfer size register */
    volatile u32 __iomem *isr; /* address of interrupt level selection register */
} fore200e_sba_regs_t;

/* model-specific registers */

typedef union fore200e_regs {
    struct fore200e_pca_regs pca; /* PCA-200E registers */
    struct fore200e_sba_regs sba; /* SBA-200E registers */
} fore200e_regs;

struct fore200e;

/* bus-dependent data */

typedef struct fore200e_bus {
    char* model_name; /* board model name */
    char* proc_name; /* board name under /proc/atm */
    int descr_alignment; /* tpd/rpd/rbd DMA alignment requirement */
    int buffer_alignment; /* rx buffers DMA alignment requirement */
    int status_alignment; /* status words DMA alignment requirement */
    const unsigned char* fw_data; /* address of firmware data start */
    const unsigned int* fw_size; /* address of firmware data size */
    u32 (*read)(volatile u32 __iomem *);
    void (*write)(u32, volatile u32 __iomem *);
    u32 (*dma_map)(struct fore200e*, void*, int, int);
    void (*dma_unmap)(struct fore200e*, u32, int, int);
    void (*dma_sync_for_cpu)(struct fore200e*, u32, int, int);
    void (*dma_sync_for_device)(struct fore200e*, u32, int, int);
    int (*dma_chunk_alloc)(struct fore200e*, struct chunk*, int, int, int);
    void (*dma_chunk_free)(struct fore200e*, struct chunk*);
    struct fore200e* (*detect)(const struct fore200e_bus*, int);
    int (*configure)(struct fore200e*);
    int (*map)(struct fore200e*);
    void (*reset)(struct fore200e*);
    int (*prom_read)(struct fore200e*, struct prom_data*);
    void (*unmap)(struct fore200e*);
    void (*irq_enable)(struct fore200e*);
    int (*irq_check)(struct fore200e*);
    void (*irq_ack)(struct fore200e*);
    int (*proc_read)(struct fore200e*, char*);
} fore200e_bus_t;

/* vc mapping */

typedef struct fore200e_vc_map {
    struct atm_vcc* vcc; /* vcc entry */
    unsigned long incarn; /* vcc incarnation number */
} fore200e_vc_map_t;

#define FORE200E_VC_MAP(fore200e, vpi, vci) \
        (& (fore200e)->vc_map[ ((vpi) << FORE200E_VCI_BITS) | (vci) ])
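
/* Illustrative lookup (editor's note): with FORE200E_VPI_BITS == 0 and
   FORE200E_VCI_BITS == 10 there are NBR_CONNECT == 1024 vc_map slots and
   FORE200E_VC_MAP(fore200e, 0, 42) resolves to &(fore200e)->vc_map[42],
   i.e. the slot index is simply (vpi << 10) | vci. */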
/* per-device data */

typedef struct fore200e {
    struct list_head entry; /* next device */
    const struct fore200e_bus* bus; /* bus-dependent code and data */
    union fore200e_regs regs; /* bus-dependent registers */
    struct atm_dev* atm_dev; /* ATM device */

    enum fore200e_state state; /* device state */

    char name[16]; /* device name */
    void* bus_dev; /* bus-specific kernel data */
    int irq; /* irq number */
    unsigned long phys_base; /* physical base address */
    void __iomem * virt_base; /* virtual base address */

    unsigned char esi[ ESI_LEN ]; /* end system identifier */

    struct cp_monitor __iomem * cp_monitor; /* i960 monitor address */
    struct cp_queues __iomem * cp_queues; /* cp resident queues */
    struct host_cmdq host_cmdq; /* host resident cmd queue */
    struct host_txq host_txq; /* host resident tx queue */
    struct host_rxq host_rxq; /* host resident rx queue */
    /* host resident buffer supply queues */
    struct host_bsq host_bsq[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ];

    u32 available_cell_rate; /* remaining pseudo-CBR bw on link */

    int loop_mode; /* S/UNI loopback mode */

    struct stats* stats; /* last snapshot of the stats */

    struct semaphore rate_sf; /* protects rate reservation ops */
    spinlock_t q_lock; /* protects queue ops */
#ifdef FORE200E_USE_TASKLET
    struct tasklet_struct tx_tasklet; /* performs tx interrupt work */
    struct tasklet_struct rx_tasklet; /* performs rx interrupt work */
#endif
    unsigned long tx_sat; /* tx queue saturation count */

    unsigned long incarn_count;
    struct fore200e_vc_map vc_map[ NBR_CONNECT ]; /* vc mapping */
} fore200e_t;

/* per-vcc data */

typedef struct fore200e_vcc {
    enum buffer_scheme scheme; /* rx buffer scheme */
    struct tpd_rate rate; /* tx rate control data */
    int rx_min_pdu; /* size of smallest PDU received */
    int rx_max_pdu; /* size of largest PDU received */
    int tx_min_pdu; /* size of smallest PDU transmitted */
    int tx_max_pdu; /* size of largest PDU transmitted */
    unsigned long tx_pdu; /* nbr of tx pdus */
    unsigned long rx_pdu; /* nbr of rx pdus */
} fore200e_vcc_t;

/* 200E-series common memory layout */

#define FORE200E_CP_MONITOR_OFFSET 0x00000400 /* i960 monitor interface */
#define FORE200E_CP_QUEUES_OFFSET 0x00004d40 /* cp resident queues */

/* PCA-200E memory layout */

#define PCA200E_IOSPACE_LENGTH 0x00200000

#define PCA200E_HCR_OFFSET 0x00100000 /* board control register */
#define PCA200E_IMR_OFFSET 0x00100004 /* host IRQ mask register */
#define PCA200E_PSR_OFFSET 0x00100008 /* PCI specific register */

/* PCA-200E host control register */

#define PCA200E_HCR_RESET (1<<0) /* read / write */
#define PCA200E_HCR_HOLD_LOCK (1<<1) /* read / write */
#define PCA200E_HCR_I960FAIL (1<<2) /* read */
#define PCA200E_HCR_INTRB (1<<2) /* write */
#define PCA200E_HCR_HOLD_ACK (1<<3) /* read */
#define PCA200E_HCR_INTRA (1<<3) /* write */
#define PCA200E_HCR_OUTFULL (1<<4) /* read */
#define PCA200E_HCR_CLRINTR (1<<4) /* write */
#define PCA200E_HCR_ESPHOLD (1<<5) /* read */
#define PCA200E_HCR_INFULL (1<<6) /* read */
#define PCA200E_HCR_TESTMODE (1<<7) /* read */

/* PCA-200E PCI bus interface regs (offsets in PCI config space) */

#define PCA200E_PCI_LATENCY 0x40 /* maximum slave latency */
#define PCA200E_PCI_MASTER_CTRL 0x41 /* master control */
#define PCA200E_PCI_THRESHOLD 0x42 /* burst / continuous req threshold */

/* PBI master control register */

#define PCA200E_CTRL_DIS_CACHE_RD (1<<0) /* disable cache-line reads */
#define PCA200E_CTRL_DIS_WRT_INVAL (1<<1) /* disable writes and invalidates */
#define PCA200E_CTRL_2_CACHE_WRT_INVAL (1<<2) /* require 2 cache-lines for writes and invalidates */
#define PCA200E_CTRL_IGN_LAT_TIMER (1<<3) /* ignore the latency timer */
#define PCA200E_CTRL_ENA_CONT_REQ_MODE (1<<4) /* enable continuous request mode */
#define PCA200E_CTRL_LARGE_PCI_BURSTS (1<<5) /* force large PCI bus bursts */
#define PCA200E_CTRL_CONVERT_ENDIAN (1<<6) /* convert endianness of slave RAM accesses */

#define SBA200E_PROM_NAME "FORE,sba-200e" /* device name in openprom tree */

/* size of SBA-200E registers */

#define SBA200E_HCR_LENGTH 4
#define SBA200E_BSR_LENGTH 4
#define SBA200E_ISR_LENGTH 4
#define SBA200E_RAM_LENGTH 0x40000

/* SBA-200E SBUS burst transfer size register */

#define SBA200E_BSR_BURST4 0x04
#define SBA200E_BSR_BURST8 0x08
#define SBA200E_BSR_BURST16 0x10

/* SBA-200E host control register */

#define SBA200E_HCR_RESET (1<<0) /* read / write (sticky) */
#define SBA200E_HCR_HOLD_LOCK (1<<1) /* read / write (sticky) */
#define SBA200E_HCR_I960FAIL (1<<2) /* read */
#define SBA200E_HCR_I960SETINTR (1<<2) /* write */
#define SBA200E_HCR_OUTFULL (1<<3) /* read */
#define SBA200E_HCR_INTR_CLR (1<<3) /* write */
#define SBA200E_HCR_INTR_ENA (1<<4) /* read / write (sticky) */
#define SBA200E_HCR_ESPHOLD (1<<5) /* read */
#define SBA200E_HCR_INFULL (1<<6) /* read */
#define SBA200E_HCR_TESTMODE (1<<7) /* read */
#define SBA200E_HCR_INTR_REQ (1<<8) /* read */

#define SBA200E_HCR_STICKY (SBA200E_HCR_RESET | SBA200E_HCR_HOLD_LOCK | SBA200E_HCR_INTR_ENA)

#endif /* __KERNEL__ */
#endif /* _FORE200E_H */