ia64/linux-2.6.18-xen.hg

drivers/net/acenic.h @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation, and records the
limit it reached as the "hard limit". The driver will not try again
until the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, causing temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to
ask a domain to balloon to more than its allocation, nor would you
expect it to deliberately over-commit memory by setting balloon
targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.
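
The retry behaviour can be sketched as follows (an illustrative
sketch only, not the patch itself: grant_pages(), current_pages and
balloon_timer stand in for the real hypercall and driver state):

    /* Sketch: keep partial grants and retry on a timer. */
    static void balloon_up(unsigned long target)
    {
        unsigned long want = target - current_pages;
        long got = grant_pages(want);  /* may grant fewer than 'want' */

        if (got > 0)
            current_pages += got;      /* keep partial successes */

        if (current_pages < target)    /* shortfall: host under pressure */
            mod_timer(&balloon_timer, jiffies + HZ);  /* retry later */
    }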

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children

#ifndef _ACENIC_H_
#define _ACENIC_H_

/*
 * Generate a TX index update each time the TX ring is closed.
 * Normally this is not useful, because it results in more DMA (and
 * IRQs without TX_COAL_INTS_ONLY).
 */
#define USE_TX_COAL_NOW 0

/*
 * Addressing:
 *
 * The Tigon uses 64-bit host addresses, regardless of their actual
 * length, and it expects a big-endian format. For 32-bit systems the
 * upper 32 bits of the address are simply ignored (zero); however,
 * for little-endian 64-bit systems (Alpha) this looks strange, with
 * the two parts of the address word being swapped.
 *
 * The addresses are split into two 32-bit words for all
 * architectures, as some of them are in PCI shared memory and it is
 * necessary to use readl/writel to access them.
 *
 * The addressing code is derived from Pete Wyckoff's work, but
 * modified to deal properly with readl/writel usage.
 */
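
/*
 * A minimal sketch of the split described above (this mirrors
 * set_aceaddr() further down in this file; 'dma' is a hypothetical
 * bus address used only for illustration):
 *
 *    u64 baddr = (u64) dma;
 *    aa->addrhi = baddr >> 32;          (zero on 32-bit hosts)
 *    aa->addrlo = baddr & 0xffffffff;
 */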

struct ace_regs {
    u32 pad0[16];       /* PCI control registers */

    u32 HostCtrl;       /* 0x40 */
    u32 LocalCtrl;

    u32 pad1[2];

    u32 MiscCfg;        /* 0x50 */

    u32 pad2[2];

    u32 PciState;

    u32 pad3[2];        /* 0x60 */

    u32 WinBase;
    u32 WinData;

    u32 pad4[12];       /* 0x70 */

    u32 DmaWriteState;  /* 0xa0 */
    u32 pad5[3];
    u32 DmaReadState;   /* 0xb0 */

    u32 pad6[26];

    u32 AssistState;

    u32 pad7[8];        /* 0x120 */

    u32 CpuCtrl;        /* 0x140 */
    u32 Pc;

    u32 pad8[3];

    u32 SramAddr;       /* 0x154 */
    u32 SramData;

    u32 pad9[49];

    u32 MacRxState;     /* 0x220 */

    u32 pad10[7];

    u32 CpuBCtrl;       /* 0x240 */
    u32 PcB;

    u32 pad11[3];

    u32 SramBAddr;      /* 0x254 */
    u32 SramBData;

    u32 pad12[105];

    u32 pad13[32];      /* 0x400 */
    u32 Stats[32];

    u32 Mb0Hi;          /* 0x500 */
    u32 Mb0Lo;
    u32 Mb1Hi;
    u32 CmdPrd;
    u32 Mb2Hi;
    u32 TxPrd;
    u32 Mb3Hi;
    u32 RxStdPrd;
    u32 Mb4Hi;
    u32 RxJumboPrd;
    u32 Mb5Hi;
    u32 RxMiniPrd;
    u32 Mb6Hi;
    u32 Mb6Lo;
    u32 Mb7Hi;
    u32 Mb7Lo;
    u32 Mb8Hi;
    u32 Mb8Lo;
    u32 Mb9Hi;
    u32 Mb9Lo;
    u32 MbAHi;
    u32 MbALo;
    u32 MbBHi;
    u32 MbBLo;
    u32 MbCHi;
    u32 MbCLo;
    u32 MbDHi;
    u32 MbDLo;
    u32 MbEHi;
    u32 MbELo;
    u32 MbFHi;
    u32 MbFLo;

    u32 pad14[32];

    u32 MacAddrHi;      /* 0x600 */
    u32 MacAddrLo;
    u32 InfoPtrHi;
    u32 InfoPtrLo;
    u32 MultiCastHi;    /* 0x610 */
    u32 MultiCastLo;
    u32 ModeStat;
    u32 DmaReadCfg;
    u32 DmaWriteCfg;    /* 0x620 */
    u32 TxBufRat;
    u32 EvtCsm;
    u32 CmdCsm;
    u32 TuneRxCoalTicks;/* 0x630 */
    u32 TuneTxCoalTicks;
    u32 TuneStatTicks;
    u32 TuneMaxTxDesc;
    u32 TuneMaxRxDesc;  /* 0x640 */
    u32 TuneTrace;
    u32 TuneLink;
    u32 TuneFastLink;
    u32 TracePtr;       /* 0x650 */
    u32 TraceStrt;
    u32 TraceLen;
    u32 IfIdx;
    u32 IfMtu;          /* 0x660 */
    u32 MaskInt;
    u32 GigLnkState;
    u32 FastLnkState;
    u32 pad16[4];       /* 0x670 */
    u32 RxRetCsm;       /* 0x680 */

    u32 pad17[31];

    u32 CmdRng[64];     /* 0x700 */
    u32 Window[0x200];
};
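
/*
 * A usage sketch (not part of the original header): this block is
 * memory-mapped PCI space, so every access must go through
 * readl/writel, e.g. masking interrupts via HostCtrl as
 * ace_mask_irq() does further down:
 *
 *    writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);
 */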

typedef struct {
    u32 addrhi;
    u32 addrlo;
} aceaddr;

#define ACE_WINDOW_SIZE 0x800

#define ACE_JUMBO_MTU 9000
#define ACE_STD_MTU 1500

#define ACE_TRACE_SIZE 0x8000

/*
 * Host control register bits.
 */

#define IN_INT 0x01
#define CLR_INT 0x02
#define HW_RESET 0x08
#define BYTE_SWAP 0x10
#define WORD_SWAP 0x20
#define MASK_INTS 0x40

/*
 * Local control register bits.
 */

#define EEPROM_DATA_IN 0x800000
#define EEPROM_DATA_OUT 0x400000
#define EEPROM_WRITE_ENABLE 0x200000
#define EEPROM_CLK_OUT 0x100000

#define EEPROM_BASE 0xa0000000

#define EEPROM_WRITE_SELECT 0xa0
#define EEPROM_READ_SELECT 0xa1

#define SRAM_BANK_512K 0x200

/*
 * udelay() values used when clocking the EEPROM.
 */
#define ACE_SHORT_DELAY 2
#define ACE_LONG_DELAY 4

/*
 * Misc Config bits
 */

#define SYNC_SRAM_TIMING 0x100000

/*
 * CPU state bits.
 */

#define CPU_RESET 0x01
#define CPU_TRACE 0x02
#define CPU_PROM_FAILED 0x10
#define CPU_HALT 0x00010000
#define CPU_HALTED 0xffff0000

/*
 * PCI State bits.
 */

#define DMA_READ_MAX_4 0x04
#define DMA_READ_MAX_16 0x08
#define DMA_READ_MAX_32 0x0c
#define DMA_READ_MAX_64 0x10
#define DMA_READ_MAX_128 0x14
#define DMA_READ_MAX_256 0x18
#define DMA_READ_MAX_1K 0x1c
#define DMA_WRITE_MAX_4 0x20
#define DMA_WRITE_MAX_16 0x40
#define DMA_WRITE_MAX_32 0x60
#define DMA_WRITE_MAX_64 0x80
#define DMA_WRITE_MAX_128 0xa0
#define DMA_WRITE_MAX_256 0xc0
#define DMA_WRITE_MAX_1K 0xe0
#define DMA_READ_WRITE_MASK 0xfc
#define MEM_READ_MULTIPLE 0x00020000
#define PCI_66MHZ 0x00080000
#define PCI_32BIT 0x00100000
#define DMA_WRITE_ALL_ALIGN 0x00800000
#define READ_CMD_MEM 0x06000000
#define WRITE_CMD_MEM 0x70000000

/*
 * Mode status
 */

#define ACE_BYTE_SWAP_BD 0x02
#define ACE_WORD_SWAP_BD 0x04 /* not actually used */
#define ACE_WARN 0x08
#define ACE_BYTE_SWAP_DMA 0x10
#define ACE_NO_JUMBO_FRAG 0x200
#define ACE_FATAL 0x40000000

/*
 * DMA config
 */

#define DMA_THRESH_1W 0x10
#define DMA_THRESH_2W 0x20
#define DMA_THRESH_4W 0x40
#define DMA_THRESH_8W 0x80
#define DMA_THRESH_16W 0x100
#define DMA_THRESH_32W 0x0 /* not described in the docs, but it exists */

/*
 * Tuning parameters
 */

#define TICKS_PER_SEC 1000000

/*
 * Link bits
 */

#define LNK_PREF 0x00008000
#define LNK_10MB 0x00010000
#define LNK_100MB 0x00020000
#define LNK_1000MB 0x00040000
#define LNK_FULL_DUPLEX 0x00080000
#define LNK_HALF_DUPLEX 0x00100000
#define LNK_TX_FLOW_CTL_Y 0x00200000
#define LNK_NEG_ADVANCED 0x00400000
#define LNK_RX_FLOW_CTL_Y 0x00800000
#define LNK_NIC 0x01000000
#define LNK_JAM 0x02000000
#define LNK_JUMBO 0x04000000
#define LNK_ALTEON 0x08000000
#define LNK_NEG_FCTL 0x10000000
#define LNK_NEGOTIATE 0x20000000
#define LNK_ENABLE 0x40000000
#define LNK_UP 0x80000000

/*
 * Event definitions
 */

#define EVT_RING_ENTRIES 256
#define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event))

struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
    u32 idx:12;
    u32 code:12;
    u32 evt:8;
#else
    u32 evt:8;
    u32 code:12;
    u32 idx:12;
#endif
    u32 pad;
};

/*
 * Events
 */

#define E_FW_RUNNING 0x01
#define E_STATS_UPDATED 0x04

#define E_STATS_UPDATE 0x04

#define E_LNK_STATE 0x06
#define E_C_LINK_UP 0x01
#define E_C_LINK_DOWN 0x02
#define E_C_LINK_10_100 0x03

#define E_ERROR 0x07
#define E_C_ERR_INVAL_CMD 0x01
#define E_C_ERR_UNIMP_CMD 0x02
#define E_C_ERR_BAD_CFG 0x03

#define E_MCAST_LIST 0x08
#define E_C_MCAST_ADDR_ADD 0x01
#define E_C_MCAST_ADDR_DEL 0x02

#define E_RESET_JUMBO_RNG 0x09

/*
 * Commands
 */

#define CMD_RING_ENTRIES 64

struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
    u32 idx:12;
    u32 code:12;
    u32 evt:8;
#else
    u32 evt:8;
    u32 code:12;
    u32 idx:12;
#endif
};

#define C_HOST_STATE 0x01
#define C_C_STACK_UP 0x01
#define C_C_STACK_DOWN 0x02

#define C_FDR_FILTERING 0x02
#define C_C_FDR_FILT_ENABLE 0x01
#define C_C_FDR_FILT_DISABLE 0x02

#define C_SET_RX_PRD_IDX 0x03
#define C_UPDATE_STATS 0x04
#define C_RESET_JUMBO_RNG 0x05
#define C_ADD_MULTICAST_ADDR 0x08
#define C_DEL_MULTICAST_ADDR 0x09

#define C_SET_PROMISC_MODE 0x0a
#define C_C_PROMISC_ENABLE 0x01
#define C_C_PROMISC_DISABLE 0x02

#define C_LNK_NEGOTIATION 0x0b
#define C_C_NEGOTIATE_BOTH 0x00
#define C_C_NEGOTIATE_GIG 0x01
#define C_C_NEGOTIATE_10_100 0x02

#define C_SET_MAC_ADDR 0x0c
#define C_CLEAR_PROFILE 0x0d

#define C_SET_MULTICAST_MODE 0x0e
#define C_C_MCAST_ENABLE 0x01
#define C_C_MCAST_DISABLE 0x02

#define C_CLEAR_STATS 0x0f
#define C_SET_RX_JUMBO_PRD_IDX 0x10
#define C_REFRESH_STATS 0x11
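
/*
 * A sketch of how commands are posted (assumed from the companion
 * acenic.c, not defined in this header): the 32-bit command word is
 * written into the CmdRng mailbox at the current producer index, and
 * CmdPrd is advanced modulo CMD_RING_ENTRIES:
 *
 *    idx = readl(&regs->CmdPrd);
 *    writel(*(u32 *)cmd, &regs->CmdRng[idx]);
 *    writel((idx + 1) % CMD_RING_ENTRIES, &regs->CmdPrd);
 */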

/*
 * Descriptor flags
 */
#define BD_FLG_TCP_UDP_SUM 0x01
#define BD_FLG_IP_SUM 0x02
#define BD_FLG_END 0x04
#define BD_FLG_MORE 0x08
#define BD_FLG_JUMBO 0x10
#define BD_FLG_UCAST 0x20
#define BD_FLG_MCAST 0x40
#define BD_FLG_BCAST 0x60
#define BD_FLG_TYP_MASK 0x60
#define BD_FLG_IP_FRAG 0x80
#define BD_FLG_IP_FRAG_END 0x100
#define BD_FLG_VLAN_TAG 0x200
#define BD_FLG_FRAME_ERROR 0x400
#define BD_FLG_COAL_NOW 0x800
#define BD_FLG_MINI 0x1000

/*
 * Ring Control block flags
 */
#define RCB_FLG_TCP_UDP_SUM 0x01
#define RCB_FLG_IP_SUM 0x02
#define RCB_FLG_NO_PSEUDO_HDR 0x08
#define RCB_FLG_VLAN_ASSIST 0x10
#define RCB_FLG_COAL_INT_ONLY 0x20
#define RCB_FLG_TX_HOST_RING 0x40
#define RCB_FLG_IEEE_SNAP_SUM 0x80
#define RCB_FLG_EXT_RX_BD 0x100
#define RCB_FLG_RNG_DISABLE 0x200

/*
 * TX ring - the maximum number of TX ring entries for the Tigon I is 128.
 */
#define MAX_TX_RING_ENTRIES 256
#define TIGON_I_TX_RING_ENTRIES 128
#define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc))
#define TX_RING_BASE 0x3800

struct tx_desc {
    aceaddr addr;
    u32 flagsize;
#if 0
    /*
     * This is in PCI shared mem and must be accessed with readl/writel;
     * the real layout is:
     */
#if __LITTLE_ENDIAN
    u16 flags;
    u16 size;
    u16 vlan;
    u16 reserved;
#else
    u16 size;
    u16 flags;
    u16 reserved;
    u16 vlan;
#endif
#endif
    u32 vlanres;
};

#define RX_STD_RING_ENTRIES 512
#define RX_STD_RING_SIZE (RX_STD_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_JUMBO_RING_ENTRIES 256
#define RX_JUMBO_RING_SIZE (RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_MINI_RING_ENTRIES 1024
#define RX_MINI_RING_SIZE (RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_RETURN_RING_ENTRIES 2048
#define RX_RETURN_RING_SIZE (RX_RETURN_RING_ENTRIES * \
                             sizeof(struct rx_desc))

struct rx_desc {
    aceaddr addr;
#ifdef __LITTLE_ENDIAN
    u16 size;
    u16 idx;
#else
    u16 idx;
    u16 size;
#endif
#ifdef __LITTLE_ENDIAN
    u16 flags;
    u16 type;
#else
    u16 type;
    u16 flags;
#endif
#ifdef __LITTLE_ENDIAN
    u16 tcp_udp_csum;
    u16 ip_csum;
#else
    u16 ip_csum;
    u16 tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
    u16 vlan;
    u16 err_flags;
#else
    u16 err_flags;
    u16 vlan;
#endif
    u32 reserved;
    u32 opaque;
};

/*
 * This struct is shared with the NIC firmware.
 */
struct ring_ctrl {
    aceaddr rngptr;
#ifdef __LITTLE_ENDIAN
    u16 flags;
    u16 max_len;
#else
    u16 max_len;
    u16 flags;
#endif
    u32 pad;
};

struct ace_mac_stats {
    u32 excess_colls;
    u32 coll_1;
    u32 coll_2;
    u32 coll_3;
    u32 coll_4;
    u32 coll_5;
    u32 coll_6;
    u32 coll_7;
    u32 coll_8;
    u32 coll_9;
    u32 coll_10;
    u32 coll_11;
    u32 coll_12;
    u32 coll_13;
    u32 coll_14;
    u32 coll_15;
    u32 late_coll;
    u32 defers;
    u32 crc_err;
    u32 underrun;
    u32 crs_err;
    u32 pad[3];
    u32 drop_ula;
    u32 drop_mc;
    u32 drop_fc;
    u32 drop_space;
    u32 coll;
    u32 kept_bc;
    u32 kept_mc;
    u32 kept_uc;
};

struct ace_info {
    union {
        u32 stats[256];
    } s;
    struct ring_ctrl evt_ctrl;
    struct ring_ctrl cmd_ctrl;
    struct ring_ctrl tx_ctrl;
    struct ring_ctrl rx_std_ctrl;
    struct ring_ctrl rx_jumbo_ctrl;
    struct ring_ctrl rx_mini_ctrl;
    struct ring_ctrl rx_return_ctrl;
    aceaddr evt_prd_ptr;
    aceaddr rx_ret_prd_ptr;
    aceaddr tx_csm_ptr;
    aceaddr stats2_ptr;
};

struct ring_info {
    struct sk_buff *skb;
    DECLARE_PCI_UNMAP_ADDR(mapping)
};

/*
 * Funny... as soon as we add maplen on Alpha, it starts to run much
 * slower. Hmm... is it because the struct does not fit into one
 * cacheline? So, split tx_ring_info.
 */
struct tx_ring_info {
    struct sk_buff *skb;
    DECLARE_PCI_UNMAP_ADDR(mapping)
    DECLARE_PCI_UNMAP_LEN(maplen)
};

/*
 * struct ace_skb holds the rings of skb pointers. This is an awful
 * lot of pointers, but I don't see any smarter way to do it
 * efficiently ;-(
 */
struct ace_skb
{
    struct tx_ring_info tx_skbuff[MAX_TX_RING_ENTRIES];
    struct ring_info rx_std_skbuff[RX_STD_RING_ENTRIES];
    struct ring_info rx_mini_skbuff[RX_MINI_RING_ENTRIES];
    struct ring_info rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};

/*
 * Struct private for the AceNIC.
 *
 * Elements are grouped so that variables used by the tx handling go
 * together, and will go into the same cache lines etc., in order to
 * avoid cache line contention between the rx and tx handling on SMP.
 *
 * Frequently accessed variables are put at the beginning of the
 * struct to help the compiler generate better/shorter code.
 */
struct ace_private
{
    struct ace_info *info;
    struct ace_regs __iomem *regs;  /* register base */
    struct ace_skb *skb;
    dma_addr_t info_dma;            /* 32/64 bit */

    int version, link;
    int promisc, mcast_all;

    /*
     * TX elements
     */
    struct tx_desc *tx_ring;
    u32 tx_prd;
    volatile u32 tx_ret_csm;
    int tx_ring_entries;

    /*
     * RX elements
     */
    unsigned long std_refill_busy
        __attribute__ ((aligned (SMP_CACHE_BYTES)));
    unsigned long mini_refill_busy, jumbo_refill_busy;
    atomic_t cur_rx_bufs;
    atomic_t cur_mini_bufs;
    atomic_t cur_jumbo_bufs;
    u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
    u32 cur_rx;

    struct rx_desc *rx_std_ring;
    struct rx_desc *rx_jumbo_ring;
    struct rx_desc *rx_mini_ring;
    struct rx_desc *rx_return_ring;

#if ACENIC_DO_VLAN
    struct vlan_group *vlgrp;
#endif

    int tasklet_pending, jumbo;
    struct tasklet_struct ace_tasklet;

    struct event *evt_ring;

    volatile u32 *evt_prd, *rx_ret_prd, *tx_csm;

    dma_addr_t tx_ring_dma;         /* 32/64 bit */
    dma_addr_t rx_ring_base_dma;
    dma_addr_t evt_ring_dma;
    dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;

    unsigned char *trace_buf;
    struct pci_dev *pdev;
    struct net_device *next;
    volatile int fw_running;
    int board_idx;
    u16 pci_command;
    u8 pci_latency;
    const char *name;
#ifdef INDEX_DEBUG
    spinlock_t debug_lock
        __attribute__ ((aligned (SMP_CACHE_BYTES)));
    u32 last_tx, last_std_rx, last_mini_rx;
#endif
    struct net_device_stats stats;
    int pci_using_dac;
};

#define TX_RESERVED MAX_SKB_FRAGS

static inline int tx_space(struct ace_private *ap, u32 csm, u32 prd)
{
    return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1);
}

#define tx_free(ap) tx_space((ap), (ap)->tx_ret_csm, (ap)->tx_prd)
#define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED)
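
/*
 * A worked example: with 256 ring entries, csm == 10 and prd == 250,
 * tx_space() returns (10 - 250 - 1) & 255 == 15 free slots; the
 * unsigned wrap-around handles the circular ring without a branch.
 */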

static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr)
{
    u64 baddr = (u64) addr;
    aa->addrlo = baddr & 0xffffffff;
    aa->addrhi = baddr >> 32;
    wmb();
}
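
/*
 * Typical usage (a sketch built from fields defined in this header):
 *
 *    set_aceaddr(&ap->info->tx_ctrl.rngptr, ap->tx_ring_dma);
 *
 * i.e. a host DMA address is handed to the NIC as the hi/lo pair
 * described in the addressing comment near the top of this file.
 */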

static inline void ace_set_txprd(struct ace_regs __iomem *regs,
                                 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
    unsigned long flags;
    spin_lock_irqsave(&ap->debug_lock, flags);
    writel(value, &regs->TxPrd);
    if (value == ap->last_tx)
        printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
               "to tx producer (%i)\n", value);
    ap->last_tx = value;
    spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
    writel(value, &regs->TxPrd);
#endif
    wmb();
}

static inline void ace_mask_irq(struct net_device *dev)
{
    struct ace_private *ap = netdev_priv(dev);
    struct ace_regs __iomem *regs = ap->regs;

    if (ACE_IS_TIGON_I(ap))
        writel(1, &regs->MaskInt);
    else
        writel(readl(&regs->HostCtrl) | MASK_INTS, &regs->HostCtrl);

    ace_sync_irq(dev->irq);
}

static inline void ace_unmask_irq(struct net_device *dev)
{
    struct ace_private *ap = netdev_priv(dev);
    struct ace_regs __iomem *regs = ap->regs;

    if (ACE_IS_TIGON_I(ap))
        writel(0, &regs->MaskInt);
    else
        writel(readl(&regs->HostCtrl) & ~MASK_INTS, &regs->HostCtrl);
}

/*
 * Prototypes
 */
static int ace_init(struct net_device *dev);
static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_tasklet(unsigned long dev);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);
#if ACENIC_DO_VLAN
static void ace_vlan_rx_register(struct net_device *dev, struct vlan_group *grp);
static void ace_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
#endif

#endif /* _ACENIC_H_ */