ia64/linux-2.6.18-xen.hg

view drivers/net/e100.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
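
The retry behaviour described in the changeset message reduces to a simple pattern: on a failed or partial reservation increase, keep whatever pages were granted and re-arm a timer. A minimal sketch, assuming hypothetical names (balloon_timer, bs.target_pages, current_pages, increase_reservation) rather than the real balloon-driver API:

	static void balloon_process(unsigned long unused)
	{
		long credit = bs.target_pages - current_pages;

		if (credit > 0 && increase_reservation(credit) < 0) {
			/* Partial success is kept: increase_reservation() is
			 * assumed to account any pages Xen actually granted
			 * (bumping current_pages) instead of returning them.
			 * On failure, assume temporary host memory pressure
			 * and retry later rather than recording a "hard
			 * limit". */
			mod_timer(&balloon_timer, jiffies + HZ);
		}
	}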
line source
1 /*******************************************************************************
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 /*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at a 33MHz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY goes
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issuing a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
97 * IV. Receive
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
109 * Under typical operation, the receive unit (RU) is started once,
110 * and the controller happily fills RFDs as frames arrive. If
111 * replacement RFDs cannot be allocated, or the RU goes non-active,
112 * the RU must be restarted. Frame arrival generates an interrupt,
113 * and Rx indication and re-allocation happen in the same context,
114 * therefore no locking is required. A software-generated interrupt
115 * is generated from the watchdog to recover from a failed allocation
116 * scenario where all Rx resources have been indicated and none re-
117 * placed.
118 *
119 * V. Miscellaneous
120 *
121 * VLAN offloading of tagging, stripping and filtering is not
122 * supported, but driver will accommodate the extra 4-byte VLAN tag
123 * for processing by upper layers. Tx/Rx Checksum offloading is not
124 * supported. Tx Scatter/Gather is not supported. Jumbo Frames are
125 * not supported (hardware limitation).
126 *
127 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
128 *
129 * Thanks to JC (jchapman@katalix.com) for helping with
130 * testing/troubleshooting the development driver.
131 *
132 * TODO:
133 * o several entry points race with dev->close
134 * o check for tx-no-resources/stop Q races with tx clean/wake Q
135 *
136 * FIXES:
137 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
138 * - Stratus87247: protect MDI control register manipulations
139 */
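
To make the suspend-bit scheme in "III. Transmit" above concrete, here is a condensed sketch of the queuing step. It is a paraphrase of what e100_exec_cb() does further down in this file, not additional driver code: the new tail CB is marked with the S-bit first, the S-bit on the previous CB is cleared only after a write barrier, and the CU is then resumed from its suspend point.

	static void cbl_queue_sketch(struct nic *nic, struct cb *cb)
	{
		cb->command |= cpu_to_le16(cb_s);  /* new tail suspends the CU */
		wmb();                             /* order vs. clearing prev S-bit */
		cb->prev->command &= cpu_to_le16(~cb_s);
		e100_exec_cmd(nic, cuc_resume, 0); /* continue from suspend point */
	}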
141 #include <linux/module.h>
142 #include <linux/moduleparam.h>
143 #include <linux/kernel.h>
144 #include <linux/types.h>
145 #include <linux/slab.h>
146 #include <linux/delay.h>
147 #include <linux/init.h>
148 #include <linux/pci.h>
149 #include <linux/dma-mapping.h>
150 #include <linux/netdevice.h>
151 #include <linux/etherdevice.h>
152 #include <linux/mii.h>
153 #include <linux/if_vlan.h>
154 #include <linux/skbuff.h>
155 #include <linux/ethtool.h>
156 #include <linux/string.h>
157 #include <asm/unaligned.h>
160 #define DRV_NAME "e100"
161 #define DRV_EXT "-NAPI"
162 #define DRV_VERSION "3.5.10-k2"DRV_EXT
163 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
164 #define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
165 #define PFX DRV_NAME ": "
167 #define E100_WATCHDOG_PERIOD (2 * HZ)
168 #define E100_NAPI_WEIGHT 16
170 MODULE_DESCRIPTION(DRV_DESCRIPTION);
171 MODULE_AUTHOR(DRV_COPYRIGHT);
172 MODULE_LICENSE("GPL");
173 MODULE_VERSION(DRV_VERSION);
175 static int debug = 3;
176 static int eeprom_bad_csum_allow = 0;
177 module_param(debug, int, 0);
178 module_param(eeprom_bad_csum_allow, int, 0);
179 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
180 MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
181 #define DPRINTK(nlevel, klevel, fmt, args...) \
182 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
183 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
184 __FUNCTION__ , ## args))
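
A usage note on the macro above: a call such as

	DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n", result);

compiles to a printk guarded by (NETIF_MSG_HW & nic->msg_enable) and, for a device named eth0, prints along the lines of "e100: eth0: e100_self_test: Self-test failed: result=0x...". The "result" argument here is illustrative; the line mirrors the real call in e100_self_test() below, and the function name in the output comes from __FUNCTION__ at the call site.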
186 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
187 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
188 PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
189 static struct pci_device_id e100_id_table[] = {
190 INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
191 INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
192 INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
193 INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
194 INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
195 INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
196 INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
197 INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
198 INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
199 INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
200 INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
201 INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
202 INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
203 INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
204 INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
205 INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
206 INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
207 INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
208 INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
209 INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
210 INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
211 INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
212 INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
213 INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
214 INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
215 INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
216 INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
217 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
218 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
219 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
220 INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
221 INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
222 INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
223 INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
224 INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
225 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
226 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
227 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
228 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
229 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
230 INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
231 { 0, }
232 };
233 MODULE_DEVICE_TABLE(pci, e100_id_table);
235 enum mac {
236 mac_82557_D100_A = 0,
237 mac_82557_D100_B = 1,
238 mac_82557_D100_C = 2,
239 mac_82558_D101_A4 = 4,
240 mac_82558_D101_B0 = 5,
241 mac_82559_D101M = 8,
242 mac_82559_D101S = 9,
243 mac_82550_D102 = 12,
244 mac_82550_D102_C = 13,
245 mac_82551_E = 14,
246 mac_82551_F = 15,
247 mac_82551_10 = 16,
248 mac_unknown = 0xFF,
249 };
251 enum phy {
252 phy_100a = 0x000003E0,
253 phy_100c = 0x035002A8,
254 phy_82555_tx = 0x015002A8,
255 phy_nsc_tx = 0x5C002000,
256 phy_82562_et = 0x033002A8,
257 phy_82562_em = 0x032002A8,
258 phy_82562_ek = 0x031002A8,
259 phy_82562_eh = 0x017002A8,
260 phy_unknown = 0xFFFFFFFF,
261 };
263 /* CSR (Control/Status Registers) */
264 struct csr {
265 struct {
266 u8 status;
267 u8 stat_ack;
268 u8 cmd_lo;
269 u8 cmd_hi;
270 u32 gen_ptr;
271 } scb;
272 u32 port;
273 u16 flash_ctrl;
274 u8 eeprom_ctrl_lo;
275 u8 eeprom_ctrl_hi;
276 u32 mdi_ctrl;
277 u32 rx_dma_count;
278 };
280 enum scb_status {
281 rus_ready = 0x10,
282 rus_mask = 0x3C,
283 };
285 enum ru_state {
286 RU_SUSPENDED = 0,
287 RU_RUNNING = 1,
288 RU_UNINITIALIZED = -1,
289 };
291 enum scb_stat_ack {
292 stat_ack_not_ours = 0x00,
293 stat_ack_sw_gen = 0x04,
294 stat_ack_rnr = 0x10,
295 stat_ack_cu_idle = 0x20,
296 stat_ack_frame_rx = 0x40,
297 stat_ack_cu_cmd_done = 0x80,
298 stat_ack_not_present = 0xFF,
299 stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
300 stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
301 };
303 enum scb_cmd_hi {
304 irq_mask_none = 0x00,
305 irq_mask_all = 0x01,
306 irq_sw_gen = 0x02,
307 };
309 enum scb_cmd_lo {
310 cuc_nop = 0x00,
311 ruc_start = 0x01,
312 ruc_load_base = 0x06,
313 cuc_start = 0x10,
314 cuc_resume = 0x20,
315 cuc_dump_addr = 0x40,
316 cuc_dump_stats = 0x50,
317 cuc_load_base = 0x60,
318 cuc_dump_reset = 0x70,
319 };
321 enum cuc_dump {
322 cuc_dump_complete = 0x0000A005,
323 cuc_dump_reset_complete = 0x0000A007,
324 };
326 enum port {
327 software_reset = 0x0000,
328 selftest = 0x0001,
329 selective_reset = 0x0002,
330 };
332 enum eeprom_ctrl_lo {
333 eesk = 0x01,
334 eecs = 0x02,
335 eedi = 0x04,
336 eedo = 0x08,
337 };
339 enum mdi_ctrl {
340 mdi_write = 0x04000000,
341 mdi_read = 0x08000000,
342 mdi_ready = 0x10000000,
343 };
345 enum eeprom_op {
346 op_write = 0x05,
347 op_read = 0x06,
348 op_ewds = 0x10,
349 op_ewen = 0x13,
350 };
352 enum eeprom_offsets {
353 eeprom_cnfg_mdix = 0x03,
354 eeprom_id = 0x0A,
355 eeprom_config_asf = 0x0D,
356 eeprom_smbus_addr = 0x90,
357 };
359 enum eeprom_cnfg_mdix {
360 eeprom_mdix_enabled = 0x0080,
361 };
363 enum eeprom_id {
364 eeprom_id_wol = 0x0020,
365 };
367 enum eeprom_config_asf {
368 eeprom_asf = 0x8000,
369 eeprom_gcl = 0x4000,
370 };
372 enum cb_status {
373 cb_complete = 0x8000,
374 cb_ok = 0x2000,
375 };
377 enum cb_command {
378 cb_nop = 0x0000,
379 cb_iaaddr = 0x0001,
380 cb_config = 0x0002,
381 cb_multi = 0x0003,
382 cb_tx = 0x0004,
383 cb_ucode = 0x0005,
384 cb_dump = 0x0006,
385 cb_tx_sf = 0x0008,
386 cb_cid = 0x1f00,
387 cb_i = 0x2000,
388 cb_s = 0x4000,
389 cb_el = 0x8000,
390 };
392 struct rfd {
393 u16 status;
394 u16 command;
395 u32 link;
396 u32 rbd;
397 u16 actual_size;
398 u16 size;
399 };
401 struct rx {
402 struct rx *next, *prev;
403 struct sk_buff *skb;
404 dma_addr_t dma_addr;
405 };
407 #if defined(__BIG_ENDIAN_BITFIELD)
408 #define X(a,b) b,a
409 #else
410 #define X(a,b) a,b
411 #endif
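/* Note: bitfield allocation order is ABI-specific (LSB-first on
 * little-endian Linux targets, MSB-first on big-endian ones), so X()
 * flips the declaration order to keep the in-memory layout of struct
 * config below identical to the 8255x's expectations on both. E.g.
 * X(byte_count:6, pad0:2) puts byte_count in bits 0-5 either way. */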
412 struct config {
413 /*0*/ u8 X(byte_count:6, pad0:2);
414 /*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
415 /*2*/ u8 adaptive_ifs;
416 /*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
417 term_write_cache_line:1), pad3:4);
418 /*4*/ u8 X(rx_dma_max_count:7, pad4:1);
419 /*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
420 /*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
421 tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
422 rx_discard_overruns:1), rx_save_bad_frames:1);
423 /*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
424 pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
425 tx_dynamic_tbd:1);
426 /*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
427 /*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
428 link_status_wake:1), arp_wake:1), mcmatch_wake:1);
429 /*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
430 loopback:2);
431 /*11*/ u8 X(linear_priority:3, pad11:5);
432 /*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
433 /*13*/ u8 ip_addr_lo;
434 /*14*/ u8 ip_addr_hi;
435 /*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
436 wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
437 pad15_2:1), crs_or_cdt:1);
438 /*16*/ u8 fc_delay_lo;
439 /*17*/ u8 fc_delay_hi;
440 /*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
441 rx_long_ok:1), fc_priority_threshold:3), pad18:1);
442 /*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
443 fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
444 full_duplex_force:1), full_duplex_pin:1);
445 /*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
446 /*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
447 /*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
448 u8 pad_d102[9];
449 };
451 #define E100_MAX_MULTICAST_ADDRS 64
452 struct multi {
453 u16 count;
454 u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
455 };
457 /* Important: keep total struct u32-aligned */
458 #define UCODE_SIZE 134
459 struct cb {
460 u16 status;
461 u16 command;
462 u32 link;
463 union {
464 u8 iaaddr[ETH_ALEN];
465 u32 ucode[UCODE_SIZE];
466 struct config config;
467 struct multi multi;
468 struct {
469 u32 tbd_array;
470 u16 tcb_byte_count;
471 u8 threshold;
472 u8 tbd_count;
473 struct {
474 u32 buf_addr;
475 u16 size;
476 u16 eol;
477 } tbd;
478 } tcb;
479 u32 dump_buffer_addr;
480 } u;
481 struct cb *next, *prev;
482 dma_addr_t dma_addr;
483 struct sk_buff *skb;
484 };
486 enum loopback {
487 lb_none = 0, lb_mac = 1, lb_phy = 3,
488 };
490 struct stats {
491 u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
492 tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
493 tx_multiple_collisions, tx_total_collisions;
494 u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
495 rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
496 rx_short_frame_errors;
497 u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
498 u16 xmt_tco_frames, rcv_tco_frames;
499 u32 complete;
500 };
502 struct mem {
503 struct {
504 u32 signature;
505 u32 result;
506 } selftest;
507 struct stats stats;
508 u8 dump_buf[596];
509 };
511 struct param_range {
512 u32 min;
513 u32 max;
514 u32 count;
515 };
517 struct params {
518 struct param_range rfds;
519 struct param_range cbs;
520 };
522 struct nic {
523 /* Begin: frequently used values: keep adjacent for cache effect */
524 u32 msg_enable ____cacheline_aligned;
525 struct net_device *netdev;
526 struct pci_dev *pdev;
528 struct rx *rxs ____cacheline_aligned;
529 struct rx *rx_to_use;
530 struct rx *rx_to_clean;
531 struct rfd blank_rfd;
532 enum ru_state ru_running;
534 spinlock_t cb_lock ____cacheline_aligned;
535 spinlock_t cmd_lock;
536 struct csr __iomem *csr;
537 enum scb_cmd_lo cuc_cmd;
538 unsigned int cbs_avail;
539 struct cb *cbs;
540 struct cb *cb_to_use;
541 struct cb *cb_to_send;
542 struct cb *cb_to_clean;
543 u16 tx_command;
544 /* End: frequently used values: keep adjacent for cache effect */
546 enum {
547 ich = (1 << 0),
548 promiscuous = (1 << 1),
549 multicast_all = (1 << 2),
550 wol_magic = (1 << 3),
551 ich_10h_workaround = (1 << 4),
552 } flags ____cacheline_aligned;
554 enum mac mac;
555 enum phy phy;
556 struct params params;
557 struct net_device_stats net_stats;
558 struct timer_list watchdog;
559 struct timer_list blink_timer;
560 struct mii_if_info mii;
561 struct work_struct tx_timeout_task;
562 enum loopback loopback;
564 struct mem *mem;
565 dma_addr_t dma_addr;
567 dma_addr_t cbs_dma_addr;
568 u8 adaptive_ifs;
569 u8 tx_threshold;
570 u32 tx_frames;
571 u32 tx_collisions;
572 u32 tx_deferred;
573 u32 tx_single_collisions;
574 u32 tx_multiple_collisions;
575 u32 tx_fc_pause;
576 u32 tx_tco_frames;
578 u32 rx_fc_pause;
579 u32 rx_fc_unsupported;
580 u32 rx_tco_frames;
581 u32 rx_over_length_errors;
583 u8 rev_id;
584 u16 leds;
585 u16 eeprom_wc;
586 u16 eeprom[256];
587 spinlock_t mdio_lock;
588 };
590 static inline void e100_write_flush(struct nic *nic)
591 {
592 /* Flush previous PCI writes through intermediate bridges
593 * by doing a benign read */
594 (void)readb(&nic->csr->scb.status);
595 }
597 static void e100_enable_irq(struct nic *nic)
598 {
599 unsigned long flags;
601 spin_lock_irqsave(&nic->cmd_lock, flags);
602 writeb(irq_mask_none, &nic->csr->scb.cmd_hi);
603 e100_write_flush(nic);
604 spin_unlock_irqrestore(&nic->cmd_lock, flags);
605 }
607 static void e100_disable_irq(struct nic *nic)
608 {
609 unsigned long flags;
611 spin_lock_irqsave(&nic->cmd_lock, flags);
612 writeb(irq_mask_all, &nic->csr->scb.cmd_hi);
613 e100_write_flush(nic);
614 spin_unlock_irqrestore(&nic->cmd_lock, flags);
615 }
617 static void e100_hw_reset(struct nic *nic)
618 {
619 /* Put CU and RU into idle with a selective reset to get
620 * device off of PCI bus */
621 writel(selective_reset, &nic->csr->port);
622 e100_write_flush(nic); udelay(20);
624 /* Now fully reset device */
625 writel(software_reset, &nic->csr->port);
626 e100_write_flush(nic); udelay(20);
628 /* Mask off our interrupt line - it's unmasked after reset */
629 e100_disable_irq(nic);
630 }
632 static int e100_self_test(struct nic *nic)
633 {
634 u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
636 /* Passing the self-test is a pretty good indication
637 * that the device can DMA to/from host memory */
639 nic->mem->selftest.signature = 0;
640 nic->mem->selftest.result = 0xFFFFFFFF;
642 writel(selftest | dma_addr, &nic->csr->port);
643 e100_write_flush(nic);
644 /* Wait 10 msec for self-test to complete */
645 msleep(10);
647 /* Interrupts are enabled after self-test */
648 e100_disable_irq(nic);
650 /* Check results of self-test */
651 if(nic->mem->selftest.result != 0) {
652 DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
653 nic->mem->selftest.result);
654 return -ETIMEDOUT;
655 }
656 if(nic->mem->selftest.signature == 0) {
657 DPRINTK(HW, ERR, "Self-test failed: timed out\n");
658 return -ETIMEDOUT;
659 }
661 return 0;
662 }
664 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
665 {
666 u32 cmd_addr_data[3];
667 u8 ctrl;
668 int i, j;
670 /* Three cmds: write/erase enable, write data, write/erase disable */
671 cmd_addr_data[0] = op_ewen << (addr_len - 2);
672 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
673 cpu_to_le16(data);
674 cmd_addr_data[2] = op_ewds << (addr_len - 2);
676 /* Bit-bang cmds to write word to eeprom */
677 for(j = 0; j < 3; j++) {
679 /* Chip select */
680 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
681 e100_write_flush(nic); udelay(4);
683 for(i = 31; i >= 0; i--) {
684 ctrl = (cmd_addr_data[j] & (1 << i)) ?
685 eecs | eedi : eecs;
686 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
687 e100_write_flush(nic); udelay(4);
689 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
690 e100_write_flush(nic); udelay(4);
691 }
692 /* Wait 10 msec for cmd to complete */
693 msleep(10);
695 /* Chip deselect */
696 writeb(0, &nic->csr->eeprom_ctrl_lo);
697 e100_write_flush(nic); udelay(4);
698 }
699 };
701 /* General technique stolen from the eepro100 driver - very clever */
702 static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
703 {
704 u32 cmd_addr_data;
705 u16 data = 0;
706 u8 ctrl;
707 int i;
709 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
711 /* Chip select */
712 writeb(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
713 e100_write_flush(nic); udelay(4);
715 /* Bit-bang to read word from eeprom */
716 for(i = 31; i >= 0; i--) {
717 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
718 writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
719 e100_write_flush(nic); udelay(4);
721 writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
722 e100_write_flush(nic); udelay(4);
724 /* Eeprom drives a dummy zero to EEDO after receiving
725 * complete address. Use this to adjust addr_len. */
726 ctrl = readb(&nic->csr->eeprom_ctrl_lo);
727 if(!(ctrl & eedo) && i > 16) {
728 *addr_len -= (i - 16);
729 i = 17;
730 }
732 data = (data << 1) | (ctrl & eedo ? 1 : 0);
733 }
735 /* Chip deselect */
736 writeb(0, &nic->csr->eeprom_ctrl_lo);
737 e100_write_flush(nic); udelay(4);
739 return le16_to_cpu(data);
740 };
742 /* Load entire EEPROM image into driver cache and validate checksum */
743 static int e100_eeprom_load(struct nic *nic)
744 {
745 u16 addr, addr_len = 8, checksum = 0;
747 /* Try reading with an 8-bit addr len to discover actual addr len */
748 e100_eeprom_read(nic, &addr_len, 0);
749 nic->eeprom_wc = 1 << addr_len;
751 for(addr = 0; addr < nic->eeprom_wc; addr++) {
752 nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
753 if(addr < nic->eeprom_wc - 1)
754 checksum += cpu_to_le16(nic->eeprom[addr]);
755 }
757 /* The checksum, stored in the last word, is calculated such that
758 * the sum of words should be 0xBABA */
759 checksum = le16_to_cpu(0xBABA - checksum);
760 if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
761 DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
762 if (!eeprom_bad_csum_allow)
763 return -EAGAIN;
764 }
766 return 0;
767 }
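/* Worked example of the 0xBABA convention (illustrative numbers): if the
 * first 63 words of a 64-word EEPROM sum to 0x1234, the device stores
 * 0xBABA - 0x1234 = 0xA886 in the last word, so the sum over all 64
 * words is 0xBABA. e100_eeprom_load() above recomputes 0xBABA minus the
 * data-word sum and compares it with that stored word; e100_eeprom_save()
 * below rewrites the last word the same way after any update. */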
769 /* Save (portion of) driver EEPROM cache to device and update checksum */
770 static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
771 {
772 u16 addr, addr_len = 8, checksum = 0;
774 /* Try reading with an 8-bit addr len to discover actual addr len */
775 e100_eeprom_read(nic, &addr_len, 0);
776 nic->eeprom_wc = 1 << addr_len;
778 if(start + count >= nic->eeprom_wc)
779 return -EINVAL;
781 for(addr = start; addr < start + count; addr++)
782 e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
784 /* The checksum, stored in the last word, is calculated such that
785 * the sum of words should be 0xBABA */
786 for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
787 checksum += cpu_to_le16(nic->eeprom[addr]);
788 nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
789 e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
790 nic->eeprom[nic->eeprom_wc - 1]);
792 return 0;
793 }
795 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
796 #define E100_WAIT_SCB_FAST 20 /* delay like the old code */
797 static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
798 {
799 unsigned long flags;
800 unsigned int i;
801 int err = 0;
803 spin_lock_irqsave(&nic->cmd_lock, flags);
805 /* Previous command is accepted when SCB clears */
806 for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
807 if(likely(!readb(&nic->csr->scb.cmd_lo)))
808 break;
809 cpu_relax();
810 if(unlikely(i > E100_WAIT_SCB_FAST))
811 udelay(5);
812 }
813 if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
814 err = -EAGAIN;
815 goto err_unlock;
816 }
818 if(unlikely(cmd != cuc_resume))
819 writel(dma_addr, &nic->csr->scb.gen_ptr);
820 writeb(cmd, &nic->csr->scb.cmd_lo);
822 err_unlock:
823 spin_unlock_irqrestore(&nic->cmd_lock, flags);
825 return err;
826 }
828 static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
829 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
830 {
831 struct cb *cb;
832 unsigned long flags;
833 int err = 0;
835 spin_lock_irqsave(&nic->cb_lock, flags);
837 if(unlikely(!nic->cbs_avail)) {
838 err = -ENOMEM;
839 goto err_unlock;
840 }
842 cb = nic->cb_to_use;
843 nic->cb_to_use = cb->next;
844 nic->cbs_avail--;
845 cb->skb = skb;
847 if(unlikely(!nic->cbs_avail))
848 err = -ENOSPC;
850 cb_prepare(nic, cb, skb);
852 /* Order is important otherwise we'll be in a race with h/w:
853 * set S-bit in current first, then clear S-bit in previous. */
854 cb->command |= cpu_to_le16(cb_s);
855 wmb();
856 cb->prev->command &= cpu_to_le16(~cb_s);
858 while(nic->cb_to_send != nic->cb_to_use) {
859 if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
860 nic->cb_to_send->dma_addr))) {
861 /* Ok, here's where things get sticky. It's
862 * possible that we can't schedule the command
863 * because the controller is too busy, so
864 * let's just queue the command and try again
865 * when another command is scheduled. */
866 if(err == -ENOSPC) {
867 /* request a reset */
868 schedule_work(&nic->tx_timeout_task);
869 }
870 break;
871 } else {
872 nic->cuc_cmd = cuc_resume;
873 nic->cb_to_send = nic->cb_to_send->next;
874 }
875 }
877 err_unlock:
878 spin_unlock_irqrestore(&nic->cb_lock, flags);
880 return err;
881 }
883 static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
884 {
885 u32 data_out = 0;
886 unsigned int i;
887 unsigned long flags;
890 /*
891 * Stratus87247: we shouldn't be writing the MDI control
892 * register until the Ready bit shows True. Also, since
893 * manipulation of the MDI control registers is a multi-step
894 * procedure it should be done under lock.
895 */
896 spin_lock_irqsave(&nic->mdio_lock, flags);
897 for (i = 100; i; --i) {
898 if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
899 break;
900 udelay(20);
901 }
902 if (unlikely(!i)) {
903 printk("e100.mdio_ctrl(%s) won't go Ready\n",
904 nic->netdev->name );
905 spin_unlock_irqrestore(&nic->mdio_lock, flags);
906 return 0; /* No way to indicate timeout error */
907 }
908 writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
910 for (i = 0; i < 100; i++) {
911 udelay(20);
912 if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
913 break;
914 }
915 spin_unlock_irqrestore(&nic->mdio_lock, flags);
916 DPRINTK(HW, DEBUG,
917 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
918 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
919 return (u16)data_out;
920 }
922 static int mdio_read(struct net_device *netdev, int addr, int reg)
923 {
924 return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
925 }
927 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
928 {
929 mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
930 }
932 static void e100_get_defaults(struct nic *nic)
933 {
934 struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
935 struct param_range cbs = { .min = 64, .max = 256, .count = 128 };
937 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
938 /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
939 nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->rev_id;
940 if(nic->mac == mac_unknown)
941 nic->mac = mac_82557_D100_A;
943 nic->params.rfds = rfds;
944 nic->params.cbs = cbs;
946 /* Quadwords to DMA into FIFO before starting frame transmit */
947 nic->tx_threshold = 0xE0;
949 /* no interrupt for every tx completion, delay = 256us if not 557 */
950 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
951 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
953 /* Template for a freshly allocated RFD */
954 nic->blank_rfd.command = cpu_to_le16(cb_el);
955 nic->blank_rfd.rbd = 0xFFFFFFFF;
956 nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
958 /* MII setup */
959 nic->mii.phy_id_mask = 0x1F;
960 nic->mii.reg_num_mask = 0x1F;
961 nic->mii.dev = nic->netdev;
962 nic->mii.mdio_read = mdio_read;
963 nic->mii.mdio_write = mdio_write;
964 }
966 static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
967 {
968 struct config *config = &cb->u.config;
969 u8 *c = (u8 *)config;
971 cb->command = cpu_to_le16(cb_config);
973 memset(config, 0, sizeof(struct config));
975 config->byte_count = 0x16; /* bytes in this struct */
976 config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */
977 config->direct_rx_dma = 0x1; /* reserved */
978 config->standard_tcb = 0x1; /* 1=standard, 0=extended */
979 config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */
980 config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */
981 config->tx_underrun_retry = 0x3; /* # of underrun retries */
982 config->mii_mode = 0x1; /* 1=MII mode, 0=503 mode */
983 config->pad10 = 0x6;
984 config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */
985 config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */
986 config->ifs = 0x6; /* x16 = inter frame spacing */
987 config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */
988 config->pad15_1 = 0x1;
989 config->pad15_2 = 0x1;
990 config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */
991 config->fc_delay_hi = 0x40; /* time delay for fc frame */
992 config->tx_padding = 0x1; /* 1=pad short frames */
993 config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */
994 config->pad18 = 0x1;
995 config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */
996 config->pad20_1 = 0x1F;
997 config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */
998 config->pad21_1 = 0x5;
1000 config->adaptive_ifs = nic->adaptive_ifs;
1001 config->loopback = nic->loopback;
1003 if(nic->mii.force_media && nic->mii.full_duplex)
1004 config->full_duplex_force = 0x1; /* 1=force, 0=auto */
1006 if(nic->flags & promiscuous || nic->loopback) {
1007 config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */
1008 config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
1009 config->promiscuous_mode = 0x1; /* 1=on, 0=off */
1012 if(nic->flags & multicast_all)
1013 config->multicast_all = 0x1; /* 1=accept, 0=no */
1015 /* disable WoL when up */
1016 if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
1017 config->magic_packet_disable = 0x1; /* 1=off, 0=on */
1019 if(nic->mac >= mac_82558_D101_A4) {
1020 config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */
1021 config->mwi_enable = 0x1; /* 1=enable, 0=disable */
1022 config->standard_tcb = 0x0; /* 1=standard, 0=extended */
1023 config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */
1024 if(nic->mac >= mac_82559_D101M)
1025 config->tno_intr = 0x1; /* TCO stats enable */
1026 else
1027 config->standard_stat_counter = 0x0;
1030 DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1031 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
1032 DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1033 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
1034 DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1035 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1038 /********************************************************/
1039 /* Micro code for 8086:1229 Rev 8 */
1040 /********************************************************/
1042 /* Parameter values for the D101M B-step */
1043 #define D101M_CPUSAVER_TIMER_DWORD 78
1044 #define D101M_CPUSAVER_BUNDLE_DWORD 65
1045 #define D101M_CPUSAVER_MIN_SIZE_DWORD 126
1047 #define D101M_B_RCVBUNDLE_UCODE \
1048 {\
1049 0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
1050 0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
1051 0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
1052 0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
1053 0x00380438, 0x00000000, 0x00140000, 0x00380555, \
1054 0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
1055 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
1056 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
1057 0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
1058 0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
1059 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1060 0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
1061 0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
1062 0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
1063 0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
1064 0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
1065 0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
1066 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1067 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1068 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
1069 0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
1070 0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
1071 0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
1072 0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
1073 0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
1074 0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
1075 0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
1076 0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
1077 0x00380559, 0x00000000, 0x00000000, 0x00000000, \
1078 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1079 0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
1080 0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
1081 0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
1084 /********************************************************/
1085 /* Micro code for 8086:1229 Rev 9 */
1086 /********************************************************/
1088 /* Parameter values for the D101S */
1089 #define D101S_CPUSAVER_TIMER_DWORD 78
1090 #define D101S_CPUSAVER_BUNDLE_DWORD 67
1091 #define D101S_CPUSAVER_MIN_SIZE_DWORD 128
1093 #define D101S_RCVBUNDLE_UCODE \
1094 {\
1095 0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
1096 0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
1097 0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
1098 0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
1099 0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
1100 0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
1101 0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
1102 0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
1103 0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
1104 0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
1105 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1106 0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
1107 0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
1108 0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
1109 0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
1110 0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
1111 0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
1112 0x00101313, 0x00380700, 0x00000000, 0x00000000, \
1113 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1114 0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
1115 0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
1116 0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
1117 0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
1118 0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
1119 0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
1120 0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
1121 0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
1122 0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
1123 0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
1124 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1125 0x00000000, 0x00000000, 0x00000000, 0x00130831, \
1126 0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
1127 0x00041000, 0x00010004, 0x00380700 \
1130 /********************************************************/
1131 /* Micro code for the 8086:1229 Rev F/10 */
1132 /********************************************************/
1134 /* Parameter values for the D102 E-step */
1135 #define D102_E_CPUSAVER_TIMER_DWORD 42
1136 #define D102_E_CPUSAVER_BUNDLE_DWORD 54
1137 #define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
1139 #define D102_E_RCVBUNDLE_UCODE \
1140 {\
1141 0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
1142 0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
1143 0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
1144 0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
1145 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1146 0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
1147 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1148 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1149 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1150 0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
1151 0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
1152 0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
1153 0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
1154 0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
1155 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1156 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1157 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1158 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
1159 0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
1160 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1161 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1162 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1163 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1164 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1165 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1166 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1167 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1168 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1169 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1170 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1171 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1172 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1173 0x00000000, 0x00000000, 0x00000000, 0x00000000, \
1176 static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1178 /* *INDENT-OFF* */
1179 static struct {
1180 u32 ucode[UCODE_SIZE + 1];
1181 u8 mac;
1182 u8 timer_dword;
1183 u8 bundle_dword;
1184 u8 min_size_dword;
1185 } ucode_opts[] = {
1186 { D101M_B_RCVBUNDLE_UCODE,
1187 mac_82559_D101M,
1188 D101M_CPUSAVER_TIMER_DWORD,
1189 D101M_CPUSAVER_BUNDLE_DWORD,
1190 D101M_CPUSAVER_MIN_SIZE_DWORD },
1191 { D101S_RCVBUNDLE_UCODE,
1192 mac_82559_D101S,
1193 D101S_CPUSAVER_TIMER_DWORD,
1194 D101S_CPUSAVER_BUNDLE_DWORD,
1195 D101S_CPUSAVER_MIN_SIZE_DWORD },
1196 { D102_E_RCVBUNDLE_UCODE,
1197 mac_82551_F,
1198 D102_E_CPUSAVER_TIMER_DWORD,
1199 D102_E_CPUSAVER_BUNDLE_DWORD,
1200 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1201 { D102_E_RCVBUNDLE_UCODE,
1202 mac_82551_10,
1203 D102_E_CPUSAVER_TIMER_DWORD,
1204 D102_E_CPUSAVER_BUNDLE_DWORD,
1205 D102_E_CPUSAVER_MIN_SIZE_DWORD },
1206 { {0}, 0, 0, 0, 0}
1207 }, *opts;
1208 /* *INDENT-ON* */
1210 /*************************************************************************
1211 * CPUSaver parameters
1213 * All CPUSaver parameters are 16-bit literals that are part of a
1214 * "move immediate value" instruction. By changing the value of
1215 * the literal in the instruction before the code is loaded, the
1216 * driver can change the algorithm.
1218 * INTDELAY - This loads the dead-man timer with its initial value.
1219 * When this timer expires the interrupt is asserted, and the
1220 * timer is reset each time a new packet is received. (see
1221 * BUNDLEMAX below to set the limit on number of chained packets)
1222 * The current default is 0x600 or 1536. Experiments show that
1223 * the value should probably stay within the 0x200 - 0x1000 range.
1225 * BUNDLEMAX -
1226 * This sets the maximum number of frames that will be bundled. In
1227 * some situations, such as the TCP windowing algorithm, it may be
1228 * better to limit the growth of the bundle size than let it go as
1229 * high as it can, because that could cause too much added latency.
1230 * The default is six, because this is the number of packets in the
1231 * default TCP window size. A value of 1 would make CPUSaver indicate
1232 * an interrupt for every frame received. If you do not want to put
1233 * a limit on the bundle size, set this value to 0xFFFF.
1235 * BUNDLESMALL -
1236 * This contains a bit-mask describing the minimum size frame that
1237 * will be bundled. The default masks the lower 7 bits, which means
1238 * that any frame less than 128 bytes in length will not be bundled,
1239 * but will instead immediately generate an interrupt. This does
1240 * not affect the current bundle in any way. Any frame that is 128
1241 * bytes or larger will be bundled normally. This feature is meant
1242 * to provide immediate indication of ACK frames in a TCP environment.
1243 * Customers were seeing poor performance when a machine with CPUSaver
1244 * enabled was sending but not receiving. The delay introduced when
1245 * the ACKs were received was enough to reduce total throughput, because
1246 * the sender would sit idle until the ACK was finally seen.
1248 * The current default is 0xFF80, which masks out the lower 7 bits.
1249 * This means that any frame which is 0x7F (127) bytes or smaller
1250 * will cause an immediate interrupt. Because this value must be a
1251 * bit mask, there are only a few valid values that can be used. To
1252 * turn this feature off, the driver can write the value 0xFFFF to the
1253 * lower word of this instruction (in the same way that the other
1254 * parameters are used). Likewise, a value of 0xF800 (2047) would
1255 * cause an interrupt to be generated for every frame, because all
1256 * standard Ethernet frames are <= 2047 bytes in length.
1257 *************************************************************************/
1259 /* if you wish to disable the ucode functionality, while maintaining the
1260 * workarounds it provides, set the following defines to:
1261 * BUNDLESMALL 0
1262 * BUNDLEMAX 1
1263 * INTDELAY 1
1264 */
1265 #define BUNDLESMALL 1
1266 #define BUNDLEMAX (u16)6
1267 #define INTDELAY (u16)1536 /* 0x600 */
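/* Worked example for the mask described above: 0xFF80 is
 * 1111 1111 1000 0000b, masking out the low 7 bits, so frames of
 * 0x7F (127) bytes or less bypass bundling and interrupt immediately.
 * The patching below rewrites only the 16-bit literal of each "move
 * immediate" instruction in the ucode image, e.g. for the timer:
 *     ucode[opts->timer_dword] =
 *         (ucode[opts->timer_dword] & 0xFFFF0000) | INTDELAY;
 */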
1269 /* do not load u-code for ICH devices */
1270 if (nic->flags & ich)
1271 goto noloaducode;
1273 /* Search for ucode match against h/w rev_id */
1274 for (opts = ucode_opts; opts->mac; opts++) {
1275 int i;
1276 u32 *ucode = opts->ucode;
1277 if (nic->mac != opts->mac)
1278 continue;
1280 /* Insert user-tunable settings */
1281 ucode[opts->timer_dword] &= 0xFFFF0000;
1282 ucode[opts->timer_dword] |= INTDELAY;
1283 ucode[opts->bundle_dword] &= 0xFFFF0000;
1284 ucode[opts->bundle_dword] |= BUNDLEMAX;
1285 ucode[opts->min_size_dword] &= 0xFFFF0000;
1286 ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;
1288 for (i = 0; i < UCODE_SIZE; i++)
1289 cb->u.ucode[i] = cpu_to_le32(ucode[i]);
1290 cb->command = cpu_to_le16(cb_ucode | cb_el);
1291 return;
1294 noloaducode:
1295 cb->command = cpu_to_le16(cb_nop | cb_el);
1298 static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1299 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1301 int err = 0, counter = 50;
1302 struct cb *cb = nic->cb_to_clean;
1304 if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
1305 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
1307 /* must restart cuc */
1308 nic->cuc_cmd = cuc_start;
1310 /* wait for completion */
1311 e100_write_flush(nic);
1312 udelay(10);
1314 /* wait for possibly (ouch) 500ms */
1315 while (!(cb->status & cpu_to_le16(cb_complete))) {
1316 msleep(10);
1317 if (!--counter) break;
1320 /* ack any interrupts, something could have been set */
1321 writeb(~0, &nic->csr->scb.stat_ack);
1323 /* if the command failed, or is not OK, notify and return */
1324 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1325 DPRINTK(PROBE,ERR, "ucode load failed\n");
1326 err = -EPERM;
1329 return err;
1332 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1333 struct sk_buff *skb)
1335 cb->command = cpu_to_le16(cb_iaaddr);
1336 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1339 static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1341 cb->command = cpu_to_le16(cb_dump);
1342 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1343 offsetof(struct mem, dump_buf));
1346 #define NCONFIG_AUTO_SWITCH 0x0080
1347 #define MII_NSC_CONG MII_RESV1
1348 #define NSC_CONG_ENABLE 0x0100
1349 #define NSC_CONG_TXREADY 0x0400
1350 #define ADVERTISE_FC_SUPPORTED 0x0400
1351 static int e100_phy_init(struct nic *nic)
1353 struct net_device *netdev = nic->netdev;
1354 u32 addr;
1355 u16 bmcr, stat, id_lo, id_hi, cong;
1357 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
1358 for(addr = 0; addr < 32; addr++) {
1359 nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1360 bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1361 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1362 stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1363 if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1364 break;
1366 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
1367 if(addr == 32)
1368 return -EAGAIN;
1370 /* Select the phy and isolate the rest */
1371 for(addr = 0; addr < 32; addr++) {
1372 if(addr != nic->mii.phy_id) {
1373 mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1374 } else {
1375 bmcr = mdio_read(netdev, addr, MII_BMCR);
1376 mdio_write(netdev, addr, MII_BMCR,
1377 bmcr & ~BMCR_ISOLATE);
1381 /* Get phy ID */
1382 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1383 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1384 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1385 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);
1387 /* Handle National tx phys */
1388 #define NCS_PHY_MODEL_MASK 0xFFF0FFFF
1389 if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1390 /* Disable congestion control */
1391 cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1392 cong |= NSC_CONG_TXREADY;
1393 cong &= ~NSC_CONG_ENABLE;
1394 mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1397 if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1398 (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
1399 /* enable/disable MDI/MDI-X auto-switching.
1400 MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
1401 if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
1402 (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
1403 !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
1404 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
1405 else
1406 mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
1409 return 0;
1412 static int e100_hw_init(struct nic *nic)
1414 int err;
1416 e100_hw_reset(nic);
1418 DPRINTK(HW, ERR, "e100_hw_init\n");
1419 if(!in_interrupt() && (err = e100_self_test(nic)))
1420 return err;
1422 if((err = e100_phy_init(nic)))
1423 return err;
1424 if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1425 return err;
1426 if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1427 return err;
1428 if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
1429 return err;
1430 if((err = e100_exec_cb(nic, NULL, e100_configure)))
1431 return err;
1432 if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1433 return err;
1434 if((err = e100_exec_cmd(nic, cuc_dump_addr,
1435 nic->dma_addr + offsetof(struct mem, stats))))
1436 return err;
1437 if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1438 return err;
1440 e100_disable_irq(nic);
1442 return 0;
1445 static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1447 struct net_device *netdev = nic->netdev;
1448 struct dev_mc_list *list = netdev->mc_list;
1449 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1451 cb->command = cpu_to_le16(cb_multi);
1452 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1453 for(i = 0; list && i < count; i++, list = list->next)
1454 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1455 ETH_ALEN);
1458 static void e100_set_multicast_list(struct net_device *netdev)
1460 struct nic *nic = netdev_priv(netdev);
1462 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1463 netdev->mc_count, netdev->flags);
1465 if(netdev->flags & IFF_PROMISC)
1466 nic->flags |= promiscuous;
1467 else
1468 nic->flags &= ~promiscuous;
1470 if(netdev->flags & IFF_ALLMULTI ||
1471 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1472 nic->flags |= multicast_all;
1473 else
1474 nic->flags &= ~multicast_all;
1476 e100_exec_cb(nic, NULL, e100_configure);
1477 e100_exec_cb(nic, NULL, e100_multi);
1480 static void e100_update_stats(struct nic *nic)
1482 struct net_device_stats *ns = &nic->net_stats;
1483 struct stats *s = &nic->mem->stats;
1484 u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1485 (nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
1486 &s->complete;
1488 /* Device's stats reporting may take several microseconds to
1489 * complete, so we're always waiting for results of the
1490 * previous command. */
1492 if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
1493 *complete = 0;
1494 nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1495 nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1496 ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1497 ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1498 ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1499 ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1500 ns->collisions += nic->tx_collisions;
1501 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1502 le32_to_cpu(s->tx_lost_crs);
1503 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
1504 nic->rx_over_length_errors;
1505 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1506 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1507 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1508 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1509 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1510 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1511 le32_to_cpu(s->rx_alignment_errors) +
1512 le32_to_cpu(s->rx_short_frame_errors) +
1513 le32_to_cpu(s->rx_cdt_errors);
1514 nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1515 nic->tx_single_collisions +=
1516 le32_to_cpu(s->tx_single_collisions);
1517 nic->tx_multiple_collisions +=
1518 le32_to_cpu(s->tx_multiple_collisions);
1519 if(nic->mac >= mac_82558_D101_A4) {
1520 nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1521 nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1522 nic->rx_fc_unsupported +=
1523 le32_to_cpu(s->fc_rcv_unsupported);
1524 if(nic->mac >= mac_82559_D101M) {
1525 nic->tx_tco_frames +=
1526 le16_to_cpu(s->xmt_tco_frames);
1527 nic->rx_tco_frames +=
1528 le16_to_cpu(s->rcv_tco_frames);
1534 if(e100_exec_cmd(nic, cuc_dump_reset, 0))
1535 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
1538 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1540 /* Adjust inter-frame-spacing (IFS) between two transmits if
1541 * we're getting collisions on a half-duplex connection. */
1543 if(duplex == DUPLEX_HALF) {
1544 u32 prev = nic->adaptive_ifs;
1545 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1547 if((nic->tx_frames / 32 < nic->tx_collisions) &&
1548 (nic->tx_frames > min_frames)) {
1549 if(nic->adaptive_ifs < 60)
1550 nic->adaptive_ifs += 5;
1551 } else if (nic->tx_frames < min_frames) {
1552 if(nic->adaptive_ifs >= 5)
1553 nic->adaptive_ifs -= 5;
1555 if(nic->adaptive_ifs != prev)
1556 e100_exec_cb(nic, NULL, e100_configure);
1560 static void e100_watchdog(unsigned long data)
1562 struct nic *nic = (struct nic *)data;
1563 struct ethtool_cmd cmd;
1565 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);
1567 /* mii library handles link maintenance tasks */
1569 mii_ethtool_gset(&nic->mii, &cmd);
1571 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1572 DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
1573 cmd.speed == SPEED_100 ? "100" : "10",
1574 cmd.duplex == DUPLEX_FULL ? "full" : "half");
1575 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1576 DPRINTK(LINK, INFO, "link down\n");
1579 mii_check_link(&nic->mii);
1581 /* Software generated interrupt to recover from (rare) Rx
1582 * allocation failure.
1583 * Unfortunately have to use a spinlock to not re-enable interrupts
1584 * accidentally, due to hardware that shares a register between the
1585 * interrupt mask bit and the SW Interrupt generation bit */
1586 spin_lock_irq(&nic->cmd_lock);
1587 writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1588 e100_write_flush(nic);
1589 spin_unlock_irq(&nic->cmd_lock);
1591 e100_update_stats(nic);
1592 e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
1594 if(nic->mac <= mac_82557_D100_C)
1595 /* Issue a multicast command to workaround a 557 lock up */
1596 e100_set_multicast_list(nic->netdev);
1598 if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
1599 /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1600 nic->flags |= ich_10h_workaround;
1601 else
1602 nic->flags &= ~ich_10h_workaround;
1604 mod_timer(&nic->watchdog, jiffies + E100_WATCHDOG_PERIOD);
1607 static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1608 struct sk_buff *skb)
1610 cb->command = nic->tx_command;
1611 /* interrupt every 16 packets regardless of delay */
1612 if((nic->cbs_avail & ~15) == nic->cbs_avail)
1613 cb->command |= cpu_to_le16(cb_i);
1614 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1615 cb->u.tcb.tcb_byte_count = 0;
1616 cb->u.tcb.threshold = nic->tx_threshold;
1617 cb->u.tcb.tbd_count = 1;
1618 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1619 skb->data, skb->len, PCI_DMA_TODEVICE));
1620 /* check for mapping failure? */
1621 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if(nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if(e100_exec_cmd(nic, cuc_nop, 0))
			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch(err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return 1;
	}

	netdev->trans_start = jiffies;
	return 0;
}

static int e100_tx_clean(struct nic *nic)
{
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	DPRINTK(TX_DONE, DEBUG, "cb->status = 0x%04X\n",
		nic->cb_to_clean->status);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		if(likely(cb->skb != NULL)) {
			nic->net_stats.tx_packets++;
			nic->net_stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

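/* cb_lock only serializes software users of the CB ring; completion is
 * signalled asynchronously by the controller writing cb_complete into
 * cb->status, which is why the loop condition re-reads status from the
 * coherent DMA allocation on every pass. */
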
static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_alloc_consistent(nic->pdev,
		sizeof(struct cb) * count, &nic->cbs_dma_addr);
	if(!nic->cbs)
		return -ENOMEM;

	for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
		cb->skb = NULL;
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}

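/* The ring is circular in both views: software walks the next/prev
 * pointers, while the controller follows each CB's little-endian link
 * field, which holds the bus address of the next CB and wraps back to
 * the first at the end. */
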
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if(!nic->rxs) return;
	if(RU_SUSPENDED != nic->ru_running) return;

	/* handle init time starts */
	if(!rx) rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if(rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	rx->skb->dev = nic->netdev;
	skb_reserve(rx->skb, NET_IP_ALIGN);
	memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one, and clearing EL bit of previous. */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		wmb();
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}

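/* Growing the RFA is a two-step handshake: the new RFD is linked behind
 * its predecessor first, and only after the wmb() orders that write is
 * the predecessor's EL (end-of-list) bit cleared, so the controller can
 * never chase a link to an RFD that is not yet initialized. */
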
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* this allows for a fast restart without re-enabling interrupts */
	if(le16_to_cpu(rfd->command) & cb_el)
		nic->ru_running = RU_SUSPENDED;

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		nic->net_stats.rx_packets++;
		nic->net_stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}

static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0;
	struct rx *rx_to_start = NULL;

	/* If we are already RNR, pay attention: this ensures the state
	 * machine progression never allows a start with a partially
	 * cleaned list, avoiding a race between hardware and
	 * rx_to_clean when in NAPI mode. */
	if(RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	/* Indicate newly arrived packets */
	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		if(-EAGAIN == err) {
			/* hit quota so have more work to do, restart once
			 * cleanup is complete */
			restart_required = 0;
			break;
		} else if(-ENODATA == err)
			break; /* No more to clean */
	}

	/* save our starting point as the place we'll restart the receiver */
	if(restart_required)
		rx_to_start = nic->rx_to_clean;

	/* Alloc new skbs to refill list */
	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if(unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	if(restart_required) {
		/* Ack the RNR condition before restarting the receiver */
		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, rx_to_start);
		if(work_done)
			(*work_done)++;
	}
}

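/* A restart is only issued when the RU was already suspended before
 * cleaning began and the list was fully drained; hitting the NAPI quota
 * clears restart_required, so a partially cleaned list is never handed
 * back to the hardware. */
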
static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if(nic->rxs) {
		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if(rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
		return -ENOMEM;
	memset(nic->rxs, 0, sizeof(struct rx) * count);

	for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if(e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}

static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = readb(&nic->csr->scb.stat_ack);

	DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);

	if(stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	writeb(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if(stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if(likely(netif_rx_schedule_prep(netdev))) {
		e100_disable_irq(nic);
		__netif_rx_schedule(netdev);
	}

	return IRQ_HANDLED;
}

static int e100_poll(struct net_device *netdev, int *budget)
{
	struct nic *nic = netdev_priv(netdev);
	unsigned int work_to_do = min(netdev->quota, *budget);
	unsigned int work_done = 0;
	int tx_cleaned;

	e100_rx_clean(nic, &work_done, work_to_do);
	tx_cleaned = e100_tx_clean(nic);

	/* If no Rx and Tx cleanup work was done, exit polling mode. */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev);
		e100_enable_irq(nic);
		return 0;
	}

	*budget -= work_done;
	netdev->quota -= work_done;

	return 1;
}

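/* Standard 2.6.18-era NAPI contract: consume at most min(quota, *budget)
 * packets, return 0 and re-enable interrupts once a poll makes no
 * progress, or return 1 with interrupts still masked to stay on the poll
 * list, debiting budget and quota by the work done. */
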
#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev, NULL);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static struct net_device_stats *e100_get_stats(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return &nic->net_stats;
}

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}

#ifdef CONFIG_PM
static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}
#endif

static int e100_up(struct nic *nic)
{
	int err;

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	netif_poll_enable(nic->netdev);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

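/* The error labels unwind in exactly the reverse order of setup, so a
 * failure at any step releases only the resources acquired before it. */
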
static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	netif_poll_disable(nic->netdev);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
		readb(&nic->csr->scb.status));
	e100_down(netdev_priv(netdev));
	e100_up(netdev_priv(netdev));
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test. A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if((err = e100_rx_alloc_list(nic)))
		return err;
	if((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if(nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if(loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

#define MII_LED_CONTROL	0x1B
static void e100_blink_led(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};

	nic->leds = (nic->leds & led_on) ? led_off :
		(nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
	mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
	mod_timer(&nic->blink_timer, jiffies + HZ / 4);
}

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
	strcpy(info->bus_info, pci_name(nic->pdev));
}

static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
#define E100_PHY_REGS	0x1C
/* one word of SCB registers, E100_PHY_REGS + 1 MDIO registers, then the
 * hardware dump buffer; matches exactly what e100_get_regs() writes */
#define E100_REGS_LEN	(2 + E100_PHY_REGS + \
	sizeof(nic->mem->dump_buf) / sizeof(u32))
	return E100_REGS_LEN * sizeof(u32);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->rev_id;
	buff[0] = readb(&nic->csr->scb.cmd_hi) << 24 |
		readb(&nic->csr->scb.cmd_lo) << 16 |
		readw(&nic->csr->scb.status);
	for(i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}

static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
		return -EOPNOTSUPP;

	if(wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}

#define E100_EEPROM_MAGIC	0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if(eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}

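/* The EEPROM is word (16-bit) addressed while ethtool works in bytes,
 * hence the >> 1 conversions; saving (len >> 1) + 1 words over-covers a
 * byte range that starts or ends in the middle of a word. */
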
static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if(netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
		rfds->count, cbs->count);
	if(netif_running(netdev))
		e100_up(nic);

	return 0;
}

static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	(sizeof(e100_gstrings_test) / ETH_GSTRING_LEN)

static int e100_diag_test_count(struct net_device *netdev)
{
	return E100_TEST_LEN;
}

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if(test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if(netif_running(netdev))
			e100_up(nic);
	}
	for(i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}

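/* data[0..4] line up with e100_gstrings_test above: link, EEPROM
 * checksum, self test, then MAC and PHY loopback. Any non-zero entry
 * means that test failed, which is what the ETH_TEST_FL_FAILED loop
 * reports back to ethtool. */
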
static int e100_phys_id(struct net_device *netdev, u32 data)
{
	struct nic *nic = netdev_priv(netdev);

	if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
	mod_timer(&nic->blink_timer, jiffies);
	msleep_interruptible(data * 1000);
	del_timer_sync(&nic->blink_timer);
	mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);

	return 0;
}

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	(sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN)

static int e100_get_stats_count(struct net_device *netdev)
{
	return E100_STATS_LEN;
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for(i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&nic->net_stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
}

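/* The first E100_NET_STATS_LEN entries are lifted directly out of
 * nic->net_stats, which relies on struct net_device_stats being laid out
 * as consecutive unsigned longs in the same order as the string table. */
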
static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch(stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test_count	= e100_diag_test_count,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.phys_id		= e100_phys_id,
	.get_stats_count	= e100_get_stats_count,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if(nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if((err = e100_up(nic)))
		DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
		if(((1 << debug) - 1) & NETIF_MSG_PROBE)
			printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
		return -ENOMEM;
	}

	netdev->open = e100_open;
	netdev->stop = e100_close;
	netdev->hard_start_xmit = e100_xmit_frame;
	netdev->get_stats = e100_get_stats;
	netdev->set_multicast_list = e100_set_multicast_list;
	netdev->set_mac_address = e100_set_mac_address;
	netdev->change_mtu = e100_change_mtu;
	netdev->do_ioctl = e100_do_ioctl;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->tx_timeout = e100_tx_timeout;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	netdev->poll = e100_poll;
	netdev->weight = E100_NAPI_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = e100_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	pci_set_drvdata(pdev, netdev);

	if((err = pci_enable_device(pdev))) {
		DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
		goto err_out_free_dev;
	}

	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
			"base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if((err = pci_request_regions(pdev, DRV_NAME))) {
		DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable_pdev;
	}

	if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic->csr = ioremap(pci_resource_start(pdev, 0), sizeof(struct csr));
	if(!nic->csr) {
		DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if(ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;
	init_timer(&nic->blink_timer);
	nic->blink_timer.function = e100_blink_led;
	nic->blink_timer.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task,
		(void (*)(void *))e100_tx_timeout_task, netdev);

	if((err = e100_alloc(nic))) {
		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
		goto err_out_iounmap;
	}

	if((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if(!is_valid_ether_addr(netdev->perm_addr)) {
		DPRINTK(PROBE, ERR, "Invalid MAC address from "
			"EEPROM, aborting.\n");
		err = -EAGAIN;
		goto err_out_free;
	}

	/* WoL magic packet can be enabled from EEPROM */
	if((nic->mac >= mac_82558_D101_A4) &&
	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
		nic->flags |= wol_magic;

	/* ack any pending wake events, disable PME */
	err = pci_enable_wake(pdev, 0, 0);
	if (err)
		DPRINTK(PROBE, ERR, "Error clearing wake event\n");

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev))) {
		DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
		goto err_out_free;
	}

	DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, "
		"MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
		(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);

	return 0;

err_out_free:
	e100_free(nic);
err_out_iounmap:
	iounmap(nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if(netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		iounmap(nic->csr);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);
	int retval;

	if(netif_running(netdev))
		e100_down(nic);
	e100_hw_reset(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);
	retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
		nic->flags & (wol_magic | e100_asf(nic)));
	if (retval)
		DPRINTK(PROBE, ERR, "Error enabling wake\n");
	pci_disable_device(pdev);
	retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
	if (retval)
		DPRINTK(PROBE, ERR, "Error %d setting power state\n", retval);

	return 0;
}

static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);
	int retval;

	retval = pci_set_power_state(pdev, PCI_D0);
	if (retval)
		DPRINTK(PROBE, ERR, "Error waking adapter\n");
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	retval = pci_enable_wake(pdev, 0, 0);
	if (retval)
		DPRINTK(PROBE, ERR, "Error clearing wake events\n");

	netif_device_attach(netdev);
	if(netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif

static void e100_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);
	int retval;

#ifdef CONFIG_PM
	retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
#else
	retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
#endif
	if (retval)
		DPRINTK(PROBE, ERR, "Error enabling wake\n");
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	netdev->stop(netdev);

	/* Detach; put netif into a state similar to hotplug unplug. */
	netif_poll_enable(netdev);
	netif_device_detach(netdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

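/* Recovery sequence, as driven by the PCI core: error_detected() quiesces
 * the interface and requests a slot reset, slot_reset() re-enables the
 * device and reinitializes it (once, by function 0), and resume()
 * reattaches the interface and restarts the watchdog. */
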
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name =		DRV_NAME,
	.id_table =	e100_id_table,
	.probe =	e100_probe,
	.remove =	__devexit_p(e100_remove),
#ifdef CONFIG_PM
	.suspend =	e100_suspend,
	.resume =	e100_resume,
#endif
	.shutdown =	e100_shutdown,
	.err_handler =	&e100_err_handler,
};

static int __init e100_init_module(void)
{
	if(((1 << debug) - 1) & NETIF_MSG_DRV) {
		printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
	}
	return pci_module_init(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);