ia64/xen-unstable

view xen/drivers/net/e1000/e1000_main.c @ 1119:33e3adc72d8b

bitkeeper revision 1.745 (403b535dxe-hvv9dM0KddL1LhR9pdQ)

e1000_main.c, cciss.c:
Quieten unused device drivers in Xen.
author kaf24@scramble.cl.cam.ac.uk
date Tue Feb 24 13:36:29 2004 +0000 (2004-02-24)
parents dce3446ac01e
children 890460f07ddf
line source
1 /*******************************************************************************
4 Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
29 #include "e1000.h"
31 /* Change Log
32 *
33 * 5.2.16 8/8/03
34 * o Added support for new controllers: 82545GM, 82546GB, 82541/7_B1
35 * o Bug fix: reset h/w before first EEPROM read because we don't know
36 * who may have been messing with the device before we got there.
37 * [Dave Johnson (ddj -a-t- cascv.brown.edu)]
 * o Bug fix: read the correct word from EEPROM to detect programmed
 *   WoL settings.
 * o Bug fix: TSO would hang if space left in FIFO was being miscalculated
 *   when mss dropped without a corresponding drop in the DMA buffer size.
42 * o ASF for Fiber nics isn't supported.
43 * o Bug fix: Workaround added for potential hang with 82544 running in
44 * PCI-X if send buffer terminates within an evenly-aligned dword.
45 * o Feature: Add support for ethtool flow control setting.
46 * o Feature: Add support for ethtool TSO setting.
47 * o Feature: Increase default Tx Descriptor count to 1024 for >= 82544.
48 *
49 * 5.1.13 5/28/03
50 * o Bug fix: request_irq() failure resulted in freeing resources twice!
51 * [Don Fry (brazilnut@us.ibm.com)]
52 * o Bug fix: fix VLAN support on ppc64 [Mark Rakes (mrakes@vivato.net)]
53 * o Bug fix: missing Tx cleanup opportunities during interrupt handling.
54 * o Bug fix: alloc_etherdev failure didn't cleanup regions in probe.
55 * o Cleanup: s/int/unsigned int/ for descriptor ring indexes.
56 *
57 * 5.1.11 5/6/03
58 */
60 char e1000_driver_name[] = "e1000";
61 char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
62 char e1000_driver_version[] = "5.2.16";
63 char e1000_copyright[] = "Copyright (c) 1999-2003 Intel Corporation.";
65 /* e1000_pci_tbl - PCI Device ID Table
66 *
67 * Wildcard entries (PCI_ANY_ID) should come last
68 * Last entry must be all 0s
69 *
70 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
71 * Class, Class Mask, private data (not used) }
72 */
73 static struct pci_device_id e1000_pci_tbl[] __devinitdata = {
74 {0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
75 {0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
76 {0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
77 {0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
78 {0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
79 {0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
80 {0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
81 {0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
82 {0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
83 {0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
84 {0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
85 {0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
86 {0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
87 {0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
88 {0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
89 {0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
90 {0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
91 {0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
92 {0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
93 {0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
94 {0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
95 {0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
96 {0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
97 {0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
98 {0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
99 {0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
100 {0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
101 {0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
102 {0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
103 {0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
104 {0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
105 /* required last entry */
106 {0,}
107 };
109 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
111 /* Local Function Prototypes */
113 int e1000_up(struct e1000_adapter *adapter);
114 void e1000_down(struct e1000_adapter *adapter);
115 void e1000_reset(struct e1000_adapter *adapter);
116 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
118 static int e1000_init_module(void);
119 static void e1000_exit_module(void);
120 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
121 static void __devexit e1000_remove(struct pci_dev *pdev);
122 static int e1000_sw_init(struct e1000_adapter *adapter);
123 static int e1000_open(struct net_device *netdev);
124 static int e1000_close(struct net_device *netdev);
125 static int e1000_setup_tx_resources(struct e1000_adapter *adapter);
126 static int e1000_setup_rx_resources(struct e1000_adapter *adapter);
127 static void e1000_configure_tx(struct e1000_adapter *adapter);
128 static void e1000_configure_rx(struct e1000_adapter *adapter);
129 static void e1000_setup_rctl(struct e1000_adapter *adapter);
130 static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
131 static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
132 static void e1000_free_tx_resources(struct e1000_adapter *adapter);
133 static void e1000_free_rx_resources(struct e1000_adapter *adapter);
134 static void e1000_set_multi(struct net_device *netdev);
135 static void e1000_update_phy_info(unsigned long data);
136 static void e1000_watchdog(unsigned long data);
137 static void e1000_82547_tx_fifo_stall(unsigned long data);
138 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
139 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
140 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
141 static int e1000_set_mac(struct net_device *netdev, void *p);
142 static void e1000_update_stats(struct e1000_adapter *adapter);
143 static inline void e1000_irq_disable(struct e1000_adapter *adapter);
144 static inline void e1000_irq_enable(struct e1000_adapter *adapter);
145 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
146 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
147 #ifdef CONFIG_E1000_NAPI
148 static int e1000_clean(struct net_device *netdev, int *budget);
149 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
150 int *work_done, int work_to_do);
151 #else
152 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
153 #endif
154 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
155 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
156 #ifdef SIOCGMIIPHY
157 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
158 int cmd);
159 #endif
160 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
161 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
162 static inline void e1000_rx_checksum(struct e1000_adapter *adapter,
163 struct e1000_rx_desc *rx_desc,
164 struct sk_buff *skb);
165 static void e1000_tx_timeout(struct net_device *dev);
166 static void e1000_tx_timeout_task(struct net_device *dev);
167 static void e1000_smartspeed(struct e1000_adapter *adapter);
168 static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
169 struct sk_buff *skb);
171 #ifdef NETIF_F_HW_VLAN_TX
172 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
173 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
174 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
175 static void e1000_restore_vlan(struct e1000_adapter *adapter);
176 #endif
178 static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
179 static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
180 #ifdef CONFIG_PM
181 static int e1000_resume(struct pci_dev *pdev);
182 #endif
184 struct notifier_block e1000_notifier_reboot = {
185 .notifier_call = e1000_notify_reboot,
186 .next = NULL,
187 .priority = 0
188 };
190 /* Exported from other modules */
192 extern void e1000_check_options(struct e1000_adapter *adapter);
193 extern int e1000_ethtool_ioctl(struct net_device *netdev, struct ifreq *ifr);
195 static struct pci_driver e1000_driver = {
196 .name = e1000_driver_name,
197 .id_table = e1000_pci_tbl,
198 .probe = e1000_probe,
199 .remove = __devexit_p(e1000_remove),
200 /* Power Managment Hooks */
201 #ifdef CONFIG_PM
202 .suspend = e1000_suspend,
203 .resume = e1000_resume
204 #endif
205 };
207 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
208 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
209 MODULE_LICENSE("GPL");
211 /**
212 * e1000_init_module - Driver Registration Routine
213 *
214 * e1000_init_module is the first routine called when the driver is
215 * loaded. All it does is register with the PCI subsystem.
216 **/
218 static int __init
219 e1000_init_module(void)
220 {
221 int ret;
223 ret = pci_module_init(&e1000_driver);
224 if(ret >= 0) {
225 printk(KERN_INFO "%s - version %s\n",
226 e1000_driver_string, e1000_driver_version);
227 printk(KERN_INFO "%s\n", e1000_copyright);
228 //register_reboot_notifier(&e1000_notifier_reboot);
229 }
230 return ret;
231 }
233 module_init(e1000_init_module);
235 /**
236 * e1000_exit_module - Driver Exit Cleanup Routine
237 *
238 * e1000_exit_module is called just before the driver is removed
239 * from memory.
240 **/
242 static void __exit
243 e1000_exit_module(void)
244 {
245 unregister_reboot_notifier(&e1000_notifier_reboot);
246 pci_unregister_driver(&e1000_driver);
247 }
249 module_exit(e1000_exit_module);
252 int
253 e1000_up(struct e1000_adapter *adapter)
254 {
255 struct net_device *netdev = adapter->netdev;
257 /* hardware has been reset, we need to reload some things */
259 e1000_set_multi(netdev);
261 #ifdef NETIF_F_HW_VLAN_TX
262 e1000_restore_vlan(adapter);
263 #endif
265 e1000_configure_tx(adapter);
266 e1000_setup_rctl(adapter);
267 e1000_configure_rx(adapter);
268 e1000_alloc_rx_buffers(adapter);
270 if(request_irq(netdev->irq, &e1000_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
271 netdev->name, netdev))
272 return -1;
274 mod_timer(&adapter->watchdog_timer, jiffies);
275 e1000_irq_enable(adapter);
277 return 0;
278 }
280 void
281 e1000_down(struct e1000_adapter *adapter)
282 {
283 struct net_device *netdev = adapter->netdev;
285 e1000_irq_disable(adapter);
286 free_irq(netdev->irq, netdev);
287 del_timer_sync(&adapter->tx_fifo_stall_timer);
288 del_timer_sync(&adapter->watchdog_timer);
289 del_timer_sync(&adapter->phy_info_timer);
290 adapter->link_speed = 0;
291 adapter->link_duplex = 0;
292 netif_carrier_off(netdev);
293 netif_stop_queue(netdev);
295 e1000_reset(adapter);
296 e1000_clean_tx_ring(adapter);
297 e1000_clean_rx_ring(adapter);
298 }
300 void
301 e1000_reset(struct e1000_adapter *adapter)
302 {
303 uint32_t pba;
304 /* Repartition Pba for greater than 9k mtu
305 * To take effect CTRL.RST is required.
306 */
308 if(adapter->hw.mac_type < e1000_82547) {
309 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
310 pba = E1000_PBA_40K;
311 else
312 pba = E1000_PBA_48K;
313 } else {
314 if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
315 pba = E1000_PBA_22K;
316 else
317 pba = E1000_PBA_30K;
318 adapter->tx_fifo_head = 0;
319 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
320 adapter->tx_fifo_size =
321 (E1000_PBA_40K - pba) << E1000_TX_FIFO_SIZE_SHIFT;
322 atomic_set(&adapter->tx_fifo_stall, 0);
323 }
324 E1000_WRITE_REG(&adapter->hw, PBA, pba);
326 adapter->hw.fc = adapter->hw.original_fc;
327 e1000_reset_hw(&adapter->hw);
328 if(adapter->hw.mac_type >= e1000_82544)
329 E1000_WRITE_REG(&adapter->hw, WUC, 0);
330 e1000_init_hw(&adapter->hw);
331 e1000_reset_adaptive(&adapter->hw);
332 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
333 }
335 /**
336 * e1000_probe - Device Initialization Routine
337 * @pdev: PCI device information struct
338 * @ent: entry in e1000_pci_tbl
339 *
340 * Returns 0 on success, negative on failure
341 *
342 * e1000_probe initializes an adapter identified by a pci_dev structure.
343 * The OS initialization, configuring of the adapter private structure,
344 * and a hardware reset occur.
345 **/
347 static int __devinit
348 e1000_probe(struct pci_dev *pdev,
349 const struct pci_device_id *ent)
350 {
351 struct net_device *netdev;
352 struct e1000_adapter *adapter;
353 static int cards_found = 0;
354 unsigned long mmio_start;
355 int mmio_len;
356 int pci_using_dac;
357 int i;
358 uint16_t eeprom_data;
360 if((i = pci_enable_device(pdev)))
361 return i;
363 if(!(i = pci_set_dma_mask(pdev, PCI_DMA_64BIT))) {
364 pci_using_dac = 1;
365 } else {
366 if((i = pci_set_dma_mask(pdev, PCI_DMA_32BIT))) {
367 E1000_ERR("No usable DMA configuration, aborting\n");
368 return i;
369 }
370 pci_using_dac = 0;
371 }
373 if((i = pci_request_regions(pdev, e1000_driver_name)))
374 return i;
376 pci_set_master(pdev);
378 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
379 if(!netdev)
380 goto err_alloc_etherdev;
382 SET_MODULE_OWNER(netdev);
383 SET_NETDEV_DEV(netdev, &pdev->dev);
385 pci_set_drvdata(pdev, netdev);
386 adapter = netdev->priv;
387 adapter->netdev = netdev;
388 adapter->pdev = pdev;
389 adapter->hw.back = adapter;
391 mmio_start = pci_resource_start(pdev, BAR_0);
392 mmio_len = pci_resource_len(pdev, BAR_0);
394 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
395 if(!adapter->hw.hw_addr)
396 goto err_ioremap;
398 for(i = BAR_1; i <= BAR_5; i++) {
399 if(pci_resource_len(pdev, i) == 0)
400 continue;
401 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
402 adapter->hw.io_base = pci_resource_start(pdev, i);
403 break;
404 }
405 }
407 netdev->open = &e1000_open;
408 netdev->stop = &e1000_close;
409 netdev->hard_start_xmit = &e1000_xmit_frame;
410 netdev->get_stats = &e1000_get_stats;
411 netdev->set_multicast_list = &e1000_set_multi;
412 netdev->set_mac_address = &e1000_set_mac;
413 netdev->change_mtu = &e1000_change_mtu;
414 netdev->do_ioctl = &e1000_ioctl;
415 #ifdef HAVE_TX_TIMEOUT
416 netdev->tx_timeout = &e1000_tx_timeout;
417 netdev->watchdog_timeo = 5 * HZ;
418 #endif
419 #ifdef CONFIG_E1000_NAPI
420 netdev->poll = &e1000_clean;
421 netdev->weight = 64;
422 #endif
423 #ifdef NETIF_F_HW_VLAN_TX
424 netdev->vlan_rx_register = e1000_vlan_rx_register;
425 netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
426 netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
427 #endif
429 netdev->irq = pdev->irq;
430 netdev->mem_start = mmio_start;
431 netdev->mem_end = mmio_start + mmio_len;
432 netdev->base_addr = adapter->hw.io_base;
434 adapter->bd_number = cards_found;
436 /* setup the private structure */
438 if(e1000_sw_init(adapter))
439 goto err_sw_init;
441 #ifdef MAX_SKB_FRAGS
442 if(adapter->hw.mac_type >= e1000_82543) {
443 #ifdef NETIF_F_HW_VLAN_TX
444 netdev->features = NETIF_F_SG |
445 NETIF_F_HW_CSUM |
446 NETIF_F_HW_VLAN_TX |
447 NETIF_F_HW_VLAN_RX |
448 NETIF_F_HW_VLAN_FILTER;
449 #else
450 netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
451 #endif
452 } else {
453 netdev->features = NETIF_F_SG;
454 }
456 #ifdef NETIF_F_TSO
457 if((adapter->hw.mac_type >= e1000_82544) &&
458 (adapter->hw.mac_type != e1000_82547))
459 netdev->features |= NETIF_F_TSO;
460 #endif
462 if(pci_using_dac)
463 netdev->features |= NETIF_F_HIGHDMA;
464 #endif
466 /* before reading the EEPROM, reset the controller to
467 * put the device in a known good starting state */
469 e1000_reset_hw(&adapter->hw);
471 /* make sure the EEPROM is good */
473 if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
474 printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
475 goto err_eeprom;
476 }
478 /* copy the MAC address out of the EEPROM */
480 e1000_read_mac_addr(&adapter->hw);
481 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
483 if(!is_valid_ether_addr(netdev->dev_addr))
484 goto err_eeprom;
486 e1000_read_part_num(&adapter->hw, &(adapter->part_num));
488 e1000_get_bus_info(&adapter->hw);
490 init_timer(&adapter->tx_fifo_stall_timer);
491 adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
492 adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
494 init_timer(&adapter->watchdog_timer);
495 adapter->watchdog_timer.function = &e1000_watchdog;
496 adapter->watchdog_timer.data = (unsigned long) adapter;
498 init_timer(&adapter->phy_info_timer);
499 adapter->phy_info_timer.function = &e1000_update_phy_info;
500 adapter->phy_info_timer.data = (unsigned long) adapter;
502 INIT_WORK(&adapter->tx_timeout_task,
503 (void (*)(void *))e1000_tx_timeout_task, netdev);
505 register_netdev(netdev);
507 /* we're going to reset, so assume we have no link for now */
509 netif_carrier_off(netdev);
510 netif_stop_queue(netdev);
512 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Connection\n",
513 netdev->name);
514 e1000_check_options(adapter);
516 /* Initial Wake on LAN setting
517 * If APM wake is enabled in the EEPROM,
518 * enable the ACPI Magic Packet filter
519 */
521 switch(adapter->hw.mac_type) {
522 case e1000_82542_rev2_0:
523 case e1000_82542_rev2_1:
524 case e1000_82543:
525 break;
526 case e1000_82546:
527 case e1000_82546_rev_3:
528 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
529 && (adapter->hw.media_type == e1000_media_type_copper)) {
530 e1000_read_eeprom(&adapter->hw,
531 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
532 break;
533 }
534 /* Fall Through */
535 default:
536 e1000_read_eeprom(&adapter->hw,
537 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
538 break;
539 }
540 if(eeprom_data & E1000_EEPROM_APME)
541 adapter->wol |= E1000_WUFC_MAG;
543 /* reset the hardware with the new settings */
545 e1000_reset(adapter);
547 cards_found++;
548 return 0;
550 err_sw_init:
551 err_eeprom:
552 iounmap(adapter->hw.hw_addr);
553 err_ioremap:
554 kfree(netdev);
555 err_alloc_etherdev:
556 pci_release_regions(pdev);
557 return -ENOMEM;
558 }
560 /**
561 * e1000_remove - Device Removal Routine
562 * @pdev: PCI device information struct
563 *
564 * e1000_remove is called by the PCI subsystem to alert the driver
565 * that it should release a PCI device. The could be caused by a
566 * Hot-Plug event, or because the driver is going to be removed from
567 * memory.
568 **/
570 static void __devexit
571 e1000_remove(struct pci_dev *pdev)
572 {
573 struct net_device *netdev = pci_get_drvdata(pdev);
574 struct e1000_adapter *adapter = netdev->priv;
575 uint32_t manc;
577 if(adapter->hw.mac_type >= e1000_82540 &&
578 adapter->hw.media_type == e1000_media_type_copper) {
579 manc = E1000_READ_REG(&adapter->hw, MANC);
580 if(manc & E1000_MANC_SMBUS_EN) {
581 manc |= E1000_MANC_ARP_EN;
582 E1000_WRITE_REG(&adapter->hw, MANC, manc);
583 }
584 }
586 unregister_netdev(netdev);
588 e1000_phy_hw_reset(&adapter->hw);
590 iounmap(adapter->hw.hw_addr);
591 pci_release_regions(pdev);
593 kfree(netdev);
594 }
596 /**
597 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
598 * @adapter: board private structure to initialize
599 *
600 * e1000_sw_init initializes the Adapter private data structure.
601 * Fields are initialized based on PCI device information and
602 * OS network device settings (MTU size).
603 **/
605 static int __devinit
606 e1000_sw_init(struct e1000_adapter *adapter)
607 {
608 struct e1000_hw *hw = &adapter->hw;
609 struct net_device *netdev = adapter->netdev;
610 struct pci_dev *pdev = adapter->pdev;
612 /* PCI config space info */
614 hw->vendor_id = pdev->vendor;
615 hw->device_id = pdev->device;
616 hw->subsystem_vendor_id = pdev->subsystem_vendor;
617 hw->subsystem_id = pdev->subsystem_device;
619 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
621 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
623 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
624 hw->max_frame_size = netdev->mtu +
625 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
626 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
628 /* identify the MAC */
630 if (e1000_set_mac_type(hw)) {
631 E1000_ERR("Unknown MAC Type\n");
632 return -1;
633 }
635 /* initialize eeprom parameters */
637 e1000_init_eeprom_params(hw);
639 /* flow control settings */
641 hw->fc_high_water = E1000_FC_HIGH_THRESH;
642 hw->fc_low_water = E1000_FC_LOW_THRESH;
643 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
644 hw->fc_send_xon = 1;
646 if((hw->mac_type == e1000_82541) ||
647 (hw->mac_type == e1000_82547) ||
648 (hw->mac_type == e1000_82541_rev_2) ||
649 (hw->mac_type == e1000_82547_rev_2))
650 hw->phy_init_script = 1;
652 e1000_set_media_type(hw);
654 if(hw->mac_type < e1000_82543)
655 hw->report_tx_early = 0;
656 else
657 hw->report_tx_early = 1;
659 hw->wait_autoneg_complete = FALSE;
660 hw->tbi_compatibility_en = TRUE;
661 hw->adaptive_ifs = TRUE;
663 /* Copper options */
665 if(hw->media_type == e1000_media_type_copper) {
666 hw->mdix = AUTO_ALL_MODES;
667 hw->disable_polarity_correction = FALSE;
668 hw->master_slave = E1000_MASTER_SLAVE;
669 }
671 atomic_set(&adapter->irq_sem, 1);
672 spin_lock_init(&adapter->stats_lock);
674 return 0;
675 }
677 /**
678 * e1000_open - Called when a network interface is made active
679 * @netdev: network interface device structure
680 *
681 * Returns 0 on success, negative value on failure
682 *
683 * The open entry point is called when a network interface is made
684 * active by the system (IFF_UP). At this point all resources needed
685 * for transmit and receive operations are allocated, the interrupt
686 * handler is registered with the OS, the watchdog timer is started,
687 * and the stack is notified that the interface is ready.
688 **/
690 static int
691 e1000_open(struct net_device *netdev)
692 {
693 struct e1000_adapter *adapter = netdev->priv;
695 /* allocate transmit descriptors */
697 if(e1000_setup_tx_resources(adapter))
698 goto err_setup_tx;
700 /* allocate receive descriptors */
702 if(e1000_setup_rx_resources(adapter))
703 goto err_setup_rx;
705 if(e1000_up(adapter))
706 goto err_up;
708 return 0;
710 err_up:
711 e1000_free_rx_resources(adapter);
712 err_setup_rx:
713 e1000_free_tx_resources(adapter);
714 err_setup_tx:
715 e1000_reset(adapter);
717 return -EBUSY;
718 }
720 /**
721 * e1000_close - Disables a network interface
722 * @netdev: network interface device structure
723 *
724 * Returns 0, this is not allowed to fail
725 *
726 * The close entry point is called when an interface is de-activated
727 * by the OS. The hardware is still under the drivers control, but
728 * needs to be disabled. A global MAC reset is issued to stop the
729 * hardware, and all transmit and receive resources are freed.
730 **/
732 static int
733 e1000_close(struct net_device *netdev)
734 {
735 struct e1000_adapter *adapter = netdev->priv;
737 e1000_down(adapter);
739 e1000_free_tx_resources(adapter);
740 e1000_free_rx_resources(adapter);
742 return 0;
743 }
745 /**
746 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
747 * @adapter: board private structure
748 *
749 * Return 0 on success, negative on failure
750 **/
752 static int
753 e1000_setup_tx_resources(struct e1000_adapter *adapter)
754 {
755 struct e1000_desc_ring *txdr = &adapter->tx_ring;
756 struct pci_dev *pdev = adapter->pdev;
757 int size;
759 size = sizeof(struct e1000_buffer) * txdr->count;
760 txdr->buffer_info = kmalloc(size, GFP_KERNEL);
761 if(!txdr->buffer_info) {
762 return -ENOMEM;
763 }
764 memset(txdr->buffer_info, 0, size);
766 /* round up to nearest 4K */
768 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
769 E1000_ROUNDUP(txdr->size, 4096);
771 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
772 if(!txdr->desc) {
773 kfree(txdr->buffer_info);
774 return -ENOMEM;
775 }
776 memset(txdr->desc, 0, txdr->size);
778 txdr->next_to_use = 0;
779 txdr->next_to_clean = 0;
781 return 0;
782 }
784 /**
785 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
786 * @adapter: board private structure
787 *
788 * Configure the Tx unit of the MAC after a reset.
789 **/
791 static void
792 e1000_configure_tx(struct e1000_adapter *adapter)
793 {
794 uint64_t tdba = adapter->tx_ring.dma;
795 uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
796 uint32_t tctl, tipg;
798 E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
799 E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
801 E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
803 /* Setup the HW Tx Head and Tail descriptor pointers */
805 E1000_WRITE_REG(&adapter->hw, TDH, 0);
806 E1000_WRITE_REG(&adapter->hw, TDT, 0);
808 /* Set the default values for the Tx Inter Packet Gap timer */
810 switch (adapter->hw.mac_type) {
811 case e1000_82542_rev2_0:
812 case e1000_82542_rev2_1:
813 tipg = DEFAULT_82542_TIPG_IPGT;
814 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
815 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
816 break;
817 default:
818 if(adapter->hw.media_type == e1000_media_type_fiber ||
819 adapter->hw.media_type == e1000_media_type_internal_serdes)
820 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
821 else
822 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
823 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
824 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
825 }
826 E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
828 /* Set the Tx Interrupt Delay register */
830 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
831 if(adapter->hw.mac_type >= e1000_82540)
832 E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
834 /* Program the Transmit Control Register */
836 tctl = E1000_READ_REG(&adapter->hw, TCTL);
838 tctl &= ~E1000_TCTL_CT;
839 tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
840 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
842 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
844 e1000_config_collision_dist(&adapter->hw);
846 /* Setup Transmit Descriptor Settings for eop descriptor */
847 adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
848 E1000_TXD_CMD_IFCS;
850 if(adapter->hw.report_tx_early == 1)
851 adapter->txd_cmd |= E1000_TXD_CMD_RS;
852 else
853 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
855 /* Cache if we're 82544 running in PCI-X because we'll
856 * need this to apply a workaround later in the send path. */
857 if(adapter->hw.mac_type == e1000_82544 &&
858 adapter->hw.bus_type == e1000_bus_type_pcix)
859 adapter->pcix_82544 = 1;
860 }
862 /**
863 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
864 * @adapter: board private structure
865 *
866 * Returns 0 on success, negative on failure
867 **/
869 static int
870 e1000_setup_rx_resources(struct e1000_adapter *adapter)
871 {
872 struct e1000_desc_ring *rxdr = &adapter->rx_ring;
873 struct pci_dev *pdev = adapter->pdev;
874 int size;
876 size = sizeof(struct e1000_buffer) * rxdr->count;
877 rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
878 if(!rxdr->buffer_info) {
879 return -ENOMEM;
880 }
881 memset(rxdr->buffer_info, 0, size);
883 /* Round up to nearest 4K */
885 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
886 E1000_ROUNDUP(rxdr->size, 4096);
888 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
890 if(!rxdr->desc) {
891 kfree(rxdr->buffer_info);
892 return -ENOMEM;
893 }
894 memset(rxdr->desc, 0, rxdr->size);
896 rxdr->next_to_clean = 0;
897 rxdr->next_to_use = 0;
899 return 0;
900 }
902 /**
903 * e1000_setup_rctl - configure the receive control register
904 * @adapter: Board private structure
905 **/
907 static void
908 e1000_setup_rctl(struct e1000_adapter *adapter)
909 {
910 uint32_t rctl;
912 rctl = E1000_READ_REG(&adapter->hw, RCTL);
914 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
916 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
917 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
918 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
920 if(adapter->hw.tbi_compatibility_on == 1)
921 rctl |= E1000_RCTL_SBP;
922 else
923 rctl &= ~E1000_RCTL_SBP;
925 rctl &= ~(E1000_RCTL_SZ_4096);
926 switch (adapter->rx_buffer_len) {
927 case E1000_RXBUFFER_2048:
928 default:
929 rctl |= E1000_RCTL_SZ_2048;
930 rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
931 break;
932 case E1000_RXBUFFER_4096:
933 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
934 break;
935 case E1000_RXBUFFER_8192:
936 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
937 break;
938 case E1000_RXBUFFER_16384:
939 rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
940 break;
941 }
943 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
944 }
946 /**
947 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
948 * @adapter: board private structure
949 *
950 * Configure the Rx unit of the MAC after a reset.
951 **/
953 static void
954 e1000_configure_rx(struct e1000_adapter *adapter)
955 {
956 uint64_t rdba = adapter->rx_ring.dma;
957 uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
958 uint32_t rctl;
959 uint32_t rxcsum;
961 /* make sure receives are disabled while setting up the descriptors */
963 rctl = E1000_READ_REG(&adapter->hw, RCTL);
964 E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
966 /* set the Receive Delay Timer Register */
968 E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
970 if(adapter->hw.mac_type >= e1000_82540) {
971 E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
972 if(adapter->itr > 1)
973 E1000_WRITE_REG(&adapter->hw, ITR,
974 1000000000 / (adapter->itr * 256));
975 }
977 /* Setup the Base and Length of the Rx Descriptor Ring */
979 E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
980 E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
982 E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
984 /* Setup the HW Rx Head and Tail Descriptor Pointers */
985 E1000_WRITE_REG(&adapter->hw, RDH, 0);
986 E1000_WRITE_REG(&adapter->hw, RDT, 0);
988 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
989 if((adapter->hw.mac_type >= e1000_82543) &&
990 (adapter->rx_csum == TRUE)) {
991 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
992 rxcsum |= E1000_RXCSUM_TUOFL;
993 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
994 }
996 /* Enable Receives */
998 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
999 }
1001 /**
1002 * e1000_free_tx_resources - Free Tx Resources
1003 * @adapter: board private structure
1005 * Free all transmit software resources
1006 **/
1008 static void
1009 e1000_free_tx_resources(struct e1000_adapter *adapter)
1011 struct pci_dev *pdev = adapter->pdev;
1013 e1000_clean_tx_ring(adapter);
1015 kfree(adapter->tx_ring.buffer_info);
1016 adapter->tx_ring.buffer_info = NULL;
1018 pci_free_consistent(pdev, adapter->tx_ring.size,
1019 adapter->tx_ring.desc, adapter->tx_ring.dma);
1021 adapter->tx_ring.desc = NULL;
1024 /**
1025 * e1000_clean_tx_ring - Free Tx Buffers
1026 * @adapter: board private structure
1027 **/
1029 static void
1030 e1000_clean_tx_ring(struct e1000_adapter *adapter)
1032 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1033 struct e1000_buffer *buffer_info;
1034 struct pci_dev *pdev = adapter->pdev;
1035 unsigned long size;
1036 unsigned int i;
1038 /* Free all the Tx ring sk_buffs */
1040 for(i = 0; i < tx_ring->count; i++) {
1041 buffer_info = &tx_ring->buffer_info[i];
1042 if(buffer_info->skb) {
1044 pci_unmap_page(pdev,
1045 buffer_info->dma,
1046 buffer_info->length,
1047 PCI_DMA_TODEVICE);
1049 dev_kfree_skb(buffer_info->skb);
1051 buffer_info->skb = NULL;
1055 size = sizeof(struct e1000_buffer) * tx_ring->count;
1056 memset(tx_ring->buffer_info, 0, size);
1058 /* Zero out the descriptor ring */
1060 memset(tx_ring->desc, 0, tx_ring->size);
1062 tx_ring->next_to_use = 0;
1063 tx_ring->next_to_clean = 0;
1065 E1000_WRITE_REG(&adapter->hw, TDH, 0);
1066 E1000_WRITE_REG(&adapter->hw, TDT, 0);
1069 /**
1070 * e1000_free_rx_resources - Free Rx Resources
1071 * @adapter: board private structure
1073 * Free all receive software resources
1074 **/
1076 static void
1077 e1000_free_rx_resources(struct e1000_adapter *adapter)
1079 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1080 struct pci_dev *pdev = adapter->pdev;
1082 e1000_clean_rx_ring(adapter);
1084 kfree(rx_ring->buffer_info);
1085 rx_ring->buffer_info = NULL;
1087 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1089 rx_ring->desc = NULL;
1092 /**
1093 * e1000_clean_rx_ring - Free Rx Buffers
1094 * @adapter: board private structure
1095 **/
1097 static void
1098 e1000_clean_rx_ring(struct e1000_adapter *adapter)
1100 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1101 struct e1000_buffer *buffer_info;
1102 struct pci_dev *pdev = adapter->pdev;
1103 unsigned long size;
1104 unsigned int i;
1106 /* Free all the Rx ring sk_buffs */
1108 for(i = 0; i < rx_ring->count; i++) {
1109 buffer_info = &rx_ring->buffer_info[i];
1110 if(buffer_info->skb) {
1112 pci_unmap_single(pdev,
1113 buffer_info->dma,
1114 buffer_info->length,
1115 PCI_DMA_FROMDEVICE);
1117 dev_kfree_skb(buffer_info->skb);
1119 buffer_info->skb = NULL;
1123 size = sizeof(struct e1000_buffer) * rx_ring->count;
1124 memset(rx_ring->buffer_info, 0, size);
1126 /* Zero out the descriptor ring */
1128 memset(rx_ring->desc, 0, rx_ring->size);
1130 rx_ring->next_to_clean = 0;
1131 rx_ring->next_to_use = 0;
1133 E1000_WRITE_REG(&adapter->hw, RDH, 0);
1134 E1000_WRITE_REG(&adapter->hw, RDT, 0);
1137 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
1138 * and memory write and invalidate disabled for certain operations
1139 */
1140 static void
1141 e1000_enter_82542_rst(struct e1000_adapter *adapter)
1143 struct net_device *netdev = adapter->netdev;
1144 uint32_t rctl;
1146 e1000_pci_clear_mwi(&adapter->hw);
1148 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1149 rctl |= E1000_RCTL_RST;
1150 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1151 E1000_WRITE_FLUSH(&adapter->hw);
1152 mdelay(5);
1154 if(netif_running(netdev))
1155 e1000_clean_rx_ring(adapter);
1158 static void
1159 e1000_leave_82542_rst(struct e1000_adapter *adapter)
1161 struct net_device *netdev = adapter->netdev;
1162 uint32_t rctl;
1164 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1165 rctl &= ~E1000_RCTL_RST;
1166 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1167 E1000_WRITE_FLUSH(&adapter->hw);
1168 mdelay(5);
1170 if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
1171 e1000_pci_set_mwi(&adapter->hw);
1173 if(netif_running(netdev)) {
1174 e1000_configure_rx(adapter);
1175 e1000_alloc_rx_buffers(adapter);
1179 /**
1180 * e1000_set_mac - Change the Ethernet Address of the NIC
1181 * @netdev: network interface device structure
1182 * @p: pointer to an address structure
1184 * Returns 0 on success, negative on failure
1185 **/
1187 static int
1188 e1000_set_mac(struct net_device *netdev, void *p)
1190 struct e1000_adapter *adapter = netdev->priv;
1191 struct sockaddr *addr = p;
1193 if(!is_valid_ether_addr(addr->sa_data))
1194 return -EADDRNOTAVAIL;
1196 /* 82542 2.0 needs to be in reset to write receive address registers */
1198 if(adapter->hw.mac_type == e1000_82542_rev2_0)
1199 e1000_enter_82542_rst(adapter);
1201 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1202 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1204 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1206 if(adapter->hw.mac_type == e1000_82542_rev2_0)
1207 e1000_leave_82542_rst(adapter);
1209 return 0;
1212 /**
1213 * e1000_set_multi - Multicast and Promiscuous mode set
1214 * @netdev: network interface device structure
1216 * The set_multi entry point is called whenever the multicast address
1217 * list or the network interface flags are updated. This routine is
1218 * responsible for configuring the hardware for proper multicast,
1219 * promiscuous mode, and all-multi behavior.
1220 **/
1222 static void
1223 e1000_set_multi(struct net_device *netdev)
1225 struct e1000_adapter *adapter = netdev->priv;
1226 struct e1000_hw *hw = &adapter->hw;
1227 struct dev_mc_list *mc_ptr;
1228 uint32_t rctl;
1229 uint32_t hash_value;
1230 int i;
1232 /* Check for Promiscuous and All Multicast modes */
1234 rctl = E1000_READ_REG(hw, RCTL);
1236 if(netdev->flags & IFF_PROMISC) {
1237 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1238 } else if(netdev->flags & IFF_ALLMULTI) {
1239 rctl |= E1000_RCTL_MPE;
1240 rctl &= ~E1000_RCTL_UPE;
1241 } else {
1242 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
1245 E1000_WRITE_REG(hw, RCTL, rctl);
1247 /* 82542 2.0 needs to be in reset to write receive address registers */
1249 if(hw->mac_type == e1000_82542_rev2_0)
1250 e1000_enter_82542_rst(adapter);
1252 /* load the first 14 multicast address into the exact filters 1-14
1253 * RAR 0 is used for the station MAC adddress
1254 * if there are not 14 addresses, go ahead and clear the filters
1255 */
1256 mc_ptr = netdev->mc_list;
1258 for(i = 1; i < E1000_RAR_ENTRIES; i++) {
1259 if(mc_ptr) {
1260 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
1261 mc_ptr = mc_ptr->next;
1262 } else {
1263 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
1264 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
1268 /* clear the old settings from the multicast hash table */
1270 for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
1271 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
1273 /* load any remaining addresses into the hash table */
1275 for(; mc_ptr; mc_ptr = mc_ptr->next) {
1276 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
1277 e1000_mta_set(hw, hash_value);
1280 if(hw->mac_type == e1000_82542_rev2_0)
1281 e1000_leave_82542_rst(adapter);
1284 static void
1285 e1000_tx_flush(struct e1000_adapter *adapter)
1287 uint32_t ctrl, tctl, txcw, icr;
1289 e1000_irq_disable(adapter);
1291 if(adapter->hw.mac_type < e1000_82543) {
1292 /* Transmit Unit Reset */
1293 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1294 E1000_WRITE_REG(&adapter->hw, TCTL, tctl | E1000_TCTL_RST);
1295 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1296 e1000_clean_tx_ring(adapter);
1297 e1000_configure_tx(adapter);
1298 } else {
1299 txcw = E1000_READ_REG(&adapter->hw, TXCW);
1300 E1000_WRITE_REG(&adapter->hw, TXCW, txcw & ~E1000_TXCW_ANE);
1302 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
1303 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl | E1000_CTRL_SLU |
1304 E1000_CTRL_ILOS);
1306 mdelay(10);
1308 e1000_clean_tx_irq(adapter);
1309 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
1310 E1000_WRITE_REG(&adapter->hw, TXCW, txcw);
1312 /* clear the link status change interrupts this caused */
1313 icr = E1000_READ_REG(&adapter->hw, ICR);
1316 e1000_irq_enable(adapter);
1319 /* need to wait a few seconds after link up to get diagnostic information from the phy */
1321 static void
1322 e1000_update_phy_info(unsigned long data)
1324 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1325 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
1328 /**
1329 * e1000_82547_tx_fifo_stall - Timer Call-back
1330 * @data: pointer to adapter cast into an unsigned long
1331 **/
1333 static void
1334 e1000_82547_tx_fifo_stall(unsigned long data)
1336 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1337 struct net_device *netdev = adapter->netdev;
1338 uint32_t tctl;
1340 if(atomic_read(&adapter->tx_fifo_stall)) {
1341 if((E1000_READ_REG(&adapter->hw, TDT) ==
1342 E1000_READ_REG(&adapter->hw, TDH)) &&
1343 (E1000_READ_REG(&adapter->hw, TDFT) ==
1344 E1000_READ_REG(&adapter->hw, TDFH)) &&
1345 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1346 E1000_READ_REG(&adapter->hw, TDFHS))) {
1347 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1348 E1000_WRITE_REG(&adapter->hw, TCTL,
1349 tctl & ~E1000_TCTL_EN);
1350 E1000_WRITE_REG(&adapter->hw, TDFT,
1351 adapter->tx_head_addr);
1352 E1000_WRITE_REG(&adapter->hw, TDFH,
1353 adapter->tx_head_addr);
1354 E1000_WRITE_REG(&adapter->hw, TDFTS,
1355 adapter->tx_head_addr);
1356 E1000_WRITE_REG(&adapter->hw, TDFHS,
1357 adapter->tx_head_addr);
1358 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1359 E1000_WRITE_FLUSH(&adapter->hw);
1361 adapter->tx_fifo_head = 0;
1362 atomic_set(&adapter->tx_fifo_stall, 0);
1363 netif_wake_queue(netdev);
1364 } else {
1365 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
1370 /**
1371 * e1000_watchdog - Timer Call-back
1372 * @data: pointer to netdev cast into an unsigned long
1373 **/
1375 static void
1376 e1000_watchdog(unsigned long data)
1378 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1379 struct net_device *netdev = adapter->netdev;
1380 struct e1000_desc_ring *txdr = &adapter->tx_ring;
1381 unsigned int i;
1383 e1000_check_for_link(&adapter->hw);
1385 if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1386 if(!netif_carrier_ok(netdev)) {
1387 e1000_get_speed_and_duplex(&adapter->hw,
1388 &adapter->link_speed,
1389 &adapter->link_duplex);
1391 printk(KERN_INFO
1392 "e1000: %s NIC Link is Up %d Mbps %s\n",
1393 netdev->name, adapter->link_speed,
1394 adapter->link_duplex == FULL_DUPLEX ?
1395 "Full Duplex" : "Half Duplex");
1397 netif_carrier_on(netdev);
1398 netif_wake_queue(netdev);
1399 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1400 adapter->smartspeed = 0;
1402 } else {
1403 if(netif_carrier_ok(netdev)) {
1404 adapter->link_speed = 0;
1405 adapter->link_duplex = 0;
1406 printk(KERN_INFO
1407 "e1000: %s NIC Link is Down\n",
1408 netdev->name);
1409 netif_carrier_off(netdev);
1410 netif_stop_queue(netdev);
1411 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1414 e1000_smartspeed(adapter);
1417 e1000_update_stats(adapter);
1418 e1000_update_adaptive(&adapter->hw);
1420 if(!netif_carrier_ok(netdev)) {
1421 if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1422 unsigned long flags;
1423 spin_lock_irqsave(&netdev->xmit_lock, flags);
1424 e1000_tx_flush(adapter);
1425 spin_unlock_irqrestore(&netdev->xmit_lock, flags);
1429 /* Dynamic mode for Interrupt Throttle Rate (ITR) */
1430 if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
1431 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
1432 * asymmetrical Tx or Rx gets ITR=8000; everyone
1433 * else is between 2000-8000. */
1434 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
1435 uint32_t dif = (adapter->gotcl > adapter->gorcl ?
1436 adapter->gotcl - adapter->gorcl :
1437 adapter->gorcl - adapter->gotcl) / 10000;
1438 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
1439 E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
1442 /* Cause software interrupt to ensure rx ring is cleaned */
1443 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
1445 /* Early detection of hung controller */
1446 i = txdr->next_to_clean;
1447 if(txdr->buffer_info[i].dma &&
1448 time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
1449 !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
1450 netif_stop_queue(netdev);
1452 /* Reset the timer */
1453 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1456 #define E1000_TX_FLAGS_CSUM 0x00000001
1457 #define E1000_TX_FLAGS_VLAN 0x00000002
1458 #define E1000_TX_FLAGS_TSO 0x00000004
1459 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
1460 #define E1000_TX_FLAGS_VLAN_SHIFT 16
1462 static inline boolean_t
1463 e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1465 #ifdef NETIF_F_TSO
1466 struct e1000_context_desc *context_desc;
1467 unsigned int i;
1468 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1469 uint16_t ipcse, tucse, mss;
1471 if(skb_shinfo(skb)->tso_size) {
1472 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1473 mss = skb_shinfo(skb)->tso_size;
1474 skb->nh.iph->tot_len = 0;
1475 skb->nh.iph->check = 0;
1476 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
1477 skb->nh.iph->daddr,
1478 0,
1479 IPPROTO_TCP,
1480 0);
1481 ipcss = skb->nh.raw - skb->data;
1482 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1483 ipcse = skb->h.raw - skb->data - 1;
1484 tucss = skb->h.raw - skb->data;
1485 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1486 tucse = 0;
1488 i = adapter->tx_ring.next_to_use;
1489 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1491 context_desc->lower_setup.ip_fields.ipcss = ipcss;
1492 context_desc->lower_setup.ip_fields.ipcso = ipcso;
1493 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
1494 context_desc->upper_setup.tcp_fields.tucss = tucss;
1495 context_desc->upper_setup.tcp_fields.tucso = tucso;
1496 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
1497 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
1498 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
1499 context_desc->cmd_and_length = cpu_to_le32(
1500 E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
1501 E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
1502 (skb->len - (hdr_len)));
1504 if(++i == adapter->tx_ring.count) i = 0;
1505 adapter->tx_ring.next_to_use = i;
1507 return TRUE;
1509 #endif
1511 return FALSE;
1514 static inline boolean_t
1515 e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1517 struct e1000_context_desc *context_desc;
1518 unsigned int i;
1519 uint8_t css, cso;
1521 if(skb->ip_summed == CHECKSUM_HW) {
1522 css = skb->h.raw - skb->data;
1523 cso = (skb->h.raw + skb->csum) - skb->data;
1525 i = adapter->tx_ring.next_to_use;
1526 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1528 context_desc->upper_setup.tcp_fields.tucss = css;
1529 context_desc->upper_setup.tcp_fields.tucso = cso;
1530 context_desc->upper_setup.tcp_fields.tucse = 0;
1531 context_desc->tcp_seg_setup.data = 0;
1532 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
1534 if(++i == adapter->tx_ring.count) i = 0;
1535 adapter->tx_ring.next_to_use = i;
1537 return TRUE;
1540 return FALSE;
1543 #define E1000_MAX_TXD_PWR 12
1544 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
1546 static inline int
1547 e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
1548 unsigned int first)
1550 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1551 struct e1000_buffer *buffer_info;
1552 unsigned int len = skb->len, max_per_txd = E1000_MAX_DATA_PER_TXD;
1553 unsigned int offset = 0, size, count = 0, i;
1555 #ifdef MAX_SKB_FRAGS
1556 #ifdef NETIF_F_TSO
1557 unsigned int mss = skb_shinfo(skb)->tso_size;
1558 /* The controller does a simple calculation to
1559 * make sure there is enough room in the FIFO before
1560 * initiating the DMA for each buffer. The calc is:
1561 * 4 = ceil(buffer len/mss). To make sure we don't
1562 * overrun the FIFO, adjust the max buffer len if mss
1563 * drops. */
1564 if(mss) max_per_txd = min(mss << 2, max_per_txd);
1565 #endif
1566 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1567 unsigned int f;
1568 len -= skb->data_len;
1569 #endif
1571 i = tx_ring->next_to_use;
1573 while(len) {
1574 buffer_info = &tx_ring->buffer_info[i];
1575 size = min(len, max_per_txd);
1576 #ifdef NETIF_F_TSO
1577 /* Workaround for premature desc write-backs
1578 * in TSO mode. Append 4-byte sentinel desc */
1579 if(mss && !nr_frags && size == len && size > 8)
1580 size -= 4;
1581 #endif
1582 /* Workaround for potential 82544 hang in PCI-X. Avoid
1583 * terminating buffers within evenly-aligned dwords. */
1584 if(adapter->pcix_82544 &&
1585 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
1586 size > 4)
1587 size -= 4;
1589 buffer_info->length = size;
1590 buffer_info->dma =
1591 pci_map_single(adapter->pdev,
1592 skb->data + offset,
1593 size,
1594 PCI_DMA_TODEVICE);
1595 buffer_info->time_stamp = jiffies;
1597 len -= size;
1598 offset += size;
1599 count++;
1600 if(++i == tx_ring->count) i = 0;
1603 #ifdef MAX_SKB_FRAGS
1604 for(f = 0; f < nr_frags; f++) {
1605 struct skb_frag_struct *frag;
1607 frag = &skb_shinfo(skb)->frags[f];
1608 len = frag->size;
1609 offset = frag->page_offset;
1611 while(len) {
1612 buffer_info = &tx_ring->buffer_info[i];
1613 size = min(len, max_per_txd);
1614 #ifdef NETIF_F_TSO
1615 /* Workaround for premature desc write-backs
1616 * in TSO mode. Append 4-byte sentinel desc */
1617 if(mss && f == (nr_frags-1) && size == len && size > 8)
1618 size -= 4;
1619 #endif
1620 /* Workaround for potential 82544 hang in PCI-X.
1621 * Avoid terminating buffers within evenly-aligned
1622 * dwords. */
1623 if(adapter->pcix_82544 &&
1624 !((unsigned long)(frag->page+offset+size-1) & 4) &&
1625 size > 4)
1626 size -= 4;
1628 buffer_info->length = size;
1629 buffer_info->dma =
1630 pci_map_page(adapter->pdev,
1631 frag->page,
1632 offset,
1633 size,
1634 PCI_DMA_TODEVICE);
1635 buffer_info->time_stamp = jiffies;
1637 len -= size;
1638 offset += size;
1639 count++;
1640 if(++i == tx_ring->count) i = 0;
1643 #endif
1645 if(E1000_DESC_UNUSED(&adapter->tx_ring) < count) {
1647 /* There aren't enough descriptors available to queue up
1648 * this send, so undo the mapping and abort the send.
1649 * We could have done the check before we mapped the skb,
1650 * but because of all the workarounds (above), it's too
1651 * difficult to predict how many we're going to need.*/
1652 i = first;
1654 while(count--) {
1655 buffer_info = &tx_ring->buffer_info[i];
1656 if(buffer_info->dma) {
1657 pci_unmap_page(adapter->pdev,
1658 buffer_info->dma,
1659 buffer_info->length,
1660 PCI_DMA_TODEVICE);
1661 buffer_info->dma = 0;
1663 if(++i == tx_ring->count) i = 0;
1666 return 0;
1669 i = (i == 0) ? tx_ring->count - 1 : i - 1;
1670 tx_ring->buffer_info[i].skb = skb;
1671 tx_ring->buffer_info[first].next_to_watch = i;
1673 return count;
1676 static inline void
1677 e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
1679 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1680 struct e1000_tx_desc *tx_desc = NULL;
1681 struct e1000_buffer *buffer_info;
1682 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
1683 unsigned int i;
1685 if(tx_flags & E1000_TX_FLAGS_TSO) {
1686 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
1687 E1000_TXD_CMD_TSE;
1688 txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
1691 if(tx_flags & E1000_TX_FLAGS_CSUM) {
1692 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
1693 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
1696 if(tx_flags & E1000_TX_FLAGS_VLAN) {
1697 txd_lower |= E1000_TXD_CMD_VLE;
1698 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
1701 i = tx_ring->next_to_use;
1703 while(count--) {
1704 buffer_info = &tx_ring->buffer_info[i];
1705 tx_desc = E1000_TX_DESC(*tx_ring, i);
1706 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1707 tx_desc->lower.data =
1708 cpu_to_le32(txd_lower | buffer_info->length);
1709 tx_desc->upper.data = cpu_to_le32(txd_upper);
1710 if(++i == tx_ring->count) i = 0;
1713 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
1715 /* Force memory writes to complete before letting h/w
1716 * know there are new descriptors to fetch. (Only
1717 * applicable for weak-ordered memory model archs,
1718 * such as IA-64). */
1719 wmb();
1721 tx_ring->next_to_use = i;
1722 E1000_WRITE_REG(&adapter->hw, TDT, i);
1725 /**
1726 * 82547 workaround to avoid controller hang in half-duplex environment.
1727 * The workaround is to avoid queuing a large packet that would span
1728 * the internal Tx FIFO ring boundary by notifying the stack to resend
1729 * the packet at a later time. This gives the Tx FIFO an opportunity to
1730 * flush all packets. When that occurs, we reset the Tx FIFO pointers
1731 * to the beginning of the Tx FIFO.
1732 **/
1734 #define E1000_FIFO_HDR 0x10
1735 #define E1000_82547_PAD_LEN 0x3E0
1737 static inline int
1738 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
1740 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1741 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
1743 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
1745 if(adapter->link_duplex != HALF_DUPLEX)
1746 goto no_fifo_stall_required;
1748 if(atomic_read(&adapter->tx_fifo_stall))
1749 return 1;
1751 if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1752 atomic_set(&adapter->tx_fifo_stall, 1);
1753 return 1;
1756 no_fifo_stall_required:
1757 adapter->tx_fifo_head += skb_fifo_len;
1758 if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
1759 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1760 return 0;
1763 static int
1764 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1766 struct e1000_adapter *adapter = netdev->priv;
1767 unsigned int first;
1768 unsigned int tx_flags = 0;
1769 int count;
1771 if(skb->len <= 0) {
1772 dev_kfree_skb_any(skb);
1773 return 0;
1776 if(adapter->hw.mac_type == e1000_82547) {
1777 if(e1000_82547_fifo_workaround(adapter, skb)) {
1778 netif_stop_queue(netdev);
1779 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
1780 return 1;
1784 #ifdef NETIF_F_HW_VLAN_TX
1785 if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1786 tx_flags |= E1000_TX_FLAGS_VLAN;
1787 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
1789 #endif
1791 first = adapter->tx_ring.next_to_use;
1793 if(e1000_tso(adapter, skb))
1794 tx_flags |= E1000_TX_FLAGS_TSO;
1795 else if(e1000_tx_csum(adapter, skb))
1796 tx_flags |= E1000_TX_FLAGS_CSUM;
1798 if((count = e1000_tx_map(adapter, skb, first)))
1799 e1000_tx_queue(adapter, count, tx_flags);
1800 else {
1801 netif_stop_queue(netdev);
1802 return 1;
1805 netdev->trans_start = jiffies;
1807 return 0;
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
e1000_tx_timeout(struct net_device *netdev)
{
#if 0
    struct e1000_adapter *adapter = netdev->priv;

    /* Do the reset outside of interrupt context */
    schedule_work(&adapter->tx_timeout_task);
#endif
    /* Xen has no workqueue here, so the reset task is invoked
     * directly in this context.  XXXX HACK!!! XEN */
    e1000_tx_timeout_task(netdev);
}
1827 static void
1828 e1000_tx_timeout_task(struct net_device *netdev)
1830 struct e1000_adapter *adapter = netdev->priv;
1832 netif_device_detach(netdev);
1833 e1000_down(adapter);
1834 e1000_up(adapter);
1835 netif_device_attach(netdev);
1838 /**
1839 * e1000_get_stats - Get System Network Statistics
1840 * @netdev: network interface device structure
1842 * Returns the address of the device statistics structure.
1843 * The statistics are actually updated from the timer callback.
1844 **/
1846 static struct net_device_stats *
1847 e1000_get_stats(struct net_device *netdev)
1849 struct e1000_adapter *adapter = netdev->priv;
1851 return &adapter->net_stats;
1854 /**
1855 * e1000_change_mtu - Change the Maximum Transfer Unit
1856 * @netdev: network interface device structure
1857 * @new_mtu: new value for maximum frame size
1859 * Returns 0 on success, negative on failure
1860 **/
1862 static int
1863 e1000_change_mtu(struct net_device *netdev, int new_mtu)
1865 struct e1000_adapter *adapter = netdev->priv;
1866 int old_mtu = adapter->rx_buffer_len;
1867 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1869 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1870 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1871 E1000_ERR("Invalid MTU setting\n");
1872 return -EINVAL;
1875 if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
1876 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
1878 } else if(adapter->hw.mac_type < e1000_82543) {
1879 E1000_ERR("Jumbo Frames not supported on 82542\n");
1880 return -EINVAL;
1882 } else if(max_frame <= E1000_RXBUFFER_4096) {
1883 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
1885 } else if(max_frame <= E1000_RXBUFFER_8192) {
1886 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
1888 } else {
1889 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
1892 if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
1894 e1000_down(adapter);
1895 e1000_up(adapter);
1898 netdev->mtu = new_mtu;
1899 adapter->hw.max_frame_size = max_frame;
1901 return 0;
1904 /**
1905 * e1000_update_stats - Update the board statistics counters
1906 * @adapter: board private structure
1907 **/
1909 static void
1910 e1000_update_stats(struct e1000_adapter *adapter)
1912 struct e1000_hw *hw = &adapter->hw;
1913 unsigned long flags;
1914 uint16_t phy_tmp;
1916 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
1918 spin_lock_irqsave(&adapter->stats_lock, flags);
1920 /* these counters are modified from e1000_adjust_tbi_stats,
1921 * called from the interrupt context, so they must only
1922 * be written while holding adapter->stats_lock
1923 */
1925 adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
1926 adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
1927 adapter->gorcl = E1000_READ_REG(hw, GORCL);
1928 adapter->stats.gorcl += adapter->gorcl;
1929 adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
1930 adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
1931 adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
1932 adapter->stats.roc += E1000_READ_REG(hw, ROC);
1933 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
1934 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
1935 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
1936 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
1937 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
1938 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
1940 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1942 /* the rest of the counters are only modified here */
1944 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
1945 adapter->stats.mpc += E1000_READ_REG(hw, MPC);
1946 adapter->stats.scc += E1000_READ_REG(hw, SCC);
1947 adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
1948 adapter->stats.mcc += E1000_READ_REG(hw, MCC);
1949 adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
1950 adapter->stats.dc += E1000_READ_REG(hw, DC);
1951 adapter->stats.sec += E1000_READ_REG(hw, SEC);
1952 adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
1953 adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
1954 adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
1955 adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
1956 adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
1957 adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
1958 adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
1959 adapter->gotcl = E1000_READ_REG(hw, GOTCL);
1960 adapter->stats.gotcl += adapter->gotcl;
1961 adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
1962 adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
1963 adapter->stats.ruc += E1000_READ_REG(hw, RUC);
1964 adapter->stats.rfc += E1000_READ_REG(hw, RFC);
1965 adapter->stats.rjc += E1000_READ_REG(hw, RJC);
1966 adapter->stats.torl += E1000_READ_REG(hw, TORL);
1967 adapter->stats.torh += E1000_READ_REG(hw, TORH);
1968 adapter->stats.totl += E1000_READ_REG(hw, TOTL);
1969 adapter->stats.toth += E1000_READ_REG(hw, TOTH);
1970 adapter->stats.tpr += E1000_READ_REG(hw, TPR);
1971 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
1972 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
1973 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
1974 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
1975 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
1976 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
1977 adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
1978 adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
1980 /* used for adaptive IFS */
1982 hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
1983 adapter->stats.tpt += hw->tx_packet_delta;
1984 hw->collision_delta = E1000_READ_REG(hw, COLC);
1985 adapter->stats.colc += hw->collision_delta;
1987 if(hw->mac_type >= e1000_82543) {
1988 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
1989 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
1990 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
1991 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
1992 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
1993 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
1996 /* Fill out the OS statistics structure */
1998 adapter->net_stats.rx_packets = adapter->stats.gprc;
1999 adapter->net_stats.tx_packets = adapter->stats.gptc;
2000 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2001 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2002 adapter->net_stats.multicast = adapter->stats.mprc;
2003 adapter->net_stats.collisions = adapter->stats.colc;
2005 /* Rx Errors */
2007 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2008 adapter->stats.crcerrs + adapter->stats.algnerrc +
2009 adapter->stats.rlec + adapter->stats.rnbc +
2010 adapter->stats.mpc + adapter->stats.cexterr;
2011 adapter->net_stats.rx_dropped = adapter->stats.rnbc;
2012 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2013 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2014 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2015 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
2016 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2018 /* Tx Errors */
2020 adapter->net_stats.tx_errors = adapter->stats.ecol +
2021 adapter->stats.latecol;
2022 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2023 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2024 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2026 /* Tx Dropped needs to be maintained elsewhere */
2028 /* Phy Stats */
2030 if(hw->media_type == e1000_media_type_copper) {
2031 if((adapter->link_speed == SPEED_1000) &&
2032 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
2033 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2034 adapter->phy_stats.idle_errors += phy_tmp;
2037 if((hw->mac_type <= e1000_82546) &&
2038 (hw->phy_type == e1000_phy_m88) &&
2039 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
2040 adapter->phy_stats.receive_errors += phy_tmp;
2044 /**
2045 * e1000_irq_disable - Mask off interrupt generation on the NIC
2046 * @adapter: board private structure
2047 **/
2049 static inline void
2050 e1000_irq_disable(struct e1000_adapter *adapter)
2052 atomic_inc(&adapter->irq_sem);
2053 E1000_WRITE_REG(&adapter->hw, IMC, ~0);
2054 E1000_WRITE_FLUSH(&adapter->hw);
2055 synchronize_irq(adapter->netdev->irq);
2058 /**
2059 * e1000_irq_enable - Enable default interrupt generation settings
2060 * @adapter: board private structure
2061 **/
2063 static inline void
2064 e1000_irq_enable(struct e1000_adapter *adapter)
2066 if(atomic_dec_and_test(&adapter->irq_sem)) {
2067 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
2068 E1000_WRITE_FLUSH(&adapter->hw);
2072 /**
2073 * e1000_intr - Interrupt Handler
2074 * @irq: interrupt number
2075 * @data: pointer to a network interface device structure
2076 * @pt_regs: CPU registers structure
2077 **/
2079 static irqreturn_t
2080 e1000_intr(int irq, void *data, struct pt_regs *regs)
2082 struct net_device *netdev = data;
2083 struct e1000_adapter *adapter = netdev->priv;
2084 uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);
2085 #ifndef CONFIG_E1000_NAPI
2086 unsigned int i;
2087 #endif
2089 if(!icr)
2090 return IRQ_NONE; /* Not our interrupt */
2092 if(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
2093 adapter->hw.get_link_status = 1;
2094 mod_timer(&adapter->watchdog_timer, jiffies);
2097 #ifdef CONFIG_E1000_NAPI
2098 if(netif_rx_schedule_prep(netdev)) {
2100 /* Disable interrupts and register for poll. The flush
2101 of the posted write is intentionally left out.
2102 */
2104 atomic_inc(&adapter->irq_sem);
2105 E1000_WRITE_REG(&adapter->hw, IMC, ~0);
2106 __netif_rx_schedule(netdev);
2108 #else
2109 for(i = 0; i < E1000_MAX_INTR; i++)
2110 if(!e1000_clean_rx_irq(adapter) &
2111 !e1000_clean_tx_irq(adapter))
2112 break;
2113 #endif
2114 #ifdef E1000_COUNT_ICR
2115 adapter->icr_txdw += icr & 0x01;
2116 icr >>= 1;
2117 adapter->icr_txqe += icr & 0x01;
2118 icr >>= 1;
2119 adapter->icr_lsc += icr & 0x01;
2120 icr >>= 1;
2121 adapter->icr_rxseq += icr & 0x01;
2122 icr >>= 1;
2123 adapter->icr_rxdmt += icr & 0x01;
2124 icr >>= 2;
2125 adapter->icr_rxo += icr & 0x01;
2126 icr >>= 1;
2127 adapter->icr_rxt += icr & 0x01;
2128 icr >>= 2;
2129 adapter->icr_mdac += icr & 0x01;
2130 icr >>= 1;
2131 adapter->icr_rxcfg += icr & 0x01;
2132 icr >>= 1;
2133 adapter->icr_gpi += icr & 0x01;
2134 #endif
2136 return IRQ_HANDLED;
#ifdef CONFIG_E1000_NAPI
/**
 * e1000_clean - NAPI Rx polling callback
 * @netdev: network interface device structure
 * @budget: max packets the stack allows us to process this call
 *
 * Returns non-zero while receive work is still outstanding.
 **/

static int
e1000_clean(struct net_device *netdev, int *budget)
{
	struct e1000_adapter *adapter = netdev->priv;
	int work_to_do = min(*budget, netdev->quota);
	int work_done = 0;

	e1000_clean_tx_irq(adapter);
	e1000_clean_rx_irq(adapter, &work_done, work_to_do);

	*budget -= work_done;
	netdev->quota -= work_done;

	/* Ring drained before the quota ran out: leave polling mode and
	 * unmask interrupts again. */
	if (work_done < work_to_do) {
		netif_rx_complete(netdev);
		e1000_irq_enable(adapter);
	}

	return (work_done >= work_to_do);
}
#endif
2167 /**
2168 * e1000_clean_tx_irq - Reclaim resources after transmit completes
2169 * @adapter: board private structure
2170 **/
2172 static boolean_t
2173 e1000_clean_tx_irq(struct e1000_adapter *adapter)
2175 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2176 struct net_device *netdev = adapter->netdev;
2177 struct pci_dev *pdev = adapter->pdev;
2178 struct e1000_tx_desc *tx_desc, *eop_desc;
2179 struct e1000_buffer *buffer_info;
2180 unsigned int i, eop;
2181 boolean_t cleaned = FALSE;
2183 i = tx_ring->next_to_clean;
2184 eop = tx_ring->buffer_info[i].next_to_watch;
2185 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2187 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2189 for(cleaned = FALSE; !cleaned; ) {
2190 tx_desc = E1000_TX_DESC(*tx_ring, i);
2191 buffer_info = &tx_ring->buffer_info[i];
2193 if(buffer_info->dma) {
2195 pci_unmap_page(pdev,
2196 buffer_info->dma,
2197 buffer_info->length,
2198 PCI_DMA_TODEVICE);
2200 buffer_info->dma = 0;
2203 if(buffer_info->skb) {
2205 dev_kfree_skb_any(buffer_info->skb);
2207 buffer_info->skb = NULL;
2210 tx_desc->buffer_addr = 0;
2211 tx_desc->lower.data = 0;
2212 tx_desc->upper.data = 0;
2214 cleaned = (i == eop);
2215 if(++i == tx_ring->count) i = 0;
2218 eop = tx_ring->buffer_info[i].next_to_watch;
2219 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2222 tx_ring->next_to_clean = i;
2224 if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
2225 netif_wake_queue(netdev);
2227 return cleaned;
2230 /**
2231 * e1000_clean_rx_irq - Send received data up the network stack,
2232 * @adapter: board private structure
2233 **/
2235 static boolean_t
2236 #ifdef CONFIG_E1000_NAPI
2237 e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
2238 int work_to_do)
2239 #else
2240 e1000_clean_rx_irq(struct e1000_adapter *adapter)
2241 #endif
2243 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2244 struct net_device *netdev = adapter->netdev;
2245 struct pci_dev *pdev = adapter->pdev;
2246 struct e1000_rx_desc *rx_desc;
2247 struct e1000_buffer *buffer_info;
2248 struct sk_buff *skb;
2249 unsigned long flags;
2250 uint32_t length;
2251 uint8_t last_byte;
2252 unsigned int i;
2253 boolean_t cleaned = FALSE;
2255 i = rx_ring->next_to_clean;
2256 rx_desc = E1000_RX_DESC(*rx_ring, i);
2258 while(rx_desc->status & E1000_RXD_STAT_DD) {
2259 buffer_info = &rx_ring->buffer_info[i];
2261 #ifdef CONFIG_E1000_NAPI
2262 if(*work_done >= work_to_do)
2263 break;
2265 (*work_done)++;
2266 #endif
2268 cleaned = TRUE;
2270 pci_unmap_single(pdev,
2271 buffer_info->dma,
2272 buffer_info->length,
2273 PCI_DMA_FROMDEVICE);
2275 skb = buffer_info->skb;
2276 length = le16_to_cpu(rx_desc->length);
2278 if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
2280 /* All receives must fit into a single buffer */
2282 E1000_DBG("Receive packet consumed multiple buffers\n");
2284 dev_kfree_skb_irq(skb);
2285 rx_desc->status = 0;
2286 buffer_info->skb = NULL;
2288 if(++i == rx_ring->count) i = 0;
2290 rx_desc = E1000_RX_DESC(*rx_ring, i);
2291 continue;
2294 if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2296 last_byte = *(skb->data + length - 1);
2298 if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
2299 rx_desc->errors, length, last_byte)) {
2301 spin_lock_irqsave(&adapter->stats_lock, flags);
2303 e1000_tbi_adjust_stats(&adapter->hw,
2304 &adapter->stats,
2305 length, skb->data);
2307 spin_unlock_irqrestore(&adapter->stats_lock,
2308 flags);
2309 length--;
2310 } else {
2312 dev_kfree_skb_irq(skb);
2313 rx_desc->status = 0;
2314 buffer_info->skb = NULL;
2316 if(++i == rx_ring->count) i = 0;
2318 rx_desc = E1000_RX_DESC(*rx_ring, i);
2319 continue;
2323 /* Good Receive */
2324 skb_put(skb, length - ETHERNET_FCS_SIZE);
2326 /* Receive Checksum Offload */
2327 e1000_rx_checksum(adapter, rx_desc, skb);
2329 skb->protocol = eth_type_trans(skb, netdev);
2330 #ifdef CONFIG_E1000_NAPI
2331 #ifdef NETIF_F_HW_VLAN_TX
2332 if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2333 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2334 le16_to_cpu(rx_desc->special &
2335 E1000_RXD_SPC_VLAN_MASK));
2336 } else {
2337 netif_receive_skb(skb);
2339 #else
2340 netif_receive_skb(skb);
2341 #endif
2342 #else /* CONFIG_E1000_NAPI */
2343 #ifdef NETIF_F_HW_VLAN_TX
2344 if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2345 vlan_hwaccel_rx(skb, adapter->vlgrp,
2346 le16_to_cpu(rx_desc->special &
2347 E1000_RXD_SPC_VLAN_MASK));
2348 } else {
2349 netif_rx(skb);
2351 #else
2352 netif_rx(skb);
2353 #endif
2354 #endif /* CONFIG_E1000_NAPI */
2355 netdev->last_rx = jiffies;
2357 rx_desc->status = 0;
2358 buffer_info->skb = NULL;
2360 if(++i == rx_ring->count) i = 0;
2362 rx_desc = E1000_RX_DESC(*rx_ring, i);
2365 rx_ring->next_to_clean = i;
2367 e1000_alloc_rx_buffers(adapter);
2369 return cleaned;
2372 /**
2373 * e1000_alloc_rx_buffers - Replace used receive buffers
2374 * @data: address of board private structure
2375 **/
2377 static void
2378 e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2380 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2381 struct net_device *netdev = adapter->netdev;
2382 struct pci_dev *pdev = adapter->pdev;
2383 struct e1000_rx_desc *rx_desc;
2384 struct e1000_buffer *buffer_info;
2385 struct sk_buff *skb;
2386 int reserve_len = 2;
2387 unsigned int i;
2389 i = rx_ring->next_to_use;
2390 buffer_info = &rx_ring->buffer_info[i];
2392 while(!buffer_info->skb) {
2393 rx_desc = E1000_RX_DESC(*rx_ring, i);
2395 skb = dev_alloc_skb(adapter->rx_buffer_len + reserve_len);
2397 if(!skb) {
2398 /* Better luck next round */
2399 break;
2402 /* Make buffer alignment 2 beyond a 16 byte boundary
2403 * this will result in a 16 byte aligned IP header after
2404 * the 14 byte MAC header is removed
2405 */
2406 skb_reserve(skb, reserve_len);
2408 skb->dev = netdev;
2410 buffer_info->skb = skb;
2411 buffer_info->length = adapter->rx_buffer_len;
2412 buffer_info->dma =
2413 pci_map_single(pdev,
2414 skb->data,
2415 adapter->rx_buffer_len,
2416 PCI_DMA_FROMDEVICE);
2418 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2420 if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) {
2421 /* Force memory writes to complete before letting h/w
2422 * know there are new descriptors to fetch. (Only
2423 * applicable for weak-ordered memory model archs,
2424 * such as IA-64). */
2425 wmb();
2427 E1000_WRITE_REG(&adapter->hw, RDT, i);
2430 if(++i == rx_ring->count) i = 0;
2431 buffer_info = &rx_ring->buffer_info[i];
2434 rx_ring->next_to_use = i;
2437 /**
2438 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
2439 * @adapter:
2440 **/
2442 static void
2443 e1000_smartspeed(struct e1000_adapter *adapter)
2445 uint16_t phy_status;
2446 uint16_t phy_ctrl;
2448 if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
2449 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2450 return;
2452 if(adapter->smartspeed == 0) {
2453 /* If Master/Slave config fault is asserted twice,
2454 * we assume back-to-back */
2455 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2456 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2457 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2458 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2459 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2460 if(phy_ctrl & CR_1000T_MS_ENABLE) {
2461 phy_ctrl &= ~CR_1000T_MS_ENABLE;
2462 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2463 phy_ctrl);
2464 adapter->smartspeed++;
2465 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2466 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
2467 &phy_ctrl)) {
2468 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2469 MII_CR_RESTART_AUTO_NEG);
2470 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
2471 phy_ctrl);
2474 return;
2475 } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
2476 /* If still no link, perhaps using 2/3 pair cable */
2477 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2478 phy_ctrl |= CR_1000T_MS_ENABLE;
2479 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
2480 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2481 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
2482 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2483 MII_CR_RESTART_AUTO_NEG);
2484 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
2487 /* Restart process after E1000_SMARTSPEED_MAX iterations */
2488 if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
2489 adapter->smartspeed = 0;
2492 /**
2493 * e1000_ioctl -
2494 * @netdev:
2495 * @ifreq:
2496 * @cmd:
2497 **/
2499 static int
2500 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2502 switch (cmd) {
2503 #ifdef SIOCGMIIPHY
2504 case SIOCGMIIPHY:
2505 case SIOCGMIIREG:
2506 case SIOCSMIIREG:
2507 return e1000_mii_ioctl(netdev, ifr, cmd);
2508 #endif
2509 #ifdef SIOCETHTOOL
2510 case SIOCETHTOOL:
2511 return e1000_ethtool_ioctl(netdev, ifr);
2512 #endif
2513 default:
2514 return -EOPNOTSUPP;
#ifdef SIOCGMIIPHY
/**
 * e1000_mii_ioctl - MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
 * @netdev: network interface device structure
 * @ifr: ioctl request; carries a struct mii_ioctl_data in ifr_data
 * @cmd: the ioctl command
 *
 * Reads/writes PHY registers on behalf of userspace.  Writes that alter
 * link parameters are mirrored into the driver's own configuration and
 * followed by a reset so driver and PHY state stay in sync.
 **/

static int
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct e1000_adapter *adapter = netdev->priv;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
	int retval;
	uint16_t spddplx;

	/* MII is only meaningful on copper media */
	if (adapter->hw.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy_addr;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				       &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		/* Fix: removed dead store to unused local `mii_reg`;
		 * the value written comes straight from data->val_in. */
		if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
					data->val_in))
			return -EIO;
		if (adapter->hw.phy_type == e1000_phy_m88) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (data->val_in & MII_CR_AUTO_NEG_EN) {
					adapter->hw.autoneg = 1;
					/* advertise everything */
					adapter->hw.autoneg_advertised = 0x2F;
				} else {
					/* decode forced speed/duplex bits
					 * from the MII control word */
					if (data->val_in & 0x40)
						spddplx = SPEED_1000;
					else if (data->val_in & 0x2000)
						spddplx = SPEED_100;
					else
						spddplx = SPEED_10;
					spddplx += (data->val_in & 0x100)
						   ? FULL_DUPLEX : HALF_DUPLEX;
					retval = e1000_set_spd_dplx(adapter,
								    spddplx);
					if (retval)
						return retval;
				}
				/* Apply the new link configuration */
				if (netif_running(adapter->netdev)) {
					e1000_down(adapter);
					e1000_up(adapter);
				} else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				/* these take effect only after a PHY reset */
				if (e1000_phy_reset(&adapter->hw))
					return -EIO;
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
#endif
2600 /**
2601 * e1000_rx_checksum - Receive Checksum Offload for 82543
2602 * @adapter: board private structure
2603 * @rx_desc: receive descriptor
2604 * @sk_buff: socket buffer with received data
2605 **/
2607 static inline void
2608 e1000_rx_checksum(struct e1000_adapter *adapter,
2609 struct e1000_rx_desc *rx_desc,
2610 struct sk_buff *skb)
2612 /* 82543 or newer only */
2613 if((adapter->hw.mac_type < e1000_82543) ||
2614 /* Ignore Checksum bit is set */
2615 (rx_desc->status & E1000_RXD_STAT_IXSM) ||
2616 /* TCP Checksum has not been calculated */
2617 (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) {
2618 skb->ip_summed = CHECKSUM_NONE;
2619 return;
2622 /* At this point we know the hardware did the TCP checksum */
2623 /* now look at the TCP checksum error bit */
2624 if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
2625 /* let the stack verify checksum errors */
2626 skb->ip_summed = CHECKSUM_NONE;
2627 adapter->hw_csum_err++;
2628 } else {
2629 /* TCP checksum is good */
2630 skb->ip_summed = CHECKSUM_UNNECESSARY;
2631 adapter->hw_csum_good++;
2635 void
2636 e1000_pci_set_mwi(struct e1000_hw *hw)
2638 struct e1000_adapter *adapter = hw->back;
2640 #ifdef HAVE_PCI_SET_MWI
2641 pci_set_mwi(adapter->pdev);
2642 #else
2643 pci_write_config_word(adapter->pdev, PCI_COMMAND,
2644 adapter->hw.pci_cmd_word |
2645 PCI_COMMAND_INVALIDATE);
2646 #endif
2649 void
2650 e1000_pci_clear_mwi(struct e1000_hw *hw)
2652 struct e1000_adapter *adapter = hw->back;
2654 #ifdef HAVE_PCI_SET_MWI
2655 pci_clear_mwi(adapter->pdev);
2656 #else
2657 pci_write_config_word(adapter->pdev, PCI_COMMAND,
2658 adapter->hw.pci_cmd_word &
2659 ~PCI_COMMAND_INVALIDATE);
2660 #endif
2663 void
2664 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2666 struct e1000_adapter *adapter = hw->back;
2668 pci_read_config_word(adapter->pdev, reg, value);
2671 void
2672 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2674 struct e1000_adapter *adapter = hw->back;
2676 pci_write_config_word(adapter->pdev, reg, *value);
2679 uint32_t
2680 e1000_io_read(struct e1000_hw *hw, uint32_t port)
2682 return inl(port);
2685 void
2686 e1000_io_write(struct e1000_hw *hw, uint32_t port, uint32_t value)
2688 outl(value, port);
2691 #ifdef NETIF_F_HW_VLAN_TX
2692 static void
2693 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2695 struct e1000_adapter *adapter = netdev->priv;
2696 uint32_t ctrl, rctl;
2698 e1000_irq_disable(adapter);
2699 adapter->vlgrp = grp;
2701 if(grp) {
2702 /* enable VLAN tag insert/strip */
2704 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
2706 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2707 ctrl |= E1000_CTRL_VME;
2708 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2710 /* enable VLAN receive filtering */
2712 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2713 rctl |= E1000_RCTL_VFE;
2714 rctl &= ~E1000_RCTL_CFIEN;
2715 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2716 } else {
2717 /* disable VLAN tag insert/strip */
2719 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2720 ctrl &= ~E1000_CTRL_VME;
2721 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2723 /* disable VLAN filtering */
2725 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2726 rctl &= ~E1000_RCTL_VFE;
2727 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2730 e1000_irq_enable(adapter);
2733 static void
2734 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2736 struct e1000_adapter *adapter = netdev->priv;
2737 uint32_t vfta, index;
2739 /* add VID to filter table */
2741 index = (vid >> 5) & 0x7F;
2742 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2743 vfta |= (1 << (vid & 0x1F));
2744 e1000_write_vfta(&adapter->hw, index, vfta);
2747 static void
2748 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2750 struct e1000_adapter *adapter = netdev->priv;
2751 uint32_t vfta, index;
2753 e1000_irq_disable(adapter);
2755 if(adapter->vlgrp)
2756 adapter->vlgrp->vlan_devices[vid] = NULL;
2758 e1000_irq_enable(adapter);
2760 /* remove VID from filter table*/
2762 index = (vid >> 5) & 0x7F;
2763 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2764 vfta &= ~(1 << (vid & 0x1F));
2765 e1000_write_vfta(&adapter->hw, index, vfta);
2768 static void
2769 e1000_restore_vlan(struct e1000_adapter *adapter)
2771 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2773 if(adapter->vlgrp) {
2774 uint16_t vid;
2775 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2776 if(!adapter->vlgrp->vlan_devices[vid])
2777 continue;
2778 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2782 #endif
2784 int
2785 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
2787 adapter->hw.autoneg = 0;
2789 switch(spddplx) {
2790 case SPEED_10 + DUPLEX_HALF:
2791 adapter->hw.forced_speed_duplex = e1000_10_half;
2792 break;
2793 case SPEED_10 + DUPLEX_FULL:
2794 adapter->hw.forced_speed_duplex = e1000_10_full;
2795 break;
2796 case SPEED_100 + DUPLEX_HALF:
2797 adapter->hw.forced_speed_duplex = e1000_100_half;
2798 break;
2799 case SPEED_100 + DUPLEX_FULL:
2800 adapter->hw.forced_speed_duplex = e1000_100_full;
2801 break;
2802 case SPEED_1000 + DUPLEX_FULL:
2803 adapter->hw.autoneg = 1;
2804 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
2805 break;
2806 case SPEED_1000 + DUPLEX_HALF: /* not supported */
2807 default:
2808 return -EINVAL;
2810 return 0;
2813 static int
2814 e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2816 struct pci_dev *pdev = NULL;
2818 switch(event) {
2819 case SYS_DOWN:
2820 case SYS_HALT:
2821 case SYS_POWER_OFF:
2822 while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2823 if(pci_dev_driver(pdev) == &e1000_driver)
2824 e1000_suspend(pdev, 3);
2827 return NOTIFY_DONE;
2830 static int
2831 e1000_suspend(struct pci_dev *pdev, uint32_t state)
2833 struct net_device *netdev = pci_get_drvdata(pdev);
2834 struct e1000_adapter *adapter = netdev->priv;
2835 uint32_t ctrl, ctrl_ext, rctl, manc, status;
2836 uint32_t wufc = adapter->wol;
2838 netif_device_detach(netdev);
2840 if(netif_running(netdev))
2841 e1000_down(adapter);
2843 status = E1000_READ_REG(&adapter->hw, STATUS);
2844 if(status & E1000_STATUS_LU)
2845 wufc &= ~E1000_WUFC_LNKC;
2847 if(wufc) {
2848 e1000_setup_rctl(adapter);
2849 e1000_set_multi(netdev);
2851 /* turn on all-multi mode if wake on multicast is enabled */
2852 if(adapter->wol & E1000_WUFC_MC) {
2853 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2854 rctl |= E1000_RCTL_MPE;
2855 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2858 if(adapter->hw.mac_type >= e1000_82540) {
2859 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2860 /* advertise wake from D3Cold */
2861 #define E1000_CTRL_ADVD3WUC 0x00100000
2862 /* phy power management enable */
2863 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
2864 ctrl |= E1000_CTRL_ADVD3WUC |
2865 E1000_CTRL_EN_PHY_PWR_MGMT;
2866 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2869 if(adapter->hw.media_type == e1000_media_type_fiber ||
2870 adapter->hw.media_type == e1000_media_type_internal_serdes) {
2871 /* keep the laser running in D3 */
2872 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
2873 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
2874 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
2877 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
2878 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
2879 pci_enable_wake(pdev, 3, 1);
2880 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
2881 } else {
2882 E1000_WRITE_REG(&adapter->hw, WUC, 0);
2883 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
2884 pci_enable_wake(pdev, 3, 0);
2885 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2888 pci_save_state(pdev, adapter->pci_state);
2890 if(adapter->hw.mac_type >= e1000_82540 &&
2891 adapter->hw.media_type == e1000_media_type_copper) {
2892 manc = E1000_READ_REG(&adapter->hw, MANC);
2893 if(manc & E1000_MANC_SMBUS_EN) {
2894 manc |= E1000_MANC_ARP_EN;
2895 E1000_WRITE_REG(&adapter->hw, MANC, manc);
2896 pci_enable_wake(pdev, 3, 1);
2897 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
2901 state = (state > 0) ? 3 : 0;
2902 pci_set_power_state(pdev, state);
2904 return 0;
#ifdef CONFIG_PM
/* Power-management resume: restore PCI state, reset the MAC, and bring
 * the interface back up if it was running when suspended. */
static int
e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev->priv;
	uint32_t manc;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev, adapter->pci_state);

	pci_enable_wake(pdev, 3, 0);
	pci_enable_wake(pdev, 4, 0);	/* 4 == D3 cold */

	e1000_reset(adapter);
	/* clear all wake-up status bits */
	E1000_WRITE_REG(&adapter->hw, WUS, ~0);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	/* The OS is back: turn the MAC's autonomous ARP handling off
	 * again (it was armed in e1000_suspend). */
	if (adapter->hw.mac_type >= e1000_82540 &&
	    adapter->hw.media_type == e1000_media_type_copper) {
		manc = E1000_READ_REG(&adapter->hw, MANC);
		manc &= ~(E1000_MANC_ARP_EN);
		E1000_WRITE_REG(&adapter->hw, MANC, manc);
	}

	return 0;
}
#endif
2940 /* e1000_main.c */