ia64/linux-2.6.18-xen.hg

annotate drivers/net/typhoon.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well behaved
toolstack to ask a domain to balloon to more than its allocation nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also if we partially succeed in increasing the reservation
(i.e. receive less pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
rev   line source
ian@0 1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
ian@0 2 /*
ian@0 3 Written 2002-2004 by David Dillow <dave@thedillows.org>
ian@0 4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
ian@0 5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
ian@0 6
ian@0 7 This software may be used and distributed according to the terms of
ian@0 8 the GNU General Public License (GPL), incorporated herein by reference.
ian@0 9 Drivers based on or derived from this code fall under the GPL and must
ian@0 10 retain the authorship, copyright and license notice. This file is not
ian@0 11 a complete program and may only be used when the entire operating
ian@0 12 system is licensed under the GPL.
ian@0 13
ian@0 14 This software is available on a public web site. It may enable
ian@0 15 cryptographic capabilities of the 3Com hardware, and may be
ian@0 16 exported from the United States under License Exception "TSU"
ian@0 17 pursuant to 15 C.F.R. Section 740.13(e).
ian@0 18
ian@0 19 This work was funded by the National Library of Medicine under
ian@0 20 the Department of Energy project number 0274DD06D1 and NLM project
ian@0 21 number Y1-LM-2015-01.
ian@0 22
ian@0 23 This driver is designed for the 3Com 3CR990 Family of cards with the
ian@0 24 3XP Processor. It has been tested on x86 and sparc64.
ian@0 25
ian@0 26 KNOWN ISSUES:
ian@0 27 *) The current firmware always strips the VLAN tag off, even if
ian@0 28 we tell it not to. You should filter VLANs at the switch
ian@0 29 as a workaround (good practice in any event) until we can
ian@0 30 get this fixed.
ian@0 31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
ian@0 32 issue. Hopefully 3Com will fix it.
ian@0 33 *) Waiting for a command response takes 8ms due to non-preemptable
ian@0 34 polling. Only significant for getting stats and creating
ian@0 35 SAs, but an ugly wart nevertheless.
ian@0 36
ian@0 37 TODO:
ian@0 38 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
ian@0 39 *) Add more support for ethtool (especially for NIC stats)
ian@0 40 *) Allow disabling of RX checksum offloading
ian@0 41 *) Fix MAC changing to work while the interface is up
ian@0 42 (Need to put commands on the TX ring, which changes
ian@0 43 the locking)
ian@0 44 *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
ian@0 45 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
ian@0 46 */
ian@0 47
ian@0 48 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
ian@0 49 * Setting to > 1518 effectively disables this feature.
ian@0 50 */
ian@0 51 static int rx_copybreak = 200;
ian@0 52
ian@0 53 /* Should we use MMIO or Port IO?
ian@0 54 * 0: Port IO
ian@0 55 * 1: MMIO
ian@0 56 * 2: Try MMIO, fallback to Port IO
ian@0 57 */
ian@0 58 static unsigned int use_mmio = 2;
ian@0 59
ian@0 60 /* end user-configurable values */
ian@0 61
ian@0 62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
ian@0 63 */
ian@0 64 static const int multicast_filter_limit = 32;
ian@0 65
ian@0 66 /* Operational parameters that are set at compile time. */
ian@0 67
ian@0 68 /* Keep the ring sizes a power of two for compile efficiency.
ian@0 69 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
ian@0 70 * Making the Tx ring too large decreases the effectiveness of channel
ian@0 71 * bonding and packet priority.
ian@0 72 * There are no ill effects from too-large receive rings.
ian@0 73 *
ian@0 74 * We don't currently use the Hi Tx ring so, don't make it very big.
ian@0 75 *
ian@0 76 * Beware that if we start using the Hi Tx ring, we will need to change
ian@0 77 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
ian@0 78 */
ian@0 79 #define TXHI_ENTRIES 2
ian@0 80 #define TXLO_ENTRIES 128
ian@0 81 #define RX_ENTRIES 32
ian@0 82 #define COMMAND_ENTRIES 16
ian@0 83 #define RESPONSE_ENTRIES 32
ian@0 84
ian@0 85 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
ian@0 86 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
ian@0 87
ian@0 88 /* The 3XP will preload and remove 64 entries from the free buffer
ian@0 89 * list, and we need one entry to keep the ring from wrapping, so
ian@0 90 * to keep this a power of two, we use 128 entries.
ian@0 91 */
ian@0 92 #define RXFREE_ENTRIES 128
ian@0 93 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
ian@0 94
ian@0 95 /* Operational parameters that usually are not changed. */
ian@0 96
ian@0 97 /* Time in jiffies before concluding the transmitter is hung. */
ian@0 98 #define TX_TIMEOUT (2*HZ)
ian@0 99
ian@0 100 #define PKT_BUF_SZ 1536
ian@0 101
ian@0 102 #define DRV_MODULE_NAME "typhoon"
ian@0 103 #define DRV_MODULE_VERSION "1.5.7"
ian@0 104 #define DRV_MODULE_RELDATE "05/01/07"
ian@0 105 #define PFX DRV_MODULE_NAME ": "
ian@0 106 #define ERR_PFX KERN_ERR PFX
ian@0 107
ian@0 108 #include <linux/module.h>
ian@0 109 #include <linux/kernel.h>
ian@0 110 #include <linux/string.h>
ian@0 111 #include <linux/timer.h>
ian@0 112 #include <linux/errno.h>
ian@0 113 #include <linux/ioport.h>
ian@0 114 #include <linux/slab.h>
ian@0 115 #include <linux/interrupt.h>
ian@0 116 #include <linux/pci.h>
ian@0 117 #include <linux/netdevice.h>
ian@0 118 #include <linux/etherdevice.h>
ian@0 119 #include <linux/skbuff.h>
ian@0 120 #include <linux/init.h>
ian@0 121 #include <linux/delay.h>
ian@0 122 #include <linux/ethtool.h>
ian@0 123 #include <linux/if_vlan.h>
ian@0 124 #include <linux/crc32.h>
ian@0 125 #include <linux/bitops.h>
ian@0 126 #include <asm/processor.h>
ian@0 127 #include <asm/io.h>
ian@0 128 #include <asm/uaccess.h>
ian@0 129 #include <linux/in6.h>
ian@0 130 #include <asm/checksum.h>
ian@0 131 #include <linux/version.h>
ian@0 132 #include <linux/dma-mapping.h>
ian@0 133
ian@0 134 #include "typhoon.h"
ian@0 135 #include "typhoon-firmware.h"
ian@0 136
ian@0 137 static const char version[] __devinitdata =
ian@0 138 "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
ian@0 139
ian@0 140 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
ian@0 141 MODULE_VERSION(DRV_MODULE_VERSION);
ian@0 142 MODULE_LICENSE("GPL");
ian@0 143 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
ian@0 144 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
ian@0 145 "the buffer given back to the NIC. Default "
ian@0 146 "is 200.");
ian@0 147 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
ian@0 148 "Default is to try MMIO and fallback to PIO.");
ian@0 149 module_param(rx_copybreak, int, 0);
ian@0 150 module_param(use_mmio, int, 0);
ian@0 151
ian@0 152 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
ian@0 153 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
ian@0 154 #undef NETIF_F_TSO
ian@0 155 #endif
ian@0 156
ian@0 157 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
ian@0 158 #error TX ring too small!
ian@0 159 #endif
ian@0 160
ian@0 161 struct typhoon_card_info {
ian@0 162 char *name;
ian@0 163 int capabilities;
ian@0 164 };
ian@0 165
ian@0 166 #define TYPHOON_CRYPTO_NONE 0x00
ian@0 167 #define TYPHOON_CRYPTO_DES 0x01
ian@0 168 #define TYPHOON_CRYPTO_3DES 0x02
ian@0 169 #define TYPHOON_CRYPTO_VARIABLE 0x04
ian@0 170 #define TYPHOON_FIBER 0x08
ian@0 171 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10
ian@0 172
ian@0 173 enum typhoon_cards {
ian@0 174 TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
ian@0 175 TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
ian@0 176 TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
ian@0 177 TYPHOON_FXM,
ian@0 178 };
ian@0 179
ian@0 180 /* directly indexed by enum typhoon_cards, above */
ian@0 181 static const struct typhoon_card_info typhoon_card_info[] __devinitdata = {
ian@0 182 { "3Com Typhoon (3C990-TX)",
ian@0 183 TYPHOON_CRYPTO_NONE},
ian@0 184 { "3Com Typhoon (3CR990-TX-95)",
ian@0 185 TYPHOON_CRYPTO_DES},
ian@0 186 { "3Com Typhoon (3CR990-TX-97)",
ian@0 187 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
ian@0 188 { "3Com Typhoon (3C990SVR)",
ian@0 189 TYPHOON_CRYPTO_NONE},
ian@0 190 { "3Com Typhoon (3CR990SVR95)",
ian@0 191 TYPHOON_CRYPTO_DES},
ian@0 192 { "3Com Typhoon (3CR990SVR97)",
ian@0 193 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
ian@0 194 { "3Com Typhoon2 (3C990B-TX-M)",
ian@0 195 TYPHOON_CRYPTO_VARIABLE},
ian@0 196 { "3Com Typhoon2 (3C990BSVR)",
ian@0 197 TYPHOON_CRYPTO_VARIABLE},
ian@0 198 { "3Com Typhoon (3CR990-FX-95)",
ian@0 199 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
ian@0 200 { "3Com Typhoon (3CR990-FX-97)",
ian@0 201 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
ian@0 202 { "3Com Typhoon (3CR990-FX-95 Server)",
ian@0 203 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
ian@0 204 { "3Com Typhoon (3CR990-FX-97 Server)",
ian@0 205 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
ian@0 206 { "3Com Typhoon2 (3C990B-FX-97)",
ian@0 207 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
ian@0 208 };
ian@0 209
ian@0 210 /* Notes on the new subsystem numbering scheme:
ian@0 211 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
ian@0 212 * bit 4 indicates if this card has secured firmware (we don't support it)
ian@0 213 * bit 8 indicates if this is a (0) copper or (1) fiber card
ian@0 214 * bits 12-16 indicate card type: (0) client and (1) server
ian@0 215 */
ian@0 216 static struct pci_device_id typhoon_pci_tbl[] = {
ian@0 217 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
ian@0 218 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
ian@0 219 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
ian@0 220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
ian@0 221 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
ian@0 222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
ian@0 223 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
ian@0 224 PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
ian@0 225 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
ian@0 226 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
ian@0 227 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
ian@0 228 PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
ian@0 229 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
ian@0 230 PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
ian@0 231 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
ian@0 232 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
ian@0 233 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
ian@0 234 PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
ian@0 235 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
ian@0 236 PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
ian@0 237 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
ian@0 238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
ian@0 239 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
ian@0 240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
ian@0 241 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
ian@0 242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
ian@0 243 { 0, }
ian@0 244 };
ian@0 245 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
ian@0 246
ian@0 247 /* Define the shared memory area
ian@0 248 * Align everything the 3XP will normally be using.
ian@0 249 * We'll need to move/align txHi if we start using that ring.
ian@0 250 */
ian@0 251 #define __3xp_aligned ____cacheline_aligned
ian@0 252 struct typhoon_shared {
ian@0 253 struct typhoon_interface iface;
ian@0 254 struct typhoon_indexes indexes __3xp_aligned;
ian@0 255 struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
ian@0 256 struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
ian@0 257 struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
ian@0 258 struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
ian@0 259 struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
ian@0 260 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
ian@0 261 u32 zeroWord;
ian@0 262 struct tx_desc txHi[TXHI_ENTRIES];
ian@0 263 } __attribute__ ((packed));
ian@0 264
ian@0 265 struct rxbuff_ent {
ian@0 266 struct sk_buff *skb;
ian@0 267 dma_addr_t dma_addr;
ian@0 268 };
ian@0 269
ian@0 270 struct typhoon {
ian@0 271 /* Tx cache line section */
ian@0 272 struct transmit_ring txLoRing ____cacheline_aligned;
ian@0 273 struct pci_dev * tx_pdev;
ian@0 274 void __iomem *tx_ioaddr;
ian@0 275 u32 txlo_dma_addr;
ian@0 276
ian@0 277 /* Irq/Rx cache line section */
ian@0 278 void __iomem *ioaddr ____cacheline_aligned;
ian@0 279 struct typhoon_indexes *indexes;
ian@0 280 u8 awaiting_resp;
ian@0 281 u8 duplex;
ian@0 282 u8 speed;
ian@0 283 u8 card_state;
ian@0 284 struct basic_ring rxLoRing;
ian@0 285 struct pci_dev * pdev;
ian@0 286 struct net_device * dev;
ian@0 287 spinlock_t state_lock;
ian@0 288 struct vlan_group * vlgrp;
ian@0 289 struct basic_ring rxHiRing;
ian@0 290 struct basic_ring rxBuffRing;
ian@0 291 struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
ian@0 292
ian@0 293 /* general section */
ian@0 294 spinlock_t command_lock ____cacheline_aligned;
ian@0 295 struct basic_ring cmdRing;
ian@0 296 struct basic_ring respRing;
ian@0 297 struct net_device_stats stats;
ian@0 298 struct net_device_stats stats_saved;
ian@0 299 const char * name;
ian@0 300 struct typhoon_shared * shared;
ian@0 301 dma_addr_t shared_dma;
ian@0 302 u16 xcvr_select;
ian@0 303 u16 wol_events;
ian@0 304 u32 offload;
ian@0 305
ian@0 306 /* unused stuff (future use) */
ian@0 307 int capabilities;
ian@0 308 struct transmit_ring txHiRing;
ian@0 309 };
ian@0 310
ian@0 311 enum completion_wait_values {
ian@0 312 NoWait = 0, WaitNoSleep, WaitSleep,
ian@0 313 };
ian@0 314
ian@0 315 /* These are the values for the typhoon.card_state variable.
ian@0 316 * These determine where the statistics will come from in get_stats().
ian@0 317 * The sleep image does not support the statistics we need.
ian@0 318 */
ian@0 319 enum state_values {
ian@0 320 Sleeping = 0, Running,
ian@0 321 };
ian@0 322
ian@0 323 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
ian@0 324 * cannot pass a read, so this forces current writes to post.
ian@0 325 */
ian@0 326 #define typhoon_post_pci_writes(x) \
ian@0 327 do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
ian@0 328
ian@0 329 /* We'll wait up to six seconds for a reset, and half a second normally.
ian@0 330 */
ian@0 331 #define TYPHOON_UDELAY 50
ian@0 332 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
ian@0 333 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
ian@0 334 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
ian@0 335
ian@0 336 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
ian@0 337 #define typhoon_synchronize_irq(x) synchronize_irq()
ian@0 338 #else
ian@0 339 #define typhoon_synchronize_irq(x) synchronize_irq(x)
ian@0 340 #endif
ian@0 341
ian@0 342 #if defined(NETIF_F_TSO)
ian@0 343 #define skb_tso_size(x) (skb_shinfo(x)->gso_size)
ian@0 344 #define TSO_NUM_DESCRIPTORS 2
ian@0 345 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
ian@0 346 #else
ian@0 347 #define NETIF_F_TSO 0
ian@0 348 #define skb_tso_size(x) 0
ian@0 349 #define TSO_NUM_DESCRIPTORS 0
ian@0 350 #define TSO_OFFLOAD_ON 0
ian@0 351 #endif
ian@0 352
ian@0 353 static inline void
ian@0 354 typhoon_inc_index(u32 *index, const int count, const int num_entries)
ian@0 355 {
ian@0 356 /* Increment a ring index -- we can use this for all rings except
ian@0 357 * the Rx rings, as they use different size descriptors
ian@0 358 * otherwise, everything is the same size as a cmd_desc
ian@0 359 */
ian@0 360 *index += count * sizeof(struct cmd_desc);
ian@0 361 *index %= num_entries * sizeof(struct cmd_desc);
ian@0 362 }
ian@0 363
ian@0 364 static inline void
ian@0 365 typhoon_inc_cmd_index(u32 *index, const int count)
ian@0 366 {
ian@0 367 typhoon_inc_index(index, count, COMMAND_ENTRIES);
ian@0 368 }
ian@0 369
ian@0 370 static inline void
ian@0 371 typhoon_inc_resp_index(u32 *index, const int count)
ian@0 372 {
ian@0 373 typhoon_inc_index(index, count, RESPONSE_ENTRIES);
ian@0 374 }
ian@0 375
ian@0 376 static inline void
ian@0 377 typhoon_inc_rxfree_index(u32 *index, const int count)
ian@0 378 {
ian@0 379 typhoon_inc_index(index, count, RXFREE_ENTRIES);
ian@0 380 }
ian@0 381
ian@0 382 static inline void
ian@0 383 typhoon_inc_tx_index(u32 *index, const int count)
ian@0 384 {
ian@0 385 /* if we start using the Hi Tx ring, this needs updating */
ian@0 386 typhoon_inc_index(index, count, TXLO_ENTRIES);
ian@0 387 }
ian@0 388
ian@0 389 static inline void
ian@0 390 typhoon_inc_rx_index(u32 *index, const int count)
ian@0 391 {
ian@0 392 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
ian@0 393 *index += count * sizeof(struct rx_desc);
ian@0 394 *index %= RX_ENTRIES * sizeof(struct rx_desc);
ian@0 395 }
ian@0 396
ian@0 397 static int
ian@0 398 typhoon_reset(void __iomem *ioaddr, int wait_type)
ian@0 399 {
ian@0 400 int i, err = 0;
ian@0 401 int timeout;
ian@0 402
ian@0 403 if(wait_type == WaitNoSleep)
ian@0 404 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
ian@0 405 else
ian@0 406 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
ian@0 407
ian@0 408 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
ian@0 409 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 410
ian@0 411 iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
ian@0 412 typhoon_post_pci_writes(ioaddr);
ian@0 413 udelay(1);
ian@0 414 iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
ian@0 415
ian@0 416 if(wait_type != NoWait) {
ian@0 417 for(i = 0; i < timeout; i++) {
ian@0 418 if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
ian@0 419 TYPHOON_STATUS_WAITING_FOR_HOST)
ian@0 420 goto out;
ian@0 421
ian@0 422 if(wait_type == WaitSleep)
ian@0 423 schedule_timeout_uninterruptible(1);
ian@0 424 else
ian@0 425 udelay(TYPHOON_UDELAY);
ian@0 426 }
ian@0 427
ian@0 428 err = -ETIMEDOUT;
ian@0 429 }
ian@0 430
ian@0 431 out:
ian@0 432 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
ian@0 433 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 434
ian@0 435 /* The 3XP seems to need a little extra time to complete the load
ian@0 436 * of the sleep image before we can reliably boot it. Failure to
ian@0 437 * do this occasionally results in a hung adapter after boot in
ian@0 438 * typhoon_init_one() while trying to read the MAC address or
ian@0 439 * putting the card to sleep. 3Com's driver waits 5ms, but
ian@0 440 * that seems to be overkill. However, if we can sleep, we might
ian@0 441 * as well give it that much time. Otherwise, we'll give it 500us,
ian@0 442 * which should be enough (I've see it work well at 100us, but still
ian@0 443 * saw occasional problems.)
ian@0 444 */
ian@0 445 if(wait_type == WaitSleep)
ian@0 446 msleep(5);
ian@0 447 else
ian@0 448 udelay(500);
ian@0 449 return err;
ian@0 450 }
ian@0 451
ian@0 452 static int
ian@0 453 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
ian@0 454 {
ian@0 455 int i, err = 0;
ian@0 456
ian@0 457 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
ian@0 458 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
ian@0 459 goto out;
ian@0 460 udelay(TYPHOON_UDELAY);
ian@0 461 }
ian@0 462
ian@0 463 err = -ETIMEDOUT;
ian@0 464
ian@0 465 out:
ian@0 466 return err;
ian@0 467 }
ian@0 468
ian@0 469 static inline void
ian@0 470 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
ian@0 471 {
ian@0 472 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
ian@0 473 netif_carrier_off(dev);
ian@0 474 else
ian@0 475 netif_carrier_on(dev);
ian@0 476 }
ian@0 477
ian@0 478 static inline void
ian@0 479 typhoon_hello(struct typhoon *tp)
ian@0 480 {
ian@0 481 struct basic_ring *ring = &tp->cmdRing;
ian@0 482 struct cmd_desc *cmd;
ian@0 483
ian@0 484 /* We only get a hello request if we've not sent anything to the
ian@0 485 * card in a long while. If the lock is held, then we're in the
ian@0 486 * process of issuing a command, so we don't need to respond.
ian@0 487 */
ian@0 488 if(spin_trylock(&tp->command_lock)) {
ian@0 489 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
ian@0 490 typhoon_inc_cmd_index(&ring->lastWrite, 1);
ian@0 491
ian@0 492 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
ian@0 493 smp_wmb();
ian@0 494 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
ian@0 495 spin_unlock(&tp->command_lock);
ian@0 496 }
ian@0 497 }
ian@0 498
ian@0 499 static int
ian@0 500 typhoon_process_response(struct typhoon *tp, int resp_size,
ian@0 501 struct resp_desc *resp_save)
ian@0 502 {
ian@0 503 struct typhoon_indexes *indexes = tp->indexes;
ian@0 504 struct resp_desc *resp;
ian@0 505 u8 *base = tp->respRing.ringBase;
ian@0 506 int count, len, wrap_len;
ian@0 507 u32 cleared;
ian@0 508 u32 ready;
ian@0 509
ian@0 510 cleared = le32_to_cpu(indexes->respCleared);
ian@0 511 ready = le32_to_cpu(indexes->respReady);
ian@0 512 while(cleared != ready) {
ian@0 513 resp = (struct resp_desc *)(base + cleared);
ian@0 514 count = resp->numDesc + 1;
ian@0 515 if(resp_save && resp->seqNo) {
ian@0 516 if(count > resp_size) {
ian@0 517 resp_save->flags = TYPHOON_RESP_ERROR;
ian@0 518 goto cleanup;
ian@0 519 }
ian@0 520
ian@0 521 wrap_len = 0;
ian@0 522 len = count * sizeof(*resp);
ian@0 523 if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
ian@0 524 wrap_len = cleared + len - RESPONSE_RING_SIZE;
ian@0 525 len = RESPONSE_RING_SIZE - cleared;
ian@0 526 }
ian@0 527
ian@0 528 memcpy(resp_save, resp, len);
ian@0 529 if(unlikely(wrap_len)) {
ian@0 530 resp_save += len / sizeof(*resp);
ian@0 531 memcpy(resp_save, base, wrap_len);
ian@0 532 }
ian@0 533
ian@0 534 resp_save = NULL;
ian@0 535 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
ian@0 536 typhoon_media_status(tp->dev, resp);
ian@0 537 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
ian@0 538 typhoon_hello(tp);
ian@0 539 } else {
ian@0 540 printk(KERN_ERR "%s: dumping unexpected response "
ian@0 541 "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
ian@0 542 tp->name, le16_to_cpu(resp->cmd),
ian@0 543 resp->numDesc, resp->flags,
ian@0 544 le16_to_cpu(resp->parm1),
ian@0 545 le32_to_cpu(resp->parm2),
ian@0 546 le32_to_cpu(resp->parm3));
ian@0 547 }
ian@0 548
ian@0 549 cleanup:
ian@0 550 typhoon_inc_resp_index(&cleared, count);
ian@0 551 }
ian@0 552
ian@0 553 indexes->respCleared = cpu_to_le32(cleared);
ian@0 554 wmb();
ian@0 555 return (resp_save == NULL);
ian@0 556 }
ian@0 557
ian@0 558 static inline int
ian@0 559 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
ian@0 560 {
ian@0 561 /* this works for all descriptors but rx_desc, as they are a
ian@0 562 * different size than the cmd_desc -- everyone else is the same
ian@0 563 */
ian@0 564 lastWrite /= sizeof(struct cmd_desc);
ian@0 565 lastRead /= sizeof(struct cmd_desc);
ian@0 566 return (ringSize + lastRead - lastWrite - 1) % ringSize;
ian@0 567 }
ian@0 568
ian@0 569 static inline int
ian@0 570 typhoon_num_free_cmd(struct typhoon *tp)
ian@0 571 {
ian@0 572 int lastWrite = tp->cmdRing.lastWrite;
ian@0 573 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
ian@0 574
ian@0 575 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
ian@0 576 }
ian@0 577
ian@0 578 static inline int
ian@0 579 typhoon_num_free_resp(struct typhoon *tp)
ian@0 580 {
ian@0 581 int respReady = le32_to_cpu(tp->indexes->respReady);
ian@0 582 int respCleared = le32_to_cpu(tp->indexes->respCleared);
ian@0 583
ian@0 584 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
ian@0 585 }
ian@0 586
ian@0 587 static inline int
ian@0 588 typhoon_num_free_tx(struct transmit_ring *ring)
ian@0 589 {
ian@0 590 /* if we start using the Hi Tx ring, this needs updating */
ian@0 591 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
ian@0 592 }
ian@0 593
ian@0 594 static int
ian@0 595 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
ian@0 596 int num_resp, struct resp_desc *resp)
ian@0 597 {
ian@0 598 struct typhoon_indexes *indexes = tp->indexes;
ian@0 599 struct basic_ring *ring = &tp->cmdRing;
ian@0 600 struct resp_desc local_resp;
ian@0 601 int i, err = 0;
ian@0 602 int got_resp;
ian@0 603 int freeCmd, freeResp;
ian@0 604 int len, wrap_len;
ian@0 605
ian@0 606 spin_lock(&tp->command_lock);
ian@0 607
ian@0 608 freeCmd = typhoon_num_free_cmd(tp);
ian@0 609 freeResp = typhoon_num_free_resp(tp);
ian@0 610
ian@0 611 if(freeCmd < num_cmd || freeResp < num_resp) {
ian@0 612 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
ian@0 613 "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
ian@0 614 freeResp, num_resp);
ian@0 615 err = -ENOMEM;
ian@0 616 goto out;
ian@0 617 }
ian@0 618
ian@0 619 if(cmd->flags & TYPHOON_CMD_RESPOND) {
ian@0 620 /* If we're expecting a response, but the caller hasn't given
ian@0 621 * us a place to put it, we'll provide one.
ian@0 622 */
ian@0 623 tp->awaiting_resp = 1;
ian@0 624 if(resp == NULL) {
ian@0 625 resp = &local_resp;
ian@0 626 num_resp = 1;
ian@0 627 }
ian@0 628 }
ian@0 629
ian@0 630 wrap_len = 0;
ian@0 631 len = num_cmd * sizeof(*cmd);
ian@0 632 if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
ian@0 633 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
ian@0 634 len = COMMAND_RING_SIZE - ring->lastWrite;
ian@0 635 }
ian@0 636
ian@0 637 memcpy(ring->ringBase + ring->lastWrite, cmd, len);
ian@0 638 if(unlikely(wrap_len)) {
ian@0 639 struct cmd_desc *wrap_ptr = cmd;
ian@0 640 wrap_ptr += len / sizeof(*cmd);
ian@0 641 memcpy(ring->ringBase, wrap_ptr, wrap_len);
ian@0 642 }
ian@0 643
ian@0 644 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
ian@0 645
ian@0 646 /* "I feel a presence... another warrior is on the mesa."
ian@0 647 */
ian@0 648 wmb();
ian@0 649 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
ian@0 650 typhoon_post_pci_writes(tp->ioaddr);
ian@0 651
ian@0 652 if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
ian@0 653 goto out;
ian@0 654
ian@0 655 /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
ian@0 656 * preempt or do anything other than take interrupts. So, don't
ian@0 657 * wait for a response unless you have to.
ian@0 658 *
ian@0 659 * I've thought about trying to sleep here, but we're called
ian@0 660 * from many contexts that don't allow that. Also, given the way
ian@0 661 * 3Com has implemented irq coalescing, we would likely timeout --
ian@0 662 * this has been observed in real life!
ian@0 663 *
ian@0 664 * The big killer is we have to wait to get stats from the card,
ian@0 665 * though we could go to a periodic refresh of those if we don't
ian@0 666 * mind them getting somewhat stale. The rest of the waiting
ian@0 667 * commands occur during open/close/suspend/resume, so they aren't
ian@0 668 * time critical. Creating SAs in the future will also have to
ian@0 669 * wait here.
ian@0 670 */
ian@0 671 got_resp = 0;
ian@0 672 for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
ian@0 673 if(indexes->respCleared != indexes->respReady)
ian@0 674 got_resp = typhoon_process_response(tp, num_resp,
ian@0 675 resp);
ian@0 676 udelay(TYPHOON_UDELAY);
ian@0 677 }
ian@0 678
ian@0 679 if(!got_resp) {
ian@0 680 err = -ETIMEDOUT;
ian@0 681 goto out;
ian@0 682 }
ian@0 683
ian@0 684 /* Collect the error response even if we don't care about the
ian@0 685 * rest of the response
ian@0 686 */
ian@0 687 if(resp->flags & TYPHOON_RESP_ERROR)
ian@0 688 err = -EIO;
ian@0 689
ian@0 690 out:
ian@0 691 if(tp->awaiting_resp) {
ian@0 692 tp->awaiting_resp = 0;
ian@0 693 smp_wmb();
ian@0 694
ian@0 695 /* Ugh. If a response was added to the ring between
ian@0 696 * the call to typhoon_process_response() and the clearing
ian@0 697 * of tp->awaiting_resp, we could have missed the interrupt
ian@0 698 * and it could hang in the ring an indeterminate amount of
ian@0 699 * time. So, check for it, and interrupt ourselves if this
ian@0 700 * is the case.
ian@0 701 */
ian@0 702 if(indexes->respCleared != indexes->respReady)
ian@0 703 iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
ian@0 704 }
ian@0 705
ian@0 706 spin_unlock(&tp->command_lock);
ian@0 707 return err;
ian@0 708 }
ian@0 709
ian@0 710 static void
ian@0 711 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
ian@0 712 {
ian@0 713 struct typhoon *tp = netdev_priv(dev);
ian@0 714 struct cmd_desc xp_cmd;
ian@0 715 int err;
ian@0 716
ian@0 717 spin_lock_bh(&tp->state_lock);
ian@0 718 if(!tp->vlgrp != !grp) {
ian@0 719 /* We've either been turned on for the first time, or we've
ian@0 720 * been turned off. Update the 3XP.
ian@0 721 */
ian@0 722 if(grp)
ian@0 723 tp->offload |= TYPHOON_OFFLOAD_VLAN;
ian@0 724 else
ian@0 725 tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
ian@0 726
ian@0 727 /* If the interface is up, the runtime is running -- and we
ian@0 728 * must be up for the vlan core to call us.
ian@0 729 *
ian@0 730 * Do the command outside of the spin lock, as it is slow.
ian@0 731 */
ian@0 732 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
ian@0 733 TYPHOON_CMD_SET_OFFLOAD_TASKS);
ian@0 734 xp_cmd.parm2 = tp->offload;
ian@0 735 xp_cmd.parm3 = tp->offload;
ian@0 736 spin_unlock_bh(&tp->state_lock);
ian@0 737 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 738 if(err < 0)
ian@0 739 printk("%s: vlan offload error %d\n", tp->name, -err);
ian@0 740 spin_lock_bh(&tp->state_lock);
ian@0 741 }
ian@0 742
ian@0 743 /* now make the change visible */
ian@0 744 tp->vlgrp = grp;
ian@0 745 spin_unlock_bh(&tp->state_lock);
ian@0 746 }
ian@0 747
ian@0 748 static void
ian@0 749 typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
ian@0 750 {
ian@0 751 struct typhoon *tp = netdev_priv(dev);
ian@0 752 spin_lock_bh(&tp->state_lock);
ian@0 753 if(tp->vlgrp)
ian@0 754 tp->vlgrp->vlan_devices[vid] = NULL;
ian@0 755 spin_unlock_bh(&tp->state_lock);
ian@0 756 }
ian@0 757
/* Write a TCP segmentation (TSO) option descriptor into the Tx ring.
 *
 * @skb:      the packet to be segmented (supplies length and MSS)
 * @txRing:   ring to place the descriptor in; lastWrite is advanced
 * @ring_dma: bus address of the ring base, used to compute a bus
 *            pointer back to this descriptor's own bytesTx field
 *            (respAddrLo), presumably so the NIC can write its
 *            completion/status there -- confirm against firmware docs.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	/* NOTE(review): the MSS is byte-swapped with cpu_to_le16() but the
	 * FIRST/LAST flags are OR'd in unconverted -- presumably those
	 * constants are defined pre-swapped; verify against typhoon.h on
	 * big-endian.
	 */
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
ian@0 778
/* hard_start_xmit handler: post one sk_buff to the low-priority Tx ring.
 *
 * Builds a TYPHOON_TX_DESC header descriptor (which stashes the raw skb
 * pointer in addr/addrHi so typhoon_clean_tx() can free it on
 * completion), optionally a TSO option descriptor, and one
 * TYPHOON_FRAG_DESC per DMA-mapped piece of the packet, then kicks the
 * 3XP by writing lastWrite to the ring's doorbell register.
 *
 * Always returns 0; if the ring is too full for another worst-case
 * packet the queue is stopped (and possibly immediately re-woken)
 * before returning.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* Not a DMA address: the header descriptor carries the skb pointer
	 * itself, split across addr/addrHi, for the completion path.
	 */
	first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
	first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_HW) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* Ask the firmware to insert the 802.1Q tag on the wire. */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: one fragment descriptor covers the whole
		 * packet.
		 */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* Map the linear head first, then one descriptor per
		 * page fragment.
		 */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->addr = cpu_to_le32(skb_dma);
			txd->addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
ian@0 929
/* Program the 3XP receive filter to match dev->flags and the multicast
 * list.
 *
 * Promiscuous mode, accept-all-multicast (also used when the list
 * exceeds multicast_filter_limit), and a 64-bit multicast hash filter
 * (low 6 bits of the ether_crc of each address select a hash bit) are
 * supported.  Issues TYPHOON_CMD_SET_MULTICAST_HASH (when hashing) and
 * then TYPHOON_CMD_SET_RX_FILTER; command failures are ignored here.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	u16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
		       dev->name);
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if((dev->mc_count > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if(dev->mc_count) {
		struct dev_mc_list *mclist;
		int i;

		memset(mc_filter, 0, sizeof(mc_filter));
		for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		    i++, mclist = mclist->next) {
			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
ian@0 972
ian@0 973 static int
ian@0 974 typhoon_do_get_stats(struct typhoon *tp)
ian@0 975 {
ian@0 976 struct net_device_stats *stats = &tp->stats;
ian@0 977 struct net_device_stats *saved = &tp->stats_saved;
ian@0 978 struct cmd_desc xp_cmd;
ian@0 979 struct resp_desc xp_resp[7];
ian@0 980 struct stats_resp *s = (struct stats_resp *) xp_resp;
ian@0 981 int err;
ian@0 982
ian@0 983 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
ian@0 984 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
ian@0 985 if(err < 0)
ian@0 986 return err;
ian@0 987
ian@0 988 /* 3Com's Linux driver uses txMultipleCollisions as it's
ian@0 989 * collisions value, but there is some other collision info as well...
ian@0 990 *
ian@0 991 * The extra status reported would be a good candidate for
ian@0 992 * ethtool_ops->get_{strings,stats}()
ian@0 993 */
ian@0 994 stats->tx_packets = le32_to_cpu(s->txPackets);
ian@0 995 stats->tx_bytes = le32_to_cpu(s->txBytes);
ian@0 996 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
ian@0 997 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
ian@0 998 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
ian@0 999 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
ian@0 1000 stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
ian@0 1001 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
ian@0 1002 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
ian@0 1003 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
ian@0 1004 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
ian@0 1005 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
ian@0 1006 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
ian@0 1007 SPEED_100 : SPEED_10;
ian@0 1008 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
ian@0 1009 DUPLEX_FULL : DUPLEX_HALF;
ian@0 1010
ian@0 1011 /* add in the saved statistics
ian@0 1012 */
ian@0 1013 stats->tx_packets += saved->tx_packets;
ian@0 1014 stats->tx_bytes += saved->tx_bytes;
ian@0 1015 stats->tx_errors += saved->tx_errors;
ian@0 1016 stats->collisions += saved->collisions;
ian@0 1017 stats->rx_packets += saved->rx_packets;
ian@0 1018 stats->rx_bytes += saved->rx_bytes;
ian@0 1019 stats->rx_fifo_errors += saved->rx_fifo_errors;
ian@0 1020 stats->rx_errors += saved->rx_errors;
ian@0 1021 stats->rx_crc_errors += saved->rx_crc_errors;
ian@0 1022 stats->rx_length_errors += saved->rx_length_errors;
ian@0 1023
ian@0 1024 return 0;
ian@0 1025 }
ian@0 1026
ian@0 1027 static struct net_device_stats *
ian@0 1028 typhoon_get_stats(struct net_device *dev)
ian@0 1029 {
ian@0 1030 struct typhoon *tp = netdev_priv(dev);
ian@0 1031 struct net_device_stats *stats = &tp->stats;
ian@0 1032 struct net_device_stats *saved = &tp->stats_saved;
ian@0 1033
ian@0 1034 smp_rmb();
ian@0 1035 if(tp->card_state == Sleeping)
ian@0 1036 return saved;
ian@0 1037
ian@0 1038 if(typhoon_do_get_stats(tp) < 0) {
ian@0 1039 printk(KERN_ERR "%s: error getting stats\n", dev->name);
ian@0 1040 return saved;
ian@0 1041 }
ian@0 1042
ian@0 1043 return stats;
ian@0 1044 }
ian@0 1045
ian@0 1046 static int
ian@0 1047 typhoon_set_mac_address(struct net_device *dev, void *addr)
ian@0 1048 {
ian@0 1049 struct sockaddr *saddr = (struct sockaddr *) addr;
ian@0 1050
ian@0 1051 if(netif_running(dev))
ian@0 1052 return -EBUSY;
ian@0 1053
ian@0 1054 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
ian@0 1055 return 0;
ian@0 1056 }
ian@0 1057
ian@0 1058 static void
ian@0 1059 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
ian@0 1060 {
ian@0 1061 struct typhoon *tp = netdev_priv(dev);
ian@0 1062 struct pci_dev *pci_dev = tp->pdev;
ian@0 1063 struct cmd_desc xp_cmd;
ian@0 1064 struct resp_desc xp_resp[3];
ian@0 1065
ian@0 1066 smp_rmb();
ian@0 1067 if(tp->card_state == Sleeping) {
ian@0 1068 strcpy(info->fw_version, "Sleep image");
ian@0 1069 } else {
ian@0 1070 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
ian@0 1071 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
ian@0 1072 strcpy(info->fw_version, "Unknown runtime");
ian@0 1073 } else {
ian@0 1074 u32 sleep_ver = xp_resp[0].parm2;
ian@0 1075 snprintf(info->fw_version, 32, "%02x.%03x.%03x",
ian@0 1076 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
ian@0 1077 sleep_ver & 0xfff);
ian@0 1078 }
ian@0 1079 }
ian@0 1080
ian@0 1081 strcpy(info->driver, DRV_MODULE_NAME);
ian@0 1082 strcpy(info->version, DRV_MODULE_VERSION);
ian@0 1083 strcpy(info->bus_info, pci_name(pci_dev));
ian@0 1084 }
ian@0 1085
ian@0 1086 static int
ian@0 1087 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ian@0 1088 {
ian@0 1089 struct typhoon *tp = netdev_priv(dev);
ian@0 1090
ian@0 1091 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
ian@0 1092 SUPPORTED_Autoneg;
ian@0 1093
ian@0 1094 switch (tp->xcvr_select) {
ian@0 1095 case TYPHOON_XCVR_10HALF:
ian@0 1096 cmd->advertising = ADVERTISED_10baseT_Half;
ian@0 1097 break;
ian@0 1098 case TYPHOON_XCVR_10FULL:
ian@0 1099 cmd->advertising = ADVERTISED_10baseT_Full;
ian@0 1100 break;
ian@0 1101 case TYPHOON_XCVR_100HALF:
ian@0 1102 cmd->advertising = ADVERTISED_100baseT_Half;
ian@0 1103 break;
ian@0 1104 case TYPHOON_XCVR_100FULL:
ian@0 1105 cmd->advertising = ADVERTISED_100baseT_Full;
ian@0 1106 break;
ian@0 1107 case TYPHOON_XCVR_AUTONEG:
ian@0 1108 cmd->advertising = ADVERTISED_10baseT_Half |
ian@0 1109 ADVERTISED_10baseT_Full |
ian@0 1110 ADVERTISED_100baseT_Half |
ian@0 1111 ADVERTISED_100baseT_Full |
ian@0 1112 ADVERTISED_Autoneg;
ian@0 1113 break;
ian@0 1114 }
ian@0 1115
ian@0 1116 if(tp->capabilities & TYPHOON_FIBER) {
ian@0 1117 cmd->supported |= SUPPORTED_FIBRE;
ian@0 1118 cmd->advertising |= ADVERTISED_FIBRE;
ian@0 1119 cmd->port = PORT_FIBRE;
ian@0 1120 } else {
ian@0 1121 cmd->supported |= SUPPORTED_10baseT_Half |
ian@0 1122 SUPPORTED_10baseT_Full |
ian@0 1123 SUPPORTED_TP;
ian@0 1124 cmd->advertising |= ADVERTISED_TP;
ian@0 1125 cmd->port = PORT_TP;
ian@0 1126 }
ian@0 1127
ian@0 1128 /* need to get stats to make these link speed/duplex valid */
ian@0 1129 typhoon_do_get_stats(tp);
ian@0 1130 cmd->speed = tp->speed;
ian@0 1131 cmd->duplex = tp->duplex;
ian@0 1132 cmd->phy_address = 0;
ian@0 1133 cmd->transceiver = XCVR_INTERNAL;
ian@0 1134 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
ian@0 1135 cmd->autoneg = AUTONEG_ENABLE;
ian@0 1136 else
ian@0 1137 cmd->autoneg = AUTONEG_DISABLE;
ian@0 1138 cmd->maxtxpkt = 1;
ian@0 1139 cmd->maxrxpkt = 1;
ian@0 1140
ian@0 1141 return 0;
ian@0 1142 }
ian@0 1143
ian@0 1144 static int
ian@0 1145 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ian@0 1146 {
ian@0 1147 struct typhoon *tp = netdev_priv(dev);
ian@0 1148 struct cmd_desc xp_cmd;
ian@0 1149 int xcvr;
ian@0 1150 int err;
ian@0 1151
ian@0 1152 err = -EINVAL;
ian@0 1153 if(cmd->autoneg == AUTONEG_ENABLE) {
ian@0 1154 xcvr = TYPHOON_XCVR_AUTONEG;
ian@0 1155 } else {
ian@0 1156 if(cmd->duplex == DUPLEX_HALF) {
ian@0 1157 if(cmd->speed == SPEED_10)
ian@0 1158 xcvr = TYPHOON_XCVR_10HALF;
ian@0 1159 else if(cmd->speed == SPEED_100)
ian@0 1160 xcvr = TYPHOON_XCVR_100HALF;
ian@0 1161 else
ian@0 1162 goto out;
ian@0 1163 } else if(cmd->duplex == DUPLEX_FULL) {
ian@0 1164 if(cmd->speed == SPEED_10)
ian@0 1165 xcvr = TYPHOON_XCVR_10FULL;
ian@0 1166 else if(cmd->speed == SPEED_100)
ian@0 1167 xcvr = TYPHOON_XCVR_100FULL;
ian@0 1168 else
ian@0 1169 goto out;
ian@0 1170 } else
ian@0 1171 goto out;
ian@0 1172 }
ian@0 1173
ian@0 1174 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
ian@0 1175 xp_cmd.parm1 = cpu_to_le16(xcvr);
ian@0 1176 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1177 if(err < 0)
ian@0 1178 goto out;
ian@0 1179
ian@0 1180 tp->xcvr_select = xcvr;
ian@0 1181 if(cmd->autoneg == AUTONEG_ENABLE) {
ian@0 1182 tp->speed = 0xff; /* invalid */
ian@0 1183 tp->duplex = 0xff; /* invalid */
ian@0 1184 } else {
ian@0 1185 tp->speed = cmd->speed;
ian@0 1186 tp->duplex = cmd->duplex;
ian@0 1187 }
ian@0 1188
ian@0 1189 out:
ian@0 1190 return err;
ian@0 1191 }
ian@0 1192
ian@0 1193 static void
ian@0 1194 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
ian@0 1195 {
ian@0 1196 struct typhoon *tp = netdev_priv(dev);
ian@0 1197
ian@0 1198 wol->supported = WAKE_PHY | WAKE_MAGIC;
ian@0 1199 wol->wolopts = 0;
ian@0 1200 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
ian@0 1201 wol->wolopts |= WAKE_PHY;
ian@0 1202 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
ian@0 1203 wol->wolopts |= WAKE_MAGIC;
ian@0 1204 memset(&wol->sopass, 0, sizeof(wol->sopass));
ian@0 1205 }
ian@0 1206
ian@0 1207 static int
ian@0 1208 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
ian@0 1209 {
ian@0 1210 struct typhoon *tp = netdev_priv(dev);
ian@0 1211
ian@0 1212 if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
ian@0 1213 return -EINVAL;
ian@0 1214
ian@0 1215 tp->wol_events = 0;
ian@0 1216 if(wol->wolopts & WAKE_PHY)
ian@0 1217 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
ian@0 1218 if(wol->wolopts & WAKE_MAGIC)
ian@0 1219 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
ian@0 1220
ian@0 1221 return 0;
ian@0 1222 }
ian@0 1223
ian@0 1224 static u32
ian@0 1225 typhoon_get_rx_csum(struct net_device *dev)
ian@0 1226 {
ian@0 1227 /* For now, we don't allow turning off RX checksums.
ian@0 1228 */
ian@0 1229 return 1;
ian@0 1230 }
ian@0 1231
ian@0 1232 static void
ian@0 1233 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
ian@0 1234 {
ian@0 1235 ering->rx_max_pending = RXENT_ENTRIES;
ian@0 1236 ering->rx_mini_max_pending = 0;
ian@0 1237 ering->rx_jumbo_max_pending = 0;
ian@0 1238 ering->tx_max_pending = TXLO_ENTRIES - 1;
ian@0 1239
ian@0 1240 ering->rx_pending = RXENT_ENTRIES;
ian@0 1241 ering->rx_mini_pending = 0;
ian@0 1242 ering->rx_jumbo_pending = 0;
ian@0 1243 ering->tx_pending = TXLO_ENTRIES - 1;
ian@0 1244 }
ian@0 1245
/* ethtool entry points.  Tx checksum, scatter-gather, and TSO toggles
 * use the generic ethtool_op_* helpers; Rx checksumming is reported as
 * always-on via typhoon_get_rx_csum().
 */
static struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
ian@0 1262
ian@0 1263 static int
ian@0 1264 typhoon_wait_interrupt(void __iomem *ioaddr)
ian@0 1265 {
ian@0 1266 int i, err = 0;
ian@0 1267
ian@0 1268 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
ian@0 1269 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
ian@0 1270 TYPHOON_INTR_BOOTCMD)
ian@0 1271 goto out;
ian@0 1272 udelay(TYPHOON_UDELAY);
ian@0 1273 }
ian@0 1274
ian@0 1275 err = -ETIMEDOUT;
ian@0 1276
ian@0 1277 out:
ian@0 1278 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 1279 return err;
ian@0 1280 }
ian@0 1281
/* Byte offset of member x within struct typhoon_shared; used below to
 * derive bus addresses of the shared rings from tp->shared_dma.
 */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)
ian@0 1283
/* Fill in the shared host/NIC interface block.
 *
 * Zeroes the whole typhoon_shared area, then publishes the bus address
 * and size of every ring (indexes, txLo/txHi, rxBuff, rxLo/rxHi,
 * command, response, zeroWord) in tp->shared->iface, and points the
 * driver-side ring bases at the corresponding host virtual addresses.
 * Also initializes the offload mask, state, and the two spinlocks.
 *
 * No card does 64-bit DAC, so all *Hi address fields stay at the zero
 * left by the memset (per the original comment below).
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side views of the same rings. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* Cached (already little-endian) copy for typhoon_tso_fill(). */
	tp->txlo_dma_addr = iface->txLoAddr;
	tp->card_state = Sleeping;
	/* Publish card_state before others read it (see smp_rmb() users). */
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
ian@0 1351
ian@0 1352 static void
ian@0 1353 typhoon_init_rings(struct typhoon *tp)
ian@0 1354 {
ian@0 1355 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
ian@0 1356
ian@0 1357 tp->txLoRing.lastWrite = 0;
ian@0 1358 tp->txHiRing.lastWrite = 0;
ian@0 1359 tp->rxLoRing.lastWrite = 0;
ian@0 1360 tp->rxHiRing.lastWrite = 0;
ian@0 1361 tp->rxBuffRing.lastWrite = 0;
ian@0 1362 tp->cmdRing.lastWrite = 0;
ian@0 1363 tp->cmdRing.lastWrite = 0;
ian@0 1364
ian@0 1365 tp->txLoRing.lastRead = 0;
ian@0 1366 tp->txHiRing.lastRead = 0;
ian@0 1367 }
ian@0 1368
/* Download the runtime firmware image to the 3XP.
 *
 * Validates the built-in image header, then for each section streams
 * the data through a single DMA-coherent bounce page: wait for the
 * card to signal WAITING_FOR_SEGMENT, hand it the length, checksum,
 * destination address, and bounce-page bus address, and issue
 * TYPHOON_BOOTCMD_SEG_AVAILABLE.  Finishes with DNLD_COMPLETE and
 * waits for WAITING_FOR_BOOT.
 *
 * The BOOTCMD interrupt bit is temporarily enabled/unmasked around the
 * handshake and restored on exit.  Returns 0 on success, -EINVAL for a
 * bad image, -ENOMEM or -ETIMEDOUT on failure.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	struct typhoon_file_header *fHdr;
	struct typhoon_section_header *sHdr;
	u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	unsigned int csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	err = -EINVAL;
	fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
	image_data = (u8 *) fHdr;

	/* 8 bytes: "TYPHOON" plus its NUL terminator. */
	if(memcmp(fHdr->tag, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
		goto err_out;
	}

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is part of the kernel/module image, so we allocate
	 * some consistent memory to copy the sections into, as it is simpler,
	 * and short-lived. If we ever split out and require a userland
	 * firmware loader, then we can revisit this.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* Enable and unmask the boot-command interrupt for the duration
	 * of the download; original values are restored at err_out_irq.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Hand the card the image entry point and the five 32-bit words
	 * of the image's HMAC digest, then start the runtime-image boot.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		/* Stream the section through the bounce page, at most
		 * PAGE_SIZE bytes per handshake round.
		 */
		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_partial_copy_nocheck(image_data, dpage,
							 len, 0);
			csum = csum_fold(csum);
			/* NOTE(review): le16_to_cpu on a folded checksum is
			 * unusual -- presumably producing the byte order the
			 * firmware expects; confirm before touching.
			 */
			csum = le16_to_cpu(csum);

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* Restore the interrupt enable/mask state we found on entry. */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
ian@0 1518
/* Boot the 3XP's runtime image.
 *
 * @initial_status: card status we expect to find before booting
 *                  (e.g. WAITING_FOR_BOOT after a firmware download).
 *
 * Hands the card the bus address of the shared boot record, waits for
 * TYPHOON_STATUS_RUNNING, clears the Tx/command doorbell registers,
 * then issues the final BOOT command.  Returns 0 on success or
 * -ETIMEDOUT.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* High half is zero: no card does 64-bit DAC. */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
ian@0 1554
/* Reclaim completed Tx ring entries.
 *
 * Walks from txRing->lastRead up to *index (the firmware's cleared
 * position, stored little-endian in the shared indexes).  Packet
 * descriptors (TYPHOON_TX_DESC) carry the raw skb pointer placed there
 * by typhoon_start_tx() and are freed with dev_kfree_skb_irq();
 * fragment descriptors (TYPHOON_FRAG_DESC) carry a DMA mapping that is
 * unmapped here.  Returns the new lastRead value -- the caller is
 * responsible for storing it back into the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile u32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
ian@0 1590
ian@0 1591 static void
ian@0 1592 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
ian@0 1593 volatile u32 * index)
ian@0 1594 {
ian@0 1595 u32 lastRead;
ian@0 1596 int numDesc = MAX_SKB_FRAGS + 1;
ian@0 1597
ian@0 1598 /* This will need changing if we start to use the Hi Tx ring. */
ian@0 1599 lastRead = typhoon_clean_tx(tp, txRing, index);
ian@0 1600 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
ian@0 1601 lastRead, TXLO_ENTRIES) > (numDesc + 2))
ian@0 1602 netif_wake_queue(tp->dev);
ian@0 1603
ian@0 1604 txRing->lastRead = lastRead;
ian@0 1605 smp_wmb();
ian@0 1606 }
ian@0 1607
/* Give a received buffer back to the NIC's free-buffer ring.
 *
 * If the rxBuff ring is full (advancing lastWrite would collide with
 * the firmware's rxBuffCleared index) the skb is dropped instead of
 * recycled.  Otherwise the buffer's slot index and DMA address are
 * written into a rx_free entry and rxBuffReady is advanced, after a
 * wmb() so the descriptor contents are visible to the card first.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
ian@0 1634
ian@0 1635 static int
ian@0 1636 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
ian@0 1637 {
ian@0 1638 struct typhoon_indexes *indexes = tp->indexes;
ian@0 1639 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
ian@0 1640 struct basic_ring *ring = &tp->rxBuffRing;
ian@0 1641 struct rx_free *r;
ian@0 1642 struct sk_buff *skb;
ian@0 1643 dma_addr_t dma_addr;
ian@0 1644
ian@0 1645 rxb->skb = NULL;
ian@0 1646
ian@0 1647 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
ian@0 1648 indexes->rxBuffCleared)
ian@0 1649 return -ENOMEM;
ian@0 1650
ian@0 1651 skb = dev_alloc_skb(PKT_BUF_SZ);
ian@0 1652 if(!skb)
ian@0 1653 return -ENOMEM;
ian@0 1654
ian@0 1655 #if 0
ian@0 1656 /* Please, 3com, fix the firmware to allow DMA to a unaligned
ian@0 1657 * address! Pretty please?
ian@0 1658 */
ian@0 1659 skb_reserve(skb, 2);
ian@0 1660 #endif
ian@0 1661
ian@0 1662 skb->dev = tp->dev;
ian@0 1663 dma_addr = pci_map_single(tp->pdev, skb->data,
ian@0 1664 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
ian@0 1665
ian@0 1666 /* Since no card does 64 bit DAC, the high bits will never
ian@0 1667 * change from zero.
ian@0 1668 */
ian@0 1669 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
ian@0 1670 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
ian@0 1671 r->virtAddr = idx;
ian@0 1672 r->physAddr = cpu_to_le32(dma_addr);
ian@0 1673 rxb->skb = skb;
ian@0 1674 rxb->dma_addr = dma_addr;
ian@0 1675
ian@0 1676 /* Tell the card about it */
ian@0 1677 wmb();
ian@0 1678 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
ian@0 1679 return 0;
ian@0 1680 }
ian@0 1681
ian@0 1682 static int
ian@0 1683 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
ian@0 1684 volatile u32 * cleared, int budget)
ian@0 1685 {
ian@0 1686 struct rx_desc *rx;
ian@0 1687 struct sk_buff *skb, *new_skb;
ian@0 1688 struct rxbuff_ent *rxb;
ian@0 1689 dma_addr_t dma_addr;
ian@0 1690 u32 local_ready;
ian@0 1691 u32 rxaddr;
ian@0 1692 int pkt_len;
ian@0 1693 u32 idx;
ian@0 1694 u32 csum_bits;
ian@0 1695 int received;
ian@0 1696
ian@0 1697 received = 0;
ian@0 1698 local_ready = le32_to_cpu(*ready);
ian@0 1699 rxaddr = le32_to_cpu(*cleared);
ian@0 1700 while(rxaddr != local_ready && budget > 0) {
ian@0 1701 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
ian@0 1702 idx = rx->addr;
ian@0 1703 rxb = &tp->rxbuffers[idx];
ian@0 1704 skb = rxb->skb;
ian@0 1705 dma_addr = rxb->dma_addr;
ian@0 1706
ian@0 1707 typhoon_inc_rx_index(&rxaddr, 1);
ian@0 1708
ian@0 1709 if(rx->flags & TYPHOON_RX_ERROR) {
ian@0 1710 typhoon_recycle_rx_skb(tp, idx);
ian@0 1711 continue;
ian@0 1712 }
ian@0 1713
ian@0 1714 pkt_len = le16_to_cpu(rx->frameLen);
ian@0 1715
ian@0 1716 if(pkt_len < rx_copybreak &&
ian@0 1717 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
ian@0 1718 new_skb->dev = tp->dev;
ian@0 1719 skb_reserve(new_skb, 2);
ian@0 1720 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
ian@0 1721 PKT_BUF_SZ,
ian@0 1722 PCI_DMA_FROMDEVICE);
ian@0 1723 eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
ian@0 1724 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
ian@0 1725 PKT_BUF_SZ,
ian@0 1726 PCI_DMA_FROMDEVICE);
ian@0 1727 skb_put(new_skb, pkt_len);
ian@0 1728 typhoon_recycle_rx_skb(tp, idx);
ian@0 1729 } else {
ian@0 1730 new_skb = skb;
ian@0 1731 skb_put(new_skb, pkt_len);
ian@0 1732 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
ian@0 1733 PCI_DMA_FROMDEVICE);
ian@0 1734 typhoon_alloc_rx_skb(tp, idx);
ian@0 1735 }
ian@0 1736 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
ian@0 1737 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
ian@0 1738 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
ian@0 1739 if(csum_bits ==
ian@0 1740 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
ian@0 1741 || csum_bits ==
ian@0 1742 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
ian@0 1743 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
ian@0 1744 } else
ian@0 1745 new_skb->ip_summed = CHECKSUM_NONE;
ian@0 1746
ian@0 1747 spin_lock(&tp->state_lock);
ian@0 1748 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
ian@0 1749 vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
ian@0 1750 ntohl(rx->vlanTag) & 0xffff);
ian@0 1751 else
ian@0 1752 netif_receive_skb(new_skb);
ian@0 1753 spin_unlock(&tp->state_lock);
ian@0 1754
ian@0 1755 tp->dev->last_rx = jiffies;
ian@0 1756 received++;
ian@0 1757 budget--;
ian@0 1758 }
ian@0 1759 *cleared = cpu_to_le32(rxaddr);
ian@0 1760
ian@0 1761 return received;
ian@0 1762 }
ian@0 1763
ian@0 1764 static void
ian@0 1765 typhoon_fill_free_ring(struct typhoon *tp)
ian@0 1766 {
ian@0 1767 u32 i;
ian@0 1768
ian@0 1769 for(i = 0; i < RXENT_ENTRIES; i++) {
ian@0 1770 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
ian@0 1771 if(rxb->skb)
ian@0 1772 continue;
ian@0 1773 if(typhoon_alloc_rx_skb(tp, i) < 0)
ian@0 1774 break;
ian@0 1775 }
ian@0 1776 }
ian@0 1777
ian@0 1778 static int
ian@0 1779 typhoon_poll(struct net_device *dev, int *total_budget)
ian@0 1780 {
ian@0 1781 struct typhoon *tp = netdev_priv(dev);
ian@0 1782 struct typhoon_indexes *indexes = tp->indexes;
ian@0 1783 int orig_budget = *total_budget;
ian@0 1784 int budget, work_done, done;
ian@0 1785
ian@0 1786 rmb();
ian@0 1787 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
ian@0 1788 typhoon_process_response(tp, 0, NULL);
ian@0 1789
ian@0 1790 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
ian@0 1791 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
ian@0 1792
ian@0 1793 if(orig_budget > dev->quota)
ian@0 1794 orig_budget = dev->quota;
ian@0 1795
ian@0 1796 budget = orig_budget;
ian@0 1797 work_done = 0;
ian@0 1798 done = 1;
ian@0 1799
ian@0 1800 if(indexes->rxHiCleared != indexes->rxHiReady) {
ian@0 1801 work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
ian@0 1802 &indexes->rxHiCleared, budget);
ian@0 1803 budget -= work_done;
ian@0 1804 }
ian@0 1805
ian@0 1806 if(indexes->rxLoCleared != indexes->rxLoReady) {
ian@0 1807 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
ian@0 1808 &indexes->rxLoCleared, budget);
ian@0 1809 }
ian@0 1810
ian@0 1811 if(work_done) {
ian@0 1812 *total_budget -= work_done;
ian@0 1813 dev->quota -= work_done;
ian@0 1814
ian@0 1815 if(work_done >= orig_budget)
ian@0 1816 done = 0;
ian@0 1817 }
ian@0 1818
ian@0 1819 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
ian@0 1820 /* rxBuff ring is empty, try to fill it. */
ian@0 1821 typhoon_fill_free_ring(tp);
ian@0 1822 }
ian@0 1823
ian@0 1824 if(done) {
ian@0 1825 netif_rx_complete(dev);
ian@0 1826 iowrite32(TYPHOON_INTR_NONE,
ian@0 1827 tp->ioaddr + TYPHOON_REG_INTR_MASK);
ian@0 1828 typhoon_post_pci_writes(tp->ioaddr);
ian@0 1829 }
ian@0 1830
ian@0 1831 return (done ? 0 : 1);
ian@0 1832 }
ian@0 1833
ian@0 1834 static irqreturn_t
ian@0 1835 typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
ian@0 1836 {
ian@0 1837 struct net_device *dev = (struct net_device *) dev_instance;
ian@0 1838 struct typhoon *tp = dev->priv;
ian@0 1839 void __iomem *ioaddr = tp->ioaddr;
ian@0 1840 u32 intr_status;
ian@0 1841
ian@0 1842 intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 1843 if(!(intr_status & TYPHOON_INTR_HOST_INT))
ian@0 1844 return IRQ_NONE;
ian@0 1845
ian@0 1846 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 1847
ian@0 1848 if(netif_rx_schedule_prep(dev)) {
ian@0 1849 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
ian@0 1850 typhoon_post_pci_writes(ioaddr);
ian@0 1851 __netif_rx_schedule(dev);
ian@0 1852 } else {
ian@0 1853 printk(KERN_ERR "%s: Error, poll already scheduled\n",
ian@0 1854 dev->name);
ian@0 1855 }
ian@0 1856 return IRQ_HANDLED;
ian@0 1857 }
ian@0 1858
ian@0 1859 static void
ian@0 1860 typhoon_free_rx_rings(struct typhoon *tp)
ian@0 1861 {
ian@0 1862 u32 i;
ian@0 1863
ian@0 1864 for(i = 0; i < RXENT_ENTRIES; i++) {
ian@0 1865 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
ian@0 1866 if(rxb->skb) {
ian@0 1867 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
ian@0 1868 PCI_DMA_FROMDEVICE);
ian@0 1869 dev_kfree_skb(rxb->skb);
ian@0 1870 rxb->skb = NULL;
ian@0 1871 }
ian@0 1872 }
ian@0 1873 }
ian@0 1874
ian@0 1875 static int
ian@0 1876 typhoon_sleep(struct typhoon *tp, pci_power_t state, u16 events)
ian@0 1877 {
ian@0 1878 struct pci_dev *pdev = tp->pdev;
ian@0 1879 void __iomem *ioaddr = tp->ioaddr;
ian@0 1880 struct cmd_desc xp_cmd;
ian@0 1881 int err;
ian@0 1882
ian@0 1883 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
ian@0 1884 xp_cmd.parm1 = events;
ian@0 1885 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1886 if(err < 0) {
ian@0 1887 printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
ian@0 1888 tp->name, err);
ian@0 1889 return err;
ian@0 1890 }
ian@0 1891
ian@0 1892 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
ian@0 1893 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1894 if(err < 0) {
ian@0 1895 printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
ian@0 1896 tp->name, err);
ian@0 1897 return err;
ian@0 1898 }
ian@0 1899
ian@0 1900 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
ian@0 1901 return -ETIMEDOUT;
ian@0 1902
ian@0 1903 /* Since we cannot monitor the status of the link while sleeping,
ian@0 1904 * tell the world it went away.
ian@0 1905 */
ian@0 1906 netif_carrier_off(tp->dev);
ian@0 1907
ian@0 1908 pci_enable_wake(tp->pdev, state, 1);
ian@0 1909 pci_disable_device(pdev);
ian@0 1910 return pci_set_power_state(pdev, state);
ian@0 1911 }
ian@0 1912
ian@0 1913 static int
ian@0 1914 typhoon_wakeup(struct typhoon *tp, int wait_type)
ian@0 1915 {
ian@0 1916 struct pci_dev *pdev = tp->pdev;
ian@0 1917 void __iomem *ioaddr = tp->ioaddr;
ian@0 1918
ian@0 1919 pci_set_power_state(pdev, PCI_D0);
ian@0 1920 pci_restore_state(pdev);
ian@0 1921
ian@0 1922 /* Post 2.x.x versions of the Sleep Image require a reset before
ian@0 1923 * we can download the Runtime Image. But let's not make users of
ian@0 1924 * the old firmware pay for the reset.
ian@0 1925 */
ian@0 1926 iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
ian@0 1927 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
ian@0 1928 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
ian@0 1929 return typhoon_reset(ioaddr, wait_type);
ian@0 1930
ian@0 1931 return 0;
ian@0 1932 }
ian@0 1933
ian@0 1934 static int
ian@0 1935 typhoon_start_runtime(struct typhoon *tp)
ian@0 1936 {
ian@0 1937 struct net_device *dev = tp->dev;
ian@0 1938 void __iomem *ioaddr = tp->ioaddr;
ian@0 1939 struct cmd_desc xp_cmd;
ian@0 1940 int err;
ian@0 1941
ian@0 1942 typhoon_init_rings(tp);
ian@0 1943 typhoon_fill_free_ring(tp);
ian@0 1944
ian@0 1945 err = typhoon_download_firmware(tp);
ian@0 1946 if(err < 0) {
ian@0 1947 printk("%s: cannot load runtime on 3XP\n", tp->name);
ian@0 1948 goto error_out;
ian@0 1949 }
ian@0 1950
ian@0 1951 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
ian@0 1952 printk("%s: cannot boot 3XP\n", tp->name);
ian@0 1953 err = -EIO;
ian@0 1954 goto error_out;
ian@0 1955 }
ian@0 1956
ian@0 1957 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
ian@0 1958 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
ian@0 1959 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1960 if(err < 0)
ian@0 1961 goto error_out;
ian@0 1962
ian@0 1963 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
ian@0 1964 xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
ian@0 1965 xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
ian@0 1966 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1967 if(err < 0)
ian@0 1968 goto error_out;
ian@0 1969
ian@0 1970 /* Disable IRQ coalescing -- we can reenable it when 3Com gives
ian@0 1971 * us some more information on how to control it.
ian@0 1972 */
ian@0 1973 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
ian@0 1974 xp_cmd.parm1 = 0;
ian@0 1975 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1976 if(err < 0)
ian@0 1977 goto error_out;
ian@0 1978
ian@0 1979 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
ian@0 1980 xp_cmd.parm1 = tp->xcvr_select;
ian@0 1981 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1982 if(err < 0)
ian@0 1983 goto error_out;
ian@0 1984
ian@0 1985 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
ian@0 1986 xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
ian@0 1987 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1988 if(err < 0)
ian@0 1989 goto error_out;
ian@0 1990
ian@0 1991 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
ian@0 1992 spin_lock_bh(&tp->state_lock);
ian@0 1993 xp_cmd.parm2 = tp->offload;
ian@0 1994 xp_cmd.parm3 = tp->offload;
ian@0 1995 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 1996 spin_unlock_bh(&tp->state_lock);
ian@0 1997 if(err < 0)
ian@0 1998 goto error_out;
ian@0 1999
ian@0 2000 typhoon_set_rx_mode(dev);
ian@0 2001
ian@0 2002 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
ian@0 2003 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 2004 if(err < 0)
ian@0 2005 goto error_out;
ian@0 2006
ian@0 2007 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
ian@0 2008 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 2009 if(err < 0)
ian@0 2010 goto error_out;
ian@0 2011
ian@0 2012 tp->card_state = Running;
ian@0 2013 smp_wmb();
ian@0 2014
ian@0 2015 iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
ian@0 2016 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
ian@0 2017 typhoon_post_pci_writes(ioaddr);
ian@0 2018
ian@0 2019 return 0;
ian@0 2020
ian@0 2021 error_out:
ian@0 2022 typhoon_reset(ioaddr, WaitNoSleep);
ian@0 2023 typhoon_free_rx_rings(tp);
ian@0 2024 typhoon_init_rings(tp);
ian@0 2025 return err;
ian@0 2026 }
ian@0 2027
ian@0 2028 static int
ian@0 2029 typhoon_stop_runtime(struct typhoon *tp, int wait_type)
ian@0 2030 {
ian@0 2031 struct typhoon_indexes *indexes = tp->indexes;
ian@0 2032 struct transmit_ring *txLo = &tp->txLoRing;
ian@0 2033 void __iomem *ioaddr = tp->ioaddr;
ian@0 2034 struct cmd_desc xp_cmd;
ian@0 2035 int i;
ian@0 2036
ian@0 2037 /* Disable interrupts early, since we can't schedule a poll
ian@0 2038 * when called with !netif_running(). This will be posted
ian@0 2039 * when we force the posting of the command.
ian@0 2040 */
ian@0 2041 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
ian@0 2042
ian@0 2043 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
ian@0 2044 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 2045
ian@0 2046 /* Wait 1/2 sec for any outstanding transmits to occur
ian@0 2047 * We'll cleanup after the reset if this times out.
ian@0 2048 */
ian@0 2049 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
ian@0 2050 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
ian@0 2051 break;
ian@0 2052 udelay(TYPHOON_UDELAY);
ian@0 2053 }
ian@0 2054
ian@0 2055 if(i == TYPHOON_WAIT_TIMEOUT)
ian@0 2056 printk(KERN_ERR
ian@0 2057 "%s: halt timed out waiting for Tx to complete\n",
ian@0 2058 tp->name);
ian@0 2059
ian@0 2060 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
ian@0 2061 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 2062
ian@0 2063 /* save the statistics so when we bring the interface up again,
ian@0 2064 * the values reported to userspace are correct.
ian@0 2065 */
ian@0 2066 tp->card_state = Sleeping;
ian@0 2067 smp_wmb();
ian@0 2068 typhoon_do_get_stats(tp);
ian@0 2069 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
ian@0 2070
ian@0 2071 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
ian@0 2072 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
ian@0 2073
ian@0 2074 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
ian@0 2075 printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
ian@0 2076 tp->name);
ian@0 2077
ian@0 2078 if(typhoon_reset(ioaddr, wait_type) < 0) {
ian@0 2079 printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
ian@0 2080 return -ETIMEDOUT;
ian@0 2081 }
ian@0 2082
ian@0 2083 /* cleanup any outstanding Tx packets */
ian@0 2084 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
ian@0 2085 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
ian@0 2086 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
ian@0 2087 }
ian@0 2088
ian@0 2089 return 0;
ian@0 2090 }
ian@0 2091
ian@0 2092 static void
ian@0 2093 typhoon_tx_timeout(struct net_device *dev)
ian@0 2094 {
ian@0 2095 struct typhoon *tp = netdev_priv(dev);
ian@0 2096
ian@0 2097 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
ian@0 2098 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
ian@0 2099 dev->name);
ian@0 2100 goto truely_dead;
ian@0 2101 }
ian@0 2102
ian@0 2103 /* If we ever start using the Hi ring, it will need cleaning too */
ian@0 2104 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
ian@0 2105 typhoon_free_rx_rings(tp);
ian@0 2106
ian@0 2107 if(typhoon_start_runtime(tp) < 0) {
ian@0 2108 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
ian@0 2109 dev->name);
ian@0 2110 goto truely_dead;
ian@0 2111 }
ian@0 2112
ian@0 2113 netif_wake_queue(dev);
ian@0 2114 return;
ian@0 2115
ian@0 2116 truely_dead:
ian@0 2117 /* Reset the hardware, and turn off carrier to avoid more timeouts */
ian@0 2118 typhoon_reset(tp->ioaddr, NoWait);
ian@0 2119 netif_carrier_off(dev);
ian@0 2120 }
ian@0 2121
ian@0 2122 static int
ian@0 2123 typhoon_open(struct net_device *dev)
ian@0 2124 {
ian@0 2125 struct typhoon *tp = netdev_priv(dev);
ian@0 2126 int err;
ian@0 2127
ian@0 2128 err = typhoon_wakeup(tp, WaitSleep);
ian@0 2129 if(err < 0) {
ian@0 2130 printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
ian@0 2131 goto out_sleep;
ian@0 2132 }
ian@0 2133
ian@0 2134 err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
ian@0 2135 dev->name, dev);
ian@0 2136 if(err < 0)
ian@0 2137 goto out_sleep;
ian@0 2138
ian@0 2139 err = typhoon_start_runtime(tp);
ian@0 2140 if(err < 0)
ian@0 2141 goto out_irq;
ian@0 2142
ian@0 2143 netif_start_queue(dev);
ian@0 2144 return 0;
ian@0 2145
ian@0 2146 out_irq:
ian@0 2147 free_irq(dev->irq, dev);
ian@0 2148
ian@0 2149 out_sleep:
ian@0 2150 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
ian@0 2151 printk(KERN_ERR "%s: unable to reboot into sleep img\n",
ian@0 2152 dev->name);
ian@0 2153 typhoon_reset(tp->ioaddr, NoWait);
ian@0 2154 goto out;
ian@0 2155 }
ian@0 2156
ian@0 2157 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
ian@0 2158 printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
ian@0 2159
ian@0 2160 out:
ian@0 2161 return err;
ian@0 2162 }
ian@0 2163
ian@0 2164 static int
ian@0 2165 typhoon_close(struct net_device *dev)
ian@0 2166 {
ian@0 2167 struct typhoon *tp = netdev_priv(dev);
ian@0 2168
ian@0 2169 netif_stop_queue(dev);
ian@0 2170
ian@0 2171 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
ian@0 2172 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
ian@0 2173
ian@0 2174 /* Make sure there is no irq handler running on a different CPU. */
ian@0 2175 typhoon_synchronize_irq(dev->irq);
ian@0 2176 free_irq(dev->irq, dev);
ian@0 2177
ian@0 2178 typhoon_free_rx_rings(tp);
ian@0 2179 typhoon_init_rings(tp);
ian@0 2180
ian@0 2181 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
ian@0 2182 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
ian@0 2183
ian@0 2184 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
ian@0 2185 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
ian@0 2186
ian@0 2187 return 0;
ian@0 2188 }
ian@0 2189
ian@0 2190 #ifdef CONFIG_PM
ian@0 2191 static int
ian@0 2192 typhoon_resume(struct pci_dev *pdev)
ian@0 2193 {
ian@0 2194 struct net_device *dev = pci_get_drvdata(pdev);
ian@0 2195 struct typhoon *tp = netdev_priv(dev);
ian@0 2196
ian@0 2197 /* If we're down, resume when we are upped.
ian@0 2198 */
ian@0 2199 if(!netif_running(dev))
ian@0 2200 return 0;
ian@0 2201
ian@0 2202 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
ian@0 2203 printk(KERN_ERR "%s: critical: could not wake up in resume\n",
ian@0 2204 dev->name);
ian@0 2205 goto reset;
ian@0 2206 }
ian@0 2207
ian@0 2208 if(typhoon_start_runtime(tp) < 0) {
ian@0 2209 printk(KERN_ERR "%s: critical: could not start runtime in "
ian@0 2210 "resume\n", dev->name);
ian@0 2211 goto reset;
ian@0 2212 }
ian@0 2213
ian@0 2214 netif_device_attach(dev);
ian@0 2215 netif_start_queue(dev);
ian@0 2216 return 0;
ian@0 2217
ian@0 2218 reset:
ian@0 2219 typhoon_reset(tp->ioaddr, NoWait);
ian@0 2220 return -EBUSY;
ian@0 2221 }
ian@0 2222
ian@0 2223 static int
ian@0 2224 typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
ian@0 2225 {
ian@0 2226 struct net_device *dev = pci_get_drvdata(pdev);
ian@0 2227 struct typhoon *tp = netdev_priv(dev);
ian@0 2228 struct cmd_desc xp_cmd;
ian@0 2229
ian@0 2230 /* If we're down, we're already suspended.
ian@0 2231 */
ian@0 2232 if(!netif_running(dev))
ian@0 2233 return 0;
ian@0 2234
ian@0 2235 spin_lock_bh(&tp->state_lock);
ian@0 2236 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
ian@0 2237 spin_unlock_bh(&tp->state_lock);
ian@0 2238 printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
ian@0 2239 dev->name);
ian@0 2240 return -EBUSY;
ian@0 2241 }
ian@0 2242 spin_unlock_bh(&tp->state_lock);
ian@0 2243
ian@0 2244 netif_device_detach(dev);
ian@0 2245
ian@0 2246 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
ian@0 2247 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
ian@0 2248 goto need_resume;
ian@0 2249 }
ian@0 2250
ian@0 2251 typhoon_free_rx_rings(tp);
ian@0 2252 typhoon_init_rings(tp);
ian@0 2253
ian@0 2254 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
ian@0 2255 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
ian@0 2256 goto need_resume;
ian@0 2257 }
ian@0 2258
ian@0 2259 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
ian@0 2260 xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
ian@0 2261 xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
ian@0 2262 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
ian@0 2263 printk(KERN_ERR "%s: unable to set mac address in suspend\n",
ian@0 2264 dev->name);
ian@0 2265 goto need_resume;
ian@0 2266 }
ian@0 2267
ian@0 2268 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
ian@0 2269 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
ian@0 2270 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
ian@0 2271 printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
ian@0 2272 dev->name);
ian@0 2273 goto need_resume;
ian@0 2274 }
ian@0 2275
ian@0 2276 if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
ian@0 2277 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
ian@0 2278 goto need_resume;
ian@0 2279 }
ian@0 2280
ian@0 2281 return 0;
ian@0 2282
ian@0 2283 need_resume:
ian@0 2284 typhoon_resume(pdev);
ian@0 2285 return -EBUSY;
ian@0 2286 }
ian@0 2287
ian@0 2288 static int
ian@0 2289 typhoon_enable_wake(struct pci_dev *pdev, pci_power_t state, int enable)
ian@0 2290 {
ian@0 2291 return pci_enable_wake(pdev, state, enable);
ian@0 2292 }
ian@0 2293 #endif
ian@0 2294
ian@0 2295 static int __devinit
ian@0 2296 typhoon_test_mmio(struct pci_dev *pdev)
ian@0 2297 {
ian@0 2298 void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
ian@0 2299 int mode = 0;
ian@0 2300 u32 val;
ian@0 2301
ian@0 2302 if(!ioaddr)
ian@0 2303 goto out;
ian@0 2304
ian@0 2305 if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
ian@0 2306 TYPHOON_STATUS_WAITING_FOR_HOST)
ian@0 2307 goto out_unmap;
ian@0 2308
ian@0 2309 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
ian@0 2310 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 2311 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
ian@0 2312
ian@0 2313 /* Ok, see if we can change our interrupt status register by
ian@0 2314 * sending ourselves an interrupt. If so, then MMIO works.
ian@0 2315 * The 50usec delay is arbitrary -- it could probably be smaller.
ian@0 2316 */
ian@0 2317 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 2318 if((val & TYPHOON_INTR_SELF) == 0) {
ian@0 2319 iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
ian@0 2320 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 2321 udelay(50);
ian@0 2322 val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 2323 if(val & TYPHOON_INTR_SELF)
ian@0 2324 mode = 1;
ian@0 2325 }
ian@0 2326
ian@0 2327 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
ian@0 2328 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 2329 iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
ian@0 2330 ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
ian@0 2331
ian@0 2332 out_unmap:
ian@0 2333 pci_iounmap(pdev, ioaddr);
ian@0 2334
ian@0 2335 out:
ian@0 2336 if(!mode)
ian@0 2337 printk(KERN_INFO PFX "falling back to port IO\n");
ian@0 2338 return mode;
ian@0 2339 }
ian@0 2340
ian@0 2341 static int __devinit
ian@0 2342 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ian@0 2343 {
ian@0 2344 static int did_version = 0;
ian@0 2345 struct net_device *dev;
ian@0 2346 struct typhoon *tp;
ian@0 2347 int card_id = (int) ent->driver_data;
ian@0 2348 void __iomem *ioaddr;
ian@0 2349 void *shared;
ian@0 2350 dma_addr_t shared_dma;
ian@0 2351 struct cmd_desc xp_cmd;
ian@0 2352 struct resp_desc xp_resp[3];
ian@0 2353 int i;
ian@0 2354 int err = 0;
ian@0 2355
ian@0 2356 if(!did_version++)
ian@0 2357 printk(KERN_INFO "%s", version);
ian@0 2358
ian@0 2359 dev = alloc_etherdev(sizeof(*tp));
ian@0 2360 if(dev == NULL) {
ian@0 2361 printk(ERR_PFX "%s: unable to alloc new net device\n",
ian@0 2362 pci_name(pdev));
ian@0 2363 err = -ENOMEM;
ian@0 2364 goto error_out;
ian@0 2365 }
ian@0 2366 SET_MODULE_OWNER(dev);
ian@0 2367 SET_NETDEV_DEV(dev, &pdev->dev);
ian@0 2368
ian@0 2369 err = pci_enable_device(pdev);
ian@0 2370 if(err < 0) {
ian@0 2371 printk(ERR_PFX "%s: unable to enable device\n",
ian@0 2372 pci_name(pdev));
ian@0 2373 goto error_out_dev;
ian@0 2374 }
ian@0 2375
ian@0 2376 err = pci_set_mwi(pdev);
ian@0 2377 if(err < 0) {
ian@0 2378 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
ian@0 2379 goto error_out_disable;
ian@0 2380 }
ian@0 2381
ian@0 2382 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
ian@0 2383 if(err < 0) {
ian@0 2384 printk(ERR_PFX "%s: No usable DMA configuration\n",
ian@0 2385 pci_name(pdev));
ian@0 2386 goto error_out_mwi;
ian@0 2387 }
ian@0 2388
ian@0 2389 /* sanity checks on IO and MMIO BARs
ian@0 2390 */
ian@0 2391 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
ian@0 2392 printk(ERR_PFX
ian@0 2393 "%s: region #1 not a PCI IO resource, aborting\n",
ian@0 2394 pci_name(pdev));
ian@0 2395 err = -ENODEV;
ian@0 2396 goto error_out_mwi;
ian@0 2397 }
ian@0 2398 if(pci_resource_len(pdev, 0) < 128) {
ian@0 2399 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
ian@0 2400 pci_name(pdev));
ian@0 2401 err = -ENODEV;
ian@0 2402 goto error_out_mwi;
ian@0 2403 }
ian@0 2404 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
ian@0 2405 printk(ERR_PFX
ian@0 2406 "%s: region #1 not a PCI MMIO resource, aborting\n",
ian@0 2407 pci_name(pdev));
ian@0 2408 err = -ENODEV;
ian@0 2409 goto error_out_mwi;
ian@0 2410 }
ian@0 2411 if(pci_resource_len(pdev, 1) < 128) {
ian@0 2412 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
ian@0 2413 pci_name(pdev));
ian@0 2414 err = -ENODEV;
ian@0 2415 goto error_out_mwi;
ian@0 2416 }
ian@0 2417
ian@0 2418 err = pci_request_regions(pdev, "typhoon");
ian@0 2419 if(err < 0) {
ian@0 2420 printk(ERR_PFX "%s: could not request regions\n",
ian@0 2421 pci_name(pdev));
ian@0 2422 goto error_out_mwi;
ian@0 2423 }
ian@0 2424
ian@0 2425 /* map our registers
ian@0 2426 */
ian@0 2427 if(use_mmio != 0 && use_mmio != 1)
ian@0 2428 use_mmio = typhoon_test_mmio(pdev);
ian@0 2429
ian@0 2430 ioaddr = pci_iomap(pdev, use_mmio, 128);
ian@0 2431 if (!ioaddr) {
ian@0 2432 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
ian@0 2433 pci_name(pdev));
ian@0 2434 err = -EIO;
ian@0 2435 goto error_out_regions;
ian@0 2436 }
ian@0 2437
ian@0 2438 /* allocate pci dma space for rx and tx descriptor rings
ian@0 2439 */
ian@0 2440 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
ian@0 2441 &shared_dma);
ian@0 2442 if(!shared) {
ian@0 2443 printk(ERR_PFX "%s: could not allocate DMA memory\n",
ian@0 2444 pci_name(pdev));
ian@0 2445 err = -ENOMEM;
ian@0 2446 goto error_out_remap;
ian@0 2447 }
ian@0 2448
ian@0 2449 dev->irq = pdev->irq;
ian@0 2450 tp = netdev_priv(dev);
ian@0 2451 tp->shared = (struct typhoon_shared *) shared;
ian@0 2452 tp->shared_dma = shared_dma;
ian@0 2453 tp->pdev = pdev;
ian@0 2454 tp->tx_pdev = pdev;
ian@0 2455 tp->ioaddr = ioaddr;
ian@0 2456 tp->tx_ioaddr = ioaddr;
ian@0 2457 tp->dev = dev;
ian@0 2458
ian@0 2459 /* Init sequence:
ian@0 2460 * 1) Reset the adapter to clear any bad juju
ian@0 2461 * 2) Reload the sleep image
ian@0 2462 * 3) Boot the sleep image
ian@0 2463 * 4) Get the hardware address.
ian@0 2464 * 5) Put the card to sleep.
ian@0 2465 */
ian@0 2466 if (typhoon_reset(ioaddr, WaitSleep) < 0) {
ian@0 2467 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
ian@0 2468 err = -EIO;
ian@0 2469 goto error_out_dma;
ian@0 2470 }
ian@0 2471
ian@0 2472 /* Now that we've reset the 3XP and are sure it's not going to
ian@0 2473 * write all over memory, enable bus mastering, and save our
ian@0 2474 * state for resuming after a suspend.
ian@0 2475 */
ian@0 2476 pci_set_master(pdev);
ian@0 2477 pci_save_state(pdev);
ian@0 2478
ian@0 2479 /* dev->name is not valid until we register, but we need to
ian@0 2480 * use some common routines to initialize the card. So that those
ian@0 2481 * routines print the right name, we keep our oun pointer to the name
ian@0 2482 */
ian@0 2483 tp->name = pci_name(pdev);
ian@0 2484
ian@0 2485 typhoon_init_interface(tp);
ian@0 2486 typhoon_init_rings(tp);
ian@0 2487
ian@0 2488 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
ian@0 2489 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
ian@0 2490 pci_name(pdev));
ian@0 2491 err = -EIO;
ian@0 2492 goto error_out_reset;
ian@0 2493 }
ian@0 2494
ian@0 2495 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
ian@0 2496 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
ian@0 2497 printk(ERR_PFX "%s: cannot read MAC address\n",
ian@0 2498 pci_name(pdev));
ian@0 2499 err = -EIO;
ian@0 2500 goto error_out_reset;
ian@0 2501 }
ian@0 2502
ian@0 2503 *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
ian@0 2504 *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
ian@0 2505
ian@0 2506 if(!is_valid_ether_addr(dev->dev_addr)) {
ian@0 2507 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
ian@0 2508 "aborting\n", pci_name(pdev));
ian@0 2509 goto error_out_reset;
ian@0 2510 }
ian@0 2511
ian@0 2512 /* Read the Sleep Image version last, so the response is valid
ian@0 2513 * later when we print out the version reported.
ian@0 2514 */
ian@0 2515 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
ian@0 2516 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
ian@0 2517 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
ian@0 2518 pci_name(pdev));
ian@0 2519 goto error_out_reset;
ian@0 2520 }
ian@0 2521
ian@0 2522 tp->capabilities = typhoon_card_info[card_id].capabilities;
ian@0 2523 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
ian@0 2524
ian@0 2525 /* Typhoon 1.0 Sleep Images return one response descriptor to the
ian@0 2526 * READ_VERSIONS command. Those versions are OK after waking up
ian@0 2527 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
ian@0 2528 * seem to need a little extra help to get started. Since we don't
ian@0 2529 * know how to nudge it along, just kick it.
ian@0 2530 */
ian@0 2531 if(xp_resp[0].numDesc != 0)
ian@0 2532 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
ian@0 2533
ian@0 2534 if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
ian@0 2535 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
ian@0 2536 pci_name(pdev));
ian@0 2537 err = -EIO;
ian@0 2538 goto error_out_reset;
ian@0 2539 }
ian@0 2540
ian@0 2541 /* The chip-specific entries in the device structure. */
ian@0 2542 dev->open = typhoon_open;
ian@0 2543 dev->hard_start_xmit = typhoon_start_tx;
ian@0 2544 dev->stop = typhoon_close;
ian@0 2545 dev->set_multicast_list = typhoon_set_rx_mode;
ian@0 2546 dev->tx_timeout = typhoon_tx_timeout;
ian@0 2547 dev->poll = typhoon_poll;
ian@0 2548 dev->weight = 16;
ian@0 2549 dev->watchdog_timeo = TX_TIMEOUT;
ian@0 2550 dev->get_stats = typhoon_get_stats;
ian@0 2551 dev->set_mac_address = typhoon_set_mac_address;
ian@0 2552 dev->vlan_rx_register = typhoon_vlan_rx_register;
ian@0 2553 dev->vlan_rx_kill_vid = typhoon_vlan_rx_kill_vid;
ian@0 2554 SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
ian@0 2555
ian@0 2556 /* We can handle scatter gather, up to 16 entries, and
ian@0 2557 * we can do IP checksumming (only version 4, doh...)
ian@0 2558 */
ian@0 2559 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
ian@0 2560 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
ian@0 2561 dev->features |= NETIF_F_TSO;
ian@0 2562
ian@0 2563 if(register_netdev(dev) < 0)
ian@0 2564 goto error_out_reset;
ian@0 2565
ian@0 2566 /* fixup our local name */
ian@0 2567 tp->name = dev->name;
ian@0 2568
ian@0 2569 pci_set_drvdata(pdev, dev);
ian@0 2570
ian@0 2571 printk(KERN_INFO "%s: %s at %s 0x%llx, ",
ian@0 2572 dev->name, typhoon_card_info[card_id].name,
ian@0 2573 use_mmio ? "MMIO" : "IO",
ian@0 2574 (unsigned long long)pci_resource_start(pdev, use_mmio));
ian@0 2575 for(i = 0; i < 5; i++)
ian@0 2576 printk("%2.2x:", dev->dev_addr[i]);
ian@0 2577 printk("%2.2x\n", dev->dev_addr[i]);
ian@0 2578
ian@0 2579 /* xp_resp still contains the response to the READ_VERSIONS command.
ian@0 2580 * For debugging, let the user know what version he has.
ian@0 2581 */
ian@0 2582 if(xp_resp[0].numDesc == 0) {
ian@0 2583 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
ian@0 2584 * of version is Month/Day of build.
ian@0 2585 */
ian@0 2586 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
ian@0 2587 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
ian@0 2588 "%02u/%02u/2000\n", dev->name, monthday >> 8,
ian@0 2589 monthday & 0xff);
ian@0 2590 } else if(xp_resp[0].numDesc == 2) {
ian@0 2591 /* This is the Typhoon 1.1+ type Sleep Image
ian@0 2592 */
ian@0 2593 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
ian@0 2594 u8 *ver_string = (u8 *) &xp_resp[1];
ian@0 2595 ver_string[25] = 0;
ian@0 2596 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
ian@0 2597 "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
ian@0 2598 (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
ian@0 2599 ver_string);
ian@0 2600 } else {
ian@0 2601 printk(KERN_WARNING "%s: Unknown Sleep Image version "
ian@0 2602 "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
ian@0 2603 le32_to_cpu(xp_resp[0].parm2));
ian@0 2604 }
ian@0 2605
ian@0 2606 return 0;
ian@0 2607
ian@0 2608 error_out_reset:
ian@0 2609 typhoon_reset(ioaddr, NoWait);
ian@0 2610
ian@0 2611 error_out_dma:
ian@0 2612 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
ian@0 2613 shared, shared_dma);
ian@0 2614 error_out_remap:
ian@0 2615 pci_iounmap(pdev, ioaddr);
ian@0 2616 error_out_regions:
ian@0 2617 pci_release_regions(pdev);
ian@0 2618 error_out_mwi:
ian@0 2619 pci_clear_mwi(pdev);
ian@0 2620 error_out_disable:
ian@0 2621 pci_disable_device(pdev);
ian@0 2622 error_out_dev:
ian@0 2623 free_netdev(dev);
ian@0 2624 error_out:
ian@0 2625 return err;
ian@0 2626 }
ian@0 2627
ian@0 2628 static void __devexit
ian@0 2629 typhoon_remove_one(struct pci_dev *pdev)
ian@0 2630 {
ian@0 2631 struct net_device *dev = pci_get_drvdata(pdev);
ian@0 2632 struct typhoon *tp = netdev_priv(dev);
ian@0 2633
ian@0 2634 unregister_netdev(dev);
ian@0 2635 pci_set_power_state(pdev, PCI_D0);
ian@0 2636 pci_restore_state(pdev);
ian@0 2637 typhoon_reset(tp->ioaddr, NoWait);
ian@0 2638 pci_iounmap(pdev, tp->ioaddr);
ian@0 2639 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
ian@0 2640 tp->shared, tp->shared_dma);
ian@0 2641 pci_release_regions(pdev);
ian@0 2642 pci_clear_mwi(pdev);
ian@0 2643 pci_disable_device(pdev);
ian@0 2644 pci_set_drvdata(pdev, NULL);
ian@0 2645 free_netdev(dev);
ian@0 2646 }
ian@0 2647
ian@0 2648 static struct pci_driver typhoon_driver = {
ian@0 2649 .name = DRV_MODULE_NAME,
ian@0 2650 .id_table = typhoon_pci_tbl,
ian@0 2651 .probe = typhoon_init_one,
ian@0 2652 .remove = __devexit_p(typhoon_remove_one),
ian@0 2653 #ifdef CONFIG_PM
ian@0 2654 .suspend = typhoon_suspend,
ian@0 2655 .resume = typhoon_resume,
ian@0 2656 .enable_wake = typhoon_enable_wake,
ian@0 2657 #endif
ian@0 2658 };
ian@0 2659
ian@0 2660 static int __init
ian@0 2661 typhoon_init(void)
ian@0 2662 {
ian@0 2663 return pci_module_init(&typhoon_driver);
ian@0 2664 }
ian@0 2665
ian@0 2666 static void __exit
ian@0 2667 typhoon_cleanup(void)
ian@0 2668 {
ian@0 2669 pci_unregister_driver(&typhoon_driver);
ian@0 2670 }
ian@0 2671
ian@0 2672 module_init(typhoon_init);
ian@0 2673 module_exit(typhoon_cleanup);