ia64/linux-2.6.18-xen.hg

view drivers/net/mv643xx_eth.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for) then we may as well keep
those pages rather than returning them to Xen.
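
A minimal sketch of the retry-on-timer idea described above (hypothetical
names -- balloon_timer, balloon_target, increase_reservation() -- rather
than the actual balloon driver code):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    extern struct timer_list balloon_timer;            /* assumed to exist */
    extern unsigned long balloon_target, current_pages;
    extern long increase_reservation(unsigned long nr_pages);

    /* Try to balloon up towards the target; keep any pages we did get
     * and, if we fell short, retry later instead of recording a hard
     * limit. */
    static void balloon_up_retry(void)
    {
            long credit = balloon_target - current_pages;

            if (credit <= 0)
                    return;

            if (increase_reservation(credit) < credit)
                    mod_timer(&balloon_timer, jiffies + HZ);
    }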

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 3e8752eb6d9c
line source
1 /*
2 * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports
3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
4 *
5 * Based on the 64360 driver from:
6 * Copyright (C) 2002 rabeeh@galileo.co.il
7 *
8 * Copyright (C) 2003 PMC-Sierra, Inc.,
9 * written by Manish Lachwani
10 *
11 * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
12 *
13 * Copyright (C) 2004-2006 MontaVista Software, Inc.
14 * Dale Farnsworth <dale@farnsworth.org>
15 *
16 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
17 * <sjhill@realitydiluted.com>
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version 2
22 * of the License, or (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
32 */
33 #include <linux/init.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/in.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/etherdevice.h>
41 #include <linux/bitops.h>
42 #include <linux/delay.h>
43 #include <linux/ethtool.h>
44 #include <linux/platform_device.h>
46 #include <asm/io.h>
47 #include <asm/types.h>
48 #include <asm/pgtable.h>
49 #include <asm/system.h>
50 #include <asm/delay.h>
51 #include "mv643xx_eth.h"
53 /* Static function declarations */
54 static void eth_port_uc_addr_get(struct net_device *dev,
55 unsigned char *MacAddr);
56 static void eth_port_set_multicast_list(struct net_device *);
57 static void mv643xx_eth_port_enable_tx(unsigned int port_num,
58 unsigned int queues);
59 static void mv643xx_eth_port_enable_rx(unsigned int port_num,
60 unsigned int queues);
61 static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num);
62 static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num);
63 static int mv643xx_eth_open(struct net_device *);
64 static int mv643xx_eth_stop(struct net_device *);
65 static int mv643xx_eth_change_mtu(struct net_device *, int);
66 static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
67 static void eth_port_init_mac_tables(unsigned int eth_port_num);
68 #ifdef MV643XX_NAPI
69 static int mv643xx_poll(struct net_device *dev, int *budget);
70 #endif
71 static int ethernet_phy_get(unsigned int eth_port_num);
72 static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
73 static int ethernet_phy_detect(unsigned int eth_port_num);
74 static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
75 static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val);
76 static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
77 static struct ethtool_ops mv643xx_ethtool_ops;
79 static char mv643xx_driver_name[] = "mv643xx_eth";
80 static char mv643xx_driver_version[] = "1.0";
82 static void __iomem *mv643xx_eth_shared_base;
84 /* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */
85 static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
87 static inline u32 mv_read(int offset)
88 {
89 void __iomem *reg_base;
91 reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
93 return readl(reg_base + offset);
94 }
96 static inline void mv_write(int offset, u32 data)
97 {
98 void __iomem *reg_base;
100 reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS;
101 writel(data, reg_base + offset);
102 }
104 /*
105 * Changes the MTU (maximum transfer unit) of the gigabit ethernet port
106 *
107 * Input : pointer to ethernet interface network device structure
108 * new mtu size
109 * Output : 0 upon success, -EINVAL upon failure
110 */
111 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
112 {
113 if ((new_mtu > 9500) || (new_mtu < 64))
114 return -EINVAL;
116 dev->mtu = new_mtu;
117 /*
118 * Stop then re-open the interface. This will allocate RX skb's with
119 * the new MTU.
120 * There is a possible danger that the open will not succeed if
121 * memory is exhausted, causing the open function to fail.
122 */
123 if (netif_running(dev)) {
124 mv643xx_eth_stop(dev);
125 if (mv643xx_eth_open(dev))
126 printk(KERN_ERR
127 "%s: Fatal error on opening device\n",
128 dev->name);
129 }
131 return 0;
132 }
134 /*
135 * mv643xx_eth_rx_refill_descs
136 *
137 * Fills / refills RX queue on a certain gigabit ethernet port
138 *
139 * Input : pointer to ethernet interface network device structure
140 * Output : N/A
141 */
142 static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
143 {
144 struct mv643xx_private *mp = netdev_priv(dev);
145 struct pkt_info pkt_info;
146 struct sk_buff *skb;
147 int unaligned;
149 while (mp->rx_desc_count < mp->rx_ring_size) {
150 skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
151 if (!skb)
152 break;
153 mp->rx_desc_count++;
154 unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
155 if (unaligned)
156 skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
157 pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
158 pkt_info.byte_cnt = ETH_RX_SKB_SIZE;
159 pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
160 ETH_RX_SKB_SIZE, DMA_FROM_DEVICE);
161 pkt_info.return_info = skb;
162 if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) {
163 printk(KERN_ERR
164 "%s: Error allocating RX Ring\n", dev->name);
165 break;
166 }
167 skb_reserve(skb, ETH_HW_IP_ALIGN);
168 }
169 /*
170 * If RX ring is empty of SKB, set a timer to try allocating
171 * again at a later time.
172 */
173 if (mp->rx_desc_count == 0) {
174 printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
175 mp->timeout.expires = jiffies + (HZ / 10); /* 100 mSec */
176 add_timer(&mp->timeout);
177 }
178 }
180 /*
181 * mv643xx_eth_rx_refill_descs_timer_wrapper
182 *
183 * Timer routine to wake up RX queue filling task. This function is
184 * used only when the RX queue is empty and all calls to alloc_skb
185 * have failed (due to an out of memory event).
186 *
187 * Input : pointer to ethernet interface network device structure
188 * Output : N/A
189 */
190 static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
191 {
192 mv643xx_eth_rx_refill_descs((struct net_device *)data);
193 }
195 /*
196 * mv643xx_eth_update_mac_address
197 *
198 * Update the MAC address of the port in the address table
199 *
200 * Input : pointer to ethernet interface network device structure
201 * Output : N/A
202 */
203 static void mv643xx_eth_update_mac_address(struct net_device *dev)
204 {
205 struct mv643xx_private *mp = netdev_priv(dev);
206 unsigned int port_num = mp->port_num;
208 eth_port_init_mac_tables(port_num);
209 eth_port_uc_addr_set(port_num, dev->dev_addr);
210 }
212 /*
213 * mv643xx_eth_set_rx_mode
214 *
215 * Change from promiscuous to regular rx mode
216 *
217 * Input : pointer to ethernet interface network device structure
218 * Output : N/A
219 */
220 static void mv643xx_eth_set_rx_mode(struct net_device *dev)
221 {
222 struct mv643xx_private *mp = netdev_priv(dev);
223 u32 config_reg;
225 config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num));
226 if (dev->flags & IFF_PROMISC)
227 config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
228 else
229 config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
230 mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg);
232 eth_port_set_multicast_list(dev);
233 }
235 /*
236 * mv643xx_eth_set_mac_address
237 *
238 * Change the interface's mac address.
239 * Nothing special needs to be done in hardware because the interface
240 * is always put in promiscuous mode.
241 *
242 * Input : pointer to ethernet interface network device structure and
243 * a pointer to the designated entry to be added to the cache.
244 * Output : zero upon success, negative upon failure
245 */
246 static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
247 {
248 int i;
250 for (i = 0; i < 6; i++)
251 /* +2 is for the offset of the HW addr type */
252 dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
253 mv643xx_eth_update_mac_address(dev);
254 return 0;
255 }
257 /*
258 * mv643xx_eth_tx_timeout
259 *
260 * Called upon a timeout on transmitting a packet
261 *
262 * Input : pointer to ethernet interface network device structure.
263 * Output : N/A
264 */
265 static void mv643xx_eth_tx_timeout(struct net_device *dev)
266 {
267 struct mv643xx_private *mp = netdev_priv(dev);
269 printk(KERN_INFO "%s: TX timeout ", dev->name);
271 /* Do the reset outside of interrupt context */
272 schedule_work(&mp->tx_timeout_task);
273 }
275 /*
276 * mv643xx_eth_tx_timeout_task
277 *
278 * Actual routine to reset the adapter when a timeout on Tx has occurred
279 */
280 static void mv643xx_eth_tx_timeout_task(struct net_device *dev)
281 {
282 struct mv643xx_private *mp = netdev_priv(dev);
284 if (!netif_running(dev))
285 return;
287 netif_stop_queue(dev);
289 eth_port_reset(mp->port_num);
290 eth_port_start(dev);
292 if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
293 netif_wake_queue(dev);
294 }
296 /**
297 * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors
298 *
299 * If force is non-zero, frees uncompleted descriptors as well
300 */
301 int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
302 {
303 struct mv643xx_private *mp = netdev_priv(dev);
304 struct eth_tx_desc *desc;
305 u32 cmd_sts;
306 struct sk_buff *skb;
307 unsigned long flags;
308 int tx_index;
309 dma_addr_t addr;
310 int count;
311 int released = 0;
313 while (mp->tx_desc_count > 0) {
314 spin_lock_irqsave(&mp->lock, flags);
315 tx_index = mp->tx_used_desc_q;
316 desc = &mp->p_tx_desc_area[tx_index];
317 cmd_sts = desc->cmd_sts;
319 if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
320 spin_unlock_irqrestore(&mp->lock, flags);
321 return released;
322 }
324 mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size;
325 mp->tx_desc_count--;
327 addr = desc->buf_ptr;
328 count = desc->byte_cnt;
329 skb = mp->tx_skb[tx_index];
330 if (skb)
331 mp->tx_skb[tx_index] = NULL;
333 spin_unlock_irqrestore(&mp->lock, flags);
335 if (cmd_sts & ETH_ERROR_SUMMARY) {
336 printk("%s: Error in TX\n", dev->name);
337 mp->stats.tx_errors++;
338 }
340 if (cmd_sts & ETH_TX_FIRST_DESC)
341 dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE);
342 else
343 dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
345 if (skb)
346 dev_kfree_skb_irq(skb);
348 released = 1;
349 }
351 return released;
352 }
354 static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev)
355 {
356 struct mv643xx_private *mp = netdev_priv(dev);
358 if (mv643xx_eth_free_tx_descs(dev, 0) &&
359 mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
360 netif_wake_queue(dev);
361 }
363 static void mv643xx_eth_free_all_tx_descs(struct net_device *dev)
364 {
365 mv643xx_eth_free_tx_descs(dev, 1);
366 }
368 /*
369 * mv643xx_eth_receive
370 *
371 * This function forwards packets that are received from the port's
372 * queues toward the kernel core or FastRoutes them to another interface.
373 *
374 * Input : dev - a pointer to the required interface
375 * max - maximum number to receive (0 means unlimited)
376 *
377 * Output : number of served packets
378 */
379 static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
380 {
381 struct mv643xx_private *mp = netdev_priv(dev);
382 struct net_device_stats *stats = &mp->stats;
383 unsigned int received_packets = 0;
384 struct sk_buff *skb;
385 struct pkt_info pkt_info;
387 while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) {
388 dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE,
389 DMA_FROM_DEVICE);
390 mp->rx_desc_count--;
391 received_packets++;
393 /*
394 * Update statistics.
395 * Note byte count includes 4 byte CRC count
396 */
397 stats->rx_packets++;
398 stats->rx_bytes += pkt_info.byte_cnt;
399 skb = pkt_info.return_info;
400 /*
401 * In case a packet was received without the first / last bits on, OR
402 * the error summary bit is on, the packet needs to be dropped.
403 */
404 if (((pkt_info.cmd_sts
405 & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
406 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC))
407 || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) {
408 stats->rx_dropped++;
409 if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC |
410 ETH_RX_LAST_DESC)) !=
411 (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
412 if (net_ratelimit())
413 printk(KERN_ERR
414 "%s: Received packet spread "
415 "on multiple descriptors\n",
416 dev->name);
417 }
418 if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)
419 stats->rx_errors++;
421 dev_kfree_skb_irq(skb);
422 } else {
423 /*
424 * The -4 is for the CRC in the trailer of the
425 * received packet
426 */
427 skb_put(skb, pkt_info.byte_cnt - 4);
428 skb->dev = dev;
430 if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) {
431 skb->ip_summed = CHECKSUM_UNNECESSARY;
432 skb->csum = htons(
433 (pkt_info.cmd_sts & 0x0007fff8) >> 3);
434 }
435 skb->protocol = eth_type_trans(skb, dev);
436 #ifdef MV643XX_NAPI
437 netif_receive_skb(skb);
438 #else
439 netif_rx(skb);
440 #endif
441 }
442 dev->last_rx = jiffies;
443 }
444 mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
446 return received_packets;
447 }
449 /* Set the mv643xx port configuration register for the speed/duplex mode. */
450 static void mv643xx_eth_update_pscr(struct net_device *dev,
451 struct ethtool_cmd *ecmd)
452 {
453 struct mv643xx_private *mp = netdev_priv(dev);
454 int port_num = mp->port_num;
455 u32 o_pscr, n_pscr;
456 unsigned int queues;
458 o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
459 n_pscr = o_pscr;
461 /* clear speed, duplex and rx buffer size fields */
462 n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 |
463 MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
464 MV643XX_ETH_SET_FULL_DUPLEX_MODE |
465 MV643XX_ETH_MAX_RX_PACKET_MASK);
467 if (ecmd->duplex == DUPLEX_FULL)
468 n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE;
470 if (ecmd->speed == SPEED_1000)
471 n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
472 MV643XX_ETH_MAX_RX_PACKET_9700BYTE;
473 else {
474 if (ecmd->speed == SPEED_100)
475 n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100;
476 n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE;
477 }
479 if (n_pscr != o_pscr) {
480 if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0)
481 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
482 n_pscr);
483 else {
484 queues = mv643xx_eth_port_disable_tx(port_num);
486 o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
487 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
488 o_pscr);
489 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
490 n_pscr);
491 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
492 n_pscr);
493 if (queues)
494 mv643xx_eth_port_enable_tx(port_num, queues);
495 }
496 }
497 }
499 /*
500 * mv643xx_eth_int_handler
501 *
502 * Main interrupt handler for the gigabit ethernet ports
503 *
504 * Input : irq - irq number (not used)
505 * dev_id - a pointer to the required interface's data structure
506 * regs - not used
507 * Output : N/A
508 */
510 static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
511 struct pt_regs *regs)
512 {
513 struct net_device *dev = (struct net_device *)dev_id;
514 struct mv643xx_private *mp = netdev_priv(dev);
515 u32 eth_int_cause, eth_int_cause_ext = 0;
516 unsigned int port_num = mp->port_num;
518 /* Read interrupt cause registers */
519 eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
520 ETH_INT_UNMASK_ALL;
521 if (eth_int_cause & ETH_INT_CAUSE_EXT) {
522 eth_int_cause_ext = mv_read(
523 MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
524 ETH_INT_UNMASK_ALL_EXT;
525 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
526 ~eth_int_cause_ext);
527 }
529 /* PHY status changed */
530 if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) {
531 struct ethtool_cmd cmd;
533 if (mii_link_ok(&mp->mii)) {
534 mii_ethtool_gset(&mp->mii, &cmd);
535 mv643xx_eth_update_pscr(dev, &cmd);
536 mv643xx_eth_port_enable_tx(port_num,
537 ETH_TX_QUEUES_ENABLED);
538 if (!netif_carrier_ok(dev)) {
539 netif_carrier_on(dev);
540 if (mp->tx_ring_size - mp->tx_desc_count >=
541 MAX_DESCS_PER_SKB)
542 netif_wake_queue(dev);
543 }
544 } else if (netif_carrier_ok(dev)) {
545 netif_stop_queue(dev);
546 netif_carrier_off(dev);
547 }
548 }
550 #ifdef MV643XX_NAPI
551 if (eth_int_cause & ETH_INT_CAUSE_RX) {
552 /* schedule the NAPI poll routine to maintain port */
553 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
554 ETH_INT_MASK_ALL);
555 /* wait for previous write to complete */
556 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
558 netif_rx_schedule(dev);
559 }
560 #else
561 if (eth_int_cause & ETH_INT_CAUSE_RX)
562 mv643xx_eth_receive_queue(dev, INT_MAX);
563 #endif
564 if (eth_int_cause_ext & ETH_INT_CAUSE_TX)
565 mv643xx_eth_free_completed_tx_descs(dev);
567 /*
568 * If no real interrupt occurred, exit.
569 * This can happen when using gigE interrupt coalescing mechanism.
570 */
571 if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0))
572 return IRQ_NONE;
574 return IRQ_HANDLED;
575 }
577 #ifdef MV643XX_COAL
579 /*
580 * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path
581 *
582 * DESCRIPTION:
583 * This routine sets the RX coalescing interrupt mechanism parameter.
584 * This parameter is a timeout counter that counts in chunks of
585 * 64 t_clk; when the timeout expires, a maskable interrupt is
586 * raised.
587 * The parameter is calculated using the t_clk of the MV-643xx chip
588 * and the required delay of the interrupt in usec.
589 *
590 * INPUT:
591 * unsigned int eth_port_num Ethernet port number
592 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
593 * unsigned int delay Delay in usec
594 *
595 * OUTPUT:
596 * Interrupt coalescing mechanism value is set in MV-643xx chip.
597 *
598 * RETURN:
599 * The interrupt coalescing value set in the gigE port.
600 *
601 */
602 static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
603 unsigned int t_clk, unsigned int delay)
604 {
605 unsigned int coal = ((t_clk / 1000000) * delay) / 64;
607 /* Set RX Coalescing mechanism */
608 mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num),
609 ((coal & 0x3fff) << 8) |
610 (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num))
611 & 0xffc000ff));
613 return coal;
614 }
615 #endif
617 /*
618 * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path
619 *
620 * DESCRIPTION:
621 * This routine sets the TX coalescing interrupt mechanism parameter.
622 * This parameter is a timeout counter that counts in chunks of
623 * 64 t_clk; when the timeout expires, a maskable interrupt is
624 * raised.
625 * The parameter is calculated using the t_clk frequency of the
626 * MV-643xx chip and the required delay of the interrupt in usec.
627 *
628 * INPUT:
629 * unsigned int eth_port_num Ethernet port number
630 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
631 * unsigned int delay Delay in uSeconds
632 *
633 * OUTPUT:
634 * Interrupt coalescing mechanism value is set in MV-643xx chip.
635 *
636 * RETURN:
637 * The interrupt coalescing value set in the gigE port.
638 *
639 */
640 static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
641 unsigned int t_clk, unsigned int delay)
642 {
643 unsigned int coal;
644 coal = ((t_clk / 1000000) * delay) / 64;
645 /* Set TX Coalescing mechanism */
646 mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
647 coal << 4);
648 return coal;
649 }
651 /*
652 * ether_init_rx_desc_ring - Build an Rx descriptor chain and buffers in memory as a ring.
653 *
654 * DESCRIPTION:
655 * This function prepares a Rx chained list of descriptors and packet
656 * buffers in a form of a ring. The routine must be called after port
657 * initialization routine and before port start routine.
658 * The Ethernet SDMA engine uses CPU bus addresses to access the various
659 * devices in the system (i.e. DRAM). This function uses the ethernet
660 * struct 'virtual to physical' routine (set by the user) to set the ring
661 * with physical addresses.
662 *
663 * INPUT:
664 * struct mv643xx_private *mp Ethernet Port Control struct.
665 *
666 * OUTPUT:
667 * The routine updates the Ethernet port control struct with information
668 * regarding the Rx descriptors and buffers.
669 *
670 * RETURN:
671 * None.
672 */
673 static void ether_init_rx_desc_ring(struct mv643xx_private *mp)
674 {
675 volatile struct eth_rx_desc *p_rx_desc;
676 int rx_desc_num = mp->rx_ring_size;
677 int i;
679 /* initialize the next_desc_ptr links in the Rx descriptors ring */
680 p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area;
681 for (i = 0; i < rx_desc_num; i++) {
682 p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma +
683 ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc);
684 }
686 /* Save Rx desc pointer to driver struct. */
687 mp->rx_curr_desc_q = 0;
688 mp->rx_used_desc_q = 0;
690 mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc);
691 }
693 /*
694 * ether_init_tx_desc_ring - Build a Tx descriptor chain and buffers in memory as a ring.
695 *
696 * DESCRIPTION:
697 * This function prepares a Tx chained list of descriptors and packet
698 * buffers in a form of a ring. The routine must be called after port
699 * initialization routine and before port start routine.
700 * The Ethernet SDMA engine uses CPU bus addresses to access the various
701 * devices in the system (i.e. DRAM). This function uses the ethernet
702 * struct 'virtual to physical' routine (set by the user) to set the ring
703 * with physical addresses.
704 *
705 * INPUT:
706 * struct mv643xx_private *mp Ethernet Port Control struct.
707 *
708 * OUTPUT:
709 * The routine updates the Ethernet port control struct with information
710 * regarding the Tx descriptors and buffers.
711 *
712 * RETURN:
713 * None.
714 */
715 static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
716 {
717 int tx_desc_num = mp->tx_ring_size;
718 struct eth_tx_desc *p_tx_desc;
719 int i;
721 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
722 p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area;
723 for (i = 0; i < tx_desc_num; i++) {
724 p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma +
725 ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc);
726 }
728 mp->tx_curr_desc_q = 0;
729 mp->tx_used_desc_q = 0;
731 mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc);
732 }
734 static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
735 {
736 struct mv643xx_private *mp = netdev_priv(dev);
737 int err;
739 spin_lock_irq(&mp->lock);
740 err = mii_ethtool_sset(&mp->mii, cmd);
741 spin_unlock_irq(&mp->lock);
743 return err;
744 }
746 static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
747 {
748 struct mv643xx_private *mp = netdev_priv(dev);
749 int err;
751 spin_lock_irq(&mp->lock);
752 err = mii_ethtool_gset(&mp->mii, cmd);
753 spin_unlock_irq(&mp->lock);
755 /* The PHY may support 1000baseT_Half, but the mv643xx does not */
756 cmd->supported &= ~SUPPORTED_1000baseT_Half;
757 cmd->advertising &= ~ADVERTISED_1000baseT_Half;
759 return err;
760 }
762 /*
763 * mv643xx_eth_open
764 *
765 * This function is called when opening the network device. The function
766 * should initialize all the hardware, initialize cyclic Rx/Tx
767 * descriptors chain and buffers and allocate an IRQ to the network
768 * device.
769 *
770 * Input : a pointer to the network device structure
771 *
772 * Output : zero on success, nonzero on failure.
773 */
775 static int mv643xx_eth_open(struct net_device *dev)
776 {
777 struct mv643xx_private *mp = netdev_priv(dev);
778 unsigned int port_num = mp->port_num;
779 unsigned int size;
780 int err;
782 err = request_irq(dev->irq, mv643xx_eth_int_handler,
783 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
784 if (err) {
785 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
786 port_num);
787 return -EAGAIN;
788 }
790 eth_port_init(mp);
792 memset(&mp->timeout, 0, sizeof(struct timer_list));
793 mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
794 mp->timeout.data = (unsigned long)dev;
796 /* Allocate RX and TX skb rings */
797 mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
798 GFP_KERNEL);
799 if (!mp->rx_skb) {
800 printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
801 err = -ENOMEM;
802 goto out_free_irq;
803 }
804 mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
805 GFP_KERNEL);
806 if (!mp->tx_skb) {
807 printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
808 err = -ENOMEM;
809 goto out_free_rx_skb;
810 }
812 /* Allocate TX ring */
813 mp->tx_desc_count = 0;
814 size = mp->tx_ring_size * sizeof(struct eth_tx_desc);
815 mp->tx_desc_area_size = size;
817 if (mp->tx_sram_size) {
818 mp->p_tx_desc_area = ioremap(mp->tx_sram_addr,
819 mp->tx_sram_size);
820 mp->tx_desc_dma = mp->tx_sram_addr;
821 } else
822 mp->p_tx_desc_area = dma_alloc_coherent(NULL, size,
823 &mp->tx_desc_dma,
824 GFP_KERNEL);
826 if (!mp->p_tx_desc_area) {
827 printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
828 dev->name, size);
829 err = -ENOMEM;
830 goto out_free_tx_skb;
831 }
832 BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
833 memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
835 ether_init_tx_desc_ring(mp);
837 /* Allocate RX ring */
838 mp->rx_desc_count = 0;
839 size = mp->rx_ring_size * sizeof(struct eth_rx_desc);
840 mp->rx_desc_area_size = size;
842 if (mp->rx_sram_size) {
843 mp->p_rx_desc_area = ioremap(mp->rx_sram_addr,
844 mp->rx_sram_size);
845 mp->rx_desc_dma = mp->rx_sram_addr;
846 } else
847 mp->p_rx_desc_area = dma_alloc_coherent(NULL, size,
848 &mp->rx_desc_dma,
849 GFP_KERNEL);
851 if (!mp->p_rx_desc_area) {
852 printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n",
853 dev->name, size);
854 printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
855 dev->name);
856 if (mp->rx_sram_size)
857 iounmap(mp->p_tx_desc_area);
858 else
859 dma_free_coherent(NULL, mp->tx_desc_area_size,
860 mp->p_tx_desc_area, mp->tx_desc_dma);
861 err = -ENOMEM;
862 goto out_free_tx_skb;
863 }
864 memset((void *)mp->p_rx_desc_area, 0, size);
866 ether_init_rx_desc_ring(mp);
868 mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */
870 /* Clear any pending ethernet port interrupts */
871 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
872 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
874 eth_port_start(dev);
876 /* Interrupt Coalescing */
878 #ifdef MV643XX_COAL
879 mp->rx_int_coal =
880 eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL);
881 #endif
883 mp->tx_int_coal =
884 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
886 /* Unmask phy and link status changes interrupts */
887 mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
888 ETH_INT_UNMASK_ALL_EXT);
890 /* Unmask RX buffer and TX end interrupt */
891 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
893 return 0;
895 out_free_tx_skb:
896 kfree(mp->tx_skb);
897 out_free_rx_skb:
898 kfree(mp->rx_skb);
899 out_free_irq:
900 free_irq(dev->irq, dev);
902 return err;
903 }
905 static void mv643xx_eth_free_tx_rings(struct net_device *dev)
906 {
907 struct mv643xx_private *mp = netdev_priv(dev);
909 /* Stop Tx Queues */
910 mv643xx_eth_port_disable_tx(mp->port_num);
912 /* Free outstanding skb's on TX ring */
913 mv643xx_eth_free_all_tx_descs(dev);
915 BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q);
917 /* Free TX ring */
918 if (mp->tx_sram_size)
919 iounmap(mp->p_tx_desc_area);
920 else
921 dma_free_coherent(NULL, mp->tx_desc_area_size,
922 mp->p_tx_desc_area, mp->tx_desc_dma);
923 }
925 static void mv643xx_eth_free_rx_rings(struct net_device *dev)
926 {
927 struct mv643xx_private *mp = netdev_priv(dev);
928 unsigned int port_num = mp->port_num;
929 int curr;
931 /* Stop RX Queues */
932 mv643xx_eth_port_disable_rx(port_num);
934 /* Free preallocated skb's on RX rings */
935 for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
936 if (mp->rx_skb[curr]) {
937 dev_kfree_skb(mp->rx_skb[curr]);
938 mp->rx_desc_count--;
939 }
940 }
942 if (mp->rx_desc_count)
943 printk(KERN_ERR
944 "%s: Error in freeing Rx Ring. %d skb's still"
945 " stuck in RX Ring - ignoring them\n", dev->name,
946 mp->rx_desc_count);
947 /* Free RX ring */
948 if (mp->rx_sram_size)
949 iounmap(mp->p_rx_desc_area);
950 else
951 dma_free_coherent(NULL, mp->rx_desc_area_size,
952 mp->p_rx_desc_area, mp->rx_desc_dma);
953 }
955 /*
956 * mv643xx_eth_stop
957 *
958 * This function is used when closing the network device.
959 * It updates the hardware,
960 * releases all memory that holds buffers and descriptors, and releases the IRQ.
961 * Input : a pointer to the device structure
962 * Output : zero on success, nonzero on failure
963 */
965 static int mv643xx_eth_stop(struct net_device *dev)
966 {
967 struct mv643xx_private *mp = netdev_priv(dev);
968 unsigned int port_num = mp->port_num;
970 /* Mask all interrupts on ethernet port */
971 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
972 /* wait for previous write to complete */
973 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
975 #ifdef MV643XX_NAPI
976 netif_poll_disable(dev);
977 #endif
978 netif_carrier_off(dev);
979 netif_stop_queue(dev);
981 eth_port_reset(mp->port_num);
983 mv643xx_eth_free_tx_rings(dev);
984 mv643xx_eth_free_rx_rings(dev);
986 #ifdef MV643XX_NAPI
987 netif_poll_enable(dev);
988 #endif
990 free_irq(dev->irq, dev);
992 return 0;
993 }
995 #ifdef MV643XX_NAPI
996 /*
997 * mv643xx_poll
998 *
999 * This function is used in case of NAPI
1000 */
1001 static int mv643xx_poll(struct net_device *dev, int *budget)
1003 struct mv643xx_private *mp = netdev_priv(dev);
1004 int done = 1, orig_budget, work_done;
1005 unsigned int port_num = mp->port_num;
1007 #ifdef MV643XX_TX_FAST_REFILL
1008 if (++mp->tx_clean_threshold > 5) {
1009 mv643xx_eth_free_completed_tx_descs(dev);
1010 mp->tx_clean_threshold = 0;
1012 #endif
1014 if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
1015 != (u32) mp->rx_used_desc_q) {
1016 orig_budget = *budget;
1017 if (orig_budget > dev->quota)
1018 orig_budget = dev->quota;
1019 work_done = mv643xx_eth_receive_queue(dev, orig_budget);
1020 *budget -= work_done;
1021 dev->quota -= work_done;
1022 if (work_done >= orig_budget)
1023 done = 0;
1026 if (done) {
1027 netif_rx_complete(dev);
1028 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
1029 mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1030 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
1031 ETH_INT_UNMASK_ALL);
1034 return done ? 0 : 1;
1036 #endif
1038 /**
1039 * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments
1041 * Hardware can't handle unaligned fragments smaller than 9 bytes.
1042 * This helper function detects that case.
1043 */
1045 static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
1047 unsigned int frag;
1048 skb_frag_t *fragp;
1050 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1051 fragp = &skb_shinfo(skb)->frags[frag];
1052 if (fragp->size <= 8 && fragp->page_offset & 0x7)
1053 return 1;
1055 return 0;
1058 /**
1059 * eth_alloc_tx_desc_index - return the index of the next available tx desc
1060 */
1061 static int eth_alloc_tx_desc_index(struct mv643xx_private *mp)
1063 int tx_desc_curr;
1065 BUG_ON(mp->tx_desc_count >= mp->tx_ring_size);
1067 tx_desc_curr = mp->tx_curr_desc_q;
1068 mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size;
1070 BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q);
1072 return tx_desc_curr;
1075 /**
1076 * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments.
1078 * Ensure the data for each fragment to be transmitted is mapped properly,
1079 * then fill in descriptors in the tx hw queue.
1080 */
1081 static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
1082 struct sk_buff *skb)
1084 int frag;
1085 int tx_index;
1086 struct eth_tx_desc *desc;
1088 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
1089 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
1091 tx_index = eth_alloc_tx_desc_index(mp);
1092 desc = &mp->p_tx_desc_area[tx_index];
1094 desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA;
1095 /* Last Frag enables interrupt and frees the skb */
1096 if (frag == (skb_shinfo(skb)->nr_frags - 1)) {
1097 desc->cmd_sts |= ETH_ZERO_PADDING |
1098 ETH_TX_LAST_DESC |
1099 ETH_TX_ENABLE_INTERRUPT;
1100 mp->tx_skb[tx_index] = skb;
1101 } else
1102 mp->tx_skb[tx_index] = 0;
1104 desc = &mp->p_tx_desc_area[tx_index];
1105 desc->l4i_chk = 0;
1106 desc->byte_cnt = this_frag->size;
1107 desc->buf_ptr = dma_map_page(NULL, this_frag->page,
1108 this_frag->page_offset,
1109 this_frag->size,
1110 DMA_TO_DEVICE);
1114 /**
1115 * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
1117 * Ensure the data for an skb to be transmitted is mapped properly,
1118 * then fill in descriptors in the tx hw queue and start the hardware.
1119 */
1120 static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1121 struct sk_buff *skb)
1123 int tx_index;
1124 struct eth_tx_desc *desc;
1125 u32 cmd_sts;
1126 int length;
1127 int nr_frags = skb_shinfo(skb)->nr_frags;
1129 cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA;
1131 tx_index = eth_alloc_tx_desc_index(mp);
1132 desc = &mp->p_tx_desc_area[tx_index];
1134 if (nr_frags) {
1135 eth_tx_fill_frag_descs(mp, skb);
1137 length = skb_headlen(skb);
1138 mp->tx_skb[tx_index] = 0;
1139 } else {
1140 cmd_sts |= ETH_ZERO_PADDING |
1141 ETH_TX_LAST_DESC |
1142 ETH_TX_ENABLE_INTERRUPT;
1143 length = skb->len;
1144 mp->tx_skb[tx_index] = skb;
1147 desc->byte_cnt = length;
1148 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
1150 if (skb->ip_summed == CHECKSUM_HW) {
1151 BUG_ON(skb->protocol != ETH_P_IP);
1153 cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
1154 ETH_GEN_IP_V_4_CHECKSUM |
1155 skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
1157 switch (skb->nh.iph->protocol) {
1158 case IPPROTO_UDP:
1159 cmd_sts |= ETH_UDP_FRAME;
1160 desc->l4i_chk = skb->h.uh->check;
1161 break;
1162 case IPPROTO_TCP:
1163 desc->l4i_chk = skb->h.th->check;
1164 break;
1165 default:
1166 BUG();
1168 } else {
1169 /* Errata BTS #50, IHL must be 5 if no HW checksum */
1170 cmd_sts |= 5 << ETH_TX_IHL_SHIFT;
1171 desc->l4i_chk = 0;
1174 /* ensure all other descriptors are written before first cmd_sts */
1175 wmb();
1176 desc->cmd_sts = cmd_sts;
1178 /* ensure all descriptors are written before poking hardware */
1179 wmb();
1180 mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED);
1182 mp->tx_desc_count += nr_frags + 1;
1185 /**
1186 * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission
1188 */
1189 static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1191 struct mv643xx_private *mp = netdev_priv(dev);
1192 struct net_device_stats *stats = &mp->stats;
1193 unsigned long flags;
1195 BUG_ON(netif_queue_stopped(dev));
1196 BUG_ON(skb == NULL);
1198 if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
1199 printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
1200 netif_stop_queue(dev);
1201 return 1;
1204 if (has_tiny_unaligned_frags(skb)) {
1205 if (__skb_linearize(skb)) {
1206 stats->tx_dropped++;
1207 printk(KERN_DEBUG "%s: failed to linearize tiny "
1208 "unaligned fragment\n", dev->name);
1209 return 1;
1213 spin_lock_irqsave(&mp->lock, flags);
1215 eth_tx_submit_descs_for_skb(mp, skb);
1216 stats->tx_bytes += skb->len;
1217 stats->tx_packets++;
1218 dev->trans_start = jiffies;
1220 if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB)
1221 netif_stop_queue(dev);
1223 spin_unlock_irqrestore(&mp->lock, flags);
1225 return 0; /* success */
1228 /*
1229 * mv643xx_eth_get_stats
1231 * Returns a pointer to the interface statistics.
1233 * Input : dev - a pointer to the required interface
1235 * Output : a pointer to the interface's statistics
1236 */
1238 static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1240 struct mv643xx_private *mp = netdev_priv(dev);
1242 return &mp->stats;
1245 #ifdef CONFIG_NET_POLL_CONTROLLER
1246 static void mv643xx_netpoll(struct net_device *netdev)
1248 struct mv643xx_private *mp = netdev_priv(netdev);
1249 int port_num = mp->port_num;
1251 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1252 /* wait for previous write to complete */
1253 mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
1255 mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
1257 mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1259 #endif
1261 static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
1262 int speed, int duplex,
1263 struct ethtool_cmd *cmd)
1265 struct mv643xx_private *mp = netdev_priv(dev);
1267 memset(cmd, 0, sizeof(*cmd));
1269 cmd->port = PORT_MII;
1270 cmd->transceiver = XCVR_INTERNAL;
1271 cmd->phy_address = phy_address;
1273 if (speed == 0) {
1274 cmd->autoneg = AUTONEG_ENABLE;
1275 /* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
1276 cmd->speed = SPEED_100;
1277 cmd->advertising = ADVERTISED_10baseT_Half |
1278 ADVERTISED_10baseT_Full |
1279 ADVERTISED_100baseT_Half |
1280 ADVERTISED_100baseT_Full;
1281 if (mp->mii.supports_gmii)
1282 cmd->advertising |= ADVERTISED_1000baseT_Full;
1283 } else {
1284 cmd->autoneg = AUTONEG_DISABLE;
1285 cmd->speed = speed;
1286 cmd->duplex = duplex;
1290 /*
1291 * mv643xx_eth_probe
1293 * First function called after registering the network device.
1294 * Its purpose is to initialize the device as an ethernet device,
1295 * fill the ethernet device structure with pointers to functions,
1296 * and set the MAC address of the interface.
1298 * Input : struct device *
1299 * Output : -ENOMEM on failure, 0 on success
1300 */
1301 static int mv643xx_eth_probe(struct platform_device *pdev)
1303 struct mv643xx_eth_platform_data *pd;
1304 int port_num = pdev->id;
1305 struct mv643xx_private *mp;
1306 struct net_device *dev;
1307 u8 *p;
1308 struct resource *res;
1309 int err;
1310 struct ethtool_cmd cmd;
1311 int duplex = DUPLEX_HALF;
1312 int speed = 0; /* default to auto-negotiation */
1314 dev = alloc_etherdev(sizeof(struct mv643xx_private));
1315 if (!dev)
1316 return -ENOMEM;
1318 platform_set_drvdata(pdev, dev);
1320 mp = netdev_priv(dev);
1322 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1323 BUG_ON(!res);
1324 dev->irq = res->start;
1326 mp->port_num = port_num;
1328 dev->open = mv643xx_eth_open;
1329 dev->stop = mv643xx_eth_stop;
1330 dev->hard_start_xmit = mv643xx_eth_start_xmit;
1331 dev->get_stats = mv643xx_eth_get_stats;
1332 dev->set_mac_address = mv643xx_eth_set_mac_address;
1333 dev->set_multicast_list = mv643xx_eth_set_rx_mode;
1335 /* No need to Tx Timeout */
1336 dev->tx_timeout = mv643xx_eth_tx_timeout;
1337 #ifdef MV643XX_NAPI
1338 dev->poll = mv643xx_poll;
1339 dev->weight = 64;
1340 #endif
1342 #ifdef CONFIG_NET_POLL_CONTROLLER
1343 dev->poll_controller = mv643xx_netpoll;
1344 #endif
1346 dev->watchdog_timeo = 2 * HZ;
1347 dev->tx_queue_len = mp->tx_ring_size;
1348 dev->base_addr = 0;
1349 dev->change_mtu = mv643xx_eth_change_mtu;
1350 dev->do_ioctl = mv643xx_eth_do_ioctl;
1351 SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops);
1353 #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
1354 #ifdef MAX_SKB_FRAGS
1355 /*
1356 * Zero copy can only work if we use Discovery II memory. Else, we will
1357 * have to map the buffers to ISA memory which is only 16 MB
1358 */
1359 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
1360 #endif
1361 #endif
1363 /* Configure the timeout task */
1364 INIT_WORK(&mp->tx_timeout_task,
1365 (void (*)(void *))mv643xx_eth_tx_timeout_task, dev);
1367 spin_lock_init(&mp->lock);
1369 /* set default config values */
1370 eth_port_uc_addr_get(dev, dev->dev_addr);
1371 mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
1372 mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
1374 pd = pdev->dev.platform_data;
1375 if (pd) {
1376 if (pd->mac_addr)
1377 memcpy(dev->dev_addr, pd->mac_addr, 6);
1379 if (pd->phy_addr || pd->force_phy_addr)
1380 ethernet_phy_set(port_num, pd->phy_addr);
1382 if (pd->rx_queue_size)
1383 mp->rx_ring_size = pd->rx_queue_size;
1385 if (pd->tx_queue_size)
1386 mp->tx_ring_size = pd->tx_queue_size;
1388 if (pd->tx_sram_size) {
1389 mp->tx_sram_size = pd->tx_sram_size;
1390 mp->tx_sram_addr = pd->tx_sram_addr;
1393 if (pd->rx_sram_size) {
1394 mp->rx_sram_size = pd->rx_sram_size;
1395 mp->rx_sram_addr = pd->rx_sram_addr;
1398 duplex = pd->duplex;
1399 speed = pd->speed;
1402 /* Hook up MII support for ethtool */
1403 mp->mii.dev = dev;
1404 mp->mii.mdio_read = mv643xx_mdio_read;
1405 mp->mii.mdio_write = mv643xx_mdio_write;
1406 mp->mii.phy_id = ethernet_phy_get(port_num);
1407 mp->mii.phy_id_mask = 0x3f;
1408 mp->mii.reg_num_mask = 0x1f;
1410 err = ethernet_phy_detect(port_num);
1411 if (err) {
1412 pr_debug("MV643xx ethernet port %d: "
1413 "No PHY detected at addr %d\n",
1414 port_num, ethernet_phy_get(port_num));
1415 goto out;
1418 ethernet_phy_reset(port_num);
1419 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
1420 mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
1421 mv643xx_eth_update_pscr(dev, &cmd);
1422 mv643xx_set_settings(dev, &cmd);
1424 SET_MODULE_OWNER(dev);
1425 SET_NETDEV_DEV(dev, &pdev->dev);
1426 err = register_netdev(dev);
1427 if (err)
1428 goto out;
1430 p = dev->dev_addr;
1431 printk(KERN_NOTICE
1432 "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
1433 dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]);
1435 if (dev->features & NETIF_F_SG)
1436 printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name);
1438 if (dev->features & NETIF_F_IP_CSUM)
1439 printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n",
1440 dev->name);
1442 #ifdef MV643XX_CHECKSUM_OFFLOAD_TX
1443 printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
1444 #endif
1446 #ifdef MV643XX_COAL
1447 printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
1448 dev->name);
1449 #endif
1451 #ifdef MV643XX_NAPI
1452 printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
1453 #endif
1455 if (mp->tx_sram_size > 0)
1456 printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);
1458 return 0;
1460 out:
1461 free_netdev(dev);
1463 return err;
1466 static int mv643xx_eth_remove(struct platform_device *pdev)
1468 struct net_device *dev = platform_get_drvdata(pdev);
1470 unregister_netdev(dev);
1471 flush_scheduled_work();
1473 free_netdev(dev);
1474 platform_set_drvdata(pdev, NULL);
1475 return 0;
1478 static int mv643xx_eth_shared_probe(struct platform_device *pdev)
1480 struct resource *res;
1482 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
1484 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1485 if (res == NULL)
1486 return -ENODEV;
1488 mv643xx_eth_shared_base = ioremap(res->start,
1489 MV643XX_ETH_SHARED_REGS_SIZE);
1490 if (mv643xx_eth_shared_base == NULL)
1491 return -ENOMEM;
1493 return 0;
1497 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
1499 iounmap(mv643xx_eth_shared_base);
1500 mv643xx_eth_shared_base = NULL;
1502 return 0;
1505 static struct platform_driver mv643xx_eth_driver = {
1506 .probe = mv643xx_eth_probe,
1507 .remove = mv643xx_eth_remove,
1508 .driver = {
1509 .name = MV643XX_ETH_NAME,
1510 },
1511 };
1513 static struct platform_driver mv643xx_eth_shared_driver = {
1514 .probe = mv643xx_eth_shared_probe,
1515 .remove = mv643xx_eth_shared_remove,
1516 .driver = {
1517 .name = MV643XX_ETH_SHARED_NAME,
1518 },
1519 };
1521 /*
1522 * mv643xx_init_module
1524 * Registers the network drivers into the Linux kernel
1526 * Input : N/A
1528 * Output : N/A
1529 */
1530 static int __init mv643xx_init_module(void)
1532 int rc;
1534 rc = platform_driver_register(&mv643xx_eth_shared_driver);
1535 if (!rc) {
1536 rc = platform_driver_register(&mv643xx_eth_driver);
1537 if (rc)
1538 platform_driver_unregister(&mv643xx_eth_shared_driver);
1540 return rc;
1543 /*
1544 * mv643xx_cleanup_module
1546 * Unregisters the network drivers from the Linux kernel
1548 * Input : N/A
1550 * Output : N/A
1551 */
1552 static void __exit mv643xx_cleanup_module(void)
1554 platform_driver_unregister(&mv643xx_eth_driver);
1555 platform_driver_unregister(&mv643xx_eth_shared_driver);
1558 module_init(mv643xx_init_module);
1559 module_exit(mv643xx_cleanup_module);
1561 MODULE_LICENSE("GPL");
1562 MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
1563 " and Dale Farnsworth");
1564 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
1566 /*
1567 * The second part is the low level driver of the gigE ethernet ports.
1568 */
1570 /*
1571 * Marvell's Gigabit Ethernet controller low level driver
1573 * DESCRIPTION:
1574 * This file introduces a low level API to Marvell's Gigabit Ethernet
1575 * controller. This Gigabit Ethernet Controller driver API controls
1576 * 1) Operations (i.e. port init, start, reset etc').
1577 * 2) Data flow (i.e. port send, receive etc').
1578 * Each Gigabit Ethernet port is controlled via
1579 * struct mv643xx_private.
1580 * This struct includes user configuration information as well as
1581 * driver internal data needed for its operations.
1583 * Supported Features:
1584 * - This low level driver is OS independent. Allocating memory for
1585 * the descriptor rings and buffers is not within the scope of
1586 * this driver.
1587 * - The user is free from Rx/Tx queue management.
1588 * - This low level driver introduces a functionality API that enables
1589 * the user to operate Marvell's Gigabit Ethernet Controller in a
1590 * convenient way.
1591 * - Simple Gigabit Ethernet port operation API.
1592 * - Simple Gigabit Ethernet port data flow API.
1593 * - Data flow and operation API support per queue functionality.
1594 * - Support cached descriptors for better performance.
1595 * - Enable access to all four DRAM banks and internal SRAM memory
1596 * spaces.
1597 * - PHY access and control API.
1598 * - Port control register configuration API.
1599 * - Full control over Unicast and Multicast MAC configurations.
1601 * Operation flow:
1603 * Initialization phase
1604 * This phase completes the initialization of the
1605 * mv643xx_private struct.
1606 * User information regarding port configuration has to be set
1607 * prior to calling the port initialization routine.
1609 * In this phase any port Tx/Rx activity is halted, MIB counters
1610 * are cleared, the PHY address is set according to the user parameter,
1611 * and access to DRAM and internal SRAM memory spaces is enabled.
1613 * Driver ring initialization
1614 * Allocating memory for the descriptor rings and buffers is not
1615 * within the scope of this driver. Thus, the user is required to
1616 * allocate memory for the descriptors ring and buffers. Those
1617 * memory parameters are used by the Rx and Tx ring initialization
1618 * routines in order to form the descriptor linked list into a
1619 * ring.
1620 * Note: Pay special attention to alignment issues when using
1621 * cached descriptors/buffers. In this phase the driver stores
1622 * information in the mv643xx_private struct regarding each queue
1623 * ring.
1625 * Driver start
1626 * This phase prepares the Ethernet port for Rx and Tx activity.
1627 * It uses the information stored in the mv643xx_private struct to
1628 * initialize the various port registers.
1630 * Data flow:
1631 * All packet references to/from the driver are done using
1632 * struct pkt_info.
1633 * This struct is a unified struct used with Rx and Tx operations.
1634 * This way the user is not required to be familiar with either
1635 * the Tx or the Rx descriptor structures.
1636 * The driver's descriptor rings are managed by indexes.
1637 * Those indexes control the ring resources and are used to indicate
1638 * a SW resource error:
1639 * 'current'
1640 * This index points to the current available resource for use. For
1641 * example in Rx process this index will point to the descriptor
1642 * that will be passed to the user upon calling the receive
1643 * routine. In Tx process, this index will point to the descriptor
1644 * that will be assigned with the user packet info and transmitted.
1645 * 'used'
1646 * This index points to the descriptor that needs to restore its
1647 * resources. For example in Rx process, using the Rx buffer return
1648 * API will attach the buffer returned in packet info to the
1649 * descriptor pointed by 'used'. In Tx process, using the Tx
1650 * descriptor return will merely return the user packet info with
1651 * the command status of the transmitted buffer pointed by the
1652 * 'used' index. Nevertheless, it is essential to use this routine
1653 * to update the 'used' index.
1654 * 'first'
1655 * This index supports Tx Scatter-Gather. It points to the first
1656 * descriptor of a packet assembled of multiple buffers. For
1657 * example, if in the middle of such a packet we have a Tx resource
1658 * error, the 'curr' index gets the value of 'first' to indicate
1659 * that the ring returned to its state before trying to transmit
1660 * this packet.
1662 * Receive operation:
1663 * The eth_port_receive API sets the packet information struct,
1664 * passed by the caller, with received information from the
1665 * 'current' SDMA descriptor.
1666 * It is the user's responsibility to return this resource back
1667 * to the Rx descriptor ring to enable the reuse of this resource.
1668 * Returning the Rx resource is done using the eth_rx_return_buff API.
1670 * Prior to calling the initialization routine eth_port_init() the user
1671 * must set the following fields under mv643xx_private struct:
1672 * port_num User Ethernet port number.
1673 * port_config User port configuration value.
1674 * port_config_extend User port config extend value.
1675 * port_sdma_config User port SDMA config value.
1676 * port_serial_control User port serial control value.
1678 * This driver data flow is done using the struct pkt_info which
1679 * is a unified struct for Rx and Tx operations:
1681 * byte_cnt Tx/Rx descriptor buffer byte count.
1682 * l4i_chk CPU provided TCP Checksum. For Tx operation
1683 * only.
1684 * cmd_sts Tx/Rx descriptor command status.
1685 * buf_ptr Tx/Rx descriptor buffer pointer.
1686 * return_info Tx/Rx user resource return information.
1687 */
1689 /* PHY routines */
1690 static int ethernet_phy_get(unsigned int eth_port_num);
1691 static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
1693 /* Ethernet Port routines */
1694 static void eth_port_set_filter_table_entry(int table, unsigned char entry);
1696 /*
1697 * eth_port_init - Initialize the Ethernet port driver
1699 * DESCRIPTION:
1700 * This function prepares the ethernet port to start its activity:
1701 * 1) Completes the ethernet port driver struct initialization toward port
1702 * start routine.
1703 * 2) Resets the device to a quiescent state in case of warm reboot.
1704 * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM.
1705 * 4) Clean MAC tables. The reset status of those tables is unknown.
1706 * 5) Set PHY address.
1707 * Note: Call this routine prior to eth_port_start routine and after
1708 * setting user values in the user fields of Ethernet port control
1709 * struct.
1711 * INPUT:
1712 * struct mv643xx_private *mp Ethernet port control struct
1714 * OUTPUT:
1715 * See description.
1717 * RETURN:
1718 * None.
1719 */
1720 static void eth_port_init(struct mv643xx_private *mp)
1722 mp->rx_resource_err = 0;
1724 eth_port_reset(mp->port_num);
1726 eth_port_init_mac_tables(mp->port_num);
1729 /*
1730 * eth_port_start - Start the Ethernet port activity.
1732 * DESCRIPTION:
1733 * This routine prepares the Ethernet port for Rx and Tx activity:
1734 * 1. Initialize the Tx and Rx Current Descriptor Pointers for each queue
1735 * whose descriptor ring has been initialized (using
1736 * ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx)
1737 * 2. Initialize and enable the Ethernet configuration port by writing to
1738 * the port's configuration and command registers.
1739 * 3. Initialize and enable the SDMA by writing to the SDMA's
1740 * configuration and command registers. After completing these steps,
1741 * the ethernet port SDMA can start to perform Rx and Tx activities.
1743 * Note: Each Rx and Tx queue descriptor's list must be initialized prior
1744 * to calling this function (use ether_init_tx_desc_ring for Tx queues
1745 * and ether_init_rx_desc_ring for Rx queues).
1747 * INPUT:
1748 * dev - a pointer to the required interface
1750 * OUTPUT:
1751 * Ethernet port is ready to receive and transmit.
1753 * RETURN:
1754 * None.
1755 */
1756 static void eth_port_start(struct net_device *dev)
1758 struct mv643xx_private *mp = netdev_priv(dev);
1759 unsigned int port_num = mp->port_num;
1760 int tx_curr_desc, rx_curr_desc;
1761 u32 pscr;
1762 struct ethtool_cmd ethtool_cmd;
1764 /* Assignment of Tx CTRP of given queue */
1765 tx_curr_desc = mp->tx_curr_desc_q;
1766 mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
1767 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
1769 /* Assignment of Rx CRDP of given queue */
1770 rx_curr_desc = mp->rx_curr_desc_q;
1771 mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
1772 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
1774 /* Add the assigned Ethernet address to the port's address table */
1775 eth_port_uc_addr_set(port_num, dev->dev_addr);
1777 /* Assign port configuration and command. */
1778 mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num),
1779 MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE);
1781 mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num),
1782 MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE);
1784 pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
1786 pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS);
1787 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
1789 pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1790 MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII |
1791 MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX |
1792 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
1793 MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED;
1795 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
1797 pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE;
1798 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
1800 /* Assign port SDMA configuration */
1801 mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num),
1802 MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE);
1804 /* Enable port Rx. */
1805 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
1807 /* Disable port bandwidth limits by clearing MTU register */
1808 mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);
1810 /* save phy settings across reset */
1811 mv643xx_get_settings(dev, &ethtool_cmd);
1812 ethernet_phy_reset(mp->port_num);
1813 mv643xx_set_settings(dev, &ethtool_cmd);
1816 /*
1817 * eth_port_uc_addr_set - This function sets the port Unicast address.
1819 * DESCRIPTION:
1820 * This function sets the port Ethernet MAC address.
1822 * INPUT:
1823 * unsigned int eth_port_num Port number.
1824 * char * p_addr Address to be set
1826 * OUTPUT:
1827 * Set MAC address low and high registers. Also calls
1828 * eth_port_set_filter_table_entry() to set the unicast
1829 * table with the proper information.
1831 * RETURN:
1832 * N/A.
1834 */
1835 static void eth_port_uc_addr_set(unsigned int eth_port_num,
1836 unsigned char *p_addr)
1838 unsigned int mac_h;
1839 unsigned int mac_l;
1840 int table;
1842 mac_l = (p_addr[4] << 8) | (p_addr[5]);
1843 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
1844 (p_addr[3] << 0);
1846 mv_write(MV643XX_ETH_MAC_ADDR_LOW(eth_port_num), mac_l);
1847 mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h);
1849 /* Accept frames of this address */
1850 table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(eth_port_num);
1851 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
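/*
 * Example (illustrative only, not from the hardware spec): for the address
 * 00:11:22:33:44:55 the code above computes
 *
 *	mac_h = 0x00112233	(p_addr[0..3], written to MAC_ADDR_HIGH)
 *	mac_l = 0x00004455	(p_addr[4..5], written to MAC_ADDR_LOW)
 *
 * and marks unicast filter table entry p_addr[5] & 0x0f = 0x05 as "accept".
 */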
1854 /*
1855 * eth_port_uc_addr_get - This function retrieves the port Unicast address
1856 * (MAC address) from the ethernet hw registers.
1858 * DESCRIPTION:
1859 * This function retrieves the port Ethernet MAC address.
1861 * INPUT:
1862 * struct net_device *dev Pointer to the network device.
1863 * char *p_addr Pointer where the MAC address is stored.
1865 * OUTPUT:
1866 * Copy the MAC address to the location pointed to by p_addr.
1868 * RETURN:
1869 * N/A.
1871 */
1872 static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr)
1874 struct mv643xx_private *mp = netdev_priv(dev);
1875 unsigned int mac_h;
1876 unsigned int mac_l;
1878 mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(mp->port_num));
1879 mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(mp->port_num));
1881 p_addr[0] = (mac_h >> 24) & 0xff;
1882 p_addr[1] = (mac_h >> 16) & 0xff;
1883 p_addr[2] = (mac_h >> 8) & 0xff;
1884 p_addr[3] = mac_h & 0xff;
1885 p_addr[4] = (mac_l >> 8) & 0xff;
1886 p_addr[5] = mac_l & 0xff;
1889 /*
1890 * The entries in each table are indexed by a hash of a packet's MAC
1891 * address. One bit in each entry determines whether the packet is
1892 * accepted. There are 4 entries (each 8 bits wide) in each register
1893 * of the table. The bits in each entry are defined as follows:
1894 * 0 Accept=1, Drop=0
1895 * 3-1 Queue (ETH_Q0=0)
1896 * 7-4 Reserved = 0;
1897 */
1898 static void eth_port_set_filter_table_entry(int table, unsigned char entry)
1900 unsigned int table_reg;
1901 unsigned int tbl_offset;
1902 unsigned int reg_offset;
1904 tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */
1905 reg_offset = entry % 4; /* Entry offset within the register */
1907 /* Set "accepts frame bit" at specified table entry */
1908 table_reg = mv_read(table + tbl_offset);
1909 table_reg |= 0x01 << (8 * reg_offset);
1910 mv_write(table + tbl_offset, table_reg);
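/*
 * Example (illustrative only): entry 0x0b gives tbl_offset = 8 and
 * reg_offset = 3, so the code above sets bit 24 (0x01 << 24) of the
 * register at table + 8, i.e. the "accept" bit of the fourth 8-bit entry
 * held in the third 32-bit register of the table.
 */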
1913 /*
1914 * eth_port_mc_addr - Multicast address settings.
1916 * The MV device supports multicast using two tables:
1917 * 1) Special Multicast Table for MAC addresses of the form
1918 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1919 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1920 * Table entries in the DA-Filter table.
1921 * 2) Other Multicast Table for multicast addresses of any other type.
1922 * An 8-bit CRC of the address is used as an index to the Other
1923 * Multicast Table entries in the DA-Filter table. This function
1924 * calculates that CRC value. In either case,
1925 * eth_port_set_filter_table_entry() is then called to set the actual table entry.
1926 */
1927 static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
1929 unsigned int mac_h;
1930 unsigned int mac_l;
1931 unsigned char crc_result = 0;
1932 int table;
1933 int mac_array[48];
1934 int crc[8];
1935 int i;
1937 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
1938 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
1939 table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
1940 (eth_port_num);
1941 eth_port_set_filter_table_entry(table, p_addr[5]);
1942 return;
1945 /* Calculate CRC-8 out of the given address */
1946 mac_h = (p_addr[0] << 8) | (p_addr[1]);
1947 mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
1948 (p_addr[4] << 8) | (p_addr[5] << 0);
1950 for (i = 0; i < 32; i++)
1951 mac_array[i] = (mac_l >> i) & 0x1;
1952 for (i = 32; i < 48; i++)
1953 mac_array[i] = (mac_h >> (i - 32)) & 0x1;
1955 crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
1956 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
1957 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
1958 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
1959 mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];
1961 crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
1962 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
1963 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
1964 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
1965 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
1966 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
1967 mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];
1969 crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
1970 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
1971 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
1972 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
1973 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
1974 mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];
1976 crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
1977 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
1978 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
1979 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
1980 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
1981 mac_array[3] ^ mac_array[2] ^ mac_array[1];
1983 crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
1984 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
1985 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
1986 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
1987 mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
1988 mac_array[3] ^ mac_array[2];
1990 crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
1991 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
1992 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
1993 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
1994 mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
1995 mac_array[4] ^ mac_array[3];
1997 crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
1998 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
1999 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
2000 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
2001 mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
2002 mac_array[4];
2004 crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
2005 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
2006 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
2007 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
2008 mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];
2010 for (i = 0; i < 8; i++)
2011 crc_result = crc_result | (crc[i] << i);
2013 table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
2014 eth_port_set_filter_table_entry(table, crc_result);
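/*
 * The unrolled XOR network above computes an 8-bit CRC of the 48 address
 * bits. For reference, a bit-serial CRC-8 over the six address bytes
 * (polynomial x^8 + x^2 + x + 1, zero initial value, MSB first) is
 * sketched below; it is illustrative only and has not been verified to
 * reproduce the exact bit ordering used by the table above.
 */
#if 0	/* illustrative sketch, not compiled */
static unsigned char eth_port_mc_addr_crc8(const unsigned char *p_addr)
{
	unsigned char crc = 0;
	int i, j;

	for (i = 0; i < 6; i++) {
		crc ^= p_addr[i];
		for (j = 0; j < 8; j++)
			crc = (crc & 0x80) ? (crc << 1) ^ 0x07 : (crc << 1);
	}
	return crc;
}
#endif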
2017 /*
2018 * Set the entire multicast list based on dev->mc_list.
2019 */
2020 static void eth_port_set_multicast_list(struct net_device *dev)
2023 struct dev_mc_list *mc_list;
2024 int i;
2025 int table_index;
2026 struct mv643xx_private *mp = netdev_priv(dev);
2027 unsigned int eth_port_num = mp->port_num;
2029 /* If the device is in promiscuous mode or in all multicast mode,
2030 * we will fully populate both multicast tables with accept.
2031 * This is guaranteed to yield a match on all multicast addresses...
2032 */
2033 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
2034 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2035 /* Set all entries in DA filter special multicast
2036 * table (Ex_dFSMT)
2037 * Set for ETH_Q0 for now
2038 * Bits
2039 * 0 Accept=1, Drop=0
2040 * 3-1 Queue ETH_Q0=0
2041 * 7-4 Reserved = 0;
2042 */
2043 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2045 /* Set all entries in DA filter other multicast
2046 * table (Ex_dFOMT)
2047 * Set for ETH_Q0 for now
2048 * Bits
2049 * 0 Accept=1, Drop=0
2050 * 3-1 Queue ETH_Q0=0
2051 * 7-4 Reserved = 0;
2052 */
2053 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2055 return;
2058 /* We will clear out multicast tables every time we get the list.
2059 * Then add the entire new list...
2060 */
2061 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2062 /* Clear DA filter special multicast table (Ex_dFSMT) */
2063 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2064 (eth_port_num) + table_index, 0);
2066 /* Clear DA filter other multicast table (Ex_dFOMT) */
2067 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2068 (eth_port_num) + table_index, 0);
2071 /* Get pointer to net_device multicast list and add each one... */
2072 for (i = 0, mc_list = dev->mc_list;
2073 (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
2074 i++, mc_list = mc_list->next)
2075 if (mc_list->dmi_addrlen == 6)
2076 eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
2079 /*
2080 * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
2082 * DESCRIPTION:
2083 * Go through all the DA filter tables (Unicast, Special Multicast &
2084 * Other Multicast) and set each entry to 0.
2086 * INPUT:
2087 * unsigned int eth_port_num Ethernet Port number.
2089 * OUTPUT:
2090 * Multicast and Unicast packets are rejected.
2092 * RETURN:
2093 * None.
2094 */
2095 static void eth_port_init_mac_tables(unsigned int eth_port_num)
2097 int table_index;
2099 /* Clear DA filter unicast table (Ex_dFUT) */
2100 for (table_index = 0; table_index <= 0xC; table_index += 4)
2101 mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
2102 (eth_port_num) + table_index, 0);
2104 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2105 /* Clear DA filter special multicast table (Ex_dFSMT) */
2106 mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2107 (eth_port_num) + table_index, 0);
2108 /* Clear DA filter other multicast table (Ex_dFOMT) */
2109 mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2110 (eth_port_num) + table_index, 0);
2114 /*
2115 * eth_clear_mib_counters - Clear all MIB counters
2117 * DESCRIPTION:
2118 * This function clears all MIB counters of a specific ethernet port.
2119 * A read from the MIB counter will reset the counter.
2121 * INPUT:
2122 * unsigned int eth_port_num Ethernet Port number.
2124 * OUTPUT:
2125 * After reading all MIB counters, the counters reset.
2127 * RETURN:
2128 * None.
2130 */
2131 static void eth_clear_mib_counters(unsigned int eth_port_num)
2133 int i;
2135 /* Perform dummy reads from MIB counters */
2136 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2137 i += 4)
2138 mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
2141 static inline u32 read_mib(struct mv643xx_private *mp, int offset)
2143 return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset);
2146 static void eth_update_mib_counters(struct mv643xx_private *mp)
2148 struct mv643xx_mib_counters *p = &mp->mib_counters;
2149 int offset;
2151 p->good_octets_received +=
2152 read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
2153 p->good_octets_received +=
2154 (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32;
2156 for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
2157 offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
2158 offset += 4)
2159 *(u32 *)((char *)p + offset) = read_mib(mp, offset);
2161 p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
2162 p->good_octets_sent +=
2163 (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32;
2165 for (offset = ETH_MIB_GOOD_FRAMES_SENT;
2166 offset <= ETH_MIB_LATE_COLLISION;
2167 offset += 4)
2168 *(u32 *)((char *)p + offset) = read_mib(mp, offset);
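/*
 * The two copy loops above store each hardware counter at the offset of
 * the matching field inside struct mv643xx_mib_counters, so they rely on
 * that structure mirroring the ETH_MIB_* register layout. Assuming the
 * field order does mirror the hardware, a compile-time check along these
 * lines (illustrative only, not part of the driver) would document it:
 *
 *	BUILD_BUG_ON(offsetof(struct mv643xx_mib_counters, good_frames_sent)
 *			!= ETH_MIB_GOOD_FRAMES_SENT);
 */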
2171 /*
2172 * ethernet_phy_detect - Detect whether a phy is present
2174 * DESCRIPTION:
2175 * This function tests whether there is a PHY present on
2176 * the specified port.
2178 * INPUT:
2179 * unsigned int eth_port_num Ethernet Port number.
2181 * OUTPUT:
2182 * None
2184 * RETURN:
2185 * 0 on success
2186 * -ENODEV on failure
2188 */
2189 static int ethernet_phy_detect(unsigned int port_num)
2191 unsigned int phy_reg_data0;
2192 int auto_neg;
2194 eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
2195 auto_neg = phy_reg_data0 & 0x1000;
2196 phy_reg_data0 ^= 0x1000; /* invert auto_neg */
2197 eth_port_write_smi_reg(port_num, 0, phy_reg_data0);
2199 eth_port_read_smi_reg(port_num, 0, &phy_reg_data0);
2200 if ((phy_reg_data0 & 0x1000) == auto_neg)
2201 return -ENODEV; /* change didn't take */
2203 phy_reg_data0 ^= 0x1000;
2204 eth_port_write_smi_reg(port_num, 0, phy_reg_data0);
2205 return 0;
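/*
 * The probe above works by flipping the auto-negotiation enable bit
 * (bit 12 of MII register 0): if the inverted value does not read back,
 * nothing is answering at that SMI address and -ENODEV is returned;
 * otherwise the original bit value is restored and 0 is returned.
 */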
2208 /*
2209 * ethernet_phy_get - Get the ethernet port PHY address.
2211 * DESCRIPTION:
2212 * This routine returns the given ethernet port PHY address.
2214 * INPUT:
2215 * unsigned int eth_port_num Ethernet Port number.
2217 * OUTPUT:
2218 * None.
2220 * RETURN:
2221 * PHY address.
2223 */
2224 static int ethernet_phy_get(unsigned int eth_port_num)
2226 unsigned int reg_data;
2228 reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
2230 return ((reg_data >> (5 * eth_port_num)) & 0x1f);
2233 /*
2234 * ethernet_phy_set - Set the ethernet port PHY address.
2236 * DESCRIPTION:
2237 * This routine sets the given ethernet port PHY address.
2239 * INPUT:
2240 * unsigned int eth_port_num Ethernet Port number.
2241 * int phy_addr PHY address.
2243 * OUTPUT:
2244 * None.
2246 * RETURN:
2247 * None.
2249 */
2250 static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
2252 u32 reg_data;
2253 int addr_shift = 5 * eth_port_num;
2255 reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
2256 reg_data &= ~(0x1f << addr_shift);
2257 reg_data |= (phy_addr & 0x1f) << addr_shift;
2258 mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data);
2261 /*
2262 * ethernet_phy_reset - Reset Ethernet port PHY.
2264 * DESCRIPTION:
2265 * This routine utilizes the SMI interface to reset the ethernet port PHY.
2267 * INPUT:
2268 * unsigned int eth_port_num Ethernet Port number.
2270 * OUTPUT:
2271 * The PHY is reset.
2273 * RETURN:
2274 * None.
2276 */
2277 static void ethernet_phy_reset(unsigned int eth_port_num)
2279 unsigned int phy_reg_data;
2281 /* Reset the PHY */
2282 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
2283 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
2284 eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);
2286 /* wait for PHY to come out of reset */
2287 do {
2288 udelay(1);
2289 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
2290 } while (phy_reg_data & 0x8000);
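/*
 * The wait loop above spins until the PHY clears the reset bit and has no
 * upper bound. A bounded variant using the same PHY_WAIT_* constants as
 * the SMI helpers below is sketched here for illustration only; it is not
 * part of the driver.
 */
#if 0	/* illustrative sketch, not compiled */
static void ethernet_phy_reset_bounded(unsigned int eth_port_num)
{
	unsigned int phy_reg_data;
	int i;

	eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
	phy_reg_data |= 0x8000;		/* bit 15: PHY software reset */
	eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data);

	/* Poll for the reset bit to clear, but give up eventually */
	for (i = 0; i < PHY_WAIT_ITERATIONS; i++) {
		udelay(PHY_WAIT_MICRO_SECONDS);
		eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data);
		if (!(phy_reg_data & 0x8000))
			break;
	}
}
#endif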
2293 static void mv643xx_eth_port_enable_tx(unsigned int port_num,
2294 unsigned int queues)
2296 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
2299 static void mv643xx_eth_port_enable_rx(unsigned int port_num,
2300 unsigned int queues)
2302 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
2305 static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
2307 u32 queues;
2309 /* Stop Tx port activity. Check port Tx activity. */
2310 queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
2311 & 0xFF;
2312 if (queues) {
2313 /* Issue stop command for active queues only */
2314 mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
2315 (queues << 8));
2317 /* Wait for all Tx activity to terminate. */
2318 /* Check port cause register that all Tx queues are stopped */
2319 while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
2320 & 0xFF)
2321 udelay(PHY_WAIT_MICRO_SECONDS);
2323 /* Wait for Tx FIFO to empty */
2324 while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) &
2325 ETH_PORT_TX_FIFO_EMPTY)
2326 udelay(PHY_WAIT_MICRO_SECONDS);
2329 return queues;
2332 static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
2334 u32 queues;
2336 /* Stop Rx port activity. Check port Rx activity. */
2337 queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
2338 & 0xFF;
2339 if (queues) {
2340 /* Issue stop command for active queues only */
2341 mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
2342 (queues << 8));
2344 /* Wait for all Rx activity to terminate. */
2345 /* Check port cause register that all Rx queues are stopped */
2346 while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
2347 & 0xFF)
2348 udelay(PHY_WAIT_MICRO_SECONDS);
2351 return queues;
2354 /*
2355 * eth_port_reset - Reset Ethernet port
2357 * DESCRIPTION:
2358 * This routine resets the chip by aborting any SDMA engine activity and
2359 * clearing the MIB counters. The Receiver and the Transmit unit are in
2360 * idle state after this command is performed and the port is disabled.
2362 * INPUT:
2363 * unsigned int eth_port_num Ethernet Port number.
2365 * OUTPUT:
2366 * Channel activity is halted.
2368 * RETURN:
2369 * None.
2371 */
2372 static void eth_port_reset(unsigned int port_num)
2374 unsigned int reg_data;
2376 mv643xx_eth_port_disable_tx(port_num);
2377 mv643xx_eth_port_disable_rx(port_num);
2379 /* Clear all MIB counters */
2380 eth_clear_mib_counters(port_num);
2382 /* Reset the Enable bit in the Configuration Register */
2383 reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
2384 reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE |
2385 MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
2386 MV643XX_ETH_FORCE_LINK_PASS);
2387 mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
2391 /*
2392 * eth_port_read_smi_reg - Read PHY registers
2394 * DESCRIPTION:
2395 * This routine utilizes the SMI interface to interact with the PHY in
2396 * order to perform a PHY register read.
2398 * INPUT:
2399 * unsigned int port_num Ethernet Port number.
2400 * unsigned int phy_reg PHY register address offset.
2401 * unsigned int *value Register value buffer.
2403 * OUTPUT:
2404 * Write the value of a specified PHY register into given buffer.
2406 * RETURN:
2407 * None. The value read is returned through *value; on timeout the
2408 * buffer is left unchanged and an error is printed.
2410 */
2411 static void eth_port_read_smi_reg(unsigned int port_num,
2412 unsigned int phy_reg, unsigned int *value)
2414 int phy_addr = ethernet_phy_get(port_num);
2415 unsigned long flags;
2416 int i;
2418 /* the SMI register is a shared resource */
2419 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2421 /* wait for the SMI register to become available */
2422 for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
2423 if (i == PHY_WAIT_ITERATIONS) {
2424 printk("mv643xx PHY busy timeout, port %d\n", port_num);
2425 goto out;
2427 udelay(PHY_WAIT_MICRO_SECONDS);
2430 mv_write(MV643XX_ETH_SMI_REG,
2431 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
2433 /* now wait for the data to be valid */
2434 for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
2435 if (i == PHY_WAIT_ITERATIONS) {
2436 printk("mv643xx PHY read timeout, port %d\n", port_num);
2437 goto out;
2439 udelay(PHY_WAIT_MICRO_SECONDS);
2442 *value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
2443 out:
2444 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
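/*
 * Layout of the SMI register as used above and in the write helper below
 * (derived from the shifts in this file): bits 15:0 carry the read/write
 * data, bits 20:16 the PHY address and bits 25:21 the PHY register
 * number, combined with the ETH_SMI_OPCODE_READ/WRITE command bits and
 * the ETH_SMI_BUSY / ETH_SMI_READ_VALID status bits polled above.
 */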
2447 /*
2448 * eth_port_write_smi_reg - Write to PHY registers
2450 * DESCRIPTION:
2451 * This routine utilizes the SMI interface to interact with the PHY in
2452 * order to perform writes to PHY registers.
2454 * INPUT:
2455 * unsigned int eth_port_num Ethernet Port number.
2456 * unsigned int phy_reg PHY register address offset.
2457 * unsigned int value Register value.
2459 * OUTPUT:
2460 * Write the given value to the specified PHY register.
2462 * RETURN:
2463 * None. On timeout the write is skipped and an error is printed.
2466 */
2467 static void eth_port_write_smi_reg(unsigned int eth_port_num,
2468 unsigned int phy_reg, unsigned int value)
2470 int phy_addr;
2471 int i;
2472 unsigned long flags;
2474 phy_addr = ethernet_phy_get(eth_port_num);
2476 /* the SMI register is a shared resource */
2477 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2479 /* wait for the SMI register to become available */
2480 for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
2481 if (i == PHY_WAIT_ITERATIONS) {
2482 printk("mv643xx PHY busy timeout, port %d\n",
2483 eth_port_num);
2484 goto out;
2486 udelay(PHY_WAIT_MICRO_SECONDS);
2489 mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
2490 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2491 out:
2492 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
2495 /*
2496 * Wrappers for MII support library.
2497 */
2498 static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location)
2500 int val;
2501 struct mv643xx_private *mp = netdev_priv(dev);
2503 eth_port_read_smi_reg(mp->port_num, location, &val);
2504 return val;
2507 static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val)
2509 struct mv643xx_private *mp = netdev_priv(dev);
2510 eth_port_write_smi_reg(mp->port_num, location, val);
2513 /*
2514 * eth_port_receive - Get received information from Rx ring.
2516 * DESCRIPTION:
2517 * This routine returns the received data to the caller. There is no
2518 * data copying during routine operation. All information is returned
2519 * using pointer to packet information struct passed from the caller.
2520 * If the routine exhausts Rx ring resources then the resource error flag
2521 * is set.
2523 * INPUT:
2524 * struct mv643xx_private *mp Ethernet Port Control struct.
2525 * struct pkt_info *p_pkt_info User packet buffer.
2527 * OUTPUT:
2528 * Rx ring current and used indexes are updated.
2530 * RETURN:
2531 * ETH_ERROR in case the routine can not access Rx desc ring.
2532 * ETH_QUEUE_FULL if Rx ring resources are exhausted.
2533 * ETH_END_OF_JOB if there is no received data.
2534 * ETH_OK otherwise.
2535 */
2536 static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
2537 struct pkt_info *p_pkt_info)
2539 int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
2540 volatile struct eth_rx_desc *p_rx_desc;
2541 unsigned int command_status;
2542 unsigned long flags;
2544 /* Do not process Rx ring in case of Rx ring resource error */
2545 if (mp->rx_resource_err)
2546 return ETH_QUEUE_FULL;
2548 spin_lock_irqsave(&mp->lock, flags);
2550 /* Get the Rx Desc ring 'curr' and 'used' indexes */
2551 rx_curr_desc = mp->rx_curr_desc_q;
2552 rx_used_desc = mp->rx_used_desc_q;
2554 p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc];
2556 /* The following parameters are used to save readings from memory */
2557 command_status = p_rx_desc->cmd_sts;
2558 rmb();
2560 /* Nothing to receive... */
2561 if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
2562 spin_unlock_irqrestore(&mp->lock, flags);
2563 return ETH_END_OF_JOB;
2566 p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
2567 p_pkt_info->cmd_sts = command_status;
2568 p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET;
2569 p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
2570 p_pkt_info->l4i_chk = p_rx_desc->buf_size;
2572 /*
2573 * Clean the return info field to indicate that the
2574 * packet has been moved to the upper layers
2575 */
2576 mp->rx_skb[rx_curr_desc] = NULL;
2578 /* Update current index in data structure */
2579 rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size;
2580 mp->rx_curr_desc_q = rx_next_curr_desc;
2582 /* Rx descriptors exhausted. Set the Rx ring resource error flag */
2583 if (rx_next_curr_desc == rx_used_desc)
2584 mp->rx_resource_err = 1;
2586 spin_unlock_irqrestore(&mp->lock, flags);
2588 return ETH_OK;
2591 /*
2592 * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring.
2594 * DESCRIPTION:
2595 * This routine returns a Rx buffer back to the Rx ring. It retrieves the
2596 * next 'used' descriptor and attaches the returned buffer to it.
2597 * In case the Rx ring was in "resource error" condition, where there are
2598 * no available Rx resources, the function resets the resource error flag.
2600 * INPUT:
2601 * struct mv643xx_private *mp Ethernet Port Control struct.
2602 * struct pkt_info *p_pkt_info Information on returned buffer.
2604 * OUTPUT:
2605 * New available Rx resource in Rx descriptor ring.
2607 * RETURN:
2608 * ETH_ERROR in case the routine can not access Rx desc ring.
2609 * ETH_OK otherwise.
2610 */
2611 static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
2612 struct pkt_info *p_pkt_info)
2614 int used_rx_desc; /* Where to return Rx resource */
2615 volatile struct eth_rx_desc *p_used_rx_desc;
2616 unsigned long flags;
2618 spin_lock_irqsave(&mp->lock, flags);
2620 /* Get 'used' Rx descriptor */
2621 used_rx_desc = mp->rx_used_desc_q;
2622 p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc];
2624 p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr;
2625 p_used_rx_desc->buf_size = p_pkt_info->byte_cnt;
2626 mp->rx_skb[used_rx_desc] = p_pkt_info->return_info;
2628 /* Flush the write pipe */
2630 /* Return the descriptor to DMA ownership */
2631 wmb();
2632 p_used_rx_desc->cmd_sts =
2633 ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
2634 wmb();
2636 /* Move the used descriptor pointer to the next descriptor */
2637 mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size;
2639 /* Any Rx return cancels the Rx resource error status */
2640 mp->rx_resource_err = 0;
2642 spin_unlock_irqrestore(&mp->lock, flags);
2644 return ETH_OK;
2647 /************* Begin ethtool support *************************/
2649 struct mv643xx_stats {
2650 char stat_string[ETH_GSTRING_LEN];
2651 int sizeof_stat;
2652 int stat_offset;
2653 };
2655 #define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \
2656 offsetof(struct mv643xx_private, m)
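/*
 * Example (illustrative only): MV643XX_STAT() expands the "collision"
 * entry below to
 *
 *	{ "collision",
 *	  sizeof(((struct mv643xx_private *)0)->mib_counters.collision),
 *	  offsetof(struct mv643xx_private, mib_counters.collision) }
 *
 * so each row records the field's size and its offset within
 * struct mv643xx_private, which mv643xx_get_ethtool_stats() later uses to
 * decide between a 32-bit and a 64-bit read.
 */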
2658 static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
2659 { "rx_packets", MV643XX_STAT(stats.rx_packets) },
2660 { "tx_packets", MV643XX_STAT(stats.tx_packets) },
2661 { "rx_bytes", MV643XX_STAT(stats.rx_bytes) },
2662 { "tx_bytes", MV643XX_STAT(stats.tx_bytes) },
2663 { "rx_errors", MV643XX_STAT(stats.rx_errors) },
2664 { "tx_errors", MV643XX_STAT(stats.tx_errors) },
2665 { "rx_dropped", MV643XX_STAT(stats.rx_dropped) },
2666 { "tx_dropped", MV643XX_STAT(stats.tx_dropped) },
2667 { "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) },
2668 { "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) },
2669 { "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) },
2670 { "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) },
2671 { "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) },
2672 { "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) },
2673 { "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) },
2674 { "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) },
2675 { "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) },
2676 { "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) },
2677 { "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) },
2678 { "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) },
2679 { "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) },
2680 { "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) },
2681 { "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) },
2682 { "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) },
2683 { "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) },
2684 { "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) },
2685 { "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) },
2686 { "fc_sent", MV643XX_STAT(mib_counters.fc_sent) },
2687 { "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) },
2688 { "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) },
2689 { "undersize_received", MV643XX_STAT(mib_counters.undersize_received) },
2690 { "fragments_received", MV643XX_STAT(mib_counters.fragments_received) },
2691 { "oversize_received", MV643XX_STAT(mib_counters.oversize_received) },
2692 { "jabber_received", MV643XX_STAT(mib_counters.jabber_received) },
2693 { "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) },
2694 { "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) },
2695 { "collision", MV643XX_STAT(mib_counters.collision) },
2696 { "late_collision", MV643XX_STAT(mib_counters.late_collision) },
2697 };
2699 #define MV643XX_STATS_LEN \
2700 sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats)
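/*
 * MV643XX_STATS_LEN is simply the number of rows in the table above;
 * ARRAY_SIZE(mv643xx_gstrings_stats) would express the same thing.
 */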
2702 static void mv643xx_get_drvinfo(struct net_device *netdev,
2703 struct ethtool_drvinfo *drvinfo)
2705 strncpy(drvinfo->driver, mv643xx_driver_name, 32);
2706 strncpy(drvinfo->version, mv643xx_driver_version, 32);
2707 strncpy(drvinfo->fw_version, "N/A", 32);
2708 strncpy(drvinfo->bus_info, "mv643xx", 32);
2709 drvinfo->n_stats = MV643XX_STATS_LEN;
2712 static int mv643xx_get_stats_count(struct net_device *netdev)
2714 return MV643XX_STATS_LEN;
2717 static void mv643xx_get_ethtool_stats(struct net_device *netdev,
2718 struct ethtool_stats *stats, uint64_t *data)
2720 struct mv643xx_private *mp = netdev->priv;
2721 int i;
2723 eth_update_mib_counters(mp);
2725 for (i = 0; i < MV643XX_STATS_LEN; i++) {
2726 char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset;
2727 data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
2728 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
2732 static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset,
2733 uint8_t *data)
2735 int i;
2737 switch(stringset) {
2738 case ETH_SS_STATS:
2739 for (i=0; i < MV643XX_STATS_LEN; i++) {
2740 memcpy(data + i * ETH_GSTRING_LEN,
2741 mv643xx_gstrings_stats[i].stat_string,
2742 ETH_GSTRING_LEN);
2744 break;
2748 static u32 mv643xx_eth_get_link(struct net_device *dev)
2750 struct mv643xx_private *mp = netdev_priv(dev);
2752 return mii_link_ok(&mp->mii);
2755 static int mv643xx_eth_nway_restart(struct net_device *dev)
2757 struct mv643xx_private *mp = netdev_priv(dev);
2759 return mii_nway_restart(&mp->mii);
2762 static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2764 struct mv643xx_private *mp = netdev_priv(dev);
2766 return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL);
2769 static struct ethtool_ops mv643xx_ethtool_ops = {
2770 .get_settings = mv643xx_get_settings,
2771 .set_settings = mv643xx_set_settings,
2772 .get_drvinfo = mv643xx_get_drvinfo,
2773 .get_link = mv643xx_eth_get_link,
2774 .get_sg = ethtool_op_get_sg,
2775 .set_sg = ethtool_op_set_sg,
2776 .get_strings = mv643xx_get_strings,
2777 .get_stats_count = mv643xx_get_stats_count,
2778 .get_ethtool_stats = mv643xx_get_ethtool_stats,
2782 .nway_reset = mv643xx_eth_nway_restart,
2783 };
2785 /************* End ethtool support *************************/