LKML Archive on lore.kernel.org
help / color / mirror / Atom feed
* [patch 0/3] Add tsi108 On chip Ethernet device driver support
       [not found] ` <1157962200.10526.10.camel@localhost.localdomain>
@ 2006-09-12  8:55   ` Zang Roy-r61911
  2006-09-21  4:04     ` [patch 0/3 v2] " Zang Roy-r61911
  2006-09-12  8:55   ` [patch 1/3] Add tsi108 on Chip " Zang Roy-r61911
  2006-09-12  8:55   ` [patch 3/3] " Zang Roy-r61911
  2 siblings, 1 reply; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-12  8:55 UTC (permalink / raw)
  To: Andrew Morton, jgarzik; +Cc: netdev, linux-kernel

It seems that my previous patch was blocked because of the size :-).
This series of patches adds tsi108/9 on chip Ethernet controller support.

1/3 : Config and Makefile modification.
2/3 : Header file
3/3 : C file

Roy


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [patch 1/3]  Add tsi108 on Chip Ethernet device driver support
       [not found] ` <1157962200.10526.10.camel@localhost.localdomain>
  2006-09-12  8:55   ` [patch 0/3] Add tsi108 On chip Ethernet device driver support Zang Roy-r61911
@ 2006-09-12  8:55   ` Zang Roy-r61911
  2006-09-21  4:04     ` [patch 1/3 v2] Add tsi108 On " Zang Roy-r61911
  2006-09-12  8:55   ` [patch 3/3] " Zang Roy-r61911
  2 siblings, 1 reply; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-12  8:55 UTC (permalink / raw)
  To: Andrew Morton, jgarzik; +Cc: netdev, linux-kernel

Update the Kconfig and Makefile

Signed-off-by: Alexandre Bounine <alexandreb@tundra.com>
Signed-off-by: Roy Zang	<tie-fei.zang@freescale.com> 

---
 drivers/net/Kconfig      |    8 
 drivers/net/Makefile     |    1 
 2 files changed, 9 insertions(+), 0 deletions(-)

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a2bd811..eb17060 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2221,6 +2221,14 @@ config SPIDER_NET
 	  This driver supports the Gigabit Ethernet chips present on the
 	  Cell Processor-Based Blades from IBM.
 
+config TSI108_ETH
+	   tristate "Tundra TSI108 gigabit Ethernet support"
+	   depends on TSI108_BRIDGE
+	   help
+	     This driver supports Tundra TSI108 gigabit Ethernet ports.
+	     To compile this driver as a module, choose M here: the module
+	     will be called tsi108_eth.
+
 config GIANFAR
 	tristate "Gianfar Ethernet"
 	depends on 85xx || 83xx || PPC_86xx
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8427bf9..da199e7 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -112,6 +112,7 @@ obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
+obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 
 obj-$(CONFIG_PPP) += ppp_generic.o slhc.o




^ permalink raw reply	[flat|nested] 23+ messages in thread

* [patch 3/3] Add tsi108 On Chip Ethernet device driver support
       [not found] ` <1157962200.10526.10.camel@localhost.localdomain>
  2006-09-12  8:55   ` [patch 0/3] Add tsi108 On chip Ethernet device driver support Zang Roy-r61911
  2006-09-12  8:55   ` [patch 1/3] Add tsi108 on Chip " Zang Roy-r61911
@ 2006-09-12  8:55   ` Zang Roy-r61911
  2006-09-12 10:07     ` Arjan van de Ven
                       ` (2 more replies)
  2 siblings, 3 replies; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-12  8:55 UTC (permalink / raw)
  To: Andrew Morton, jgarzik; +Cc: netdev, linux-kernel

The driver for tsi108/9 on chip Ethernet port


Signed-off-by: Alexandre Bounine<alexandreb@tundra.com>
Signed-off-by: Roy Zang	<tie-fei.zang@freescale.com> 

---
 drivers/net/tsi108_eth.c | 1752 ++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 1752 insertions(+), 0 deletions(-)

diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
new file mode 100644
index 0000000..eece2bb
--- /dev/null
+++ b/drivers/net/tsi108_eth.c
@@ -0,0 +1,1752 @@
+/*******************************************************************************
+  
+  Copyright(c) 2005 Tundra Semiconductor Corporation.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+*******************************************************************************/
+
+/* This driver is based on the driver code originally developed	
+ * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
+ * scott.wood@timesys.com  * Copyright (C) 2003 TimeSys Corporation
+ *
+ * Currently changes from original version are:
+ * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
+ * - modifications to handle two ports independently and support for
+ *   additional PHY devices (alexandre.bounine@tundra.com)
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/tsi108.h>
+
+#include "tsi108_eth.h"
+
+#undef DEBUG
+#ifdef DEBUG
+#define DBG(fmt...) do { printk(fmt); } while(0)
+#else
+#define DBG(fmt...) do { } while(0)
+#endif
+
+typedef struct net_device net_device;
+typedef struct sk_buff sk_buff;
+
+#define MII_READ_DELAY 10000	/* max link wait time in msec */
+
+#define TSI108_RXRING_LEN     256
+
+/* NOTE: The driver currently does not support receiving packets
+ * larger than the buffer size, so don't decrease this (unless you
+ * want to add such support).
+ */
+#define TSI108_RXBUF_SIZE     1536
+
+#define TSI108_TXRING_LEN     256
+
+#define TSI108_TX_INT_FREQ    64
+
+/* Check the phy status every half a second. */
+#define CHECK_PHY_INTERVAL (HZ/2)
+
+static int tsi108_init_one(struct platform_device *pdev);
+static int tsi108_ether_remove(struct platform_device *pdev);
+
+struct tsi108_prv_data {
+	volatile u32 regs;	/* Base of normal regs */
+	volatile u32 phyregs;	/* Base of register bank used for PHY access */
+	int phy;		/* Index of PHY for this interface */
+	int irq_num;
+	int id;
+
+	struct timer_list timer;/* Timer that triggers the check phy function */
+	int rxtail;		/* Next entry in rxring to read */
+	int rxhead;		/* Next entry in rxring to give a new buffer */
+	int rxfree;		/* Number of free, allocated RX buffers */
+
+	int rxpending;		/* Non-zero if there are still descriptors
+				 * to be processed from a previous descriptor
+				 * interrupt condition that has been cleared */
+
+	int txtail;		/* Next TX descriptor to check status on */
+	int txhead;		/* Next TX descriptor to use */
+
+	/* Number of free TX descriptors.  This could be calculated from
+	 * rxhead and rxtail if one descriptor were left unused to disambiguate
+	 * full and empty conditions, but it's simpler to just keep track
+	 * explicitly. */
+
+	int txfree;
+
+	int phy_ok;		/* The PHY is currently powered on. */
+
+	/* PHY status (duplex is 1 for half, 2 for full,
+	 * so that the default 0 indicates that neither has
+	 * yet been configured). */
+
+	int link_up;
+	int speed;
+	int duplex;
+
+	tx_desc *txring;
+	rx_desc *rxring;
+	sk_buff *txskbs[TSI108_TXRING_LEN];
+	sk_buff *rxskbs[TSI108_RXRING_LEN];
+
+	dma_addr_t txdma, rxdma;
+
+	/* txlock nests in misclock and phy_lock */
+
+	spinlock_t txlock, misclock;
+
+	/* stats is used to hold the upper bits of each hardware counter,
+	 * and tmpstats is used to hold the full values for returning
+	 * to the caller of get_stats().  They must be separate in case
+	 * an overflow interrupt occurs before the stats are consumed.
+	 */
+
+	struct net_device_stats stats;
+	struct net_device_stats tmpstats;
+
+	/* These stats are kept separate in hardware, thus require individual
+	 * fields for handling carry.  They are combined in get_stats.
+	 */
+
+	unsigned long rx_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_short_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_long_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_underruns;	/* Add to rx_length_errors */
+	unsigned long rx_overruns;	/* Add to rx_length_errors */
+
+	unsigned long tx_coll_abort;	/* Add to tx_aborted_errors/collisions */
+	unsigned long tx_pause_drop;	/* Add to tx_aborted_errors */
+
+	unsigned long mc_hash[16];
+};
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+	.probe = tsi108_init_one,
+	.remove = tsi108_ether_remove,
+	.driver	= {
+		.name = "tsi-ethernet",
+	},
+};
+
+static void tsi108_timed_checker(unsigned long dev_ptr);
+
+static void dump_eth_one(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	printk("Dumping %s...\n", dev->name);
+	printk("intstat %x intmask %x phy_ok %d"
+	       " link %d speed %d duplex %d\n",
+	       TSI108_ETH_READ_REG(TSI108_EC_INTSTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_INTMASK), data->phy_ok,
+	       data->link_up, data->speed, data->duplex);
+
+	printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
+	       data->txhead, data->txtail, data->txfree,
+	       TSI108_ETH_READ_REG(TSI108_EC_TXSTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_TXESTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_TXERR));
+
+	printk("RX: head %d, tail %d, free %d, stat %x,"
+	       " estat %x, err %x, pending %d\n\n",
+	       data->rxhead, data->rxtail, data->rxfree,
+	       TSI108_ETH_READ_REG(TSI108_EC_RXSTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_RXESTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_RXERR), data->rxpending);
+}
+
+/* Synchronization is needed between the thread and up/down events.
+ * Note that the PHY is accessed through the same registers for both
+ * interfaces, so this can't be made interface-specific.
+ */
+
+static DEFINE_SPINLOCK(phy_lock);
+
+static u16 tsi108_read_mii(struct tsi108_prv_data *data, int reg, int *status)
+{
+	int i;
+	u16 ret;
+
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	mb();
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_CMD, 0);
+	mb();
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
+	mb();
+	for (i = 0; i < 100; i++) {
+		if (!(TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_IND) &
+		      (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
+			break;
+		udelay(10);
+	}
+
+	if (i == 100) {
+		if (status)
+			*status = -EBUSY;
+
+		ret = 0xffff;
+	} else {
+		if (status)
+			*status = 0;
+
+		ret = TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_DATAIN);
+	}
+
+	return ret;
+}
+
+static void tsi108_write_mii(struct tsi108_prv_data *data, 
+				int reg, u16 val)
+{
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	mb();
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_DATAOUT, val);
+	mb();
+	while (TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_IND) &
+	       TSI108_MAC_MII_IND_BUSY) ;
+}
+
+static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
+					int reg, u16 val)
+{
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_ADDR,
+			     (0x1e << TSI108_MAC_MII_ADDR_PHY)
+			     | (reg << TSI108_MAC_MII_ADDR_REG));
+	mb();
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_DATAOUT, val);
+	mb();
+	while (TSI108_ETH_READ_REG(TSI108_MAC_MII_IND) &
+	       TSI108_MAC_MII_IND_BUSY) ;
+}
+
+static void tsi108_check_phy(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u16 sumstat;
+	u32 mac_cfg2_reg, portctrl_reg;
+	u32 fdx_flag = 0, reg_update = 0;
+
+	/* Do a dummy read, as for some reason the first read
+	 * after a link becomes up returns link down, even if
+	 * it's been a while since the link came up.
+	 */
+
+	spin_lock(&phy_lock);
+
+	if (!data->phy_ok)
+		goto out;
+
+	tsi108_read_mii(data, PHY_STAT, NULL);
+
+	if (!(tsi108_read_mii(data, PHY_STAT, NULL) & PHY_STAT_LINKUP)) {
+		if (data->link_up == 1) {
+			netif_stop_queue(dev);
+			data->link_up = 0;
+			printk(KERN_NOTICE "%s : link is down\n", dev->name);
+			netif_carrier_off(dev);
+		}
+
+		goto out;
+	}
+
+	mac_cfg2_reg = TSI108_ETH_READ_REG(TSI108_MAC_CFG2);
+	portctrl_reg = TSI108_ETH_READ_REG(TSI108_EC_PORTCTRL);
+
+	sumstat = tsi108_read_mii(data, PHY_SUM_STAT, NULL);
+
+	switch (sumstat & PHY_SUM_STAT_SPEED_MASK) {
+	case PHY_SUM_STAT_1000T_FD:
+		fdx_flag++;
+	case PHY_SUM_STAT_1000T_HD:
+		if (data->speed != 1000) {
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+			mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
+			portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
+			data->speed = 1000;
+			reg_update++;
+		}
+		break;
+	case PHY_SUM_STAT_100TX_FD:
+		fdx_flag++;
+	case PHY_SUM_STAT_100TX_HD:
+		if (data->speed != 100) {
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+			mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+			portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+			data->speed = 100;
+			reg_update++;
+		}
+		break;
+
+	case PHY_SUM_STAT_10T_FD:
+		fdx_flag++;
+	case PHY_SUM_STAT_10T_HD:
+		if (data->speed != 10) {
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+			mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+			portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+			data->speed = 10;
+			reg_update++;
+		}
+		break;
+
+	default:
+		if (net_ratelimit())
+			printk(KERN_ERR "PHY reported invalid speed,"
+			       KERN_ERR " summary status %x\n",
+			       sumstat);
+		goto out;
+	}
+
+	if (fdx_flag || (sumstat & PHY_SUM_STAT_FULLDUPLEX)) {
+		if (data->duplex != 2) {
+			mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
+			portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
+			data->duplex = 2;
+			reg_update++;
+		}
+	} else {
+		if (data->duplex != 1) {
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
+			portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
+			data->duplex = 1;
+			reg_update++;
+		}
+	}
+
+	if (reg_update) {
+		TSI108_ETH_WRITE_REG(TSI108_MAC_CFG2, mac_cfg2_reg);
+		mb();
+		TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, portctrl_reg);
+		mb();
+	}
+
+	if (data->link_up == 0) {
+		/* The manual says it can take 3-4 usecs for the speed change
+		 * to take effect.
+		 */
+		udelay(5);
+
+		spin_lock(&data->txlock);
+		if (netif_queue_stopped(dev)
+		    && is_valid_ether_addr(dev->dev_addr) && data->txfree)
+			netif_wake_queue(dev);
+
+		data->link_up = 1;
+		spin_unlock(&data->txlock);
+		printk("%s : link is up: %dMb %s-duplex\n",
+		       dev->name, data->speed,
+		       (data->duplex == 2) ? "full" : "half");
+		netif_carrier_on(dev);
+	}
+
+      out:
+	spin_unlock(&phy_lock);
+}
+
+static inline void
+tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+		      unsigned long *upper)
+{
+	if (carry & carry_bit)
+		*upper += carry_shift;
+}
+
+static void tsi108_stat_carry(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 carry1, carry2;
+
+	spin_lock_irq(&data->misclock);
+
+	carry1 = TSI108_ETH_READ_REG(TSI108_STAT_CARRY1);
+	carry2 = TSI108_ETH_READ_REG(TSI108_STAT_CARRY2);
+
+	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRY1, carry1);
+	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRY2, carry2);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
+			      TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
+			      TSI108_STAT_RXPKTS_CARRY,
+			      &data->stats.rx_packets);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
+			      TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
+			      TSI108_STAT_RXMCAST_CARRY,
+			      &data->stats.multicast);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
+			      TSI108_STAT_RXALIGN_CARRY,
+			      &data->stats.rx_frame_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
+			      TSI108_STAT_RXLENGTH_CARRY,
+			      &data->stats.rx_length_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
+			      TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
+			      TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
+			      TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
+			      TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
+			      TSI108_STAT_RXDROP_CARRY,
+			      &data->stats.rx_missed_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
+			      TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
+			      TSI108_STAT_TXPKTS_CARRY,
+			      &data->stats.tx_packets);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
+			      TSI108_STAT_TXEXDEF_CARRY,
+			      &data->stats.tx_aborted_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
+			      TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
+			      TSI108_STAT_TXTCOL_CARRY,
+			      &data->stats.collisions);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
+			      TSI108_STAT_TXPAUSEDROP_CARRY,
+			      &data->tx_pause_drop);
+
+	spin_unlock_irq(&data->misclock);
+}
+
+/* Read a stat counter atomically with respect to carries.
+ * data->misclock must be held.
+ */
+static inline unsigned long
+tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
+		 int carry_shift, unsigned long *upper)
+{
+	int carryreg;
+	unsigned long val;
+
+	if (reg < 0xb0)
+		carryreg = TSI108_STAT_CARRY1;
+	else
+		carryreg = TSI108_STAT_CARRY2;
+
+      again:
+	val = TSI108_ETH_READ_REG(reg) | *upper;
+
+	rmb();
+
+	/* Check to see if it overflowed, but the interrupt hasn't
+	 * been serviced yet.  If so, handle the carry here, and
+	 * try again.
+	 */
+
+	if (unlikely(TSI108_ETH_READ_REG(carryreg) & carry_bit)) {
+		*upper += carry_shift;
+		TSI108_ETH_WRITE_REG(carryreg, carry_bit);
+		mb();
+
+		goto again;
+	}
+
+	return val;
+}
+
+static struct net_device_stats *tsi108_get_stats(net_device *dev)
+{
+	unsigned long excol;
+
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	spin_lock_irq(&data->misclock);
+
+	data->tmpstats.rx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_RXPKTS,
+			     TSI108_STAT_CARRY1_RXPKTS,
+			     TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
+
+	data->tmpstats.tx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_TXPKTS,
+			     TSI108_STAT_CARRY2_TXPKTS,
+			     TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
+
+	data->tmpstats.rx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_RXBYTES,
+			     TSI108_STAT_CARRY1_RXBYTES,
+			     TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	data->tmpstats.tx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_TXBYTES,
+			     TSI108_STAT_CARRY2_TXBYTES,
+			     TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	data->tmpstats.multicast =
+	    tsi108_read_stat(data, TSI108_STAT_RXMCAST,
+			     TSI108_STAT_CARRY1_RXMCAST,
+			     TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
+
+	excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
+				 TSI108_STAT_CARRY2_TXEXCOL,
+				 TSI108_STAT_TXEXCOL_CARRY,
+				 &data->tx_coll_abort);
+
+	data->tmpstats.collisions =
+	    tsi108_read_stat(data, TSI108_STAT_TXTCOL,
+			     TSI108_STAT_CARRY2_TXTCOL,
+			     TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
+
+	data->tmpstats.collisions += excol;
+
+	data->tmpstats.rx_length_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
+			     TSI108_STAT_CARRY1_RXLENGTH,
+			     TSI108_STAT_RXLENGTH_CARRY,
+			     &data->stats.rx_length_errors);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXRUNT,
+			     TSI108_STAT_CARRY1_RXRUNT,
+			     TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
+			     TSI108_STAT_CARRY1_RXJUMBO,
+			     TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	data->tmpstats.rx_frame_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXALIGN,
+			     TSI108_STAT_CARRY1_RXALIGN,
+			     TSI108_STAT_RXALIGN_CARRY,
+			     &data->stats.rx_frame_errors);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFCS,
+			     TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
+			     &data->rx_fcs);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFRAG,
+			     TSI108_STAT_CARRY1_RXFRAG,
+			     TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	data->tmpstats.rx_missed_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXDROP,
+			     TSI108_STAT_CARRY1_RXDROP,
+			     TSI108_STAT_RXDROP_CARRY,
+			     &data->stats.rx_missed_errors);
+
+	/* These three are maintained by software. */
+	data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
+	data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
+
+	data->tmpstats.tx_aborted_errors =
+	    tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
+			     TSI108_STAT_CARRY2_TXEXDEF,
+			     TSI108_STAT_TXEXDEF_CARRY,
+			     &data->stats.tx_aborted_errors);
+
+	data->tmpstats.tx_aborted_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
+			     TSI108_STAT_CARRY2_TXPAUSE,
+			     TSI108_STAT_TXPAUSEDROP_CARRY,
+			     &data->tx_pause_drop);
+
+	data->tmpstats.tx_aborted_errors += excol;
+
+	data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
+	data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
+	    data->tmpstats.rx_crc_errors +
+	    data->tmpstats.rx_frame_errors +
+	    data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
+
+	spin_unlock_irq(&data->misclock);
+	return &data->tmpstats;
+}
+
+static void tsi108_restart_rx(struct tsi108_prv_data * data, net_device *dev)
+{
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_PTRHIGH,
+			     TSI108_EC_RXQ_PTRHIGH_VALID);
+
+	wmb();
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
+			     | TSI108_EC_RXCTRL_QUEUE0);
+}
+
+static void tsi108_restart_tx(struct tsi108_prv_data * data)
+{
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_PTRHIGH,
+			     TSI108_EC_TXQ_PTRHIGH_VALID);
+
+	wmb();
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
+			     TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
+}
+
+/* txlock must be held by caller, with IRQs disabled, and
+ * with permission to re-enable them when the lock is dropped.
+ */
+static void tsi108_check_for_completed_tx(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int tx;
+	struct sk_buff *skb;
+	int release = 0;
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		tx = data->txtail;
+
+		if (data->txring[tx].misc & TSI108_TX_OWN)
+			break;
+
+		skb = data->txskbs[tx];
+
+		if (!(data->txring[tx].misc & TSI108_TX_OK))
+			printk("%s: bad tx packet, misc %x\n",
+			       dev->name, data->txring[tx].misc);
+
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+
+		if (data->txring[tx].misc & TSI108_TX_EOF) {
+			dev_kfree_skb_any(skb);
+			release++;
+		}
+	}
+
+	if (release) {
+		if (netif_queue_stopped(dev)
+		    && is_valid_ether_addr(dev->dev_addr) && data->link_up)
+			netif_wake_queue(dev);
+	}
+}
+
+static int tsi108_send_packet(sk_buff * skb, net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int frags = skb_shinfo(skb)->nr_frags + 1;
+	int i;
+#ifdef FIXME_SG_CSUM_NOT_TESTED	/* FIXME: Not supported now. */
+	long csstart;
+	long csum;
+
+	csstart = skb->len - skb->data_len;
+	if (csstart > skb->len - skb->data_len)
+		BUG();
+	csum = 0;
+	if (csstart != skb->len)
+		csum = skb_checksum(skb, csstart, skb->len - csstart, 0);
+#endif
+
+	if (!data->phy_ok && net_ratelimit())
+		printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
+
+	if (!data->link_up) {
+		printk(KERN_ERR "%s: Transmit while link is down!\n",
+		       dev->name);
+		netif_stop_queue(dev);
+		return 1;
+	}
+
+	if (data->txfree < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+
+		if (net_ratelimit())
+			printk(KERN_ERR "%s: Transmit with full tx ring!\n",
+			       dev->name);
+		return 1;
+	}
+
+	if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+	}
+
+	spin_lock_irq(&data->txlock);
+
+	for (i = 0; i < frags; i++) {
+		int misc = 0;
+		int tx = data->txhead;
+
+		/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
+		 * the interrupt bit.  TX descriptor-complete interrupts are
+		 * enabled when the queue fills up, and masked when there is
+		 * still free space.  This way, when saturating the outbound
+		 * link, the tx interrupts are kept to a reasonable level. 
+		 * When the queue is not full, reclamation of skbs still occurs
+		 * as new packets are transmitted, or on a queue-empty
+		 * interrupt.
+		 */
+
+		if ((tx % TSI108_TX_INT_FREQ == 0) &&
+		    ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
+			misc = TSI108_TX_INT;
+
+		data->txskbs[tx] = skb;
+
+		if (i == 0) {
+			data->txring[tx].buf0 = virt_to_phys(skb->data);
+			data->txring[tx].len = skb->len - skb->data_len;
+			misc |= TSI108_TX_SOF;
+		} else {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+			data->txring[tx].buf0 =
+			    page_to_phys(frag->page) + frag->page_offset;
+			data->txring[tx].len = frag->size;
+		}
+
+		if (i == frags - 1)
+			misc |= TSI108_TX_EOF;
+
+#ifdef TSI108_PRINT_TX_FRAME
+		{
+			int i;
+			printk("%s: Tx Frame contents (%d)\n", dev->name,
+			       skb->len);
+			for (i = 0; i < skb->len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+#endif				/* TSI108_PRINT_TX_FRAME */
+
+		mb();
+		data->txring[tx].misc = misc | TSI108_TX_OWN;
+
+		data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
+		data->txfree--;
+	}
+
+	tsi108_check_for_completed_tx(dev);
+
+	/* This must be done after the check for completed tx descriptors,
+	 * so that the tail pointer is correct.
+	 */
+
+	if (!(TSI108_ETH_READ_REG(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
+		tsi108_restart_tx(data);
+
+	spin_unlock_irq(&data->txlock);
+	return 0;
+}
+
+static int tsi108_check_for_completed_rx(net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree && done != budget) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		if (data->rxring[rx].misc & TSI108_RX_OWN)
+			break;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		done++;
+
+		if (data->rxring[rx].misc & TSI108_RX_BAD) {
+			spin_lock_irq(&data->misclock);
+
+			if (data->rxring[rx].misc & TSI108_RX_CRC)
+				data->stats.rx_crc_errors++;
+			if (data->rxring[rx].misc & TSI108_RX_OVER)
+				data->stats.rx_fifo_errors++;
+
+			spin_unlock_irq(&data->misclock);
+
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+#ifdef TSI108_PRINT_RX_FRAME
+		{
+			int i;
+			printk("%s: Rx Frame contents (%d)\n",
+			       dev->name, data->rxring[rx].len);
+			for (i = 0; i < data->rxring[rx].len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+#endif				/* TSI108_PRINT_RX_FRAME */
+
+		skb->dev = dev;
+		skb_put(skb, data->rxring[rx].len);
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_receive_skb(skb);
+		dev->last_rx = jiffies;
+	}
+
+	return done;
+}
+
+static int tsi108_refill_rx(net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
+		int rx = data->rxhead;
+		sk_buff *skb;
+
+		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
+
+		data->rxring[rx].buf0 = virt_to_phys(skb->data);
+
+		/* Sometimes the hardware sets blen to zero after packet
+		 * reception, even though the manual says that it's only ever
+		 * modified by the driver.
+		 */
+
+		data->rxring[rx].blen = 1536;
+		mb();
+		data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
+
+		data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
+		data->rxfree++;
+		done++;
+	}
+
+	mb();
+
+	if (done != 0 && !(TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
+			   TSI108_EC_RXSTAT_QUEUE0))
+		tsi108_restart_rx(data, dev);
+
+	return done;
+}
+
+static int tsi108_poll(net_device *dev, int *budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI108_ETH_READ_REG(TSI108_EC_RXESTAT);
+	u32 intstat = TSI108_ETH_READ_REG(TSI108_EC_INTSTAT);
+	int total_budget = min(*budget, dev->quota);
+	int num_received = 0, num_filled = 0, budget_used;
+
+	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXESTAT, estat);
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, intstat);
+
+	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
+		num_received = tsi108_check_for_completed_rx(dev, total_budget);
+
+	/* This should normally fill no more slots than the number of
+	 * packets received in tsi108_check_for_completed_rx().  The exception
+	 * is when we previously ran out of memory for RX SKBs.  In that
+	 * case, it's helpful to obey the budget, not only so that the
+	 * CPU isn't hogged, but so that memory (which may still be low)
+	 * is not hogged by one device.
+	 *
+	 * A work unit is considered to be two SKBs to allow us to catch
+	 * up when the ring has shrunk due to out-of-memory but we're
+	 * still removing the full budget's worth of packets each time.
+	 */
+
+	if (data->rxfree < TSI108_RXRING_LEN)
+		num_filled = tsi108_refill_rx(dev, total_budget * 2);
+
+	if (intstat & TSI108_INT_RXERROR) {
+		u32 err = TSI108_ETH_READ_REG(TSI108_EC_RXERR);
+		TSI108_ETH_WRITE_REG(TSI108_EC_RXERR, err);
+
+		if (err) {
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: RX error %x\n",
+				       dev->name, err);
+
+			if (!(TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
+			      TSI108_EC_RXSTAT_QUEUE0))
+				tsi108_restart_rx(data, dev);
+		}
+	}
+
+	if (intstat & TSI108_INT_RXOVERRUN) {
+		spin_lock_irq(&data->misclock);
+		data->stats.rx_fifo_errors++;
+		spin_unlock_irq(&data->misclock);
+	}
+
+	budget_used = max(num_received, num_filled / 2);
+
+	*budget -= budget_used;
+	dev->quota -= budget_used;
+
+	if (budget_used != total_budget) {
+		data->rxpending = 0;
+		netif_rx_complete(dev);
+
+		TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
+				     TSI108_ETH_READ_REG(TSI108_EC_INTMASK)
+				     & ~(TSI108_INT_RXQUEUE0
+					 | TSI108_INT_RXTHRESH |
+					 TSI108_INT_RXOVERRUN |
+					 TSI108_INT_RXERROR |
+					 TSI108_INT_RXWAIT));
+
+		mb();
+
+		/* IRQs are level-triggered, so no need to re-check */
+		return 0;
+	} else {
+		data->rxpending = 1;
+	}
+
+	return 1;
+}
+
+/* Schedule RX polling for @dev.  The RX interrupt sources are masked
+ * here (not acked); tsi108_poll() acks them and unmasks when done.
+ */
+static void tsi108_rx_int(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A race could cause dev to already be scheduled, so it's not an
+	 * error if that happens (and interrupts shouldn't be re-masked,
+	 * because that can cause harmful races, if poll has already
+	 * unmasked them but not cleared LINK_STATE_SCHED).  
+	 *
+	 * This can happen if this code races with tsi108_poll(), which masks
+	 * the interrupts after tsi108_irq_one() read the mask, but before
+	 * netif_rx_schedule is called.  It could also happen due to calls
+	 * from tsi108_check_rxring().
+	 */
+
+	if (netif_rx_schedule_prep(dev)) {
+		/* Mask, rather than ack, the receive interrupts.  The ack
+		 * will happen in tsi108_poll().
+		 */
+
+		TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
+				     TSI108_ETH_READ_REG(TSI108_EC_INTMASK) |
+				     TSI108_INT_RXQUEUE0
+				     | TSI108_INT_RXTHRESH |
+				     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
+				     TSI108_INT_RXWAIT);
+		mb();
+		__netif_rx_schedule(dev);
+	} else {
+		if (!netif_running(dev)) {
+			/* This can happen if an interrupt occurs while the
+			 * interface is being brought down, as the START
+			 * bit is cleared before the stop function is called.
+			 *
+			 * In this case, the interrupts must be masked, or
+			 * they will continue indefinitely.
+			 *
+			 * There's a race here if the interface is brought down
+			 * and then up in rapid succession, as the device could
+			 * be made running after the above check and before
+			 * the masking below.  This will only happen if the IRQ
+			 * thread has a lower priority than the task bringing
+			 * up the interface.  Fixing this race would likely
+			 * require changes in generic code.
+			 */
+
+			TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
+					     TSI108_ETH_READ_REG
+					     (TSI108_EC_INTMASK) |
+					     TSI108_INT_RXQUEUE0 |
+					     TSI108_INT_RXTHRESH |
+					     TSI108_INT_RXOVERRUN |
+					     TSI108_INT_RXERROR |
+					     TSI108_INT_RXWAIT);
+			mb();
+		}
+	}
+}
+
+/* If the RX ring has run out of memory, try periodically
+ * to allocate some more, as otherwise poll would never
+ * get called (apart from the initial end-of-queue condition).
+ *
+ * This is called once per second (by default) from the thread.
+ */
+
+static void tsi108_check_rxring(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A poll is scheduled, as opposed to calling tsi108_refill_rx 
+	 * directly, so as to keep the receive path single-threaded
+	 * (and thus not needing a lock).
+	 */
+
+	/* Only bother when the ring has dropped below a quarter full. */
+	if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
+		tsi108_rx_int(dev);
+}
+
+/* Handle TX interrupt work: ack the TX status and interrupt bits,
+ * report any queue-0 error, and reap completed descriptors (under
+ * txlock) when a descriptor-complete or end-of-queue event is flagged.
+ */
+static void tsi108_tx_int(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI108_ETH_READ_REG(TSI108_EC_TXESTAT);
+
+	/* Write-to-clear: ack exactly the status bits just read. */
+	mb();
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXESTAT, estat);
+	mb();
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
+			     TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
+	mb();
+	if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
+		u32 err = TSI108_ETH_READ_REG(TSI108_EC_TXERR);
+		TSI108_ETH_WRITE_REG(TSI108_EC_TXERR, err);
+
+		if (err && net_ratelimit())
+			printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
+	}
+
+	if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
+		spin_lock(&data->txlock);
+		tsi108_check_for_completed_tx(dev);
+		spin_unlock(&data->txlock);
+	}
+}
+
+/* Service one port's interrupt status: dispatch TX and RX work for the
+ * sources that are both pending and unmasked, and ack SFN and
+ * statistics-carry events directly.  Returns IRQ_NONE when none of
+ * our interrupt bits are set (shared-IRQ friendly).
+ */
+static irqreturn_t tsi108_irq_one(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 stat = TSI108_ETH_READ_REG(TSI108_EC_INTSTAT);
+
+	if (!(stat & TSI108_INT_ANY))
+		return IRQ_NONE;	/* Not our interrupt */
+
+	/* Ignore sources that are currently masked. */
+	stat &= ~TSI108_ETH_READ_REG(TSI108_EC_INTMASK);
+
+	if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
+		    TSI108_INT_TXERROR))
+		tsi108_tx_int(dev);
+	if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+		    TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
+		    TSI108_INT_RXERROR))
+		tsi108_rx_int(dev);
+
+	if (stat & TSI108_INT_SFN) {
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: SFN error\n", dev->name);
+		TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_SFN);
+	}
+
+	if (stat & TSI108_INT_STATCARRY) {
+		tsi108_stat_carry(dev);
+		TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* IRQ entry point; dev_id is the net_device passed to request_irq().
+ *
+ * NOTE(review): the irq == NO_IRQ test looks like dead code, since the
+ * handler is only installed on a valid IRQ in tsi108_open() -- confirm
+ * before removing (it was queried during list review).
+ */
+static irqreturn_t tsi108_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (irq == NO_IRQ)
+		return IRQ_NONE;	/* Not our interrupt */
+
+	return tsi108_irq_one(dev_id);
+}
+
+/* Disable the TX and RX queues and busy-wait until the hardware
+ * reports both DMA engines idle.
+ *
+ * FIXME: the waits below are unbounded; add a timeout if the hardware
+ * can wedge with the ACTIVE bit stuck.
+ */
+static void tsi108_stop_ethernet(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* Disable all TX and RX queues ... */
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXCTRL, 0);
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCTRL, 0);
+
+	/* ...and wait for them to become idle */
+	mb();
+	while (TSI108_ETH_READ_REG(TSI108_EC_TXSTAT) &
+	       TSI108_EC_TXSTAT_ACTIVE)
+		cpu_relax();
+	while (TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
+	       TSI108_EC_RXSTAT_ACTIVE)
+		cpu_relax();
+}
+
+/* Hard-reset the Ethernet block: soft-reset the MAC, the statistics
+ * engine, the TX and RX data paths, and the MII management interface,
+ * then re-enable the MII management clock.  Each reset bit is held for
+ * 100us before being cleared; the sequence order matters.
+ */
+static void tsi108_reset_ether(struct tsi108_prv_data * data)
+{
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL,
+			     TSI108_ETH_READ_REG(TSI108_EC_PORTCTRL) &
+			     ~TSI108_EC_PORTCTRL_STATRST);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXCFG,
+			     TSI108_ETH_READ_REG(TSI108_EC_TXCFG) &
+			     ~TSI108_EC_TXCFG_RST);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG,
+			     TSI108_ETH_READ_REG(TSI108_EC_RXCFG) &
+			     ~TSI108_EC_RXCFG_RST);
+
+	/* The MII clock bit is dropped during reset and restored last. */
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
+			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) |
+			     TSI108_MAC_MII_MGMT_RST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
+			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) &
+			     ~(TSI108_MAC_MII_MGMT_RST |
+			       TSI108_MAC_MII_MGMT_CLK));
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
+			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) |
+			     TSI108_MAC_MII_MGMT_CLK);
+}
+
+/* Read the station address from the MAC address registers into
+ * dev->dev_addr.  If both registers read zero (firmware did not
+ * program them), synthesize a default 00:06:d2:00:00:01/02 address
+ * (last octet chosen by PHY id) and write it back to the hardware.
+ *
+ * Returns 0 on success, -EINVAL if the resulting address is invalid.
+ */
+static int tsi108_get_mac(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1 = TSI108_ETH_READ_REG(TSI108_MAC_ADDR1);
+	u32 word2 = TSI108_ETH_READ_REG(TSI108_MAC_ADDR2);
+
+	/* Note that the octets are reversed from what the manual says,
+	 * producing an even weirder ordering...
+	 */
+	if (word2 == 0 && word1 == 0) {
+		dev->dev_addr[0] = 0x00;
+		dev->dev_addr[1] = 0x06;
+		dev->dev_addr[2] = 0xd2;
+		dev->dev_addr[3] = 0x00;
+		dev->dev_addr[4] = 0x00;
+		if (0x8 == data->phy)
+			dev->dev_addr[5] = 0x01;
+		else
+			dev->dev_addr[5] = 0x02;
+
+		word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+		word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+		    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+		TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR1, word1);
+		TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR2, word2);
+	} else {
+		dev->dev_addr[0] = (word2 >> 16) & 0xff;
+		dev->dev_addr[1] = (word2 >> 24) & 0xff;
+		dev->dev_addr[2] = (word1 >> 0) & 0xff;
+		dev->dev_addr[3] = (word1 >> 8) & 0xff;
+		dev->dev_addr[4] = (word1 >> 16) & 0xff;
+		dev->dev_addr[5] = (word1 >> 24) & 0xff;
+	}
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		/* KERN_ERR is the printk level, not part of the text */
+		printk(KERN_ERR "word1: %08x, word2: %08x\n", word1, word2);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Set a new station address.  @addr is a struct sockaddr, so the
+ * hardware address itself starts at offset 2, after the address
+ * family field.  Returns 0 on success, -EINVAL for a bad address.
+ */
+static int tsi108_set_mac(net_device *dev, void *addr)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1, word2;
+	int i;
+
+	/* Validate the MAC bytes proper, not the sockaddr header the
+	 * original check pointed at.
+	 */
+	if (!is_valid_ether_addr(((unsigned char *)addr) + 2))
+		return -EINVAL;
+
+	for (i = 0; i < 6; i++)
+		/* +2 is for the offset of the HW addr type */
+		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+
+	word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+	word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+	    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+	/* misclock guards the address registers; txlock nests inside so
+	 * the queue can be woken atomically with the address change.
+	 */
+	spin_lock_irq(&data->misclock);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR1, word1);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR2, word2);
+	spin_lock(&data->txlock);
+
+	if (netif_queue_stopped(dev) && data->txfree && data->link_up)
+		netif_wake_queue(dev);
+
+	spin_unlock(&data->txlock);
+	spin_unlock_irq(&data->misclock);
+	return 0;
+}
+
+/* Program unicast/multicast filtering from dev->flags and the
+ * multicast list.  Promiscuous mode disables both hash filters and
+ * forwards all unicast and multicast frames; otherwise a 512-bit
+ * multicast hash table is rebuilt from the top 9 bits of each
+ * address's CRC and written to the hardware as 16 32-bit words.
+ * Protected by dev->xmit_lock.
+ */
+static void tsi108_set_rx_mode(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 rxcfg = TSI108_ETH_READ_REG(TSI108_EC_RXCFG);
+
+	if (dev->flags & IFF_PROMISC) {
+		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
+		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
+		goto out;
+	}
+
+	rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
+
+	if (dev->mc_count) {
+		int i;
+		struct dev_mc_list *mc = dev->mc_list;
+		rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
+
+		memset(data->mc_hash, 0, sizeof(data->mc_hash));
+
+		while (mc) {
+			u32 hash, crc;
+
+			if (mc->dmi_addrlen == 6) {
+				crc = ether_crc(6, mc->dmi_addr);
+				hash = crc >> 23;
+
+				__set_bit(hash, &data->mc_hash[0]);
+			} else {
+				printk(KERN_ERR
+				       "%s: got multicast address of length %d "
+				       "instead of 6.\n", dev->name,
+				       mc->dmi_addrlen);
+			}
+
+			mc = mc->next;
+		}
+
+		TSI108_ETH_WRITE_REG(TSI108_EC_HASHADDR,
+				     TSI108_EC_HASHADDR_AUTOINC |
+				     TSI108_EC_HASHADDR_MCAST);
+
+		for (i = 0; i < 16; i++) {
+			/* The manual says that the hardware may drop
+			 * back-to-back writes to the data register.
+			 */
+			udelay(1);
+			mb();
+			TSI108_ETH_WRITE_REG(TSI108_EC_HASHDATA,
+					     data->mc_hash[i]);
+			mb();
+		}
+	}
+
+      out:
+	mb();
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG, rxcfg);
+}
+
+/* Reset the PHY, start autonegotiation, and wait (bounded by
+ * MII_READ_DELAY) for link-up, recording the outcome in
+ * data->link_up and marking the PHY usable via data->phy_ok.
+ */
+static void tsi108_init_phy(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 i = 0;
+	u16 phyVal = 0;
+
+	spin_lock_irq(&phy_lock);
+
+	tsi108_write_mii(data, PHY_CTRL, PHY_CTRL_RESET);
+	mb();
+	/* FIXME: unbounded wait for the reset bit to self-clear */
+	while (tsi108_read_mii(data, PHY_CTRL, NULL) & PHY_CTRL_RESET)
+		cpu_relax();
+
+#if (TSI108_PHY_TYPE == PHY_BCM54XX)	/* Broadcom BCM54xx PHY */
+	tsi108_write_mii(data, 0x09, 0x0300);
+	tsi108_write_mii(data, 0x10, 0x1020);
+	tsi108_write_mii(data, 0x1c, 0x8c00);
+	mb();
+#endif
+
+	tsi108_write_mii(data,
+			 PHY_CTRL,
+			 PHY_CTRL_AUTONEG_EN | PHY_CTRL_AUTONEG_START);
+	mb();
+	/* FIXME: unbounded wait for autonegotiation to start */
+	while (tsi108_read_mii(data, PHY_CTRL, NULL) & PHY_CTRL_AUTONEG_START)
+		cpu_relax();
+
+	/* Set G/MII mode and receive clock select in TBI control #2.  The
+	 * second port won't work if this isn't done, even though we don't
+	 * use TBI mode.
+	 */
+
+	tsi108_write_tbi(data, 0x11, 0x30);
+
+	/* FIXME: It seems to take more than 2 back-to-back reads to the
+	 * PHY_STAT register before the link up status bit is set.
+	 */
+
+	data->link_up = 1;
+
+	/* Poll for link-up, dropping the lock while sleeping so other
+	 * PHY users are not blocked for the whole wait.
+	 */
+	while (!((phyVal = tsi108_read_mii(data, PHY_STAT, NULL)) &
+		 PHY_STAT_LINKUP)) {
+		if (i++ > (MII_READ_DELAY / 10)) {
+			data->link_up = 0;
+			break;
+		}
+		spin_unlock_irq(&phy_lock);
+		msleep(10);
+		spin_lock_irq(&phy_lock);
+	}
+
+	printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyVal);
+	data->phy_ok = 1;
+	spin_unlock_irq(&phy_lock);
+}
+
+/* Power the PHY down and mark it unusable; phy_lock serializes the
+ * MII write against other PHY accessors.
+ */
+static void tsi108_kill_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	spin_lock_irq(&phy_lock);
+	tsi108_write_mii(data, PHY_CTRL, PHY_CTRL_POWERDOWN);
+	data->phy_ok = 0;
+	spin_unlock_irq(&phy_lock);
+}
+
+/* Bring the interface up: claim the IRQ, allocate the DMA descriptor
+ * rings, populate the RX ring with skbs, initialize the PHY, start
+ * the periodic PHY/ring check timer, and unmask the interrupts we
+ * service.  Returns 0 or a negative errno.
+ */
+static int tsi108_open(struct net_device *dev)
+{
+	int i;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
+	unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
+
+	printk(KERN_DEBUG "Inside tsi108_open()!\n");
+
+	i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
+	if (i != 0) {
+		printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
+		       data->id, data->irq_num);
+		return i;
+	} else {
+		dev->irq = data->irq_num;
+		printk(KERN_NOTICE
+		       "tsi108_open : Port %d Assigned IRQ %d to %s\n",
+		       data->id, dev->irq, dev->name);
+	}
+
+	data->rxring = pci_alloc_consistent(NULL, rxring_size, &data->rxdma);
+
+	if (!data->rxring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for rxring!\n");
+		/* Don't leak the IRQ claimed above. */
+		free_irq(data->irq_num, dev);
+		return -ENOMEM;
+	} else {
+		memset(data->rxring, 0, rxring_size);
+	}
+
+	data->txring = pci_alloc_consistent(NULL, txring_size, &data->txdma);
+
+	if (!data->txring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for txring!\n");
+		pci_free_consistent(NULL, rxring_size, data->rxring,
+				    data->rxdma);
+		free_irq(data->irq_num, dev);
+		return -ENOMEM;
+	} else {
+		memset(data->txring, 0, txring_size);
+	}
+
+	/* Chain the RX descriptors into a circular ring. */
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
+		data->rxring[i].blen = TSI108_RXBUF_SIZE;
+		data->rxring[i].vlan = 0;
+	}
+
+	data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
+
+	data->rxtail = 0;
+	data->rxhead = 0;
+
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+
+		if (!skb) {
+			/* Bah.  No memory for now, but maybe we'll get
+			 * some more later.
+			 * For now, we'll live with the smaller ring.
+			 */
+			printk(KERN_WARNING
+			       "%s: Could only allocate %d receive skb(s).\n",
+			       dev->name, i);
+			data->rxhead = i;
+			break;
+		}
+
+		data->rxskbs[i] = skb;
+		/* Align the IP header; matches the extra room allocated. */
+		skb_reserve(skb, NET_IP_ALIGN);
+		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
+		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
+	}
+
+	data->rxfree = i;
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_PTRLOW, data->rxdma);
+
+	/* Chain the TX descriptors into a circular ring. */
+	for (i = 0; i < TSI108_TXRING_LEN; i++) {
+		data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
+		data->txring[i].misc = 0;
+	}
+
+	data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
+	data->txtail = 0;
+	data->txhead = 0;
+	data->txfree = TSI108_TXRING_LEN;
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_PTRLOW, data->txdma);
+	tsi108_init_phy(dev);
+
+	setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+	mod_timer(&data->timer, jiffies + 1);
+
+	tsi108_restart_rx(data, dev);
+
+	/* Clear any stale interrupt status before unmasking. */
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, ~0);
+	mb();
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
+			     ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
+			       TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
+			       TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
+			       TSI108_INT_SFN | TSI108_INT_STATCARRY));
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1,
+			     TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* Bring the interface down: stop the queue and timer, halt the MAC,
+ * power down the PHY, mask all interrupts, drop any skbs still held
+ * by the TX and RX rings, then release the IRQ and the DMA rings.
+ */
+static int tsi108_close(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	del_timer_sync(&data->timer);
+
+	printk(KERN_DEBUG "Inside tsi108_ifdown!\n");
+
+	tsi108_stop_ethernet(dev);
+	tsi108_kill_phy(dev);
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK, ~0);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
+
+	/* Check for any pending TX packets, and drop them. */
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		int tx = data->txtail;
+		struct sk_buff *skb;
+		skb = data->txskbs[tx];
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+		dev_kfree_skb(skb);
+	}
+
+	synchronize_irq(data->irq_num);
+	free_irq(data->irq_num, dev);
+
+	/* Discard the RX ring. */
+
+	while (data->rxfree) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		dev_kfree_skb(skb);
+	}
+
+	pci_free_consistent(0,
+			    TSI108_RXRING_LEN * sizeof(rx_desc),
+			    data->rxring, data->rxdma);
+	pci_free_consistent(0,
+			    TSI108_TXRING_LEN * sizeof(tx_desc),
+			    data->txring, data->txdma);
+
+	return 0;
+}
+
+/* One-time MAC and queue configuration: preamble and pad/CRC
+ * insertion, TX FIFO fill thresholds, statistics carry masks for the
+ * counters we accumulate, queue/buffer configuration (swapping, PBM
+ * port, descriptor interrupts), and finally mask all interrupts --
+ * tsi108_open() unmasks the ones it services.
+ */
+static void tsi108_init_mac(net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
+			     TSI108_MAC_CFG2_PADCRC);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXTHRESH,
+			     (192 << TSI108_EC_TXTHRESH_STARTFILL) |
+			     (192 << TSI108_EC_TXTHRESH_STOPFILL));
+
+	/* Enable carry interrupts only for the statistics we track. */
+	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRYMASK1,
+			     ~(TSI108_STAT_CARRY1_RXBYTES |
+			       TSI108_STAT_CARRY1_RXPKTS |
+			       TSI108_STAT_CARRY1_RXFCS |
+			       TSI108_STAT_CARRY1_RXMCAST |
+			       TSI108_STAT_CARRY1_RXALIGN |
+			       TSI108_STAT_CARRY1_RXLENGTH |
+			       TSI108_STAT_CARRY1_RXRUNT |
+			       TSI108_STAT_CARRY1_RXJUMBO |
+			       TSI108_STAT_CARRY1_RXFRAG |
+			       TSI108_STAT_CARRY1_RXJABBER |
+			       TSI108_STAT_CARRY1_RXDROP));
+
+	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRYMASK2,
+			     ~(TSI108_STAT_CARRY2_TXBYTES |
+			       TSI108_STAT_CARRY2_TXPKTS |
+			       TSI108_STAT_CARRY2_TXEXDEF |
+			       TSI108_STAT_CARRY2_TXEXCOL |
+			       TSI108_STAT_CARRY2_TXTCOL |
+			       TSI108_STAT_CARRY2_TXPAUSE));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG,
+			     TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
+			     TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_CFG_SFNPORT));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
+			     TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_CFG_SFNPORT));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_BUFCFG,
+			     TSI108_EC_TXQ_BUFCFG_BURST256 |
+			     TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_BUFCFG_SFNPORT));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_BUFCFG,
+			     TSI108_EC_RXQ_BUFCFG_BURST256 |
+			     TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_BUFCFG_SFNPORT));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK, ~0);
+}
+
+/* MII ioctls: SIOCGMIIPHY returns the PHY id; SIOCGMIIREG reads a PHY
+ * register under phy_lock, with tsi108_read_mii() reporting status
+ * through &ret.  Everything else returns -EOPNOTSUPP.
+ */
+static int tsi108_ioctl(net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	struct mii_ioctl_data *mii_data =
+			(struct mii_ioctl_data *)&rq->ifr_data;
+	int ret;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		mii_data->phy_id = data->phy;
+		ret = 0;
+		break;
+
+	case SIOCGMIIREG:
+		spin_lock_irq(&phy_lock);
+		mii_data->val_out =
+		    tsi108_read_mii(data, mii_data->reg_num, &ret);
+		spin_unlock_irq(&phy_lock);
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+/* Platform-device probe: allocate the net_device, map the MAC and PHY
+ * register windows, wire up the netdev operations, reset the hardware,
+ * fetch the MAC address, and register the interface.  Returns 0 or a
+ * negative errno (device freed on registration failure).
+ */
+static int
+tsi108_init_one(struct platform_device *pdev)
+{
+	struct net_device *dev = NULL;
+	struct tsi108_prv_data *data = NULL;
+	hw_info *einfo;
+	int ret;
+	
+	einfo = pdev->dev.platform_data;
+	
+	if (NULL == einfo) {
+		printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
+		       pdev->id);
+		return -ENODEV;
+	}
+
+	/* Create an ethernet device instance */
+
+	dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
+	if (!dev) {
+		printk("tsi108_eth: Could not allocate a device structure\n");
+		return -ENOMEM;
+	}
+
+	printk("tsi108_eth%d: probe...\n", pdev->id);
+	data = netdev_priv(dev);
+
+	DBG("tsi108_eth%d: regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
+			pdev->id, einfo->regs, einfo->phyregs,
+			einfo->phy, einfo->irq_num);
+
+	/* NOTE(review): casting the ioremap() cookie to u32 discards the
+	 * __iomem annotation and is not 64-bit clean -- flagged in list
+	 * review; should keep the void __iomem * return value.
+	 */
+	data->regs = (u32)ioremap(einfo->regs, 0x400);	/*FIX ME */
+	data->phyregs = (u32)ioremap(einfo->phyregs, 0x400); 	/*FIX ME */
+	data->phy = einfo->phy;
+	data->irq_num = einfo->irq_num;
+	data->id = pdev->id;
+	dev->open = tsi108_open;
+	dev->stop = tsi108_close;
+	dev->hard_start_xmit = tsi108_send_packet;
+	dev->set_mac_address = tsi108_set_mac;
+	dev->set_multicast_list = tsi108_set_rx_mode;
+	dev->get_stats = tsi108_get_stats;
+	dev->poll = tsi108_poll;
+	dev->do_ioctl = tsi108_ioctl;
+	dev->weight = 64;  /* 64 is more suitable for GigE interface - klai */
+
+	/* Apparently, the Linux networking code won't use scatter-gather
+	 * if the hardware doesn't do checksums.  However, it's faster
+	 * to checksum in place and use SG, as (among other reasons)
+	 * the cache won't be dirtied (which then has to be flushed
+	 * before DMA).  The checksumming is done by the driver (via
+	 * a new function skb_csum_dev() in net/core/skbuff.c).
+	 */
+
+#ifdef FIXME_SG_CSUM_NOT_TESTED	/* FIXME: Not supported now. */
+	dev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_HW_CSUM;
+#else
+	dev->features = NETIF_F_HIGHDMA;
+#endif
+	SET_MODULE_OWNER(dev);
+
+	spin_lock_init(&data->txlock);
+	spin_lock_init(&data->misclock);
+
+	tsi108_reset_ether(data);
+	tsi108_kill_phy(dev);
+
+	if (tsi108_get_mac(dev) != 0)
+		printk(KERN_ERR "%s: Invalid MAC address.  Please correct.\n",
+		       dev->name);
+
+	tsi108_init_mac(dev);
+	ret = register_netdev(dev);
+	if (ret < 0) {
+		free_netdev(dev);
+		return ret;
+	}
+
+	printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
+	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+#ifdef DEBUG
+	dump_eth_one(dev);
+#endif
+
+	return 0;
+}
+
+/* There's no way to either get interrupts from the PHY when
+ * something changes, or to have the Tsi108 automatically communicate
+ * with the PHY to reconfigure itself.
+ *
+ * Thus, we have to do it using a timer. 
+ */
+
+/* Timer callback; @dev_ptr is the net_device cast to unsigned long
+ * (set up in tsi108_open()).  Re-arms itself every CHECK_PHY_INTERVAL.
+ */
+static void tsi108_timed_checker(unsigned long dev_ptr)
+{
+	struct net_device *dev = (struct net_device *)dev_ptr;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	tsi108_check_phy(dev);
+	tsi108_check_rxring(dev);
+	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+}
+
+/* Module init: register the platform driver.  Returns 0 or the
+ * negative errno from platform_driver_register().
+ */
+static int tsi108_ether_init(void)
+{
+	int ret;
+	ret = platform_driver_register (&tsi_eth_driver);
+	if (ret < 0){
+		/* Error messages should carry an explicit log level. */
+		printk(KERN_ERR "tsi108_ether_init: error initializing ethernet "
+		       "device\n");
+		return ret;
+	}
+	return 0;
+}
+
+/* Platform-device remove: unregister the interface, halt the MAC,
+ * unmap the register windows, and free the net_device.
+ */
+static int tsi108_ether_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct tsi108_prv_data *priv = netdev_priv(dev);
+	
+	unregister_netdev(dev);
+	tsi108_stop_ethernet(dev);
+	platform_set_drvdata(pdev, NULL);
+	iounmap((void __iomem *)priv->regs);
+	iounmap((void __iomem *)priv->phyregs);
+	free_netdev(dev);
+	
+	return 0;
+}
+/* Module exit: unregister the platform driver. */
+static void tsi108_ether_exit(void)
+{
+	platform_driver_unregister(&tsi_eth_driver);
+}
+
+module_init(tsi108_ether_init);
+module_exit(tsi108_ether_exit);
+
+MODULE_AUTHOR("Tundra Semiconductor Corporation");
+MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");




^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-12  8:55   ` [patch 3/3] " Zang Roy-r61911
@ 2006-09-12 10:07     ` Arjan van de Ven
  2006-09-19  7:39       ` Zang Roy-r61911
  2006-09-12 14:33     ` Roland Dreier
  2006-09-21  4:05     ` [patch 3/3 v2] " Zang Roy-r61911
  2 siblings, 1 reply; 23+ messages in thread
From: Arjan van de Ven @ 2006-09-12 10:07 UTC (permalink / raw)
  To: Zang Roy-r61911; +Cc: Andrew Morton, jgarzik, netdev, linux-kernel

On Tue, 2006-09-12 at 16:55 +0800, Zang Roy-r61911 wrote:
> The driver for tsi108/9 on chip Ethernet port

Hi,

I have some review comments about your driver; please consider them for
fixing....

> +
> +#undef DEBUG
> +#ifdef DEBUG
> +#define DBG(fmt...) do { printk(fmt); } while(0)
> +#else
> +#define DBG(fmt...) do { } while(0)
> +#endif

please don't do this, there is pr_debug and dev_dbg() for a reason
already. No reason to add a copy.

> +
> +typedef struct net_device net_device;
> +typedef struct sk_buff sk_buff;

Please don't do this...



> +	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_ADDR,
> +				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
> +				(reg << TSI108_MAC_MII_ADDR_REG));
> +	mb();
> +	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_CMD, 0);
> +	mb();
> +	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
> +	mb();

what is this mb() for? do you want them to be an io memory barrier?

> +	while (TSI108_ETH_READ_REG(TSI108_MAC_MII_IND) &
> +	       TSI108_MAC_MII_IND_BUSY) ;

do you want this to be an infinate loop? At minimum the ";" needs to be
a cpu_relax() to avoid the cpu from burning up but it'd be a lot nicer
if this loop wasn't infinite




> +
> +static irqreturn_t tsi108_irq(int irq, void *dev_id, struct pt_regs *regs)
> +{
> +	if (irq == NO_IRQ)
> +		return IRQ_NONE;	/* Not our interrupt */
> +
> +	return tsi108_irq_one(dev_id);
> +}

hi this IRQ_NONE just looks odd, was this really needed?



> +	mb();
> +	while (tsi108_read_mii(data, PHY_CTRL, NULL) & PHY_CTRL_AUTONEG_START)
> +		;

another infinite loop that wants cpu_relax() for sure

> +		spin_unlock_irq(&phy_lock);
> +		msleep(10);
> +		spin_lock_irq(&phy_lock);
> +	}

hmm some places take phy_lock with disabling interrupts, while others
don't. I sort of fear "the others" may be buggy.... are you sure those
are ok?



Greetings,
   Arjan van de Ven


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-12  8:55   ` [patch 3/3] " Zang Roy-r61911
  2006-09-12 10:07     ` Arjan van de Ven
@ 2006-09-12 14:33     ` Roland Dreier
  2006-09-12 14:43       ` Jeff Garzik
  2006-09-21  4:05     ` [patch 3/3 v2] " Zang Roy-r61911
  2 siblings, 1 reply; 23+ messages in thread
From: Roland Dreier @ 2006-09-12 14:33 UTC (permalink / raw)
  To: Zang Roy-r61911; +Cc: Andrew Morton, jgarzik, netdev, linux-kernel

 > +struct tsi108_prv_data {
 > +	volatile u32 regs;	/* Base of normal regs */
 > +	volatile u32 phyregs;	/* Base of register bank used for PHY access */

Why volatile?  This looks really wrong here.

 > +	data->regs = (u32)ioremap(einfo->regs, 0x400);	/*FIX ME */
 > +	data->phyregs = (u32)ioremap(einfo->phyregs, 0x400); 	/*FIX ME */

What needs to be fixed here?  And why are you casting the result of
ioremap to u32?  Shouldn't you keep the normal return value?

 - R.

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-12 14:33     ` Roland Dreier
@ 2006-09-12 14:43       ` Jeff Garzik
  2006-09-14  2:51         ` Zang Roy-r61911
       [not found]         ` <1158749825.7973.9.camel@localhost.localdomain>
  0 siblings, 2 replies; 23+ messages in thread
From: Jeff Garzik @ 2006-09-12 14:43 UTC (permalink / raw)
  To: Roland Dreier; +Cc: Zang Roy-r61911, Andrew Morton, netdev, linux-kernel

Roland Dreier wrote:
>  > +struct tsi108_prv_data {
>  > +	volatile u32 regs;	/* Base of normal regs */
>  > +	volatile u32 phyregs;	/* Base of register bank used for PHY access */
> 
> Why volatile?  This looks really wrong here.

Indeed.


>  > +	data->regs = (u32)ioremap(einfo->regs, 0x400);	/*FIX ME */
>  > +	data->phyregs = (u32)ioremap(einfo->phyregs, 0x400); 	/*FIX ME */
> 
> What needs to be fixed here?  And why are you casting the result of
> ioremap to u32?  Shouldn't you keep the normal return value?

Oh, that's very, very wrong.

	Jeff




^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-12 14:43       ` Jeff Garzik
@ 2006-09-14  2:51         ` Zang Roy-r61911
       [not found]         ` <1158749825.7973.9.camel@localhost.localdomain>
  1 sibling, 0 replies; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-14  2:51 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: Roland Dreier, Andrew Morton, netdev, linux-kernel

On Tue, 2006-09-12 at 22:43, Jeff Garzik wrote:
> Roland Dreier wrote:
> >  > +struct tsi108_prv_data {
> >  > +  volatile u32 regs;      /* Base of normal regs */
> >  > +  volatile u32 phyregs;   /* Base of register bank used for PHY
> access */
> > 
> > Why volatile?  This looks really wrong here.
> 
> Indeed.
I will remove it.
> 
> 
> >  > +  data->regs = (u32)ioremap(einfo->regs, 0x400);  /*FIX ME */
> >  > +  data->phyregs = (u32)ioremap(einfo->phyregs, 0x400);    /*FIX
> ME */
> > 
> > What needs to be fixed here?  And why are you casting the result of
> > ioremap to u32?  Shouldn't you keep the normal return value?
> 
> Oh, that's very, very wrong.
I will find method to avoid this :-).
Roy


^ permalink raw reply	[flat|nested] 23+ messages in thread

* RE: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-12 10:07     ` Arjan van de Ven
@ 2006-09-19  7:39       ` Zang Roy-r61911
  2006-09-20  8:45         ` Arjan van de Ven
  0 siblings, 1 reply; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-19  7:39 UTC (permalink / raw)
  To: Arjan van de Ven; +Cc: Andrew Morton, jgarzik, netdev, linux-kernel

> 
> I have some review comments about your driver; please 
> consider them for
> fixing....
> 
Thanks.
> 
> > +		spin_unlock_irq(&phy_lock);
> > +		msleep(10);
> > +		spin_lock_irq(&phy_lock);
> > +	}
> 
> hmm some places take phy_lock with disabling interrupts, while others
> don't. I sort of fear "the others" may be buggy.... are you sure those
> are ok?
Could you interpret your comments in detail?
Roy

^ permalink raw reply	[flat|nested] 23+ messages in thread

* RE: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-19  7:39       ` Zang Roy-r61911
@ 2006-09-20  8:45         ` Arjan van de Ven
  0 siblings, 0 replies; 23+ messages in thread
From: Arjan van de Ven @ 2006-09-20  8:45 UTC (permalink / raw)
  To: Zang Roy-r61911; +Cc: Andrew Morton, jgarzik, netdev, linux-kernel

On Tue, 2006-09-19 at 15:39 +0800, Zang Roy-r61911 wrote:

> > 
> > > +		spin_unlock_irq(&phy_lock);
> > > +		msleep(10);
> > > +		spin_lock_irq(&phy_lock);
> > > +	}
> > 
> > hmm some places take phy_lock with disabling interrupts, while others
> > don't. I sort of fear "the others" may be buggy.... are you sure those
> > are ok?
> Could you interpret your comments in detail?
> Roy

Hi,

sorry for being unclear/too short in the review.

The phy_lock lock is sometimes taken as spin_lock() and sometimes as
spin_lock_irq(). It looks likes it can be used in interrupt context, in
which case the spin_lock_irq() version is correct and the places where
spin_lock() is used would be a deadlock bug (just think what happens if
the interrupt happens while spin_lock(&phy_lock) is helt, and the
spinlock then again tries to take the lock!)

If there is no way this lock is used in interrupt context, then the
spin_lock_irq() version is doing something which is not needed and also
a bit expensive; so could be optimized.

But my impression is that the _irq() is needed. Also, please consider
switching from spin_lock_irq() to spin_lock_irqsave() version instead;
spin_unlock_irq() has some side effects (interrupts get enabled
unconditionally) so it is generally safer to use
spin_lock_irqsave()/spin_unlock_irqrestore() API.

If you have more questions please do not hesitate to ask!

Greetings,
   Arjan van de Ven 
-- 
if you want to mail me at work (you don't), use arjan (at) linux.intel.com


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [patch 0/3 v2] Add tsi108 On chip Ethernet device driver support
  2006-09-12  8:55   ` [patch 0/3] Add tsi108 On chip Ethernet device driver support Zang Roy-r61911
@ 2006-09-21  4:04     ` Zang Roy-r61911
  0 siblings, 0 replies; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-21  4:04 UTC (permalink / raw)
  To: jgarzik; +Cc: Andrew Morton, linux-kernel, netdev

The Tundra Semiconductor Corporation (Tundra) Tsi108/9 is a host bridge
for PowerPC processors that offers numerous system interconnect options
for embedded application designers. The Tsi108/9 can interconnect 60x
or MPX processors to PCI/X peripherals, DDR2-400 memory, Gigabit
Ethernet, and Flash.

Tsi108/109 is used on powerpc/mpc7448hpc2 platform.

The following serial patches provide Tsi108/9 on chip Ethernet chip
support.

1/3 : Config and Makefile modification.
2/3 : Header file
3/3 : C body file

This series of patches fixes the issues raised in the feedback on the
previous patches.
Feedback is welcomed.
Roy 


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [patch 1/3 v2]  Add tsi108 On Chip Ethernet device driver support
  2006-09-12  8:55   ` [patch 1/3] Add tsi108 on Chip " Zang Roy-r61911
@ 2006-09-21  4:04     ` Zang Roy-r61911
  0 siblings, 0 replies; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-21  4:04 UTC (permalink / raw)
  To: jgarzik; +Cc: netdev, linux-kernel, Andrew Morton

The Tundra Semiconductor Corporation (Tundra) Tsi108/9 is a host bridge
for PowerPC processors that offers numerous system interconnect options
for embedded application designers. The Tsi108/9 can interconnect 60x
or MPX processors to PCI/X peripherals, DDR2-400 memory, Gigabit
Ethernet, and Flash.
Tsi108/109 is used on powerpc/mpc7448hpc2 platform.
The following patch provides Tsi108/9 on chip Ethernet chip driver
config and Makefile.


Signed-off-by: Alexandre Bounine <alexandreb@tundra.com>
Signed-off-by: Roy Zang <tie-fei.zang@freescale.com> 


---
 drivers/net/Kconfig  |    8 ++++++++
 drivers/net/Makefile |    1 +
 2 files changed, 9 insertions(+), 0 deletions(-)

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a2bd811..eb17060 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2221,6 +2221,14 @@ config SPIDER_NET
 	  This driver supports the Gigabit Ethernet chips present on the
 	  Cell Processor-Based Blades from IBM.
 
+config TSI108_ETH
+	   tristate "Tundra TSI108 gigabit Ethernet support"
+	   depends on TSI108_BRIDGE
+	   help
+	     This driver supports Tundra TSI108 gigabit Ethernet ports.
+	     To compile this driver as a module, choose M here: the module
+	     will be called tsi108_eth.
+
 config GIANFAR
 	tristate "Gianfar Ethernet"
 	depends on 85xx || 83xx || PPC_86xx
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8427bf9..da199e7 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -112,6 +112,7 @@ obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
+obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 
 obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
-- 
1.4.0


^ permalink raw reply	[flat|nested] 23+ messages in thread

* [patch 3/3 v2] Add tsi108 On Chip Ethernet device driver support
  2006-09-12  8:55   ` [patch 3/3] " Zang Roy-r61911
  2006-09-12 10:07     ` Arjan van de Ven
  2006-09-12 14:33     ` Roland Dreier
@ 2006-09-21  4:05     ` Zang Roy-r61911
  2 siblings, 0 replies; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-21  4:05 UTC (permalink / raw)
  To: jgarzik; +Cc: netdev, linux-kernel, Andrew Morton

The Tundra Semiconductor Corporation (Tundra) Tsi108/9 is a host bridge
for PowerPC processors that offers numerous system interconnect options
for embedded application designers. The Tsi108/9 can interconnect 60x
or MPX processors to PCI/X peripherals, DDR2-400 memory, Gigabit
Ethernet, and Flash.
Tsi108/109 is used on powerpc/mpc7448hpc2 platform.

The following patch provides Tsi108/9 on chip Ethernet chip driver
support.

Signed-off-by: Alexandre Bounine <alexandreb@tundra.com>
Signed-off-by: Roy Zang <tie-fei.zang@freescale.com> 


---
 drivers/net/tsi108_eth.c | 1700 ++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 1700 insertions(+), 0 deletions(-)

diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
new file mode 100644
index 0000000..5714f78
--- /dev/null
+++ b/drivers/net/tsi108_eth.c
@@ -0,0 +1,1700 @@
+/*******************************************************************************
+  
+  Copyright(c) 2006 Tundra Semiconductor Corporation.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+*******************************************************************************/
+
+/* This driver is based on the driver code originally developed	
+ * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
+ * scott.wood@timesys.com  * Copyright (C) 2003 TimeSys Corporation
+ *
+ * Currently changes from original version are:
+ * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
+ * - modifications to handle two ports independently and support for
+ *   additional PHY devices (alexandre.bounine@tundra.com)
+ * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/tsi108.h>
+
+#include "tsi108_eth.h"
+
+#define MII_READ_DELAY 10000	/* max link wait time in msec */
+
+#define TSI108_RXRING_LEN     256
+
+/* NOTE: The driver currently does not support receiving packets
+ * larger than the buffer size, so don't decrease this (unless you
+ * want to add such support).
+ */
+#define TSI108_RXBUF_SIZE     1536
+
+#define TSI108_TXRING_LEN     256
+
+#define TSI108_TX_INT_FREQ    64
+
+/* Check the phy status every half a second. */
+#define CHECK_PHY_INTERVAL (HZ/2)
+
+static int tsi108_init_one(struct platform_device *pdev);
+static int tsi108_ether_remove(struct platform_device *pdev);
+
+struct tsi108_prv_data {
+	void  __iomem *regs;	/* Base of normal regs */
+	void  __iomem *phyregs;	/* Base of register bank used for PHY access */
+	
+	int phy;		/* Index of PHY for this interface */
+	int irq_num;
+	int id;
+
+	struct timer_list timer;/* Timer that triggers the check phy function */
+	int rxtail;		/* Next entry in rxring to read */
+	int rxhead;		/* Next entry in rxring to give a new buffer */
+	int rxfree;		/* Number of free, allocated RX buffers */
+
+	int rxpending;		/* Non-zero if there are still descriptors
+				 * to be processed from a previous descriptor
+				 * interrupt condition that has been cleared */
+
+	int txtail;		/* Next TX descriptor to check status on */
+	int txhead;		/* Next TX descriptor to use */
+
+	/* Number of free TX descriptors.  This could be calculated from
+	 * rxhead and rxtail if one descriptor were left unused to disambiguate
+	 * full and empty conditions, but it's simpler to just keep track
+	 * explicitly. */
+
+	int txfree;
+
+	int phy_ok;		/* The PHY is currently powered on. */
+
+	/* PHY status (duplex is 1 for half, 2 for full,
+	 * so that the default 0 indicates that neither has
+	 * yet been configured). */
+
+	int link_up;
+	int speed;
+	int duplex;
+
+	tx_desc *txring;
+	rx_desc *rxring;
+	struct sk_buff *txskbs[TSI108_TXRING_LEN];
+	struct sk_buff *rxskbs[TSI108_RXRING_LEN];
+
+	dma_addr_t txdma, rxdma;
+
+	/* txlock nests in misclock and phy_lock */
+
+	spinlock_t txlock, misclock;
+
+	/* stats is used to hold the upper bits of each hardware counter,
+	 * and tmpstats is used to hold the full values for returning
+	 * to the caller of get_stats().  They must be separate in case
+	 * an overflow interrupt occurs before the stats are consumed.
+	 */
+
+	struct net_device_stats stats;
+	struct net_device_stats tmpstats;
+
+	/* These stats are kept separate in hardware, thus require individual
+	 * fields for handling carry.  They are combined in get_stats.
+	 */
+
+	unsigned long rx_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_short_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_long_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_underruns;	/* Add to rx_length_errors */
+	unsigned long rx_overruns;	/* Add to rx_length_errors */
+
+	unsigned long tx_coll_abort;	/* Add to tx_aborted_errors/collisions */
+	unsigned long tx_pause_drop;	/* Add to tx_aborted_errors */
+
+	unsigned long mc_hash[16];
+};
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+	.probe = tsi108_init_one,
+	.remove = tsi108_ether_remove,
+	.driver	= {
+		.name = "tsi-ethernet",
+	},
+};
+
+static void tsi108_timed_checker(unsigned long dev_ptr);
+
+static void dump_eth_one(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	printk("Dumping %s...\n", dev->name);
+	printk("intstat %x intmask %x phy_ok %d"
+	       " link %d speed %d duplex %d\n",
+	       TSI108_ETH_READ_REG(TSI108_EC_INTSTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_INTMASK), data->phy_ok,
+	       data->link_up, data->speed, data->duplex);
+
+	printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
+	       data->txhead, data->txtail, data->txfree,
+	       TSI108_ETH_READ_REG(TSI108_EC_TXSTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_TXESTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_TXERR));
+
+	printk("RX: head %d, tail %d, free %d, stat %x,"
+	       " estat %x, err %x, pending %d\n\n",
+	       data->rxhead, data->rxtail, data->rxfree,
+	       TSI108_ETH_READ_REG(TSI108_EC_RXSTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_RXESTAT),
+	       TSI108_ETH_READ_REG(TSI108_EC_RXERR), data->rxpending);
+}
+
+/* Synchronization is needed between the thread and up/down events.
+ * Note that the PHY is accessed through the same registers for both
+ * interfaces, so this can't be made interface-specific.
+ */
+
+static DEFINE_SPINLOCK(phy_lock);
+
+static u16 tsi108_read_mii(struct tsi108_prv_data *data, int reg, int *status)
+{
+	int i;
+	u16 ret;
+
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_CMD, 0);
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
+	for (i = 0; i < 100; i++) {
+		if (!(TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_IND) &
+		      (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
+			break;
+		udelay(10);
+	}
+
+	if (i == 100) {
+		if (status)
+			*status = -EBUSY;
+
+		ret = 0xffff;
+	} else {
+		if (status)
+			*status = 0;
+
+		ret = TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_DATAIN);
+	}
+
+	return ret;
+}
+
+static void tsi108_write_mii(struct tsi108_prv_data *data, 
+				int reg, u16 val)
+{
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_DATAOUT, val);
+	while (TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_IND) &
+	       TSI108_MAC_MII_IND_BUSY)
+		cpu_relax();
+}
+
+static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
+					int reg, u16 val)
+{
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_ADDR,
+			     (0x1e << TSI108_MAC_MII_ADDR_PHY)
+			     | (reg << TSI108_MAC_MII_ADDR_REG));
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_DATAOUT, val);
+	while (TSI108_ETH_READ_REG(TSI108_MAC_MII_IND) &
+	       TSI108_MAC_MII_IND_BUSY)
+		cpu_relax();
+}
+
+static void tsi108_check_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u16 sumstat;
+	u32 mac_cfg2_reg, portctrl_reg;
+	u32 fdx_flag = 0, reg_update = 0;
+	unsigned long flags;
+
+	/* Do a dummy read, as for some reason the first read
+	 * after a link becomes up returns link down, even if
+	 * it's been a while since the link came up.
+	 */
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	if (!data->phy_ok)
+		goto out;
+
+	tsi108_read_mii(data, PHY_STAT, NULL);
+
+	if (!(tsi108_read_mii(data, PHY_STAT, NULL) & PHY_STAT_LINKUP)) {
+		if (data->link_up == 1) {
+			netif_stop_queue(dev);
+			data->link_up = 0;
+			printk(KERN_NOTICE "%s : link is down\n", dev->name);
+			netif_carrier_off(dev);
+		}
+
+		goto out;
+	}
+
+	mac_cfg2_reg = TSI108_ETH_READ_REG(TSI108_MAC_CFG2);
+	portctrl_reg = TSI108_ETH_READ_REG(TSI108_EC_PORTCTRL);
+
+	sumstat = tsi108_read_mii(data, PHY_SUM_STAT, NULL);
+
+	switch (sumstat & PHY_SUM_STAT_SPEED_MASK) {
+	case PHY_SUM_STAT_1000T_FD:
+		fdx_flag++;
+	case PHY_SUM_STAT_1000T_HD:
+		if (data->speed != 1000) {
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+			mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
+			portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
+			data->speed = 1000;
+			reg_update++;
+		}
+		break;
+	case PHY_SUM_STAT_100TX_FD:
+		fdx_flag++;
+	case PHY_SUM_STAT_100TX_HD:
+		if (data->speed != 100) {
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+			mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+			portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+			data->speed = 100;
+			reg_update++;
+		}
+		break;
+
+	case PHY_SUM_STAT_10T_FD:
+		fdx_flag++;
+	case PHY_SUM_STAT_10T_HD:
+		if (data->speed != 10) {
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+			mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+			portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+			data->speed = 10;
+			reg_update++;
+		}
+		break;
+
+	default:
+		if (net_ratelimit())
+			printk(KERN_ERR "PHY reported invalid speed,"
+			       KERN_ERR " summary status %x\n",
+			       sumstat);
+		goto out;
+	}
+
+	if (fdx_flag || (sumstat & PHY_SUM_STAT_FULLDUPLEX)) {
+		if (data->duplex != 2) {
+			mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
+			portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
+			data->duplex = 2;
+			reg_update++;
+		}
+	} else {
+		if (data->duplex != 1) {
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
+			portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
+			data->duplex = 1;
+			reg_update++;
+		}
+	}
+
+	if (reg_update) {
+		TSI108_ETH_WRITE_REG(TSI108_MAC_CFG2, mac_cfg2_reg);
+		TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, portctrl_reg);
+		
+	}
+
+	if (data->link_up == 0) {
+		/* The manual says it can take 3-4 usecs for the speed change
+		 * to take effect.
+		 */
+		udelay(5);
+
+		spin_lock(&data->txlock);
+		if (netif_queue_stopped(dev)
+		    && is_valid_ether_addr(dev->dev_addr) && data->txfree)
+			netif_wake_queue(dev);
+
+		data->link_up = 1;
+		spin_unlock(&data->txlock);
+		printk("%s : link is up: %dMb %s-duplex\n",
+		       dev->name, data->speed,
+		       (data->duplex == 2) ? "full" : "half");
+		netif_carrier_on(dev);
+	}
+
+      out:
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+static inline void
+tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+		      unsigned long *upper)
+{
+	if (carry & carry_bit)
+		*upper += carry_shift;
+}
+
+static void tsi108_stat_carry(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 carry1, carry2;
+
+	spin_lock_irq(&data->misclock);
+
+	carry1 = TSI108_ETH_READ_REG(TSI108_STAT_CARRY1);
+	carry2 = TSI108_ETH_READ_REG(TSI108_STAT_CARRY2);
+
+	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRY1, carry1);
+	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRY2, carry2);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
+			      TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
+			      TSI108_STAT_RXPKTS_CARRY,
+			      &data->stats.rx_packets);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
+			      TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
+			      TSI108_STAT_RXMCAST_CARRY,
+			      &data->stats.multicast);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
+			      TSI108_STAT_RXALIGN_CARRY,
+			      &data->stats.rx_frame_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
+			      TSI108_STAT_RXLENGTH_CARRY,
+			      &data->stats.rx_length_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
+			      TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
+			      TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
+			      TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
+			      TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
+			      TSI108_STAT_RXDROP_CARRY,
+			      &data->stats.rx_missed_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
+			      TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
+			      TSI108_STAT_TXPKTS_CARRY,
+			      &data->stats.tx_packets);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
+			      TSI108_STAT_TXEXDEF_CARRY,
+			      &data->stats.tx_aborted_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
+			      TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
+			      TSI108_STAT_TXTCOL_CARRY,
+			      &data->stats.collisions);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
+			      TSI108_STAT_TXPAUSEDROP_CARRY,
+			      &data->tx_pause_drop);
+
+	spin_unlock_irq(&data->misclock);
+}
+
+/* Read a stat counter atomically with respect to carries.
+ * data->misclock must be held.
+ */
+static inline unsigned long
+tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
+		 int carry_shift, unsigned long *upper)
+{
+	int carryreg;
+	unsigned long val;
+
+	if (reg < 0xb0)
+		carryreg = TSI108_STAT_CARRY1;
+	else
+		carryreg = TSI108_STAT_CARRY2;
+
+      again:
+	val = TSI108_ETH_READ_REG(reg) | *upper;
+
+	/* Check to see if it overflowed, but the interrupt hasn't
+	 * been serviced yet.  If so, handle the carry here, and
+	 * try again.
+	 */
+
+	if (unlikely(TSI108_ETH_READ_REG(carryreg) & carry_bit)) {
+		*upper += carry_shift;
+		TSI108_ETH_WRITE_REG(carryreg, carry_bit);
+		goto again;
+	}
+
+	return val;
+}
+
+static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
+{
+	unsigned long excol;
+
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	spin_lock_irq(&data->misclock);
+
+	data->tmpstats.rx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_RXPKTS,
+			     TSI108_STAT_CARRY1_RXPKTS,
+			     TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
+
+	data->tmpstats.tx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_TXPKTS,
+			     TSI108_STAT_CARRY2_TXPKTS,
+			     TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
+
+	data->tmpstats.rx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_RXBYTES,
+			     TSI108_STAT_CARRY1_RXBYTES,
+			     TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	data->tmpstats.tx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_TXBYTES,
+			     TSI108_STAT_CARRY2_TXBYTES,
+			     TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	data->tmpstats.multicast =
+	    tsi108_read_stat(data, TSI108_STAT_RXMCAST,
+			     TSI108_STAT_CARRY1_RXMCAST,
+			     TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
+
+	excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
+				 TSI108_STAT_CARRY2_TXEXCOL,
+				 TSI108_STAT_TXEXCOL_CARRY,
+				 &data->tx_coll_abort);
+
+	data->tmpstats.collisions =
+	    tsi108_read_stat(data, TSI108_STAT_TXTCOL,
+			     TSI108_STAT_CARRY2_TXTCOL,
+			     TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
+
+	data->tmpstats.collisions += excol;
+
+	data->tmpstats.rx_length_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
+			     TSI108_STAT_CARRY1_RXLENGTH,
+			     TSI108_STAT_RXLENGTH_CARRY,
+			     &data->stats.rx_length_errors);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXRUNT,
+			     TSI108_STAT_CARRY1_RXRUNT,
+			     TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
+			     TSI108_STAT_CARRY1_RXJUMBO,
+			     TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	data->tmpstats.rx_frame_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXALIGN,
+			     TSI108_STAT_CARRY1_RXALIGN,
+			     TSI108_STAT_RXALIGN_CARRY,
+			     &data->stats.rx_frame_errors);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFCS,
+			     TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
+			     &data->rx_fcs);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFRAG,
+			     TSI108_STAT_CARRY1_RXFRAG,
+			     TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	data->tmpstats.rx_missed_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXDROP,
+			     TSI108_STAT_CARRY1_RXDROP,
+			     TSI108_STAT_RXDROP_CARRY,
+			     &data->stats.rx_missed_errors);
+
+	/* These three are maintained by software. */
+	data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
+	data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
+
+	data->tmpstats.tx_aborted_errors =
+	    tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
+			     TSI108_STAT_CARRY2_TXEXDEF,
+			     TSI108_STAT_TXEXDEF_CARRY,
+			     &data->stats.tx_aborted_errors);
+
+	data->tmpstats.tx_aborted_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
+			     TSI108_STAT_CARRY2_TXPAUSE,
+			     TSI108_STAT_TXPAUSEDROP_CARRY,
+			     &data->tx_pause_drop);
+
+	data->tmpstats.tx_aborted_errors += excol;
+
+	data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
+	data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
+	    data->tmpstats.rx_crc_errors +
+	    data->tmpstats.rx_frame_errors +
+	    data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
+
+	spin_unlock_irq(&data->misclock);
+	return &data->tmpstats;
+}
+
+static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
+{
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_PTRHIGH,
+			     TSI108_EC_RXQ_PTRHIGH_VALID);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
+			     | TSI108_EC_RXCTRL_QUEUE0);
+}
+
+static void tsi108_restart_tx(struct tsi108_prv_data * data)
+{
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_PTRHIGH,
+			     TSI108_EC_TXQ_PTRHIGH_VALID);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
+			     TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
+}
+
+/* txlock must be held by caller, with IRQs disabled, and
+ * with permission to re-enable them when the lock is dropped.
+ */
+static void tsi108_check_for_completed_tx(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int tx;
+	struct sk_buff *skb;
+	int release = 0;
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		tx = data->txtail;
+
+		if (data->txring[tx].misc & TSI108_TX_OWN)
+			break;
+
+		skb = data->txskbs[tx];
+
+		if (!(data->txring[tx].misc & TSI108_TX_OK))
+			printk("%s: bad tx packet, misc %x\n",
+			       dev->name, data->txring[tx].misc);
+
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+
+		if (data->txring[tx].misc & TSI108_TX_EOF) {
+			dev_kfree_skb_any(skb);
+			release++;
+		}
+	}
+
+	if (release) {
+		if (netif_queue_stopped(dev)
+		    && is_valid_ether_addr(dev->dev_addr) && data->link_up)
+			netif_wake_queue(dev);
+	}
+}
+
+static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int frags = skb_shinfo(skb)->nr_frags + 1;
+	int i;
+
+	if (!data->phy_ok && net_ratelimit())
+		printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
+
+	if (!data->link_up) {
+		printk(KERN_ERR "%s: Transmit while link is down!\n",
+		       dev->name);
+		netif_stop_queue(dev);
+		return 1;
+	}
+
+	if (data->txfree < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+
+		if (net_ratelimit())
+			printk(KERN_ERR "%s: Transmit with full tx ring!\n",
+			       dev->name);
+		return 1;
+	}
+
+	if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+	}
+
+	spin_lock_irq(&data->txlock);
+
+	for (i = 0; i < frags; i++) {
+		int misc = 0;
+		int tx = data->txhead;
+
+		/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
+		 * the interrupt bit.  TX descriptor-complete interrupts are
+		 * enabled when the queue fills up, and masked when there is
+		 * still free space.  This way, when saturating the outbound
+		 * link, the tx interrupts are kept to a reasonable level. 
+		 * When the queue is not full, reclamation of skbs still occurs
+		 * as new packets are transmitted, or on a queue-empty
+		 * interrupt.
+		 */
+
+		if ((tx % TSI108_TX_INT_FREQ == 0) &&
+		    ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
+			misc = TSI108_TX_INT;
+
+		data->txskbs[tx] = skb;
+
+		if (i == 0) {
+			data->txring[tx].buf0 = virt_to_phys(skb->data);
+			data->txring[tx].len = skb->len - skb->data_len;
+			misc |= TSI108_TX_SOF;
+		} else {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+			data->txring[tx].buf0 =
+			    page_to_phys(frag->page) + frag->page_offset;
+			data->txring[tx].len = frag->size;
+		}
+
+		if (i == frags - 1)
+			misc |= TSI108_TX_EOF;
+
+#ifdef TSI108_PRINT_TX_FRAME
+		{
+			int i;
+			printk("%s: Tx Frame contents (%d)\n", dev->name,
+			       skb->len);
+			for (i = 0; i < skb->len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+#endif				/* TSI108_PRINT_TX_FRAME */
+		data->txring[tx].misc = misc | TSI108_TX_OWN;
+
+		data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
+		data->txfree--;
+	}
+
+	tsi108_check_for_completed_tx(dev);
+
+	/* This must be done after the check for completed tx descriptors,
+	 * so that the tail pointer is correct.
+	 */
+
+	if (!(TSI108_ETH_READ_REG(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
+		tsi108_restart_tx(data);
+
+	spin_unlock_irq(&data->txlock);
+	return 0;
+}
+
+static int tsi108_check_for_completed_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree && done != budget) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		if (data->rxring[rx].misc & TSI108_RX_OWN)
+			break;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		done++;
+
+		if (data->rxring[rx].misc & TSI108_RX_BAD) {
+			spin_lock_irq(&data->misclock);
+
+			if (data->rxring[rx].misc & TSI108_RX_CRC)
+				data->stats.rx_crc_errors++;
+			if (data->rxring[rx].misc & TSI108_RX_OVER)
+				data->stats.rx_fifo_errors++;
+
+			spin_unlock_irq(&data->misclock);
+
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+#ifdef TSI108_PRINT_RX_FRAME
+		{
+			int i;
+			printk("%s: Rx Frame contents (%d)\n",
+			       dev->name, data->rxring[rx].len);
+			for (i = 0; i < data->rxring[rx].len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+#endif				/* TSI108_PRINT_RX_FRAME */
+
+		skb->dev = dev;
+		skb_put(skb, data->rxring[rx].len);
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_receive_skb(skb);
+		dev->last_rx = jiffies;
+	}
+
+	return done;
+}
+
+static int tsi108_refill_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
+		int rx = data->rxhead;
+		struct sk_buff *skb;
+
+		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
+
+		data->rxring[rx].buf0 = virt_to_phys(skb->data);
+
+		/* Sometimes the hardware sets blen to zero after packet
+		 * reception, even though the manual says that it's only ever
+		 * modified by the driver.
+		 */
+
+		data->rxring[rx].blen = 1536;
+		data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
+
+		data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
+		data->rxfree++;
+		done++;
+	}
+
+	if (done != 0 && !(TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
+			   TSI108_EC_RXSTAT_QUEUE0))
+		tsi108_restart_rx(data, dev);
+
+	return done;
+}
+
+static int tsi108_poll(struct net_device *dev, int *budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI108_ETH_READ_REG(TSI108_EC_RXESTAT);
+	u32 intstat = TSI108_ETH_READ_REG(TSI108_EC_INTSTAT);
+	int total_budget = min(*budget, dev->quota);
+	int num_received = 0, num_filled = 0, budget_used;
+
+	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXESTAT, estat);
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, intstat);
+
+	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
+		num_received = tsi108_check_for_completed_rx(dev, total_budget);
+
+	/* This should normally fill no more slots than the number of
+	 * packets received in tsi108_check_for_completed_rx().  The exception
+	 * is when we previously ran out of memory for RX SKBs.  In that
+	 * case, it's helpful to obey the budget, not only so that the
+	 * CPU isn't hogged, but so that memory (which may still be low)
+	 * is not hogged by one device.
+	 *
+	 * A work unit is considered to be two SKBs to allow us to catch
+	 * up when the ring has shrunk due to out-of-memory but we're
+	 * still removing the full budget's worth of packets each time.
+	 */
+
+	if (data->rxfree < TSI108_RXRING_LEN)
+		num_filled = tsi108_refill_rx(dev, total_budget * 2);
+
+	if (intstat & TSI108_INT_RXERROR) {
+		u32 err = TSI108_ETH_READ_REG(TSI108_EC_RXERR);
+		TSI108_ETH_WRITE_REG(TSI108_EC_RXERR, err);
+
+		if (err) {
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: RX error %x\n",
+				       dev->name, err);
+
+			if (!(TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
+			      TSI108_EC_RXSTAT_QUEUE0))
+				tsi108_restart_rx(data, dev);
+		}
+	}
+
+	if (intstat & TSI108_INT_RXOVERRUN) {
+		spin_lock_irq(&data->misclock);
+		data->stats.rx_fifo_errors++;
+		spin_unlock_irq(&data->misclock);
+	}
+
+	budget_used = max(num_received, num_filled / 2);
+
+	*budget -= budget_used;
+	dev->quota -= budget_used;
+
+	if (budget_used != total_budget) {
+		data->rxpending = 0;
+		netif_rx_complete(dev);
+
+		TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
+				     TSI108_ETH_READ_REG(TSI108_EC_INTMASK)
+				     & ~(TSI108_INT_RXQUEUE0
+					 | TSI108_INT_RXTHRESH |
+					 TSI108_INT_RXOVERRUN |
+					 TSI108_INT_RXERROR |
+					 TSI108_INT_RXWAIT));
+
+		/* IRQs are level-triggered, so no need to re-check */
+		return 0;
+	} else {
+		data->rxpending = 1;
+	}
+
+	return 1;
+}
+
+/* RX interrupt handling: hand the work to the poll routine.  Masks,
+ * rather than acks, the RX interrupt sources; tsi108_poll() acks and
+ * re-enables them once the ring has been drained.
+ */
+static void tsi108_rx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A race could cause dev to already be scheduled, so it's not an
+	 * error if that happens (and interrupts shouldn't be re-masked,
+	 * because that can cause harmful races, if poll has already
+	 * unmasked them but not cleared LINK_STATE_SCHED).  
+	 *
+	 * This can happen if this code races with tsi108_poll(), which masks
+	 * the interrupts after tsi108_irq_one() read the mask, but before
+	 * netif_rx_schedule is called.  It could also happen due to calls
+	 * from tsi108_check_rxring().
+	 */
+
+	if (netif_rx_schedule_prep(dev)) {
+		/* Mask, rather than ack, the receive interrupts.  The ack
+		 * will happen in tsi108_poll().
+		 */
+
+		TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
+				     TSI108_ETH_READ_REG(TSI108_EC_INTMASK) |
+				     TSI108_INT_RXQUEUE0
+				     | TSI108_INT_RXTHRESH |
+				     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
+				     TSI108_INT_RXWAIT);
+		__netif_rx_schedule(dev);
+	} else {
+		if (!netif_running(dev)) {
+			/* This can happen if an interrupt occurs while the
+			 * interface is being brought down, as the START
+			 * bit is cleared before the stop function is called.
+			 *
+			 * In this case, the interrupts must be masked, or
+			 * they will continue indefinitely.
+			 *
+			 * There's a race here if the interface is brought down
+			 * and then up in rapid succession, as the device could
+			 * be made running after the above check and before
+			 * the masking below.  This will only happen if the IRQ
+			 * thread has a lower priority than the task bringing
+			 * up the interface.  Fixing this race would likely
+			 * require changes in generic code.
+			 */
+
+			TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
+					     TSI108_ETH_READ_REG
+					     (TSI108_EC_INTMASK) |
+					     TSI108_INT_RXQUEUE0 |
+					     TSI108_INT_RXTHRESH |
+					     TSI108_INT_RXOVERRUN |
+					     TSI108_INT_RXERROR |
+					     TSI108_INT_RXWAIT);
+		}
+	}
+}
+
+/* If the RX ring has run out of memory, try periodically
+ * to allocate some more, as otherwise poll would never
+ * get called (apart from the initial end-of-queue condition).
+ *
+ * This is called once per second (by default) from the thread.
+ */
+
+static void tsi108_check_rxring(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A poll is scheduled, as opposed to calling tsi108_refill_rx 
+	 * directly, so as to keep the receive path single-threaded
+	 * (and thus not needing a lock).
+	 */
+
+	/* Only bother when the ring has dropped below a quarter full. */
+	if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
+		tsi108_rx_int(dev);
+}
+
+/* Service a TX interrupt: ack the TX status/interrupt bits, report any
+ * queue-0 error, and reap completed descriptors under txlock.
+ */
+static void tsi108_tx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI108_ETH_READ_REG(TSI108_EC_TXESTAT);
+
+	/* Writing the value back acks exactly the bits we just read. */
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXESTAT, estat);
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
+			     TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
+	if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
+		u32 err = TSI108_ETH_READ_REG(TSI108_EC_TXERR);
+		TSI108_ETH_WRITE_REG(TSI108_EC_TXERR, err);
+
+		if (err && net_ratelimit())
+			printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
+	}
+
+	if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
+		spin_lock(&data->txlock);
+		tsi108_check_for_completed_tx(dev);
+		spin_unlock(&data->txlock);
+	}
+}
+
+/* Per-port interrupt worker: decode INTSTAT (ignoring masked sources)
+ * and dispatch to the TX/RX/statistics handlers.  Returns IRQ_NONE when
+ * this port raised nothing, so a shared line can be passed on.
+ */
+static irqreturn_t tsi108_irq_one(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 stat = TSI108_ETH_READ_REG(TSI108_EC_INTSTAT);
+
+	if (!(stat & TSI108_INT_ANY))
+		return IRQ_NONE;	/* Not our interrupt */
+
+	/* Drop sources that are currently masked off. */
+	stat &= ~TSI108_ETH_READ_REG(TSI108_EC_INTMASK);
+
+	if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
+		    TSI108_INT_TXERROR))
+		tsi108_tx_int(dev);
+	if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+		    TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
+		    TSI108_INT_RXERROR))
+		tsi108_rx_int(dev);
+
+	if (stat & TSI108_INT_SFN) {
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: SFN error\n", dev->name);
+		TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_SFN);
+	}
+
+	if (stat & TSI108_INT_STATCARRY) {
+		tsi108_stat_carry(dev);
+		TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Top-level handler registered with request_irq(); dev_id carries the
+ * net_device for this port.
+ */
+static irqreturn_t tsi108_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+
+	return tsi108_irq_one(dev);
+}
+
+/* Disable both DMA queues and busy-wait until the engines go idle.
+ * NOTE(review): both waits are unbounded, so a hardware fault would
+ * hang here forever; a bounded timeout would be safer -- TODO confirm.
+ */
+static void tsi108_stop_ethernet(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* Disable all TX and RX queues ... */
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXCTRL, 0);
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCTRL, 0);
+
+	/* ...and wait for them to become idle */
+	while (TSI108_ETH_READ_REG(TSI108_EC_TXSTAT) &
+	       TSI108_EC_TXSTAT_ACTIVE) 
+		cpu_relax();
+	while (TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
+	       TSI108_EC_RXSTAT_ACTIVE)
+		cpu_relax();
+}
+
+/* Soft-reset the MAC core, statistics block, TX/RX paths and the MII
+ * management interface.  Each block is held in reset for 100us before
+ * being released; the MII management clock is re-enabled at the end.
+ */
+static void tsi108_reset_ether(struct tsi108_prv_data * data)
+{
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL,
+			     TSI108_ETH_READ_REG(TSI108_EC_PORTCTRL) &
+			     ~TSI108_EC_PORTCTRL_STATRST);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXCFG,
+			     TSI108_ETH_READ_REG(TSI108_EC_TXCFG) &
+			     ~TSI108_EC_TXCFG_RST);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG,
+			     TSI108_ETH_READ_REG(TSI108_EC_RXCFG) &
+			     ~TSI108_EC_RXCFG_RST);
+
+	/* Reset MII management, then restore its clock select. */
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
+			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) |
+			     TSI108_MAC_MII_MGMT_RST);
+	udelay(100);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
+			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) &
+			     ~(TSI108_MAC_MII_MGMT_RST |
+			       TSI108_MAC_MII_MGMT_CLK));
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
+			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) |
+			     TSI108_MAC_MII_MGMT_CLK);
+}
+
+/* Read the MAC address out of the chip's address registers into
+ * dev->dev_addr.  If both registers are blank (all zero), program a
+ * default Tundra (00:06:d2) address, with the last octet keyed off the
+ * PHY id so the two ports differ.  Returns 0, or -EINVAL if the
+ * resulting address is invalid.
+ */
+static int tsi108_get_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1 = TSI108_ETH_READ_REG(TSI108_MAC_ADDR1);
+	u32 word2 = TSI108_ETH_READ_REG(TSI108_MAC_ADDR2);
+
+	/* Note that the octets are reversed from what the manual says,
+	 * producing an even weirder ordering...
+	 */
+	if (word2 == 0 && word1 == 0) {
+		dev->dev_addr[0] = 0x00;
+		dev->dev_addr[1] = 0x06;
+		dev->dev_addr[2] = 0xd2;
+		dev->dev_addr[3] = 0x00;
+		dev->dev_addr[4] = 0x00;
+		if (0x8 == data->phy)
+			dev->dev_addr[5] = 0x01;
+		else
+			dev->dev_addr[5] = 0x02;
+
+		word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+		word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+		    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+		TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR1, word1);
+		TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR2, word2);
+	} else {
+		dev->dev_addr[0] = (word2 >> 16) & 0xff;
+		dev->dev_addr[1] = (word2 >> 24) & 0xff;
+		dev->dev_addr[2] = (word1 >> 0) & 0xff;
+		dev->dev_addr[3] = (word1 >> 8) & 0xff;
+		dev->dev_addr[4] = (word1 >> 16) & 0xff;
+		dev->dev_addr[5] = (word1 >> 24) & 0xff;
+	}
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		/* The log level macro belongs outside the format string,
+		 * not quoted inside it.
+		 */
+		printk(KERN_ERR "word1: %08x, word2: %08x\n", word1, word2);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* set_mac_address hook.  @addr points at a struct sockaddr; the station
+ * address itself begins at offset 2 (after sa_family), which is where
+ * the copy loop below already reads from.  Validate those same bytes
+ * rather than the raw structure start, which would test sa_family.
+ */
+static int tsi108_set_mac(struct net_device *dev, void *addr)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	/* +2 is for the offset of the HW addr type */
+	const unsigned char *mac = (const unsigned char *)addr + 2;
+	u32 word1, word2;
+	int i;
+
+	if (!is_valid_ether_addr(mac))
+		return -EINVAL;
+
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = mac[i];
+
+	word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+	word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+	    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+	/* misclock protects the address registers; txlock nests inside. */
+	spin_lock_irq(&data->misclock);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR1, word1);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR2, word2);
+	spin_lock(&data->txlock);
+
+	if (netif_queue_stopped(dev) && data->txfree && data->link_up)
+		netif_wake_queue(dev);
+
+	spin_unlock(&data->txlock);
+	spin_unlock_irq(&data->misclock);
+	return 0;
+}
+
+/* Protected by dev->xmit_lock. */
+/* Program the RX filter: promiscuous passes everything; otherwise the
+ * multicast list is hashed (top 9 bits of the CRC) into a 512-bit
+ * table written through the auto-incrementing hash-data register.
+ */
+static void tsi108_set_rx_mode(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 rxcfg = TSI108_ETH_READ_REG(TSI108_EC_RXCFG);
+
+	if (dev->flags & IFF_PROMISC) {
+		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
+		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
+		goto out;
+	}
+
+	rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
+
+	if (dev->mc_count) {
+		int i;
+		struct dev_mc_list *mc = dev->mc_list;
+		rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
+
+		memset(data->mc_hash, 0, sizeof(data->mc_hash));
+
+		while (mc) {
+			u32 hash, crc;
+
+			if (mc->dmi_addrlen == 6) {
+				crc = ether_crc(6, mc->dmi_addr);
+				hash = crc >> 23;
+
+				__set_bit(hash, &data->mc_hash[0]);
+			} else {
+				printk(KERN_ERR
+				       "%s: got multicast address of length %d "
+				       "instead of 6.\n", dev->name,
+				       mc->dmi_addrlen);
+			}
+
+			mc = mc->next;
+		}
+
+		TSI108_ETH_WRITE_REG(TSI108_EC_HASHADDR,
+				     TSI108_EC_HASHADDR_AUTOINC |
+				     TSI108_EC_HASHADDR_MCAST);
+
+		for (i = 0; i < 16; i++) {
+			/* The manual says that the hardware may drop
+			 * back-to-back writes to the data register.
+			 */
+			udelay(1);
+			TSI108_ETH_WRITE_REG(TSI108_EC_HASHDATA,
+					     data->mc_hash[i]);
+		}
+	}
+
+      out:
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG, rxcfg);
+}
+
+/* Reset the PHY, start autonegotiation, and poll (up to MII_READ_DELAY
+ * iterations, dropping phy_lock around each 10ms sleep) for link-up.
+ * Sets data->link_up and data->phy_ok accordingly.
+ * NOTE(review): the reset and autoneg-start waits are unbounded and
+ * would spin forever on a faulty PHY -- TODO confirm a timeout is ok.
+ */
+static void tsi108_init_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 i = 0;
+	u16 phyVal = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	tsi108_write_mii(data, PHY_CTRL, PHY_CTRL_RESET);
+	while (tsi108_read_mii(data, PHY_CTRL, NULL) & PHY_CTRL_RESET)
+		cpu_relax();
+
+#if (TSI108_PHY_TYPE == PHY_BCM54XX)	/* Broadcom BCM54xx PHY */
+	tsi108_write_mii(data, 0x09, 0x0300);
+	tsi108_write_mii(data, 0x10, 0x1020);
+	tsi108_write_mii(data, 0x1c, 0x8c00);
+#endif
+
+	tsi108_write_mii(data,
+			 PHY_CTRL,
+			 PHY_CTRL_AUTONEG_EN | PHY_CTRL_AUTONEG_START);
+	while (tsi108_read_mii(data, PHY_CTRL, NULL) & PHY_CTRL_AUTONEG_START)
+		cpu_relax();
+
+	/* Set G/MII mode and receive clock select in TBI control #2.  The
+	 * second port won't work if this isn't done, even though we don't
+	 * use TBI mode.
+	 */
+
+	tsi108_write_tbi(data, 0x11, 0x30);
+
+	/* FIXME: It seems to take more than 2 back-to-back reads to the
+	 * PHY_STAT register before the link up status bit is set.
+	 */
+
+	data->link_up = 1;
+
+	while (!((phyVal = tsi108_read_mii(data, PHY_STAT, NULL)) &
+		 PHY_STAT_LINKUP)) {
+		if (i++ > (MII_READ_DELAY / 10)) {
+			data->link_up = 0;
+			break;
+		}
+		spin_unlock_irqrestore(&phy_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(&phy_lock, flags);
+	}
+
+	printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyVal);
+	data->phy_ok = 1;
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* Power the PHY down and mark it unusable; serialized by phy_lock. */
+static void tsi108_kill_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&phy_lock, irqflags);
+	tsi108_write_mii(data, PHY_CTRL, PHY_CTRL_POWERDOWN);
+	data->phy_ok = 0;
+	spin_unlock_irqrestore(&phy_lock, irqflags);
+}
+
+/* Bring the interface up: hook the IRQ, allocate and initialize the
+ * RX/TX descriptor rings, populate the RX ring with skbs, start the
+ * PHY and the periodic checker timer, and enable the MAC with the
+ * interesting interrupts unmasked.  Returns 0 or a negative errno.
+ */
+static int tsi108_open(struct net_device *dev)
+{
+	int i;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
+	unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
+
+	printk(KERN_DEBUG "Inside tsi108_open()!\n");
+
+	i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
+	if (i != 0) {
+		printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
+		       data->id, data->irq_num);
+		return i;
+	} else {
+		dev->irq = data->irq_num;
+		printk(KERN_NOTICE
+		       "tsi108_open : Port %d Assigned IRQ %d to %s\n",
+		       data->id, dev->irq, dev->name);
+	}
+
+	data->rxring = pci_alloc_consistent(NULL, rxring_size, &data->rxdma);
+
+	if (!data->rxring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for rxring!\n");
+		/* Don't leak the IRQ acquired above. */
+		free_irq(data->irq_num, dev);
+		return -ENOMEM;
+	} else {
+		memset(data->rxring, 0, rxring_size);
+	}
+
+	data->txring = pci_alloc_consistent(NULL, txring_size, &data->txdma);
+
+	if (!data->txring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for txring!\n");
+		/* Unwind both the IRQ and the RX ring allocation. */
+		free_irq(data->irq_num, dev);
+		pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
+		return -ENOMEM;
+	} else {
+		memset(data->txring, 0, txring_size);
+	}
+
+	/* Link the RX descriptors into a ring. */
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
+		data->rxring[i].blen = TSI108_RXBUF_SIZE;
+		data->rxring[i].vlan = 0;
+	}
+
+	data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
+
+	data->rxtail = 0;
+	data->rxhead = 0;
+
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+
+		if (!skb) {
+			/* Bah.  No memory for now, but maybe we'll get
+			 * some more later.
+			 * For now, we'll live with the smaller ring.
+			 */
+			printk(KERN_WARNING
+			       "%s: Could only allocate %d receive skb(s).\n",
+			       dev->name, i);
+			data->rxhead = i;
+			break;
+		}
+
+		data->rxskbs[i] = skb;
+		/* Align the IP header on a 4-byte boundary; matches the
+		 * extra NET_IP_ALIGN bytes allocated above.
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
+		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
+	}
+
+	data->rxfree = i;
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_PTRLOW, data->rxdma);
+
+	/* Link the TX descriptors into a ring. */
+	for (i = 0; i < TSI108_TXRING_LEN; i++) {
+		data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
+		data->txring[i].misc = 0;
+	}
+
+	data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
+	data->txtail = 0;
+	data->txhead = 0;
+	data->txfree = TSI108_TXRING_LEN;
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_PTRLOW, data->txdma);
+	tsi108_init_phy(dev);
+
+	setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+	mod_timer(&data->timer, jiffies + 1);
+
+	tsi108_restart_rx(data, dev);
+
+	/* Clear any stale interrupt state, then unmask what we service. */
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, ~0);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
+			     ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
+			       TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
+			       TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
+			       TSI108_INT_SFN | TSI108_INT_STATCARRY));
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1,
+			     TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* Bring the interface down: stop the queue and timer, quiesce the
+ * hardware, mask all interrupts, drop any pending TX/RX skbs, release
+ * the IRQ, and free both descriptor rings.
+ */
+static int tsi108_close(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	del_timer_sync(&data->timer);
+
+	printk(KERN_DEBUG "Inside tsi108_ifdown!\n");
+
+	tsi108_stop_ethernet(dev);
+	tsi108_kill_phy(dev);
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK, ~0);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
+
+	/* Check for any pending TX packets, and drop them. */
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		int tx = data->txtail;
+		struct sk_buff *skb;
+		skb = data->txskbs[tx];
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+		dev_kfree_skb(skb);
+	}
+
+	synchronize_irq(data->irq_num);
+	free_irq(data->irq_num, dev);
+
+	/* Discard the RX ring. */
+
+	while (data->rxfree) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		dev_kfree_skb(skb);
+	}
+
+	pci_free_consistent(0,
+			    TSI108_RXRING_LEN * sizeof(rx_desc),
+			    data->rxring, data->rxdma);
+	pci_free_consistent(0,
+			    TSI108_TXRING_LEN * sizeof(tx_desc),
+			    data->txring, data->txdma);
+
+	return 0;
+}
+
+/* One-time MAC/queue configuration: frame format, TX FIFO thresholds,
+ * statistics carry masks, and TX/RX queue and buffer settings.  Leaves
+ * all interrupts masked; tsi108_open() unmasks what it services.
+ */
+static void tsi108_init_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
+			     TSI108_MAC_CFG2_PADCRC);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXTHRESH,
+			     (192 << TSI108_EC_TXTHRESH_STARTFILL) |
+			     (192 << TSI108_EC_TXTHRESH_STOPFILL));
+
+	/* Enable carry interrupts only for the counters we accumulate. */
+	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRYMASK1,
+			     ~(TSI108_STAT_CARRY1_RXBYTES |
+			       TSI108_STAT_CARRY1_RXPKTS |
+			       TSI108_STAT_CARRY1_RXFCS |
+			       TSI108_STAT_CARRY1_RXMCAST |
+			       TSI108_STAT_CARRY1_RXALIGN |
+			       TSI108_STAT_CARRY1_RXLENGTH |
+			       TSI108_STAT_CARRY1_RXRUNT |
+			       TSI108_STAT_CARRY1_RXJUMBO |
+			       TSI108_STAT_CARRY1_RXFRAG |
+			       TSI108_STAT_CARRY1_RXJABBER |
+			       TSI108_STAT_CARRY1_RXDROP));
+
+	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRYMASK2,
+			     ~(TSI108_STAT_CARRY2_TXBYTES |
+			       TSI108_STAT_CARRY2_TXPKTS |
+			       TSI108_STAT_CARRY2_TXEXDEF |
+			       TSI108_STAT_CARRY2_TXEXCOL |
+			       TSI108_STAT_CARRY2_TXTCOL |
+			       TSI108_STAT_CARRY2_TXPAUSE));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
+	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG,
+			     TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
+			     TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_CFG_SFNPORT));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
+			     TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_CFG_SFNPORT));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_BUFCFG,
+			     TSI108_EC_TXQ_BUFCFG_BURST256 |
+			     TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_BUFCFG_SFNPORT));
+
+	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_BUFCFG,
+			     TSI108_EC_RXQ_BUFCFG_BURST256 |
+			     TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_BUFCFG_SFNPORT));
+
+	/* Mask everything; open() unmasks the sources it handles. */
+	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK, ~0);
+}
+
+/* MII ioctls: report the PHY id (SIOCGMIIPHY) and read a PHY register
+ * (SIOCGMIIREG) under phy_lock.  NOTE(review): SIOCSMIIREG (register
+ * write) is not implemented -- confirm whether that is intentional.
+ */
+static int tsi108_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	struct mii_ioctl_data *mii_data =
+			(struct mii_ioctl_data *)&rq->ifr_data;
+	unsigned long flags;
+	int ret;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		mii_data->phy_id = data->phy;
+		ret = 0;
+		break;
+
+	case SIOCGMIIREG:
+		spin_lock_irqsave(&phy_lock, flags);
+		mii_data->val_out =
+		    tsi108_read_mii(data, mii_data->reg_num, &ret);
+		spin_unlock_irqrestore(&phy_lock, flags);
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+/* Platform-bus probe.  Allocates the netdev, maps the MAC and PHY
+ * register windows described by the platform data, wires up the
+ * net_device operations, resets the chip, and registers the device.
+ * Returns 0 or a negative errno (unmapping/freeing on failure).
+ */
+static int
+tsi108_init_one(struct platform_device *pdev)
+{
+	struct net_device *dev = NULL;
+	struct tsi108_prv_data *data = NULL;
+	hw_info *einfo;
+	int ret;
+
+	einfo = pdev->dev.platform_data;
+
+	if (NULL == einfo) {
+		printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
+		       pdev->id);
+		return -ENODEV;
+	}
+
+	/* Create an ethernet device instance */
+
+	dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
+	if (!dev) {
+		printk("tsi108_eth: Could not allocate a device structure\n");
+		return -ENOMEM;
+	}
+
+	printk("tsi108_eth%d: probe...\n", pdev->id);
+	data = netdev_priv(dev);
+
+	pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
+			pdev->id, einfo->regs, einfo->phyregs,
+			einfo->phy, einfo->irq_num);
+
+	data->regs = ioremap(einfo->regs, 0x400);
+	data->phyregs = ioremap(einfo->phyregs, 0x400);
+	if (!data->regs || !data->phyregs) {
+		/* Without both register windows the device is unusable. */
+		printk(KERN_ERR "tsi108_eth%d: cannot map device registers\n",
+		       pdev->id);
+		ret = -ENOMEM;
+		goto err_unmap;
+	}
+	data->phy = einfo->phy;
+	data->irq_num = einfo->irq_num;
+	data->id = pdev->id;
+	dev->open = tsi108_open;
+	dev->stop = tsi108_close;
+	dev->hard_start_xmit = tsi108_send_packet;
+	dev->set_mac_address = tsi108_set_mac;
+	dev->set_multicast_list = tsi108_set_rx_mode;
+	dev->get_stats = tsi108_get_stats;
+	dev->poll = tsi108_poll;
+	dev->do_ioctl = tsi108_ioctl;
+	dev->weight = 64;  /* 64 is more suitable for GigE interface - klai */
+
+	/* Apparently, the Linux networking code won't use scatter-gather
+	 * if the hardware doesn't do checksums.  However, it's faster
+	 * to checksum in place and use SG, as (among other reasons)
+	 * the cache won't be dirtied (which then has to be flushed
+	 * before DMA).  The checksumming is done by the driver (via
+	 * a new function skb_csum_dev() in net/core/skbuff.c).
+	 */
+
+	dev->features = NETIF_F_HIGHDMA;
+	SET_MODULE_OWNER(dev);
+
+	spin_lock_init(&data->txlock);
+	spin_lock_init(&data->misclock);
+
+	/* Stash the netdev so tsi108_ether_remove() can retrieve it with
+	 * platform_get_drvdata(); without this, remove() would oops.
+	 */
+	platform_set_drvdata(pdev, dev);
+
+	tsi108_reset_ether(data);
+	tsi108_kill_phy(dev);
+
+	if (tsi108_get_mac(dev) != 0)
+		printk(KERN_ERR "%s: Invalid MAC address.  Please correct.\n",
+		       dev->name);
+
+	tsi108_init_mac(dev);
+	ret = register_netdev(dev);
+	if (ret < 0)
+		goto err_unmap;
+
+	printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
+	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+#ifdef DEBUG
+	dump_eth_one(dev);
+#endif
+
+	return 0;
+
+err_unmap:
+	/* Unmap whichever windows were successfully mapped. */
+	if (data->phyregs)
+		iounmap(data->phyregs);
+	if (data->regs)
+		iounmap(data->regs);
+	free_netdev(dev);
+	return ret;
+}
+
+/* There's no way to either get interrupts from the PHY when
+ * something changes, or to have the Tsi108 automatically communicate
+ * with the PHY to reconfigure itself.
+ *
+ * Thus, we have to do it using a timer. 
+ */
+
+/* Timer callback (armed in tsi108_open, re-armed here every
+ * CHECK_PHY_INTERVAL): poll the PHY state and top up the RX ring.
+ */
+static void tsi108_timed_checker(unsigned long dev_ptr)
+{
+	struct net_device *dev = (struct net_device *)dev_ptr;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	tsi108_check_phy(dev);
+	tsi108_check_rxring(dev);
+	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+}
+
+/* Module entry point: register the platform driver with the core. */
+static int tsi108_ether_init(void)
+{
+	int err;
+
+	err = platform_driver_register (&tsi_eth_driver);
+	if (err < 0) {
+		printk("tsi108_ether_init: error initializing ethernet "
+		       "device\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/* Platform-bus remove: unregister the netdev (retrieved via drvdata),
+ * quiesce the hardware, unmap the register windows, and free the dev.
+ */
+static int tsi108_ether_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct tsi108_prv_data *priv = netdev_priv(dev);
+	
+	unregister_netdev(dev);
+	tsi108_stop_ethernet(dev);
+	platform_set_drvdata(pdev, NULL);
+	iounmap((void __iomem *)priv->regs);
+	iounmap((void __iomem *)priv->phyregs);
+	free_netdev(dev);
+	
+	return 0;
+}
+/* Module exit point: unregister the platform driver. */
+static void tsi108_ether_exit(void)
+{
+	platform_driver_unregister(&tsi_eth_driver);
+}
+
+module_init(tsi108_ether_init);
+module_exit(tsi108_ether_exit);
+
+MODULE_AUTHOR("Tundra Semiconductor Corporation");
+MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
-- 
1.4.0



^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
       [not found]         ` <1158749825.7973.9.camel@localhost.localdomain>
@ 2006-09-21  4:26           ` Jeff Garzik
  2006-09-21  5:46             ` Zang Roy-r61911
  2006-09-21  4:46           ` Jeff Garzik
  1 sibling, 1 reply; 23+ messages in thread
From: Jeff Garzik @ 2006-09-21  4:26 UTC (permalink / raw)
  To: Zang Roy-r61911; +Cc: Roland Dreier, Andrew Morton, netdev, linux-kernel

Zang Roy-r61911 wrote:
> +#define TSI108_ETH_WRITE_REG(offset, val) \
> +	writel(le32_to_cpu(val),data->regs + (offset))
> +
> +#define TSI108_ETH_READ_REG(offset) \
> +	le32_to_cpu(readl(data->regs + (offset)))
> +
> +#define TSI108_ETH_WRITE_PHYREG(offset, val) \
> +	writel(le32_to_cpu(val), data->phyregs + (offset))
> +
> +#define TSI108_ETH_READ_PHYREG(offset) \
> +	le32_to_cpu(readl(data->phyregs + (offset)))


NAK:

1) writel() and readl() are defined to be little endian.

If your platform is different, then your platform should have its own 
foobus_writel() and foobus_readl().

2) TSI108_ETH_WRITE_REG() is just way too long.  TSI_READ(), 
TSI_WRITE(), TSI_READ_PHY() and TSI_WRITE_PHY() would be far more readable.

More in next email.


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
       [not found]         ` <1158749825.7973.9.camel@localhost.localdomain>
  2006-09-21  4:26           ` Jeff Garzik
@ 2006-09-21  4:46           ` Jeff Garzik
  2006-09-29  7:36             ` Zang Roy-r61911
                               ` (2 more replies)
  1 sibling, 3 replies; 23+ messages in thread
From: Jeff Garzik @ 2006-09-21  4:46 UTC (permalink / raw)
  To: Zang Roy-r61911; +Cc: Roland Dreier, Andrew Morton, netdev, linux-kernel

Zang Roy-r61911 wrote:
> +struct tsi108_prv_data {
> +	void  __iomem *regs;	/* Base of normal regs */
> +	void  __iomem *phyregs;	/* Base of register bank used for PHY access */
> +	
> +	int phy;		/* Index of PHY for this interface */
> +	int irq_num;
> +	int id;
> +
> +	struct timer_list timer;/* Timer that triggers the check phy function */
> +	int rxtail;		/* Next entry in rxring to read */
> +	int rxhead;		/* Next entry in rxring to give a new buffer */
> +	int rxfree;		/* Number of free, allocated RX buffers */
> +
> +	int rxpending;		/* Non-zero if there are still descriptors
> +				 * to be processed from a previous descriptor
> +				 * interrupt condition that has been cleared */
> +
> +	int txtail;		/* Next TX descriptor to check status on */
> +	int txhead;		/* Next TX descriptor to use */

most of these should be unsigned, to prevent bugs.


> +	/* Number of free TX descriptors.  This could be calculated from
> +	 * rxhead and rxtail if one descriptor were left unused to disambiguate
> +	 * full and empty conditions, but it's simpler to just keep track
> +	 * explicitly. */
> +
> +	int txfree;
> +
> +	int phy_ok;		/* The PHY is currently powered on. */
> +
> +	/* PHY status (duplex is 1 for half, 2 for full,
> +	 * so that the default 0 indicates that neither has
> +	 * yet been configured). */
> +
> +	int link_up;
> +	int speed;
> +	int duplex;
> +
> +	tx_desc *txring;
> +	rx_desc *rxring;
> +	struct sk_buff *txskbs[TSI108_TXRING_LEN];
> +	struct sk_buff *rxskbs[TSI108_RXRING_LEN];
> +
> +	dma_addr_t txdma, rxdma;
> +
> +	/* txlock nests in misclock and phy_lock */
> +
> +	spinlock_t txlock, misclock;
> +
> +	/* stats is used to hold the upper bits of each hardware counter,
> +	 * and tmpstats is used to hold the full values for returning
> +	 * to the caller of get_stats().  They must be separate in case
> +	 * an overflow interrupt occurs before the stats are consumed.
> +	 */
> +
> +	struct net_device_stats stats;
> +	struct net_device_stats tmpstats;
> +
> +	/* These stats are kept separate in hardware, thus require individual
> +	 * fields for handling carry.  They are combined in get_stats.
> +	 */
> +
> +	unsigned long rx_fcs;	/* Add to rx_frame_errors */
> +	unsigned long rx_short_fcs;	/* Add to rx_frame_errors */
> +	unsigned long rx_long_fcs;	/* Add to rx_frame_errors */
> +	unsigned long rx_underruns;	/* Add to rx_length_errors */
> +	unsigned long rx_overruns;	/* Add to rx_length_errors */
> +
> +	unsigned long tx_coll_abort;	/* Add to tx_aborted_errors/collisions */
> +	unsigned long tx_pause_drop;	/* Add to tx_aborted_errors */
> +
> +	unsigned long mc_hash[16];
> +};
> +
> +/* Structure for a device driver */
> +
> +static struct platform_driver tsi_eth_driver = {
> +	.probe = tsi108_init_one,
> +	.remove = tsi108_ether_remove,
> +	.driver	= {
> +		.name = "tsi-ethernet",
> +	},
> +};
> +
> +static void tsi108_timed_checker(unsigned long dev_ptr);
> +
> +static void dump_eth_one(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +
> +	printk("Dumping %s...\n", dev->name);
> +	printk("intstat %x intmask %x phy_ok %d"
> +	       " link %d speed %d duplex %d\n",
> +	       TSI108_ETH_READ_REG(TSI108_EC_INTSTAT),
> +	       TSI108_ETH_READ_REG(TSI108_EC_INTMASK), data->phy_ok,
> +	       data->link_up, data->speed, data->duplex);
> +
> +	printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
> +	       data->txhead, data->txtail, data->txfree,
> +	       TSI108_ETH_READ_REG(TSI108_EC_TXSTAT),
> +	       TSI108_ETH_READ_REG(TSI108_EC_TXESTAT),
> +	       TSI108_ETH_READ_REG(TSI108_EC_TXERR));
> +
> +	printk("RX: head %d, tail %d, free %d, stat %x,"
> +	       " estat %x, err %x, pending %d\n\n",
> +	       data->rxhead, data->rxtail, data->rxfree,
> +	       TSI108_ETH_READ_REG(TSI108_EC_RXSTAT),
> +	       TSI108_ETH_READ_REG(TSI108_EC_RXESTAT),
> +	       TSI108_ETH_READ_REG(TSI108_EC_RXERR), data->rxpending);
> +}
> +
> +/* Synchronization is needed between the thread and up/down events.
> + * Note that the PHY is accessed through the same registers for both
> + * interfaces, so this can't be made interface-specific.
> + */
> +
> +static DEFINE_SPINLOCK(phy_lock);

you should have a chip structure, that contains two structs (one for 
each interface/port)


> +static u16 tsi108_read_mii(struct tsi108_prv_data *data, int reg, int *status)
> +{
> +	int i;
> +	u16 ret;
> +
> +	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_ADDR,
> +				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
> +				(reg << TSI108_MAC_MII_ADDR_REG));
> +	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_CMD, 0);
> +	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
> +	for (i = 0; i < 100; i++) {
> +		if (!(TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_IND) &
> +		      (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
> +			break;
> +		udelay(10);
> +	}
> +
> +	if (i == 100) {
> +		if (status)
> +			*status = -EBUSY;
> +
> +		ret = 0xffff;
> +	} else {
> +		if (status)
> +			*status = 0;
> +
> +		ret = TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_DATAIN);
> +	}
> +
> +	return ret;
> +}
> +
> +static void tsi108_write_mii(struct tsi108_prv_data *data, 
> +				int reg, u16 val)
> +{
> +	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_ADDR,
> +				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
> +				(reg << TSI108_MAC_MII_ADDR_REG));
> +	TSI108_ETH_WRITE_PHYREG(TSI108_MAC_MII_DATAOUT, val);
> +	while (TSI108_ETH_READ_PHYREG(TSI108_MAC_MII_IND) &
> +	       TSI108_MAC_MII_IND_BUSY)
> +		cpu_relax();

NAK, potential infinite loop if there is a hardware fault or other 
highly exceptional event

it should time out after a reasonable period of time.


> +static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
> +					int reg, u16 val)
> +{
> +
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_ADDR,
> +			     (0x1e << TSI108_MAC_MII_ADDR_PHY)
> +			     | (reg << TSI108_MAC_MII_ADDR_REG));
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_DATAOUT, val);
> +	while (TSI108_ETH_READ_REG(TSI108_MAC_MII_IND) &
> +	       TSI108_MAC_MII_IND_BUSY)
> +		cpu_relax();

ditto


> +static void tsi108_check_phy(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u16 sumstat;
> +	u32 mac_cfg2_reg, portctrl_reg;
> +	u32 fdx_flag = 0, reg_update = 0;
> +
> +	/* Do a dummy read, as for some reason the first read
> +	 * after a link becomes up returns link down, even if
> +	 * it's been a while since the link came up.
> +	 */
> +
> +	spin_lock(&phy_lock);
> +
> +	if (!data->phy_ok)
> +		goto out;
> +
> +	tsi108_read_mii(data, PHY_STAT, NULL);
> +
> +	if (!(tsi108_read_mii(data, PHY_STAT, NULL) & PHY_STAT_LINKUP)) {

you should use mii.h constants rather than invent your own PHY_xxx



> +		if (data->link_up == 1) {
> +			netif_stop_queue(dev);
> +			data->link_up = 0;
> +			printk(KERN_NOTICE "%s : link is down\n", dev->name);
> +			netif_carrier_off(dev);

this looks suspiciously similar to mii_check_media() in drivers/net/mii.c


> +		goto out;
> +	}
> +
> +	mac_cfg2_reg = TSI108_ETH_READ_REG(TSI108_MAC_CFG2);
> +	portctrl_reg = TSI108_ETH_READ_REG(TSI108_EC_PORTCTRL);
> +
> +	sumstat = tsi108_read_mii(data, PHY_SUM_STAT, NULL);
> +
> +	switch (sumstat & PHY_SUM_STAT_SPEED_MASK) {
> +	case PHY_SUM_STAT_1000T_FD:
> +		fdx_flag++;
> +	case PHY_SUM_STAT_1000T_HD:
> +		if (data->speed != 1000) {
> +			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
> +			mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
> +			portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
> +			data->speed = 1000;
> +			reg_update++;
> +		}
> +		break;
> +	case PHY_SUM_STAT_100TX_FD:
> +		fdx_flag++;
> +	case PHY_SUM_STAT_100TX_HD:
> +		if (data->speed != 100) {
> +			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
> +			mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
> +			portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
> +			data->speed = 100;
> +			reg_update++;
> +		}
> +		break;
> +
> +	case PHY_SUM_STAT_10T_FD:
> +		fdx_flag++;
> +	case PHY_SUM_STAT_10T_HD:
> +		if (data->speed != 10) {
> +			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
> +			mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
> +			portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
> +			data->speed = 10;
> +			reg_update++;
> +		}
> +		break;
> +
> +	default:
> +		if (net_ratelimit())
> +			printk(KERN_ERR "PHY reported invalid speed,"
> +			       KERN_ERR " summary status %x\n",
> +			       sumstat);
> +		goto out;
> +	}
> +
> +	if (fdx_flag || (sumstat & PHY_SUM_STAT_FULLDUPLEX)) {
> +		if (data->duplex != 2) {
> +			mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
> +			portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
> +			data->duplex = 2;
> +			reg_update++;
> +		}
> +	} else {
> +		if (data->duplex != 1) {
> +			mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
> +			portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
> +			data->duplex = 1;
> +			reg_update++;
> +		}
> +	}
> +
> +	if (reg_update) {
> +		TSI108_ETH_WRITE_REG(TSI108_MAC_CFG2, mac_cfg2_reg);
> +		TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, portctrl_reg);
> +		
> +	}
> +
> +	if (data->link_up == 0) {
> +		/* The manual says it can take 3-4 usecs for the speed change
> +		 * to take effect.
> +		 */
> +		udelay(5);
> +
> +		spin_lock(&data->txlock);
> +		if (netif_queue_stopped(dev)
> +		    && is_valid_ether_addr(dev->dev_addr) && data->txfree)
> +			netif_wake_queue(dev);
> +
> +		data->link_up = 1;
> +		spin_unlock(&data->txlock);
> +		printk("%s : link is up: %dMb %s-duplex\n",
> +		       dev->name, data->speed,
> +		       (data->duplex == 2) ? "full" : "half");
> +		netif_carrier_on(dev);
> +	}
> +
> +      out:
> +	spin_unlock(&phy_lock);
> +}
> +
> +static inline void
> +tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
> +		      unsigned long *upper)
> +{
> +	if (carry & carry_bit)
> +		*upper += carry_shift;
> +}
> +
> +static void tsi108_stat_carry(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u32 carry1, carry2;
> +
> +	spin_lock_irq(&data->misclock);
> +
> +	carry1 = TSI108_ETH_READ_REG(TSI108_STAT_CARRY1);
> +	carry2 = TSI108_ETH_READ_REG(TSI108_STAT_CARRY2);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRY1, carry1);
> +	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRY2, carry2);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
> +			      TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
> +			      TSI108_STAT_RXPKTS_CARRY,
> +			      &data->stats.rx_packets);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
> +			      TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
> +			      TSI108_STAT_RXMCAST_CARRY,
> +			      &data->stats.multicast);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
> +			      TSI108_STAT_RXALIGN_CARRY,
> +			      &data->stats.rx_frame_errors);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
> +			      TSI108_STAT_RXLENGTH_CARRY,
> +			      &data->stats.rx_length_errors);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
> +			      TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
> +			      TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
> +			      TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
> +			      TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
> +
> +	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
> +			      TSI108_STAT_RXDROP_CARRY,
> +			      &data->stats.rx_missed_errors);
> +
> +	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
> +			      TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
> +
> +	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
> +			      TSI108_STAT_TXPKTS_CARRY,
> +			      &data->stats.tx_packets);
> +
> +	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
> +			      TSI108_STAT_TXEXDEF_CARRY,
> +			      &data->stats.tx_aborted_errors);
> +
> +	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
> +			      TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
> +
> +	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
> +			      TSI108_STAT_TXTCOL_CARRY,
> +			      &data->stats.collisions);
> +
> +	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
> +			      TSI108_STAT_TXPAUSEDROP_CARRY,
> +			      &data->tx_pause_drop);
> +
> +	spin_unlock_irq(&data->misclock);
> +}
> +
> +/* Read a stat counter atomically with respect to carries.
> + * data->misclock must be held.
> + */
> +static inline unsigned long
> +tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
> +		 int carry_shift, unsigned long *upper)
> +{
> +	int carryreg;
> +	unsigned long val;
> +
> +	if (reg < 0xb0)
> +		carryreg = TSI108_STAT_CARRY1;
> +	else
> +		carryreg = TSI108_STAT_CARRY2;
> +
> +      again:
> +	val = TSI108_ETH_READ_REG(reg) | *upper;
> +
> +	/* Check to see if it overflowed, but the interrupt hasn't
> +	 * been serviced yet.  If so, handle the carry here, and
> +	 * try again.
> +	 */
> +
> +	if (unlikely(TSI108_ETH_READ_REG(carryreg) & carry_bit)) {
> +		*upper += carry_shift;
> +		TSI108_ETH_WRITE_REG(carryreg, carry_bit);
> +		goto again;
> +	}
> +
> +	return val;
> +}
> +
> +static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
> +{
> +	unsigned long excol;
> +
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	spin_lock_irq(&data->misclock);
> +
> +	data->tmpstats.rx_packets =
> +	    tsi108_read_stat(data, TSI108_STAT_RXPKTS,
> +			     TSI108_STAT_CARRY1_RXPKTS,
> +			     TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
> +
> +	data->tmpstats.tx_packets =
> +	    tsi108_read_stat(data, TSI108_STAT_TXPKTS,
> +			     TSI108_STAT_CARRY2_TXPKTS,
> +			     TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
> +
> +	data->tmpstats.rx_bytes =
> +	    tsi108_read_stat(data, TSI108_STAT_RXBYTES,
> +			     TSI108_STAT_CARRY1_RXBYTES,
> +			     TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
> +
> +	data->tmpstats.tx_bytes =
> +	    tsi108_read_stat(data, TSI108_STAT_TXBYTES,
> +			     TSI108_STAT_CARRY2_TXBYTES,
> +			     TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
> +
> +	data->tmpstats.multicast =
> +	    tsi108_read_stat(data, TSI108_STAT_RXMCAST,
> +			     TSI108_STAT_CARRY1_RXMCAST,
> +			     TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
> +
> +	excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
> +				 TSI108_STAT_CARRY2_TXEXCOL,
> +				 TSI108_STAT_TXEXCOL_CARRY,
> +				 &data->tx_coll_abort);
> +
> +	data->tmpstats.collisions =
> +	    tsi108_read_stat(data, TSI108_STAT_TXTCOL,
> +			     TSI108_STAT_CARRY2_TXTCOL,
> +			     TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
> +
> +	data->tmpstats.collisions += excol;
> +
> +	data->tmpstats.rx_length_errors =
> +	    tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
> +			     TSI108_STAT_CARRY1_RXLENGTH,
> +			     TSI108_STAT_RXLENGTH_CARRY,
> +			     &data->stats.rx_length_errors);
> +
> +	data->tmpstats.rx_length_errors +=
> +	    tsi108_read_stat(data, TSI108_STAT_RXRUNT,
> +			     TSI108_STAT_CARRY1_RXRUNT,
> +			     TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
> +
> +	data->tmpstats.rx_length_errors +=
> +	    tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
> +			     TSI108_STAT_CARRY1_RXJUMBO,
> +			     TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
> +
> +	data->tmpstats.rx_frame_errors =
> +	    tsi108_read_stat(data, TSI108_STAT_RXALIGN,
> +			     TSI108_STAT_CARRY1_RXALIGN,
> +			     TSI108_STAT_RXALIGN_CARRY,
> +			     &data->stats.rx_frame_errors);
> +
> +	data->tmpstats.rx_frame_errors +=
> +	    tsi108_read_stat(data, TSI108_STAT_RXFCS,
> +			     TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
> +			     &data->rx_fcs);
> +
> +	data->tmpstats.rx_frame_errors +=
> +	    tsi108_read_stat(data, TSI108_STAT_RXFRAG,
> +			     TSI108_STAT_CARRY1_RXFRAG,
> +			     TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
> +
> +	data->tmpstats.rx_missed_errors =
> +	    tsi108_read_stat(data, TSI108_STAT_RXDROP,
> +			     TSI108_STAT_CARRY1_RXDROP,
> +			     TSI108_STAT_RXDROP_CARRY,
> +			     &data->stats.rx_missed_errors);
> +
> +	/* These three are maintained by software. */
> +	data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
> +	data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
> +
> +	data->tmpstats.tx_aborted_errors =
> +	    tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
> +			     TSI108_STAT_CARRY2_TXEXDEF,
> +			     TSI108_STAT_TXEXDEF_CARRY,
> +			     &data->stats.tx_aborted_errors);
> +
> +	data->tmpstats.tx_aborted_errors +=
> +	    tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
> +			     TSI108_STAT_CARRY2_TXPAUSE,
> +			     TSI108_STAT_TXPAUSEDROP_CARRY,
> +			     &data->tx_pause_drop);
> +
> +	data->tmpstats.tx_aborted_errors += excol;
> +
> +	data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
> +	data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
> +	    data->tmpstats.rx_crc_errors +
> +	    data->tmpstats.rx_frame_errors +
> +	    data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
> +
> +	spin_unlock_irq(&data->misclock);
> +	return &data->tmpstats;
> +}
> +
> +static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
> +{
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_PTRHIGH,
> +			     TSI108_EC_RXQ_PTRHIGH_VALID);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
> +			     | TSI108_EC_RXCTRL_QUEUE0);
> +}
> +
> +static void tsi108_restart_tx(struct tsi108_prv_data * data)
> +{
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_PTRHIGH,
> +			     TSI108_EC_TXQ_PTRHIGH_VALID);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
> +			     TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
> +}
> +
> +/* txlock must be held by caller, with IRQs disabled, and
> + * with permission to re-enable them when the lock is dropped.
> + */
> +static void tsi108_check_for_completed_tx(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	int tx;
> +	struct sk_buff *skb;
> +	int release = 0;
> +
> +	while (!data->txfree || data->txhead != data->txtail) {
> +		tx = data->txtail;
> +
> +		if (data->txring[tx].misc & TSI108_TX_OWN)
> +			break;
> +
> +		skb = data->txskbs[tx];
> +
> +		if (!(data->txring[tx].misc & TSI108_TX_OK))
> +			printk("%s: bad tx packet, misc %x\n",
> +			       dev->name, data->txring[tx].misc);
> +
> +		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
> +		data->txfree++;
> +
> +		if (data->txring[tx].misc & TSI108_TX_EOF) {
> +			dev_kfree_skb_any(skb);
> +			release++;
> +		}
> +	}
> +
> +	if (release) {
> +		if (netif_queue_stopped(dev)
> +		    && is_valid_ether_addr(dev->dev_addr) && data->link_up)
> +			netif_wake_queue(dev);

this is a screwup of mine, copied into many other net drivers.

you don't need to check netif_queue_stopped() before calling 
netif_wake_queue()


> +static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	int frags = skb_shinfo(skb)->nr_frags + 1;
> +	int i;
> +
> +	if (!data->phy_ok && net_ratelimit())
> +		printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
> +
> +	if (!data->link_up) {
> +		printk(KERN_ERR "%s: Transmit while link is down!\n",
> +		       dev->name);
> +		netif_stop_queue(dev);
> +		return 1;
> +	}

use proper NETDEV_TX_xxx return codes


> +	if (data->txfree < MAX_SKB_FRAGS + 1) {
> +		netif_stop_queue(dev);
> +
> +		if (net_ratelimit())
> +			printk(KERN_ERR "%s: Transmit with full tx ring!\n",
> +			       dev->name);
> +		return 1;
> +	}
> +
> +	if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
> +		netif_stop_queue(dev);
> +	}
> +
> +	spin_lock_irq(&data->txlock);
> +
> +	for (i = 0; i < frags; i++) {
> +		int misc = 0;
> +		int tx = data->txhead;
> +
> +		/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
> +		 * the interrupt bit.  TX descriptor-complete interrupts are
> +		 * enabled when the queue fills up, and masked when there is
> +		 * still free space.  This way, when saturating the outbound
> +		 * link, the tx interrupts are kept to a reasonable level. 
> +		 * When the queue is not full, reclamation of skbs still occurs
> +		 * as new packets are transmitted, or on a queue-empty
> +		 * interrupt.
> +		 */
> +
> +		if ((tx % TSI108_TX_INT_FREQ == 0) &&
> +		    ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
> +			misc = TSI108_TX_INT;
> +
> +		data->txskbs[tx] = skb;
> +
> +		if (i == 0) {
> +			data->txring[tx].buf0 = virt_to_phys(skb->data);
> +			data->txring[tx].len = skb->len - skb->data_len;
> +			misc |= TSI108_TX_SOF;
> +		} else {
> +			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
> +
> +			data->txring[tx].buf0 =
> +			    page_to_phys(frag->page) + frag->page_offset;
> +			data->txring[tx].len = frag->size;

use dma mapping, not virt_to_phys

(of course, on some platforms, dma mapping devolves into virt_to_phys 
under the hood)


> +		if (i == frags - 1)
> +			misc |= TSI108_TX_EOF;
> +
> +#ifdef TSI108_PRINT_TX_FRAME
> +		{
> +			int i;
> +			printk("%s: Tx Frame contents (%d)\n", dev->name,
> +			       skb->len);
> +			for (i = 0; i < skb->len; i++)
> +				printk(" %2.2x", skb->data[i]);
> +			printk(".\n");
> +		}
> +#endif				/* TSI108_PRINT_TX_FRAME */


rather than ifdef, test netif_msg_pktdata() at runtime


> +		data->txring[tx].misc = misc | TSI108_TX_OWN;
> +
> +		data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
> +		data->txfree--;
> +	}
> +
> +	tsi108_check_for_completed_tx(dev);
> +
> +	/* This must be done after the check for completed tx descriptors,
> +	 * so that the tail pointer is correct.
> +	 */
> +
> +	if (!(TSI108_ETH_READ_REG(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
> +		tsi108_restart_tx(data);
> +
> +	spin_unlock_irq(&data->txlock);
> +	return 0;
> +}
> +
> +static int tsi108_check_for_completed_rx(struct net_device *dev, int budget)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	int done = 0;
> +
> +	while (data->rxfree && done != budget) {
> +		int rx = data->rxtail;
> +		struct sk_buff *skb;
> +
> +		if (data->rxring[rx].misc & TSI108_RX_OWN)
> +			break;
> +
> +		skb = data->rxskbs[rx];
> +		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
> +		data->rxfree--;
> +		done++;
> +
> +		if (data->rxring[rx].misc & TSI108_RX_BAD) {
> +			spin_lock_irq(&data->misclock);
> +
> +			if (data->rxring[rx].misc & TSI108_RX_CRC)
> +				data->stats.rx_crc_errors++;
> +			if (data->rxring[rx].misc & TSI108_RX_OVER)
> +				data->stats.rx_fifo_errors++;
> +
> +			spin_unlock_irq(&data->misclock);
> +
> +			dev_kfree_skb_any(skb);
> +			continue;
> +		}
> +#ifdef TSI108_PRINT_RX_FRAME
> +		{
> +			int i;
> +			printk("%s: Rx Frame contents (%d)\n",
> +			       dev->name, data->rxring[rx].len);
> +			for (i = 0; i < data->rxring[rx].len; i++)
> +				printk(" %2.2x", skb->data[i]);
> +			printk(".\n");
> +		}
> +#endif				/* TSI108_PRINT_RX_FRAME */

ditto


> +		skb->dev = dev;
> +		skb_put(skb, data->rxring[rx].len);
> +		skb->protocol = eth_type_trans(skb, dev);
> +		netif_receive_skb(skb);
> +		dev->last_rx = jiffies;
> +	}
> +
> +	return done;
> +}
> +
> +static int tsi108_refill_rx(struct net_device *dev, int budget)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	int done = 0;
> +
> +	while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
> +		int rx = data->rxhead;
> +		struct sk_buff *skb;
> +
> +		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
> +		if (!skb)
> +			break;
> +
> +		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
> +
> +		data->rxring[rx].buf0 = virt_to_phys(skb->data);

use dma mapping


> +		/* Sometimes the hardware sets blen to zero after packet
> +		 * reception, even though the manual says that it's only ever
> +		 * modified by the driver.
> +		 */
> +
> +		data->rxring[rx].blen = 1536;
> +		data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
> +
> +		data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
> +		data->rxfree++;
> +		done++;
> +	}
> +
> +	if (done != 0 && !(TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
> +			   TSI108_EC_RXSTAT_QUEUE0))
> +		tsi108_restart_rx(data, dev);
> +
> +	return done;
> +}
> +
> +static int tsi108_poll(struct net_device *dev, int *budget)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u32 estat = TSI108_ETH_READ_REG(TSI108_EC_RXESTAT);
> +	u32 intstat = TSI108_ETH_READ_REG(TSI108_EC_INTSTAT);
> +	int total_budget = min(*budget, dev->quota);
> +	int num_received = 0, num_filled = 0, budget_used;
> +
> +	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
> +	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXESTAT, estat);
> +	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, intstat);
> +
> +	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
> +		num_received = tsi108_check_for_completed_rx(dev, total_budget);
> +
> +	/* This should normally fill no more slots than the number of
> +	 * packets received in tsi108_check_for_completed_rx().  The exception
> +	 * is when we previously ran out of memory for RX SKBs.  In that
> +	 * case, it's helpful to obey the budget, not only so that the
> +	 * CPU isn't hogged, but so that memory (which may still be low)
> +	 * is not hogged by one device.
> +	 *
> +	 * A work unit is considered to be two SKBs to allow us to catch
> +	 * up when the ring has shrunk due to out-of-memory but we're
> +	 * still removing the full budget's worth of packets each time.
> +	 */
> +
> +	if (data->rxfree < TSI108_RXRING_LEN)
> +		num_filled = tsi108_refill_rx(dev, total_budget * 2);
> +
> +	if (intstat & TSI108_INT_RXERROR) {
> +		u32 err = TSI108_ETH_READ_REG(TSI108_EC_RXERR);
> +		TSI108_ETH_WRITE_REG(TSI108_EC_RXERR, err);
> +
> +		if (err) {
> +			if (net_ratelimit())
> +				printk(KERN_DEBUG "%s: RX error %x\n",
> +				       dev->name, err);
> +
> +			if (!(TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
> +			      TSI108_EC_RXSTAT_QUEUE0))
> +				tsi108_restart_rx(data, dev);
> +		}
> +	}
> +
> +	if (intstat & TSI108_INT_RXOVERRUN) {
> +		spin_lock_irq(&data->misclock);
> +		data->stats.rx_fifo_errors++;
> +		spin_unlock_irq(&data->misclock);
> +	}
> +
> +	budget_used = max(num_received, num_filled / 2);
> +
> +	*budget -= budget_used;
> +	dev->quota -= budget_used;
> +
> +	if (budget_used != total_budget) {
> +		data->rxpending = 0;
> +		netif_rx_complete(dev);
> +
> +		TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
> +				     TSI108_ETH_READ_REG(TSI108_EC_INTMASK)
> +				     & ~(TSI108_INT_RXQUEUE0
> +					 | TSI108_INT_RXTHRESH |
> +					 TSI108_INT_RXOVERRUN |
> +					 TSI108_INT_RXERROR |
> +					 TSI108_INT_RXWAIT));
> +
> +		/* IRQs are level-triggered, so no need to re-check */
> +		return 0;
> +	} else {
> +		data->rxpending = 1;
> +	}
> +
> +	return 1;
> +}
> +
> +static void tsi108_rx_int(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +
> +	/* A race could cause dev to already be scheduled, so it's not an
> +	 * error if that happens (and interrupts shouldn't be re-masked,
> +	 * because that can cause harmful races, if poll has already
> +	 * unmasked them but not cleared LINK_STATE_SCHED).  
> +	 *
> +	 * This can happen if this code races with tsi108_poll(), which masks
> +	 * the interrupts after tsi108_irq_one() read the mask, but before
> +	 * netif_rx_schedule is called.  It could also happen due to calls
> +	 * from tsi108_check_rxring().
> +	 */
> +
> +	if (netif_rx_schedule_prep(dev)) {
> +		/* Mask, rather than ack, the receive interrupts.  The ack
> +		 * will happen in tsi108_poll().
> +		 */
> +
> +		TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
> +				     TSI108_ETH_READ_REG(TSI108_EC_INTMASK) |
> +				     TSI108_INT_RXQUEUE0
> +				     | TSI108_INT_RXTHRESH |
> +				     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
> +				     TSI108_INT_RXWAIT);
> +		__netif_rx_schedule(dev);
> +	} else {
> +		if (!netif_running(dev)) {
> +			/* This can happen if an interrupt occurs while the
> +			 * interface is being brought down, as the START
> +			 * bit is cleared before the stop function is called.
> +			 *
> +			 * In this case, the interrupts must be masked, or
> +			 * they will continue indefinitely.
> +			 *
> +			 * There's a race here if the interface is brought down
> +			 * and then up in rapid succession, as the device could
> +			 * be made running after the above check and before
> +			 * the masking below.  This will only happen if the IRQ
> +			 * thread has a lower priority than the task brining
> +			 * up the interface.  Fixing this race would likely
> +			 * require changes in generic code.
> +			 */
> +
> +			TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
> +					     TSI108_ETH_READ_REG
> +					     (TSI108_EC_INTMASK) |
> +					     TSI108_INT_RXQUEUE0 |
> +					     TSI108_INT_RXTHRESH |
> +					     TSI108_INT_RXOVERRUN |
> +					     TSI108_INT_RXERROR |
> +					     TSI108_INT_RXWAIT);
> +		}
> +	}
> +}
> +
> +/* If the RX ring has run out of memory, try periodically
> + * to allocate some more, as otherwise poll would never
> + * get called (apart from the initial end-of-queue condition).
> + *
> + * This is called once per second (by default) from the thread.
> + */
> +
> +static void tsi108_check_rxring(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +
> +	/* A poll is scheduled, as opposed to caling tsi108_refill_rx 
> +	 * directly, so as to keep the receive path single-threaded
> +	 * (and thus not needing a lock).
> +	 */
> +
> +	if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
> +		tsi108_rx_int(dev);
> +}
> +
> +static void tsi108_tx_int(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u32 estat = TSI108_ETH_READ_REG(TSI108_EC_TXESTAT);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXESTAT, estat);
> +	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
> +			     TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
> +	if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
> +		u32 err = TSI108_ETH_READ_REG(TSI108_EC_TXERR);
> +		TSI108_ETH_WRITE_REG(TSI108_EC_TXERR, err);
> +
> +		if (err && net_ratelimit())
> +			printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
> +	}
> +
> +	if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
> +		spin_lock(&data->txlock);
> +		tsi108_check_for_completed_tx(dev);
> +		spin_unlock(&data->txlock);

function names like tsi108_check_for_completed_tx() are just way too 
long.  tsi_complete_tx() would be better.


> +static irqreturn_t tsi108_irq_one(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u32 stat = TSI108_ETH_READ_REG(TSI108_EC_INTSTAT);
> +
> +	if (!(stat & TSI108_INT_ANY))
> +		return IRQ_NONE;	/* Not our interrupt */
> +
> +	stat &= ~TSI108_ETH_READ_REG(TSI108_EC_INTMASK);
> +
> +	if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
> +		    TSI108_INT_TXERROR))
> +		tsi108_tx_int(dev);
> +	if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
> +		    TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
> +		    TSI108_INT_RXERROR))
> +		tsi108_rx_int(dev);
> +
> +	if (stat & TSI108_INT_SFN) {
> +		if (net_ratelimit())
> +			printk(KERN_DEBUG "%s: SFN error\n", dev->name);
> +		TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_SFN);
> +	}
> +
> +	if (stat & TSI108_INT_STATCARRY) {
> +		tsi108_stat_carry(dev);
> +		TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
> +	}
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static irqreturn_t tsi108_irq(int irq, void *dev_id, struct pt_regs *regs)
> +{
> +
> +	return tsi108_irq_one(dev_id);

why create a separate function just for this one line?


> +static void tsi108_stop_ethernet(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +
> +	/* Disable all TX and RX queues ... */
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXCTRL, 0);
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXCTRL, 0);
> +
> +	/* ...and wait for them to become idle */
> +	while (TSI108_ETH_READ_REG(TSI108_EC_TXSTAT) &
> +	       TSI108_EC_TXSTAT_ACTIVE) 
> +		cpu_relax();
> +	while (TSI108_ETH_READ_REG(TSI108_EC_RXSTAT) &
> +	       TSI108_EC_RXSTAT_ACTIVE)
> +		cpu_relax();

infinite loops with no timeout


> +static void tsi108_reset_ether(struct tsi108_prv_data * data)
> +{
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
> +	udelay(100);
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
> +	udelay(100);
> +	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL,
> +			     TSI108_ETH_READ_REG(TSI108_EC_PORTCTRL) &
> +			     ~TSI108_EC_PORTCTRL_STATRST);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
> +	udelay(100);
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXCFG,
> +			     TSI108_ETH_READ_REG(TSI108_EC_TXCFG) &
> +			     ~TSI108_EC_TXCFG_RST);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
> +	udelay(100);
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG,
> +			     TSI108_ETH_READ_REG(TSI108_EC_RXCFG) &
> +			     ~TSI108_EC_RXCFG_RST);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
> +			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) |
> +			     TSI108_MAC_MII_MGMT_RST);
> +	udelay(100);
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
> +			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) &
> +			     ~(TSI108_MAC_MII_MGMT_RST |
> +			       TSI108_MAC_MII_MGMT_CLK));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_MII_MGMT_CFG,
> +			     TSI108_ETH_READ_REG(TSI108_MAC_MII_MGMT_CFG) |
> +			     TSI108_MAC_MII_MGMT_CLK);

bus write posting bugs?


> +static int tsi108_get_mac(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u32 word1 = TSI108_ETH_READ_REG(TSI108_MAC_ADDR1);
> +	u32 word2 = TSI108_ETH_READ_REG(TSI108_MAC_ADDR2);
> +
> +	/* Note that the octets are reversed from what the manual says,
> +	 * producing an even weirder ordering...
> +	 */
> +	if (word2 == 0 && word1 == 0) {
> +		dev->dev_addr[0] = 0x00;
> +		dev->dev_addr[1] = 0x06;
> +		dev->dev_addr[2] = 0xd2;
> +		dev->dev_addr[3] = 0x00;
> +		dev->dev_addr[4] = 0x00;
> +		if (0x8 == data->phy)
> +			dev->dev_addr[5] = 0x01;
> +		else
> +			dev->dev_addr[5] = 0x02;
> +
> +		word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
> +
> +		word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
> +		    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
> +
> +		TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR1, word1);
> +		TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR2, word2);
> +	} else {
> +		dev->dev_addr[0] = (word2 >> 16) & 0xff;
> +		dev->dev_addr[1] = (word2 >> 24) & 0xff;
> +		dev->dev_addr[2] = (word1 >> 0) & 0xff;
> +		dev->dev_addr[3] = (word1 >> 8) & 0xff;
> +		dev->dev_addr[4] = (word1 >> 16) & 0xff;
> +		dev->dev_addr[5] = (word1 >> 24) & 0xff;
> +	}
> +
> +	if (!is_valid_ether_addr(dev->dev_addr)) {
> +		printk("KERN_ERR: word1: %08x, word2: %08x\n", word1, word2);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int tsi108_set_mac(struct net_device *dev, void *addr)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u32 word1, word2;
> +	int i;
> +
> +	if (!is_valid_ether_addr(addr))
> +		return -EINVAL;
> +
> +	for (i = 0; i < 6; i++)
> +		/* +2 is for the offset of the HW addr type */
> +		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
> +
> +	word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
> +
> +	word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
> +	    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
> +
> +	spin_lock_irq(&data->misclock);
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR1, word1);
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_ADDR2, word2);
> +	spin_lock(&data->txlock);
> +
> +	if (netif_queue_stopped(dev) && data->txfree && data->link_up)
> +		netif_wake_queue(dev);

no need to test netif_queue_stopped()


> +	spin_unlock(&data->txlock);
> +	spin_unlock_irq(&data->misclock);
> +	return 0;
> +}
> +
> +/* Protected by dev->xmit_lock. */
> +static void tsi108_set_rx_mode(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u32 rxcfg = TSI108_ETH_READ_REG(TSI108_EC_RXCFG);
> +
> +	if (dev->flags & IFF_PROMISC) {
> +		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
> +		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
> +		goto out;
> +	}
> +
> +	rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
> +
> +	if (dev->mc_count) {

need to test for ALLMULTI


> +		int i;
> +		struct dev_mc_list *mc = dev->mc_list;
> +		rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
> +
> +		memset(data->mc_hash, 0, sizeof(data->mc_hash));
> +
> +		while (mc) {
> +			u32 hash, crc;
> +
> +			if (mc->dmi_addrlen == 6) {
> +				crc = ether_crc(6, mc->dmi_addr);
> +				hash = crc >> 23;
> +
> +				__set_bit(hash, &data->mc_hash[0]);
> +			} else {
> +				printk(KERN_ERR
> +				       "%s: got multicast address of length %d "
> +				       "instead of 6.\n", dev->name,
> +				       mc->dmi_addrlen);
> +			}
> +
> +			mc = mc->next;
> +		}
> +
> +		TSI108_ETH_WRITE_REG(TSI108_EC_HASHADDR,
> +				     TSI108_EC_HASHADDR_AUTOINC |
> +				     TSI108_EC_HASHADDR_MCAST);
> +
> +		for (i = 0; i < 16; i++) {
> +			/* The manual says that the hardware may drop
> +			 * back-to-back writes to the data register.
> +			 */
> +			udelay(1);
> +			TSI108_ETH_WRITE_REG(TSI108_EC_HASHDATA,
> +					     data->mc_hash[i]);
> +		}
> +	}
> +
> +      out:
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG, rxcfg);
> +}
> +
> +static void tsi108_init_phy(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	u32 i = 0;
> +	u16 phyVal = 0;
> +
> +	spin_lock_irq(&phy_lock);
> +
> +	tsi108_write_mii(data, PHY_CTRL, PHY_CTRL_RESET);
> +	while (tsi108_read_mii(data, PHY_CTRL, NULL) & PHY_CTRL_RESET)
> +		cpu_relax();

infinite loop with no timeout

> +#if (TSI108_PHY_TYPE == PHY_BCM54XX)	/* Broadcom BCM54xx PHY */
> +	tsi108_write_mii(data, 0x09, 0x0300);
> +	tsi108_write_mii(data, 0x10, 0x1020);
> +	tsi108_write_mii(data, 0x1c, 0x8c00);
> +#endif
> +
> +	tsi108_write_mii(data,
> +			 PHY_CTRL,
> +			 PHY_CTRL_AUTONEG_EN | PHY_CTRL_AUTONEG_START);
> +	while (tsi108_read_mii(data, PHY_CTRL, NULL) & PHY_CTRL_AUTONEG_START)
> +		cpu_relax();

ditto


> +	/* Set G/MII mode and receive clock select in TBI control #2.  The
> +	 * second port won't work if this isn't done, even though we don't
> +	 * use TBI mode.
> +	 */
> +
> +	tsi108_write_tbi(data, 0x11, 0x30);
> +
> +	/* FIXME: It seems to take more than 2 back-to-back reads to the
> +	 * PHY_STAT register before the link up status bit is set.
> +	 */
> +
> +	data->link_up = 1;
> +
> +	while (!((phyVal = tsi108_read_mii(data, PHY_STAT, NULL)) &
> +		 PHY_STAT_LINKUP)) {
> +		if (i++ > (MII_READ_DELAY / 10)) {
> +			data->link_up = 0;
> +			break;
> +		}
> +		spin_unlock_irq(&phy_lock);
> +		msleep(10);
> +		spin_lock_irq(&phy_lock);
> +	}
> +
> +	printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyVal);
> +	data->phy_ok = 1;
> +	spin_unlock_irq(&phy_lock);
> +}
> +
> +static void tsi108_kill_phy(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +
> +	spin_lock_irq(&phy_lock);
> +	tsi108_write_mii(data, PHY_CTRL, PHY_CTRL_POWERDOWN);
> +	data->phy_ok = 0;
> +	spin_unlock_irq(&phy_lock);
> +}
> +
> +static int tsi108_open(struct net_device *dev)
> +{
> +	int i;
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
> +	unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
> +
> +	printk(KERN_DEBUG "Inside tsi108_open()!\n");

kill debugging printk, or use netif_msg_xxx properly


> +	i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
> +	if (i != 0) {
> +		printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
> +		       data->id, data->irq_num);
> +		return i;
> +	} else {
> +		dev->irq = data->irq_num;
> +		printk(KERN_NOTICE
> +		       "tsi108_open : Port %d Assigned IRQ %d to %s\n",
> +		       data->id, dev->irq, dev->name);
> +	}
> +
> +	data->rxring = pci_alloc_consistent(NULL, rxring_size, &data->rxdma);
> +
> +	if (!data->rxring) {
> +		printk(KERN_DEBUG
> +		       "TSI108_ETH: failed to allocate memory for rxring!\n");
> +		return -ENOMEM;
> +	} else {
> +		memset(data->rxring, 0, rxring_size);
> +	}
> +
> +	data->txring = pci_alloc_consistent(NULL, txring_size, &data->txdma);

since this is a platform driver, you should be using 
dma_alloc_coherent(), passing in the struct device*


> +	if (!data->txring) {
> +		printk(KERN_DEBUG
> +		       "TSI108_ETH: failed to allocate memory for txring!\n");
> +		pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);

ditto


> +		return -ENOMEM;
> +	} else {
> +		memset(data->txring, 0, txring_size);
> +	}
> +
> +	for (i = 0; i < TSI108_RXRING_LEN; i++) {
> +		data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
> +		data->rxring[i].blen = TSI108_RXBUF_SIZE;
> +		data->rxring[i].vlan = 0;
> +	}
> +
> +	data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
> +
> +	data->rxtail = 0;
> +	data->rxhead = 0;
> +
> +	for (i = 0; i < TSI108_RXRING_LEN; i++) {
> +		struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
> +
> +		if (!skb) {
> +			/* Bah.  No memory for now, but maybe we'll get
> +			 * some more later.
> +			 * For now, we'll live with the smaller ring.
> +			 */
> +			printk(KERN_WARNING
> +			       "%s: Could only allocate %d receive skb(s).\n",
> +			       dev->name, i);
> +			data->rxhead = i;
> +			break;
> +		}
> +
> +		data->rxskbs[i] = skb;
> +		/* Align the payload on a 4-byte boundary */
> +		skb_reserve(skb, 2);
> +		data->rxskbs[i] = skb;
> +		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);

use dma mapping


> +		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
> +	}
> +
> +	data->rxfree = i;
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_PTRLOW, data->rxdma);
> +
> +	for (i = 0; i < TSI108_TXRING_LEN; i++) {
> +		data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
> +		data->txring[i].misc = 0;
> +	}
> +
> +	data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
> +	data->txtail = 0;
> +	data->txhead = 0;
> +	data->txfree = TSI108_TXRING_LEN;
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_PTRLOW, data->txdma);
> +	tsi108_init_phy(dev);
> +
> +	setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
> +	mod_timer(&data->timer, jiffies + 1);
> +
> +	tsi108_restart_rx(data, dev);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_INTSTAT, ~0);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK,
> +			     ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
> +			       TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
> +			       TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
> +			       TSI108_INT_SFN | TSI108_INT_STATCARRY));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1,
> +			     TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
> +	netif_start_queue(dev);
> +	return 0;
> +}
> +
> +static int tsi108_close(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +
> +	netif_stop_queue(dev);
> +
> +	del_timer_sync(&data->timer);
> +
> +	printk(KERN_DEBUG "Inside tsi108_ifdown!\n");

kill debugging printk, or use netif_msg_xxx properly


> +	tsi108_stop_ethernet(dev);
> +	tsi108_kill_phy(dev);
> +	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK, ~0);
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
> +
> +	/* Check for any pending TX packets, and drop them. */
> +
> +	while (!data->txfree || data->txhead != data->txtail) {
> +		int tx = data->txtail;
> +		struct sk_buff *skb;
> +		skb = data->txskbs[tx];
> +		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
> +		data->txfree++;
> +		dev_kfree_skb(skb);
> +	}
> +
> +	synchronize_irq(data->irq_num);
> +	free_irq(data->irq_num, dev);
> +
> +	/* Discard the RX ring. */
> +
> +	while (data->rxfree) {
> +		int rx = data->rxtail;
> +		struct sk_buff *skb;
> +
> +		skb = data->rxskbs[rx];
> +		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
> +		data->rxfree--;
> +		dev_kfree_skb(skb);
> +	}
> +
> +	pci_free_consistent(0,
> +			    TSI108_RXRING_LEN * sizeof(rx_desc),
> +			    data->rxring, data->rxdma);
> +	pci_free_consistent(0,
> +			    TSI108_TXRING_LEN * sizeof(tx_desc),
> +			    data->txring, data->txdma);

dma_free_coherent


> +	return 0;
> +}
> +
> +static void tsi108_init_mac(struct net_device *dev)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
> +			     TSI108_MAC_CFG2_PADCRC);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXTHRESH,
> +			     (192 << TSI108_EC_TXTHRESH_STARTFILL) |
> +			     (192 << TSI108_EC_TXTHRESH_STOPFILL));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRYMASK1,
> +			     ~(TSI108_STAT_CARRY1_RXBYTES |
> +			       TSI108_STAT_CARRY1_RXPKTS |
> +			       TSI108_STAT_CARRY1_RXFCS |
> +			       TSI108_STAT_CARRY1_RXMCAST |
> +			       TSI108_STAT_CARRY1_RXALIGN |
> +			       TSI108_STAT_CARRY1_RXLENGTH |
> +			       TSI108_STAT_CARRY1_RXRUNT |
> +			       TSI108_STAT_CARRY1_RXJUMBO |
> +			       TSI108_STAT_CARRY1_RXFRAG |
> +			       TSI108_STAT_CARRY1_RXJABBER |
> +			       TSI108_STAT_CARRY1_RXDROP));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_STAT_CARRYMASK2,
> +			     ~(TSI108_STAT_CARRY2_TXBYTES |
> +			       TSI108_STAT_CARRY2_TXPKTS |
> +			       TSI108_STAT_CARRY2_TXEXDEF |
> +			       TSI108_STAT_CARRY2_TXEXCOL |
> +			       TSI108_STAT_CARRY2_TXTCOL |
> +			       TSI108_STAT_CARRY2_TXPAUSE));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
> +	TSI108_ETH_WRITE_REG(TSI108_MAC_CFG1, 0);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXCFG,
> +			     TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
> +			     TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
> +			     TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
> +						TSI108_EC_TXQ_CFG_SFNPORT));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
> +			     TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
> +			     TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
> +						TSI108_EC_RXQ_CFG_SFNPORT));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_TXQ_BUFCFG,
> +			     TSI108_EC_TXQ_BUFCFG_BURST256 |
> +			     TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
> +						TSI108_EC_TXQ_BUFCFG_SFNPORT));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_RXQ_BUFCFG,
> +			     TSI108_EC_RXQ_BUFCFG_BURST256 |
> +			     TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
> +						TSI108_EC_RXQ_BUFCFG_SFNPORT));
> +
> +	TSI108_ETH_WRITE_REG(TSI108_EC_INTMASK, ~0);
> +}
> +
> +static int tsi108_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
> +{
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +	struct mii_ioctl_data *mii_data =
> +			(struct mii_ioctl_data *)&rq->ifr_data;
> +	int ret;
> +
> +	switch (cmd) {
> +	case SIOCGMIIPHY:
> +		mii_data->phy_id = data->phy;
> +		ret = 0;
> +		break;
> +
> +	case SIOCGMIIREG:
> +		spin_lock_irq(&phy_lock);
> +		mii_data->val_out =
> +		    tsi108_read_mii(data, mii_data->reg_num, &ret);
> +		spin_unlock_irq(&phy_lock);
> +		break;

use generic_mii_ioctl from drivers/net/mii.c


> +	default:
> +		ret = -EOPNOTSUPP;
> +	}
> +
> +	return ret;
> +}
> +
> +static int
> +tsi108_init_one(struct platform_device *pdev)
> +{
> +	struct net_device *dev = NULL;
> +	struct tsi108_prv_data *data = NULL;
> +	hw_info *einfo;
> +	int ret;
> +	
> +	einfo = pdev->dev.platform_data;
> +	
> +	if (NULL == einfo) {
> +		printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
> +		       pdev->id);
> +		return -ENODEV;
> +	}
> +
> +	/* Create an ethernet device instance */
> +
> +	dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
> +	if (!dev) {
> +		printk("tsi108_eth: Could not allocate a device structure\n");
> +		return -ENOMEM;
> +	}
> +
> +	printk("tsi108_eth%d: probe...\n", pdev->id);
> +	data = netdev_priv(dev);
> +
> +	pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
> +			pdev->id, einfo->regs, einfo->phyregs,
> +			einfo->phy, einfo->irq_num);
> +	
> +	data->regs = ioremap(einfo->regs, 0x400);
> +	data->phyregs = ioremap(einfo->phyregs, 0x400);

check for failure (==NULL)


> +	data->phy = einfo->phy;
> +	data->irq_num = einfo->irq_num;
> +	data->id = pdev->id;
> +	dev->open = tsi108_open;
> +	dev->stop = tsi108_close;
> +	dev->hard_start_xmit = tsi108_send_packet;
> +	dev->set_mac_address = tsi108_set_mac;
> +	dev->set_multicast_list = tsi108_set_rx_mode;
> +	dev->get_stats = tsi108_get_stats;
> +	dev->poll = tsi108_poll;
> +	dev->do_ioctl = tsi108_ioctl;
> +	dev->weight = 64;  /* 64 is more suitable for GigE interface - klai */
> +
> +	/* Apparently, the Linux networking code won't use scatter-gather
> +	 * if the hardware doesn't do checksums.  However, it's faster
> +	 * to checksum in place and use SG, as (among other reasons)
> +	 * the cache won't be dirtied (which then has to be flushed
> +	 * before DMA).  The checksumming is done by the driver (via
> +	 * a new function skb_csum_dev() in net/core/skbuff.c).
> +	 */
> +
> +	dev->features = NETIF_F_HIGHDMA;
> +	SET_MODULE_OWNER(dev);
> +
> +	spin_lock_init(&data->txlock);
> +	spin_lock_init(&data->misclock);
> +
> +	tsi108_reset_ether(data);
> +	tsi108_kill_phy(dev);
> +
> +	if (tsi108_get_mac(dev) != 0)
> +		printk(KERN_ERR "%s: Invalid MAC address.  Please correct.\n",
> +		       dev->name);

handle failure


> +	tsi108_init_mac(dev);
> +	ret = register_netdev(dev);
> +	if (ret < 0) {
> +		free_netdev(dev);
> +		return ret;

leak:  need to iounmap on error


> +	}
> +
> +	printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
> +	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
> +	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
> +	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
> +#ifdef DEBUG
> +	dump_eth_one(dev);
> +#endif
> +
> +	return 0;
> +}
> +
> +/* There's no way to either get interrupts from the PHY when
> + * something changes, or to have the Tsi108 automatically communicate
> + * with the PHY to reconfigure itself.
> + *
> + * Thus, we have to do it using a timer. 
> + */
> +
> +static void tsi108_timed_checker(unsigned long dev_ptr)
> +{
> +	struct net_device *dev = (struct net_device *)dev_ptr;
> +	struct tsi108_prv_data *data = netdev_priv(dev);
> +
> +	tsi108_check_phy(dev);
> +	tsi108_check_rxring(dev);
> +	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
> +}
> +
> +static int tsi108_ether_init(void)
> +{
> +	int ret;
> +	ret = platform_driver_register (&tsi_eth_driver);
> +	if (ret < 0){
> +		printk("tsi108_ether_init: error initializing ethernet "
> +		       "device\n");
> +		return ret;
> +	}
> +	return 0;
> +}
> +
> +static int tsi108_ether_remove(struct platform_device *pdev)
> +{
> +	struct net_device *dev = platform_get_drvdata(pdev);
> +	struct tsi108_prv_data *priv = netdev_priv(dev);
> +	
> +	unregister_netdev(dev);
> +	tsi108_stop_ethernet(dev);
> +	platform_set_drvdata(pdev, NULL);
> +	iounmap((void __iomem *)priv->regs);
> +	iounmap((void __iomem *)priv->phyregs);

delete casts


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-21  4:26           ` Jeff Garzik
@ 2006-09-21  5:46             ` Zang Roy-r61911
  2006-09-21  6:30               ` Jeff Garzik
  0 siblings, 1 reply; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-21  5:46 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: Roland Dreier, Andrew Morton, netdev, linux-kernel

On Thu, 2006-09-21 at 12:26, Jeff Garzik wrote:
> Zang Roy-r61911 wrote:
> > +#define TSI108_ETH_WRITE_REG(offset, val) \
> > +     writel(le32_to_cpu(val),data->regs + (offset))
> > +
> > +#define TSI108_ETH_READ_REG(offset) \
> > +     le32_to_cpu(readl(data->regs + (offset)))
> > +
> > +#define TSI108_ETH_WRITE_PHYREG(offset, val) \
> > +     writel(le32_to_cpu(val), data->phyregs + (offset))
> > +
> > +#define TSI108_ETH_READ_PHYREG(offset) \
> > +     le32_to_cpu(readl(data->phyregs + (offset)))
> 
> 
> NAK:
> 
> 1) writel() and readl() are defined to be little endian.
> 
> If your platform is different, then your platform should have its own 
> foobus_writel() and foobus_readl().

The Tsi108 bridge is designed for the PowerPC platform. Originally, I used
out_be32() and in_be32(). However, there is no obvious reason to object to
using this bridge in a little-endian system; maybe some extra hardware
logic would be needed for the bus interface. le32_to_cpu() can be aware of
the endian difference.
Any comment?

> 
> 2) TSI108_ETH_WRITE_REG() is just way too long.  TSI_READ(), 
> TSI_WRITE(), TSI_READ_PHY() and TSI_WRITE_PHY() would be far more
> readable.
> 
> More in next email.
> 
I will modify the name.
Roy



^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-21  5:46             ` Zang Roy-r61911
@ 2006-09-21  6:30               ` Jeff Garzik
  0 siblings, 0 replies; 23+ messages in thread
From: Jeff Garzik @ 2006-09-21  6:30 UTC (permalink / raw)
  To: Zang Roy-r61911; +Cc: Roland Dreier, Andrew Morton, netdev, linux-kernel

Zang Roy-r61911 wrote:
> On Thu, 2006-09-21 at 12:26, Jeff Garzik wrote:
>> Zang Roy-r61911 wrote:
>>> +#define TSI108_ETH_WRITE_REG(offset, val) \
>>> +     writel(le32_to_cpu(val),data->regs + (offset))
>>> +
>>> +#define TSI108_ETH_READ_REG(offset) \
>>> +     le32_to_cpu(readl(data->regs + (offset)))
>>> +
>>> +#define TSI108_ETH_WRITE_PHYREG(offset, val) \
>>> +     writel(le32_to_cpu(val), data->phyregs + (offset))
>>> +
>>> +#define TSI108_ETH_READ_PHYREG(offset) \
>>> +     le32_to_cpu(readl(data->phyregs + (offset)))
>>
>> NAK:
>>
>> 1) writel() and readl() are defined to be little endian.
>>
>> If your platform is different, then your platform should have its own 
>> foobus_writel() and foobus_readl().
> 
> The Tsi108 bridge is designed for the PowerPC platform. Originally, I used
> out_be32() and in_be32(). However, there is no obvious reason to object to
> using this bridge in a little-endian system; maybe some extra hardware
> logic would be needed for the bus interface. le32_to_cpu() can be aware of
> the endian difference.

To restate, readl() should read a little endian value, and return a 
CPU-endian value.  writel() should receive a CPU-endian value, and write 
a little endian value.

If your platform's readl/writel doesn't do that, it's broken.

That's why normal PCI drivers can use readl() and writel() on either 
big-endian or little-endian machines, without needing to use le32_to_cpu().

	Jeff




^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-21  4:46           ` Jeff Garzik
@ 2006-09-29  7:36             ` Zang Roy-r61911
  2006-09-29  7:47               ` Jeff Garzik
  2006-10-17  7:18             ` Zang Roy-r61911
  2006-10-23  2:09             ` [patch 3/3] Add tsi108 " Zang Roy-r61911
  2 siblings, 1 reply; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-09-29  7:36 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: Andrew Morton, netdev, linux-kernel

On Thu, 2006-09-21 at 12:46, Jeff Garzik wrote:
> Zang Roy-r61911 wrote:
> > +struct tsi108_prv_data {
> > +     void  __iomem *regs;    /* Base of normal regs */
> > +     void  __iomem *phyregs; /* Base of register bank used for PHY
> access */
> > +     
> > +     int phy;                /* Index of PHY for this interface */
> > +     int irq_num;
> > +     int id;
> > +
> > +     struct timer_list timer;/* Timer that triggers the check phy
> function */
> > +     int rxtail;             /* Next entry in rxring to read */
> > +     int rxhead;             /* Next entry in rxring to give a new
> buffer */
> > +     int rxfree;             /* Number of free, allocated RX
> buffers */
> > +
> > +     int rxpending;          /* Non-zero if there are still
> descriptors
> > +                              * to be processed from a previous
> descriptor
> > +                              * interrupt condition that has been
> cleared */
> > +
> > +     int txtail;             /* Next TX descriptor to check status
> on */
> > +     int txhead;             /* Next TX descriptor to use */
> 
> most of these should be unsigned, to prevent bugs.
> 
> 
> > +     /* Number of free TX descriptors.  This could be calculated
> from
> > +      * rxhead and rxtail if one descriptor were left unused to
> disambiguate
> > +      * full and empty conditions, but it's simpler to just keep
> track
> > +      * explicitly. */
> > +
> > +     int txfree;
> > +
> > +     int phy_ok;             /* The PHY is currently powered on. */
> > +
> > +     /* PHY status (duplex is 1 for half, 2 for full,
> > +      * so that the default 0 indicates that neither has
> > +      * yet been configured). */
> > +
> > +     int link_up;
> > +     int speed;
> > +     int duplex;
> > +
> > +     tx_desc *txring;
> > +     rx_desc *rxring;
> > +     struct sk_buff *txskbs[TSI108_TXRING_LEN];
> > +     struct sk_buff *rxskbs[TSI108_RXRING_LEN];
> > +
> > +     dma_addr_t txdma, rxdma;
> > +
> > +     /* txlock nests in misclock and phy_lock */
> > +
> > +     spinlock_t txlock, misclock;
> > +
> > +     /* stats is used to hold the upper bits of each hardware
> counter,
> > +      * and tmpstats is used to hold the full values for returning
> > +      * to the caller of get_stats().  They must be separate in
> case
> > +      * an overflow interrupt occurs before the stats are consumed.
> > +      */
> > +
> > +     struct net_device_stats stats;
> > +     struct net_device_stats tmpstats;
> > +
> > +     /* These stats are kept separate in hardware, thus require
> individual
> > +      * fields for handling carry.  They are combined in get_stats.
> > +      */
> > +
> > +     unsigned long rx_fcs;   /* Add to rx_frame_errors */
> > +     unsigned long rx_short_fcs;     /* Add to rx_frame_errors */
> > +     unsigned long rx_long_fcs;      /* Add to rx_frame_errors */
> > +     unsigned long rx_underruns;     /* Add to rx_length_errors */
> > +     unsigned long rx_overruns;      /* Add to rx_length_errors */
> > +
> > +     unsigned long tx_coll_abort;    /* Add to
> tx_aborted_errors/collisions */
> > +     unsigned long tx_pause_drop;    /* Add to tx_aborted_errors */
> > +
> > +     unsigned long mc_hash[16];
> > +};
> > +
> > +/* Structure for a device driver */
> > +
> > +static struct platform_driver tsi_eth_driver = {
> > +     .probe = tsi108_init_one,
> > +     .remove = tsi108_ether_remove,
> > +     .driver = {
> > +             .name = "tsi-ethernet",
> > +     },
> > +};
> > +
> > +static void tsi108_timed_checker(unsigned long dev_ptr);
> > +
> > +static void dump_eth_one(struct net_device *dev)
> > +{
> > +     struct tsi108_prv_data *data = netdev_priv(dev);
> > +
> > +     printk("Dumping %s...\n", dev->name);
> > +     printk("intstat %x intmask %x phy_ok %d"
> > +            " link %d speed %d duplex %d\n",
> > +            TSI108_ETH_READ_REG(TSI108_EC_INTSTAT),
> > +            TSI108_ETH_READ_REG(TSI108_EC_INTMASK), data->phy_ok,
> > +            data->link_up, data->speed, data->duplex);
> > +
> > +     printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err
> %x\n",
> > +            data->txhead, data->txtail, data->txfree,
> > +            TSI108_ETH_READ_REG(TSI108_EC_TXSTAT),
> > +            TSI108_ETH_READ_REG(TSI108_EC_TXESTAT),
> > +            TSI108_ETH_READ_REG(TSI108_EC_TXERR));
> > +
> > +     printk("RX: head %d, tail %d, free %d, stat %x,"
> > +            " estat %x, err %x, pending %d\n\n",
> > +            data->rxhead, data->rxtail, data->rxfree,
> > +            TSI108_ETH_READ_REG(TSI108_EC_RXSTAT),
> > +            TSI108_ETH_READ_REG(TSI108_EC_RXESTAT),
> > +            TSI108_ETH_READ_REG(TSI108_EC_RXERR), data->rxpending);
> > +}
> > +
> > +/* Synchronization is needed between the thread and up/down events.
> > + * Note that the PHY is accessed through the same registers for
> both
> > + * interfaces, so this can't be made interface-specific.
> > + */
> > +
> > +static DEFINE_SPINLOCK(phy_lock);
> 
> you should have a chip structure, that contains two structs (one for 
> each interface/port)
> 
> 
Could you explain the chip structure in more detail?
Do I need to create two net_device structs, one for each port?
Thanks.
Roy


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-29  7:36             ` Zang Roy-r61911
@ 2006-09-29  7:47               ` Jeff Garzik
  0 siblings, 0 replies; 23+ messages in thread
From: Jeff Garzik @ 2006-09-29  7:47 UTC (permalink / raw)
  To: Zang Roy-r61911; +Cc: Andrew Morton, netdev, linux-kernel

Zang Roy-r61911 wrote:
> Could you explain the chip structure in more detail?
> Do I need to create two net_device structs, one for each port?

No.  One net_device per port.  And one container structure for the 
entire device.

	Jeff



^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-21  4:46           ` Jeff Garzik
  2006-09-29  7:36             ` Zang Roy-r61911
@ 2006-10-17  7:18             ` Zang Roy-r61911
  2006-10-30  5:19               ` Add tsi108/9 " Zang Roy-r61911
  2006-10-23  2:09             ` [patch 3/3] Add tsi108 " Zang Roy-r61911
  2 siblings, 1 reply; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-10-17  7:18 UTC (permalink / raw)
  To: Jeff Garzik
  Cc: Roland Dreier, Andrew Morton, netdev, linux-kernel, Alexandre.Bounine

On Thu, 2006-09-21 at 12:46, Jeff Garzik wrote:
> > +
> > +/* Synchronization is needed between the thread and up/down events.
> > + * Note that the PHY is accessed through the same registers for
> both
> > + * interfaces, so this can't be made interface-specific.
> > + */
> > +
> > +static DEFINE_SPINLOCK(phy_lock);
> 
> you should have a chip structure, that contains two structs (one for 
> each interface/port)
> 
> 
Do you mean you want to see this spinlock within a dynamically allocated
data structure, and not as a global variable?
I am not sure :-).

All your other comments have been addressed. The following is the
updated tsi108 Ethernet port driver.
Any comment?


Signed-off-by: Alexandre Bounine<alexandreb@tundra.com>
Signed-off-by: Roy Zang	<tie-fei.zang@freescale.com> 

diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
new file mode 100644
index 0000000..6c3146d
--- /dev/null
+++ b/drivers/net/tsi108_eth.c
@@ -0,0 +1,1709 @@
+/*******************************************************************************
+  
+  Copyright(c) 2006 Tundra Semiconductor Corporation.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+*******************************************************************************/
+
+/* This driver is based on the driver code originally developed	
+ * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
+ * scott.wood@timesys.com  * Copyright (C) 2003 TimeSys Corporation
+ *
+ * Currently changes from original version are:
+ * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
+ * - modifications to handle two ports independently and support for
+ *   additional PHY devices (alexandre.bounine@tundra.com)
+ * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/tsi108.h>
+
+#include "tsi108_eth.h"
+
+#define MII_READ_DELAY 10000	/* max link wait time in msec */
+
+#define TSI108_RXRING_LEN     256
+
+/* NOTE: The driver currently does not support receiving packets
+ * larger than the buffer size, so don't decrease this (unless you
+ * want to add such support).
+ */
+#define TSI108_RXBUF_SIZE     1536
+
+#define TSI108_TXRING_LEN     256
+
+#define TSI108_TX_INT_FREQ    64
+
+/* Check the phy status every half a second. */
+#define CHECK_PHY_INTERVAL (HZ/2)
+
+static int tsi108_init_one(struct platform_device *pdev);
+static int tsi108_ether_remove(struct platform_device *pdev);
+
+struct tsi108_prv_data {
+	void  __iomem *regs;	/* Base of normal regs */
+	void  __iomem *phyregs;	/* Base of register bank used for PHY access */
+	
+	unsigned int phy;		/* Index of PHY for this interface */
+	unsigned int irq_num;
+	unsigned int id;
+
+	struct timer_list timer;/* Timer that triggers the check phy function */
+	unsigned int rxtail;	/* Next entry in rxring to read */
+	unsigned int rxhead;	/* Next entry in rxring to give a new buffer */
+	unsigned int rxfree;	/* Number of free, allocated RX buffers */
+
+	unsigned int rxpending;	/* Non-zero if there are still descriptors
+				 * to be processed from a previous descriptor
+				 * interrupt condition that has been cleared */
+
+	unsigned int txtail;	/* Next TX descriptor to check status on */
+	unsigned int txhead;	/* Next TX descriptor to use */
+
+	/* Number of free TX descriptors.  This could be calculated from
+	 * rxhead and rxtail if one descriptor were left unused to disambiguate
+	 * full and empty conditions, but it's simpler to just keep track
+	 * explicitly. */
+
+	unsigned int txfree;
+
+	unsigned int phy_ok;		/* The PHY is currently powered on. */
+
+	/* PHY status (duplex is 1 for half, 2 for full,
+	 * so that the default 0 indicates that neither has
+	 * yet been configured). */
+
+	unsigned int link_up;
+	unsigned int speed;
+	unsigned int duplex;
+
+	tx_desc *txring;
+	rx_desc *rxring;
+	struct sk_buff *txskbs[TSI108_TXRING_LEN];
+	struct sk_buff *rxskbs[TSI108_RXRING_LEN];
+
+	dma_addr_t txdma, rxdma;
+
+	/* txlock nests in misclock and phy_lock */
+
+	spinlock_t txlock, misclock;
+
+	/* stats is used to hold the upper bits of each hardware counter,
+	 * and tmpstats is used to hold the full values for returning
+	 * to the caller of get_stats().  They must be separate in case
+	 * an overflow interrupt occurs before the stats are consumed.
+	 */
+
+	struct net_device_stats stats;
+	struct net_device_stats tmpstats;
+
+	/* These stats are kept separate in hardware, thus require individual
+	 * fields for handling carry.  They are combined in get_stats.
+	 */
+
+	unsigned long rx_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_short_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_long_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_underruns;	/* Add to rx_length_errors */
+	unsigned long rx_overruns;	/* Add to rx_length_errors */
+
+	unsigned long tx_coll_abort;	/* Add to tx_aborted_errors/collisions */
+	unsigned long tx_pause_drop;	/* Add to tx_aborted_errors */
+
+	unsigned long mc_hash[16];
+	u32 msg_enable;			/* debug message level */
+	struct mii_if_info mii_if;
+	unsigned int init_media;
+};
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+	.probe = tsi108_init_one,
+	.remove = tsi108_ether_remove,
+	.driver	= {
+		.name = "tsi-ethernet",
+	},
+};
+
+static void tsi108_timed_checker(unsigned long dev_ptr);
+
+static void dump_eth_one(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	printk("Dumping %s...\n", dev->name);
+	printk("intstat %x intmask %x phy_ok %d"
+	       " link %d speed %d duplex %d\n",
+	       TSI_READ(TSI108_EC_INTSTAT),
+	       TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
+	       data->link_up, data->speed, data->duplex);
+
+	printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
+	       data->txhead, data->txtail, data->txfree,
+	       TSI_READ(TSI108_EC_TXSTAT),
+	       TSI_READ(TSI108_EC_TXESTAT),
+	       TSI_READ(TSI108_EC_TXERR));
+
+	printk("RX: head %d, tail %d, free %d, stat %x,"
+	       " estat %x, err %x, pending %d\n\n",
+	       data->rxhead, data->rxtail, data->rxfree,
+	       TSI_READ(TSI108_EC_RXSTAT),
+	       TSI_READ(TSI108_EC_RXESTAT),
+	       TSI_READ(TSI108_EC_RXERR), data->rxpending);
+}
+
+/* Synchronization is needed between the thread and up/down events.
+ * Note that the PHY is accessed through the same registers for both
+ * interfaces, so this can't be made interface-specific.
+ */
+
+static DEFINE_SPINLOCK(phy_lock);
+
+static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
+{
+	unsigned i;
+
+	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
+	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
+	for (i = 0; i < 100; i++) {
+		if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+		      (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
+			break;
+		udelay(10);
+	}
+
+	if (i == 100) 
+		return 0xffff;
+	else
+		return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN));
+}
+
+static void tsi108_write_mii(struct tsi108_prv_data *data, 
+				int reg, u16 val)
+{
+	unsigned i = 100;
+	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
+	while (i--) {
+		if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) & 
+			TSI108_MAC_MII_IND_BUSY))
+			break;
+		udelay(10);
+	}
+}
+
+static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	return tsi108_read_mii(data, reg);
+
+}
+
+static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	tsi108_write_mii(data, reg, val);
+}
+
+static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
+					int reg, u16 val)
+{
+	unsigned i = 1000;
+	TSI_WRITE(TSI108_MAC_MII_ADDR,
+			     (0x1e << TSI108_MAC_MII_ADDR_PHY)
+			     | (reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
+	while(i--) {
+		if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
+			return;
+		udelay(10);
+	}
+	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+static int mii_speed(struct mii_if_info *mii)
+{
+	int advert, lpa, val, media;
+	int lpa2 = 0;
+	int speed;
+
+	if (!mii_link_ok(mii))
+		return 0;
+
+	val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
+	if ((val & BMSR_ANEGCOMPLETE) == 0)
+		return 0;
+
+	advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
+	lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
+	media = mii_nway_result(advert & lpa);
+
+	if (mii->supports_gmii)
+		lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
+
+	speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+			(media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
+	return speed;
+}
+
+/* Poll the PHY (under phy_lock) and propagate link/speed/duplex state
+ * to the MAC.  Reconfigures MAC_CFG2/PORTCTRL on a speed or duplex
+ * change, and starts/stops the TX queue on link transitions.
+ */
+static void tsi108_check_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 mac_cfg2_reg, portctrl_reg;
+	u32 duplex;
+	u32 speed;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	if (!data->phy_ok)
+		goto out;
+
+	/* Do a dummy read, as for some reason the first read
+	 * after a link becomes up returns link down, even if
+	 * it's been a while since the link came up.
+	 */
+	tsi108_read_mii(data, MII_BMSR);
+
+	duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
+	data->init_media = 0;
+
+	if (netif_carrier_ok(dev)) {
+
+		speed = mii_speed(&data->mii_if);
+
+		if ((speed != data->speed) || duplex) {
+
+			mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
+			portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
+
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+
+			if (speed == 1000) {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
+				portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
+			} else {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+				portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+			}
+
+			data->speed = speed;
+
+			if (data->mii_if.full_duplex) {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
+				portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
+				data->duplex = 2;
+			} else {
+				mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
+				portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
+				data->duplex = 1;
+			}
+
+			TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
+			TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
+		}
+
+		/* Handle the link-up transition even if the speed and
+		 * duplex are unchanged; otherwise a link that drops and
+		 * comes back at the same settings would leave the TX
+		 * queue stopped forever.
+		 */
+		if (data->link_up == 0) {
+			/* The manual says it can take 3-4 usecs for the speed change
+			 * to take effect.
+			 */
+			udelay(5);
+
+			spin_lock(&data->txlock);
+			if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
+				netif_wake_queue(dev);
+
+			data->link_up = 1;
+			spin_unlock(&data->txlock);
+		}
+	} else {
+		if (data->link_up == 1) {
+			netif_stop_queue(dev);
+			data->link_up = 0;
+			printk(KERN_NOTICE "%s : link is down\n", dev->name);
+		}
+	}
+
+out:
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* Fold one hardware carry (overflow) flag into the software-kept
+ * upper part of a statistics counter: if @carry_bit is set in the
+ * latched @carry value, advance @upper by the counter's wrap size
+ * @carry_shift.
+ */
+static inline void
+tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+		      unsigned long *upper)
+{
+	if (!(carry & carry_bit))
+		return;
+
+	*upper += carry_shift;
+}
+
+/* Statistics-carry interrupt service: latch both carry registers,
+ * acknowledge them, and fold each flagged counter overflow into the
+ * corresponding software upper half via tsi108_stat_carry_one().
+ * Serialized against tsi108_get_stats() by data->misclock.
+ */
+static void tsi108_stat_carry(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 carry1, carry2;
+
+	spin_lock_irq(&data->misclock);
+
+	carry1 = TSI_READ(TSI108_STAT_CARRY1);
+	carry2 = TSI_READ(TSI108_STAT_CARRY2);
+
+	/* Write the latched bits back — presumably write-1-to-clear;
+	 * TODO confirm against the TSI108 manual.
+	 */
+	TSI_WRITE(TSI108_STAT_CARRY1, carry1);
+	TSI_WRITE(TSI108_STAT_CARRY2, carry2);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
+			      TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
+			      TSI108_STAT_RXPKTS_CARRY,
+			      &data->stats.rx_packets);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
+			      TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
+			      TSI108_STAT_RXMCAST_CARRY,
+			      &data->stats.multicast);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
+			      TSI108_STAT_RXALIGN_CARRY,
+			      &data->stats.rx_frame_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
+			      TSI108_STAT_RXLENGTH_CARRY,
+			      &data->stats.rx_length_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
+			      TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
+			      TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
+			      TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
+			      TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
+			      TSI108_STAT_RXDROP_CARRY,
+			      &data->stats.rx_missed_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
+			      TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
+			      TSI108_STAT_TXPKTS_CARRY,
+			      &data->stats.tx_packets);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
+			      TSI108_STAT_TXEXDEF_CARRY,
+			      &data->stats.tx_aborted_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
+			      TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
+			      TSI108_STAT_TXTCOL_CARRY,
+			      &data->stats.collisions);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
+			      TSI108_STAT_TXPAUSEDROP_CARRY,
+			      &data->tx_pause_drop);
+
+	spin_unlock_irq(&data->misclock);
+}
+
+/* Read a stat counter atomically with respect to carries.
+ * data->misclock must be held.
+ */
+static inline unsigned long
+tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
+		 int carry_shift, unsigned long *upper)
+{
+	int carryreg = (reg < 0xb0) ? TSI108_STAT_CARRY1 : TSI108_STAT_CARRY2;
+	unsigned long val;
+
+	for (;;) {
+		val = TSI_READ(reg) | *upper;
+
+		/* If the counter wrapped but the carry interrupt has
+		 * not been serviced yet, account for the carry here,
+		 * acknowledge it, and re-read the counter.
+		 */
+		if (likely(!(TSI_READ(carryreg) & carry_bit)))
+			break;
+
+		*upper += carry_shift;
+		TSI_WRITE(carryreg, carry_bit);
+	}
+
+	return val;
+}
+
+/* ndo get_stats hook: snapshot all hardware statistics counters into
+ * data->tmpstats (carry-safe via tsi108_read_stat()), merge in the
+ * software-maintained counters, and derive the aggregate rx/tx error
+ * totals.  Serialized by data->misclock.
+ */
+static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
+{
+	unsigned long excol;
+
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	spin_lock_irq(&data->misclock);
+
+	data->tmpstats.rx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_RXPKTS,
+			     TSI108_STAT_CARRY1_RXPKTS,
+			     TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
+
+	data->tmpstats.tx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_TXPKTS,
+			     TSI108_STAT_CARRY2_TXPKTS,
+			     TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
+
+	data->tmpstats.rx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_RXBYTES,
+			     TSI108_STAT_CARRY1_RXBYTES,
+			     TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	data->tmpstats.tx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_TXBYTES,
+			     TSI108_STAT_CARRY2_TXBYTES,
+			     TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	data->tmpstats.multicast =
+	    tsi108_read_stat(data, TSI108_STAT_RXMCAST,
+			     TSI108_STAT_CARRY1_RXMCAST,
+			     TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
+
+	/* Excessive collisions are folded into both the collision and
+	 * the tx_aborted totals below.
+	 */
+	excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
+				 TSI108_STAT_CARRY2_TXEXCOL,
+				 TSI108_STAT_TXEXCOL_CARRY,
+				 &data->tx_coll_abort);
+
+	data->tmpstats.collisions =
+	    tsi108_read_stat(data, TSI108_STAT_TXTCOL,
+			     TSI108_STAT_CARRY2_TXTCOL,
+			     TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
+
+	data->tmpstats.collisions += excol;
+
+	data->tmpstats.rx_length_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
+			     TSI108_STAT_CARRY1_RXLENGTH,
+			     TSI108_STAT_RXLENGTH_CARRY,
+			     &data->stats.rx_length_errors);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXRUNT,
+			     TSI108_STAT_CARRY1_RXRUNT,
+			     TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
+			     TSI108_STAT_CARRY1_RXJUMBO,
+			     TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	data->tmpstats.rx_frame_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXALIGN,
+			     TSI108_STAT_CARRY1_RXALIGN,
+			     TSI108_STAT_RXALIGN_CARRY,
+			     &data->stats.rx_frame_errors);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFCS,
+			     TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
+			     &data->rx_fcs);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFRAG,
+			     TSI108_STAT_CARRY1_RXFRAG,
+			     TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	data->tmpstats.rx_missed_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXDROP,
+			     TSI108_STAT_CARRY1_RXDROP,
+			     TSI108_STAT_RXDROP_CARRY,
+			     &data->stats.rx_missed_errors);
+
+	/* These two are maintained by software. */
+	data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
+	data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
+
+	data->tmpstats.tx_aborted_errors =
+	    tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
+			     TSI108_STAT_CARRY2_TXEXDEF,
+			     TSI108_STAT_TXEXDEF_CARRY,
+			     &data->stats.tx_aborted_errors);
+
+	data->tmpstats.tx_aborted_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
+			     TSI108_STAT_CARRY2_TXPAUSE,
+			     TSI108_STAT_TXPAUSEDROP_CARRY,
+			     &data->tx_pause_drop);
+
+	data->tmpstats.tx_aborted_errors += excol;
+
+	data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
+	data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
+	    data->tmpstats.rx_crc_errors +
+	    data->tmpstats.rx_frame_errors +
+	    data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
+
+	spin_unlock_irq(&data->misclock);
+	return &data->tmpstats;
+}
+
+/* Re-arm RX queue 0: mark the queue pointer valid again and set the
+ * GO bit so the controller resumes fetching RX descriptors.
+ */
+static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
+{
+	TSI_WRITE(TSI108_EC_RXQ_PTRHIGH, TSI108_EC_RXQ_PTRHIGH_VALID);
+	TSI_WRITE(TSI108_EC_RXCTRL,
+		  TSI108_EC_RXCTRL_GO | TSI108_EC_RXCTRL_QUEUE0);
+}
+
+/* Re-arm TX queue 0: mark the queue pointer valid again and set GO
+ * (plus the idle-interrupt enable) so transmission resumes.
+ */
+static void tsi108_restart_tx(struct tsi108_prv_data * data)
+{
+	TSI_WRITE(TSI108_EC_TXQ_PTRHIGH, TSI108_EC_TXQ_PTRHIGH_VALID);
+	TSI_WRITE(TSI108_EC_TXCTRL,
+		  TSI108_EC_TXCTRL_IDLEINT | TSI108_EC_TXCTRL_GO |
+		  TSI108_EC_TXCTRL_QUEUE0);
+}
+
+/* Reclaim completed TX descriptors between txtail and txhead, freeing
+ * each packet's skb at its EOF descriptor (a multi-fragment packet
+ * spans several descriptors that all record the same skb), and wake
+ * the queue if anything was released.
+ *
+ * txlock must be held by caller, with IRQs disabled, and
+ * with permission to re-enable them when the lock is dropped.
+ *
+ * NOTE(review): buffers mapped with dma_map_single()/dma_map_page()
+ * in tsi108_send_packet() are not unmapped here — harmless only if
+ * unmapping is a no-op on this platform; confirm.
+ */
+static void tsi108_complete_tx(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int tx;
+	struct sk_buff *skb;
+	int release = 0;
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		tx = data->txtail;
+
+		/* Descriptor still owned by hardware: stop reclaiming. */
+		if (data->txring[tx].misc & TSI108_TX_OWN)
+			break;
+
+		skb = data->txskbs[tx];
+
+		if (!(data->txring[tx].misc & TSI108_TX_OK))
+			printk("%s: bad tx packet, misc %x\n",
+			       dev->name, data->txring[tx].misc);
+
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+
+		if (data->txring[tx].misc & TSI108_TX_EOF) {
+			dev_kfree_skb_any(skb);
+			release++;
+		}
+	}
+
+	if (release) {
+		if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
+			netif_wake_queue(dev);
+	}
+}
+
+/* hard_start_xmit hook: map the skb (head plus page fragments) into
+ * consecutive TX descriptors, hand them to the hardware, reclaim any
+ * already-completed descriptors, and restart the TX queue if it has
+ * gone idle.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
+ *
+ * NOTE(review): link_up and txfree are tested before txlock is taken
+ * below — the checks are racy with respect to tsi108_check_phy() and
+ * tsi108_complete_tx(); confirm this is intended best-effort.
+ */
+static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int frags = skb_shinfo(skb)->nr_frags + 1;
+	int i;
+
+	if (!data->phy_ok && net_ratelimit())
+		printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
+
+	if (!data->link_up) {
+		printk(KERN_ERR "%s: Transmit while link is down!\n",
+		       dev->name);
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (data->txfree < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+
+		if (net_ratelimit())
+			printk(KERN_ERR "%s: Transmit with full tx ring!\n",
+			       dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Stop the queue now if this packet leaves too little room for
+	 * a following maximally-fragmented packet.
+	 */
+	if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+	}
+
+	spin_lock_irq(&data->txlock);
+
+	for (i = 0; i < frags; i++) {
+		int misc = 0;
+		int tx = data->txhead;
+
+		/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
+		 * the interrupt bit.  TX descriptor-complete interrupts are
+		 * enabled when the queue fills up, and masked when there is
+		 * still free space.  This way, when saturating the outbound
+		 * link, the tx interrupts are kept to a reasonable level. 
+		 * When the queue is not full, reclamation of skbs still occurs
+		 * as new packets are transmitted, or on a queue-empty
+		 * interrupt.
+		 */
+
+		if ((tx % TSI108_TX_INT_FREQ == 0) &&
+		    ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
+			misc = TSI108_TX_INT;
+
+		/* Every fragment's slot records the same skb; it is only
+		 * freed once, at the EOF descriptor.
+		 */
+		data->txskbs[tx] = skb;
+
+		if (i == 0) {
+			data->txring[tx].buf0 = dma_map_single(NULL, skb->data, 
+					skb->len - skb->data_len, DMA_TO_DEVICE);
+			data->txring[tx].len = skb->len - skb->data_len;
+			misc |= TSI108_TX_SOF;
+		} else {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+			data->txring[tx].buf0 =
+			    dma_map_page(NULL, frag->page, frag->page_offset,
+					    frag->size, DMA_TO_DEVICE);
+			data->txring[tx].len = frag->size;
+		}
+
+		if (i == frags - 1)
+			misc |= TSI108_TX_EOF;
+
+		if (netif_msg_pktdata(data)) {
+			int i;
+			printk("%s: Tx Frame contents (%d)\n", dev->name,
+			       skb->len);
+			for (i = 0; i < skb->len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+		/* Setting OWN last hands the descriptor to the hardware. */
+		data->txring[tx].misc = misc | TSI108_TX_OWN;
+
+		data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
+		data->txfree--;
+	}
+
+	tsi108_complete_tx(dev);
+
+	/* This must be done after the check for completed tx descriptors,
+	 * so that the tail pointer is correct.
+	 */
+
+	if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
+		tsi108_restart_tx(data);
+
+	spin_unlock_irq(&data->txlock);
+	return NETDEV_TX_OK;
+}
+
+/* Harvest up to @budget completed RX descriptors: account errors for
+ * bad frames (dropping them), and push good frames up the stack with
+ * netif_receive_skb().  Returns the number of descriptors processed.
+ * Called only from the poll path, which keeps RX single-threaded.
+ *
+ * NOTE(review): the buffer mapped in tsi108_refill_rx() is not
+ * dma-unmapped/synced before the CPU reads skb->data — confirm this
+ * is coherent on the target platform.
+ */
+static int tsi108_complete_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree && done != budget) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		/* Descriptor still owned by hardware: nothing more done. */
+		if (data->rxring[rx].misc & TSI108_RX_OWN)
+			break;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		done++;
+
+		if (data->rxring[rx].misc & TSI108_RX_BAD) {
+			spin_lock_irq(&data->misclock);
+
+			if (data->rxring[rx].misc & TSI108_RX_CRC)
+				data->stats.rx_crc_errors++;
+			if (data->rxring[rx].misc & TSI108_RX_OVER)
+				data->stats.rx_fifo_errors++;
+
+			spin_unlock_irq(&data->misclock);
+
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+		if (netif_msg_pktdata(data)) {
+			int i;
+			printk("%s: Rx Frame contents (%d)\n",
+			       dev->name, data->rxring[rx].len);
+			for (i = 0; i < data->rxring[rx].len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+
+		skb->dev = dev;
+		skb_put(skb, data->rxring[rx].len);
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_receive_skb(skb);
+		dev->last_rx = jiffies;
+	}
+
+	return done;
+}
+
+/* Allocate and map fresh skbs for up to @budget empty RX ring slots,
+ * hand the descriptors back to the hardware, and restart the RX queue
+ * if it had stopped.  Returns the number of slots refilled.  Called
+ * only from the poll path.
+ *
+ * NOTE(review): the skb is allocated with TSI108_RXBUF_SIZE + 2 and 2
+ * bytes reserved, but mapped and advertised as TSI108_RX_SKB_SIZE —
+ * assumes TSI108_RX_SKB_SIZE <= TSI108_RXBUF_SIZE; confirm the macro
+ * definitions.
+ */
+static int tsi108_refill_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
+		int rx = data->rxhead;
+		struct sk_buff *skb;
+
+		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
+
+		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, 
+							TSI108_RX_SKB_SIZE,
+							DMA_FROM_DEVICE);
+
+		/* Sometimes the hardware sets blen to zero after packet
+		 * reception, even though the manual says that it's only ever
+		 * modified by the driver.
+		 */
+
+		data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
+		data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
+
+		data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
+		data->rxfree++;
+		done++;
+	}
+
+	if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
+			   TSI108_EC_RXSTAT_QUEUE0))
+		tsi108_restart_rx(data, dev);
+
+	return done;
+}
+
+/* Old-style NAPI poll callback.  Acknowledges the RX event/interrupt
+ * status, harvests completed packets, refills the ring, and accounts
+ * the work against *budget and dev->quota.  Returns 1 while more work
+ * remains; returns 0 after completing the poll and unmasking the RX
+ * interrupt sources.
+ */
+static int tsi108_poll(struct net_device *dev, int *budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI_READ(TSI108_EC_RXESTAT);
+	u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
+	int total_budget = min(*budget, dev->quota);
+	int num_received = 0, num_filled = 0, budget_used;
+
+	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
+
+	/* Ack the status bits we are about to service. */
+	TSI_WRITE(TSI108_EC_RXESTAT, estat);
+	TSI_WRITE(TSI108_EC_INTSTAT, intstat);
+
+	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
+		num_received = tsi108_complete_rx(dev, total_budget);
+
+	/* This should normally fill no more slots than the number of
+	 * packets received in tsi108_complete_rx().  The exception
+	 * is when we previously ran out of memory for RX SKBs.  In that
+	 * case, it's helpful to obey the budget, not only so that the
+	 * CPU isn't hogged, but so that memory (which may still be low)
+	 * is not hogged by one device.
+	 *
+	 * A work unit is considered to be two SKBs to allow us to catch
+	 * up when the ring has shrunk due to out-of-memory but we're
+	 * still removing the full budget's worth of packets each time.
+	 */
+
+	if (data->rxfree < TSI108_RXRING_LEN)
+		num_filled = tsi108_refill_rx(dev, total_budget * 2);
+
+	if (intstat & TSI108_INT_RXERROR) {
+		u32 err = TSI_READ(TSI108_EC_RXERR);
+		TSI_WRITE(TSI108_EC_RXERR, err);
+
+		if (err) {
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: RX error %x\n",
+				       dev->name, err);
+
+			if (!(TSI_READ(TSI108_EC_RXSTAT) &
+			      TSI108_EC_RXSTAT_QUEUE0))
+				tsi108_restart_rx(data, dev);
+		}
+	}
+
+	if (intstat & TSI108_INT_RXOVERRUN) {
+		spin_lock_irq(&data->misclock);
+		data->stats.rx_fifo_errors++;
+		spin_unlock_irq(&data->misclock);
+	}
+
+	/* Refills count as half a work unit each (see comment above). */
+	budget_used = max(num_received, num_filled / 2);
+
+	*budget -= budget_used;
+	dev->quota -= budget_used;
+
+	if (budget_used != total_budget) {
+		data->rxpending = 0;
+		netif_rx_complete(dev);
+
+		/* Clearing these mask bits re-enables RX interrupts. */
+		TSI_WRITE(TSI108_EC_INTMASK,
+				     TSI_READ(TSI108_EC_INTMASK)
+				     & ~(TSI108_INT_RXQUEUE0
+					 | TSI108_INT_RXTHRESH |
+					 TSI108_INT_RXOVERRUN |
+					 TSI108_INT_RXERROR |
+					 TSI108_INT_RXWAIT));
+
+		/* IRQs are level-triggered, so no need to re-check */
+		return 0;
+	} else {
+		data->rxpending = 1;
+	}
+
+	return 1;
+}
+
+/* Schedule an RX poll: mask (not ack) the RX interrupt sources and
+ * hand the device to the NAPI poll path.  Safe to call even if a poll
+ * is already scheduled.
+ */
+static void tsi108_rx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A race could cause dev to already be scheduled, so it's not an
+	 * error if that happens (and interrupts shouldn't be re-masked,
+	 * because that can cause harmful races, if poll has already
+	 * unmasked them but not cleared LINK_STATE_SCHED).  
+	 *
+	 * This can happen if this code races with tsi108_poll(), which masks
+	 * the interrupts after tsi108_irq_one() read the mask, but before
+	 * netif_rx_schedule is called.  It could also happen due to calls
+	 * from tsi108_check_rxring().
+	 */
+
+	if (netif_rx_schedule_prep(dev)) {
+		/* Mask, rather than ack, the receive interrupts.  The ack
+		 * will happen in tsi108_poll().
+		 */
+
+		TSI_WRITE(TSI108_EC_INTMASK,
+				     TSI_READ(TSI108_EC_INTMASK) |
+				     TSI108_INT_RXQUEUE0
+				     | TSI108_INT_RXTHRESH |
+				     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
+				     TSI108_INT_RXWAIT);
+		__netif_rx_schedule(dev);
+	} else {
+		if (!netif_running(dev)) {
+			/* This can happen if an interrupt occurs while the
+			 * interface is being brought down, as the START
+			 * bit is cleared before the stop function is called.
+			 *
+			 * In this case, the interrupts must be masked, or
+			 * they will continue indefinitely.
+			 *
+			 * There's a race here if the interface is brought down
+			 * and then up in rapid succession, as the device could
+			 * be made running after the above check and before
+			 * the masking below.  This will only happen if the IRQ
+			 * thread has a lower priority than the task brining
+			 * up the interface.  Fixing this race would likely
+			 * require changes in generic code.
+			 */
+
+			TSI_WRITE(TSI108_EC_INTMASK,
+					     TSI_READ
+					     (TSI108_EC_INTMASK) |
+					     TSI108_INT_RXQUEUE0 |
+					     TSI108_INT_RXTHRESH |
+					     TSI108_INT_RXOVERRUN |
+					     TSI108_INT_RXERROR |
+					     TSI108_INT_RXWAIT);
+		}
+	}
+}
+
+/* If the RX ring has run out of memory, try periodically
+ * to allocate some more, as otherwise poll would never
+ * get called (apart from the initial end-of-queue condition).
+ *
+ * This is called once per second (by default) from the thread.
+ */
+
+static void tsi108_check_rxring(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* Schedule a poll rather than calling tsi108_refill_rx()
+	 * directly, keeping the receive path single-threaded (and thus
+	 * lock-free).
+	 */
+	if (!netif_running(dev))
+		return;
+
+	if (data->rxfree < TSI108_RXRING_LEN / 4)
+		tsi108_rx_int(dev);
+}
+
+/* Service a TX interrupt: ack the TX event/interrupt status, log any
+ * queue error, and reclaim completed descriptors under txlock.
+ */
+static void tsi108_tx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI_READ(TSI108_EC_TXESTAT);
+
+	TSI_WRITE(TSI108_EC_TXESTAT, estat);
+	TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
+			     TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
+	if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
+		u32 err = TSI_READ(TSI108_EC_TXERR);
+		TSI_WRITE(TSI108_EC_TXERR, err);
+
+		if (err && net_ratelimit())
+			printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
+	}
+
+	if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
+		spin_lock(&data->txlock);
+		tsi108_complete_tx(dev);
+		spin_unlock(&data->txlock);
+	}
+}
+
+
+/* Top-level interrupt handler: dispatch unmasked TX, RX, switch-fabric
+ * (SFN) and statistics-carry events.  Returns IRQ_NONE when no status
+ * bit is set (shared-IRQ friendly).
+ */
+static irqreturn_t tsi108_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 stat = TSI_READ(TSI108_EC_INTSTAT);
+
+	if (!(stat & TSI108_INT_ANY))
+		return IRQ_NONE;	/* Not our interrupt */
+
+	/* Ignore sources that are currently masked. */
+	stat &= ~TSI_READ(TSI108_EC_INTMASK);
+
+	if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
+		    TSI108_INT_TXERROR))
+		tsi108_tx_int(dev);
+	if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+		    TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
+		    TSI108_INT_RXERROR))
+		tsi108_rx_int(dev);
+
+	if (stat & TSI108_INT_SFN) {
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: SFN error\n", dev->name);
+		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
+	}
+
+	if (stat & TSI108_INT_STATCARRY) {
+		tsi108_stat_carry(dev);
+		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Disable the TX and RX queues and wait (bounded, ~10ms each) for the
+ * controller to report both as idle.  A timeout is logged but not
+ * fatal; the caller continues the shutdown regardless.
+ */
+static void tsi108_stop_ethernet(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int i;
+
+	/* Disable all TX and RX queues ... */
+	TSI_WRITE(TSI108_EC_TXCTRL, 0);
+	TSI_WRITE(TSI108_EC_RXCTRL, 0);
+
+	/* ...and wait for them to become idle.  The original code fell
+	 * through silently if only the TX wait timed out.
+	 */
+	for (i = 0; i < 1000; i++) {
+		if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
+			break;
+		udelay(10);
+	}
+	if (i == 1000)
+		printk(KERN_ERR "%s: TX idle wait timed out\n", dev->name);
+
+	for (i = 0; i < 1000; i++) {
+		if (!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
+			return;
+		udelay(10);
+	}
+	printk(KERN_ERR "%s: RX idle wait timed out\n", dev->name);
+}
+
+/* Soft-reset the Ethernet block: pulse the reset bits of the MAC,
+ * statistics port, TX path, RX path and MII management interface in
+ * turn, with settle delays.  The final write also programs the low
+ * bits of MII_MGMT_CFG to 0x07 — presumably the management clock
+ * divisor; TODO confirm against the TSI108 manual.
+ */
+static void tsi108_reset_ether(struct tsi108_prv_data * data)
+{
+	TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
+	udelay(100);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_PORTCTRL,
+			     TSI_READ(TSI108_EC_PORTCTRL) &
+			     ~TSI108_EC_PORTCTRL_STATRST);
+
+	TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_TXCFG,
+			     TSI_READ(TSI108_EC_TXCFG) &
+			     ~TSI108_EC_TXCFG_RST);
+
+	TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_RXCFG,
+			     TSI_READ(TSI108_EC_RXCFG) &
+			     ~TSI108_EC_RXCFG_RST);
+
+	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+			     TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
+			     TSI108_MAC_MII_MGMT_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+			     (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
+			     ~(TSI108_MAC_MII_MGMT_RST |
+			       TSI108_MAC_MII_MGMT_CLK)) | 0x07);
+}
+
+/* Read the station address from the MAC_ADDR registers into
+ * dev->dev_addr.  If the registers are blank (e.g. no firmware set
+ * them), program a default Tundra address whose last octet is derived
+ * from the PHY number.  Returns 0, or -EINVAL if the resulting
+ * address is invalid.
+ */
+static int tsi108_get_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
+	u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+
+	/* Note that the octets are reversed from what the manual says,
+	 * producing an even weirder ordering...
+	 */
+	if (word2 == 0 && word1 == 0) {
+		dev->dev_addr[0] = 0x00;
+		dev->dev_addr[1] = 0x06;
+		dev->dev_addr[2] = 0xd2;
+		dev->dev_addr[3] = 0x00;
+		dev->dev_addr[4] = 0x00;
+		if (0x8 == data->phy)
+			dev->dev_addr[5] = 0x01;
+		else
+			dev->dev_addr[5] = 0x02;
+
+		word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+		word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+		    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+		TSI_WRITE(TSI108_MAC_ADDR1, word1);
+		TSI_WRITE(TSI108_MAC_ADDR2, word2);
+	} else {
+		dev->dev_addr[0] = (word2 >> 16) & 0xff;
+		dev->dev_addr[1] = (word2 >> 24) & 0xff;
+		dev->dev_addr[2] = (word1 >> 0) & 0xff;
+		dev->dev_addr[3] = (word1 >> 8) & 0xff;
+		dev->dev_addr[4] = (word1 >> 16) & 0xff;
+		dev->dev_addr[5] = (word1 >> 24) & 0xff;
+	}
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		/* KERN_ERR is a log-level prefix macro, not part of the
+		 * message text.
+		 */
+		printk(KERN_ERR "word1: %08x, word2: %08x\n", word1, word2);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* ndo set_mac_address hook.  @addr points at a struct sockaddr; the
+ * six MAC octets start at offset 2 (past sa_family), which is why the
+ * copy below reads addr[i + 2].  Programs the MAC_ADDR registers and
+ * wakes the TX queue if it can run.  Returns 0 or -EINVAL.
+ */
+static int tsi108_set_mac(struct net_device *dev, void *addr)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1, word2;
+	int i;
+
+	/* Validate the actual MAC bytes, not the start of the sockaddr
+	 * (which would test sa_family instead of the address).
+	 */
+	if (!is_valid_ether_addr(((unsigned char *)addr) + 2))
+		return -EINVAL;
+
+	for (i = 0; i < 6; i++)
+		/* +2 is for the offset of the HW addr type */
+		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+
+	word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+	word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+	    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+	spin_lock_irq(&data->misclock);
+	TSI_WRITE(TSI108_MAC_ADDR1, word1);
+	TSI_WRITE(TSI108_MAC_ADDR2, word2);
+	spin_lock(&data->txlock);
+
+	if (data->txfree && data->link_up)
+		netif_wake_queue(dev);
+
+	spin_unlock(&data->txlock);
+	spin_unlock_irq(&data->misclock);
+	return 0;
+}
+
+/* Program the RX filters from dev->flags and the multicast list:
+ * promiscuous mode disables hashing and forwards everything; otherwise
+ * a 512-bin multicast hash (top 9 bits of the Ethernet CRC) is built
+ * in data->mc_hash and loaded into the hardware hash table.
+ * Protected by dev->xmit_lock. */
+static void tsi108_set_rx_mode(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
+
+	if (dev->flags & IFF_PROMISC) {
+		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
+		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
+		goto out;
+	}
+
+	rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
+
+	if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+		int i;
+		struct dev_mc_list *mc = dev->mc_list;
+		rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
+
+		memset(data->mc_hash, 0, sizeof(data->mc_hash));
+
+		while (mc) {
+			u32 hash, crc;
+
+			if (mc->dmi_addrlen == 6) {
+				crc = ether_crc(6, mc->dmi_addr);
+				hash = crc >> 23;
+
+				__set_bit(hash, &data->mc_hash[0]);
+			} else {
+				printk(KERN_ERR
+				       "%s: got multicast address of length %d "
+				       "instead of 6.\n", dev->name,
+				       mc->dmi_addrlen);
+			}
+
+			mc = mc->next;
+		}
+
+		TSI_WRITE(TSI108_EC_HASHADDR,
+				     TSI108_EC_HASHADDR_AUTOINC |
+				     TSI108_EC_HASHADDR_MCAST);
+
+		for (i = 0; i < 16; i++) {
+			/* The manual says that the hardware may drop
+			 * back-to-back writes to the data register.
+			 */
+			udelay(1);
+			TSI_WRITE(TSI108_EC_HASHDATA,
+					     data->mc_hash[i]);
+		}
+	}
+
+      out:
+	TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
+}
+
+/* Reset and configure the PHY, restart autonegotiation, and poll
+ * (bounded) for link-up, setting data->link_up and data->phy_ok.
+ * Runs under phy_lock, which is dropped around msleep().
+ */
+static void tsi108_init_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 i = 0;
+	u16 phyval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	/* Reset the PHY and wait (bounded) for the reset bit to clear.
+	 * A bounded for-loop is used here; "u32 i = 0; while (i--)"
+	 * never entered the loop at all and left i at 0xffffffff,
+	 * which also made the link-wait loop below give up on its
+	 * first iteration.
+	 */
+	tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
+	for (i = 0; i < 1000; i++) {
+		if (!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
+			break;
+		udelay(10);
+	}
+	if (i == 1000)
+		printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+
+#if (TSI108_PHY_TYPE == PHY_BCM54XX)	/* Broadcom BCM54xx PHY */
+	tsi108_write_mii(data, 0x09, 0x0300);
+	tsi108_write_mii(data, 0x10, 0x1020);
+	tsi108_write_mii(data, 0x1c, 0x8c00);
+#endif
+
+	/* Restart autonegotiation and wait for it to begin. */
+	tsi108_write_mii(data,
+			 MII_BMCR,
+			 BMCR_ANENABLE | BMCR_ANRESTART);
+	while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
+		cpu_relax();
+
+	/* Set G/MII mode and receive clock select in TBI control #2.  The
+	 * second port won't work if this isn't done, even though we don't
+	 * use TBI mode.
+	 */
+
+	tsi108_write_tbi(data, 0x11, 0x30);
+
+	/* FIXME: It seems to take more than 2 back-to-back reads to the
+	 * PHY_STAT register before the link up status bit is set.
+	 */
+
+	data->link_up = 1;
+
+	/* Reuse i as the link-poll iteration counter. */
+	i = 0;
+	while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
+		 BMSR_LSTATUS)) {
+		if (i++ > (MII_READ_DELAY / 10)) {
+			data->link_up = 0;
+			break;
+		}
+		/* Drop the lock while sleeping. */
+		spin_unlock_irqrestore(&phy_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(&phy_lock, flags);
+	}
+
+	printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
+	data->phy_ok = 1;
+	data->init_media = 1;
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* Power the PHY down and mark it unusable (phy_ok = 0), serialized by
+ * phy_lock.  Counterpart of tsi108_init_phy().
+ */
+static void tsi108_kill_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+	tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
+	data->phy_ok = 0;
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* ndo open hook: claim the IRQ, allocate and initialize the RX and TX
+ * descriptor rings, populate the RX ring with skbs, bring up the PHY,
+ * arm the periodic checker timer, unmask interrupts and enable the
+ * MAC.  Returns 0 or a negative errno; all error paths release what
+ * was acquired.
+ */
+static int tsi108_open(struct net_device *dev)
+{
+	int i;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
+	unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
+
+	i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
+	if (i != 0) {
+		printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
+		       data->id, data->irq_num);
+		return i;
+	} else {
+		dev->irq = data->irq_num;
+		printk(KERN_NOTICE
+		       "tsi108_open : Port %d Assigned IRQ %d to %s\n",
+		       data->id, dev->irq, dev->name);
+	}
+
+	data->rxring = dma_alloc_coherent(NULL, rxring_size, 
+			&data->rxdma, GFP_KERNEL);
+
+	if (!data->rxring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for rxring!\n");
+		free_irq(data->irq_num, dev);
+		return -ENOMEM;
+	} else {
+		memset(data->rxring, 0, rxring_size);
+	}
+
+	data->txring = dma_alloc_coherent(NULL, txring_size, 
+			&data->txdma, GFP_KERNEL);
+
+	if (!data->txring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for txring!\n");
+		/* Must pair dma_alloc_coherent() with dma_free_coherent(),
+		 * not pci_free_consistent().
+		 */
+		dma_free_coherent(NULL, rxring_size, data->rxring,
+				  data->rxdma);
+		free_irq(data->irq_num, dev);
+		return -ENOMEM;
+	} else {
+		memset(data->txring, 0, txring_size);
+	}
+
+	/* Link the RX descriptors into a hardware ring. */
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
+		data->rxring[i].blen = TSI108_RXBUF_SIZE;
+		data->rxring[i].vlan = 0;
+	}
+
+	data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
+
+	data->rxtail = 0;
+	data->rxhead = 0;
+
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+
+		if (!skb) {
+			/* Bah.  No memory for now, but maybe we'll get
+			 * some more later.
+			 * For now, we'll live with the smaller ring.
+			 */
+			printk(KERN_WARNING
+			       "%s: Could only allocate %d receive skb(s).\n",
+			       dev->name, i);
+			data->rxhead = i;
+			break;
+		}
+
+		data->rxskbs[i] = skb;
+		/* Align the payload on a 4-byte boundary */
+		skb_reserve(skb, 2);
+		/* NOTE(review): virt_to_phys() here vs. dma_map_single()
+		 * in tsi108_refill_rx() — confirm both are equivalent on
+		 * this platform.
+		 */
+		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
+		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
+	}
+
+	data->rxfree = i;
+	TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
+
+	/* Link the TX descriptors into a hardware ring. */
+	for (i = 0; i < TSI108_TXRING_LEN; i++) {
+		data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
+		data->txring[i].misc = 0;
+	}
+
+	data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
+	data->txtail = 0;
+	data->txhead = 0;
+	data->txfree = TSI108_TXRING_LEN;
+	TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
+	tsi108_init_phy(dev);
+
+	setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+	mod_timer(&data->timer, jiffies + 1);
+
+	tsi108_restart_rx(data, dev);
+
+	/* Clear any stale interrupt status, then unmask the sources we
+	 * service.
+	 */
+	TSI_WRITE(TSI108_EC_INTSTAT, ~0);
+
+	TSI_WRITE(TSI108_EC_INTMASK,
+			     ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
+			       TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
+			       TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
+			       TSI108_INT_SFN | TSI108_INT_STATCARRY));
+
+	TSI_WRITE(TSI108_MAC_CFG1,
+			     TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* ndo stop hook: quiesce the hardware, power down the PHY, mask all
+ * interrupts, drop any pending TX packets, release the IRQ, discard
+ * the RX ring and free both descriptor rings.  Returns 0.
+ */
+static int tsi108_close(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	del_timer_sync(&data->timer);
+
+	tsi108_stop_ethernet(dev);
+	tsi108_kill_phy(dev);
+	TSI_WRITE(TSI108_EC_INTMASK, ~0);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	/* Check for any pending TX packets, and drop them. */
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		int tx = data->txtail;
+		struct sk_buff *skb;
+
+		skb = data->txskbs[tx];
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+
+		/* A multi-fragment packet occupies several descriptors
+		 * that all record the same skb; free it only once, at
+		 * its EOF descriptor (as tsi108_complete_tx() does), to
+		 * avoid a double free.
+		 */
+		if (data->txring[tx].misc & TSI108_TX_EOF)
+			dev_kfree_skb(skb);
+	}
+
+	synchronize_irq(data->irq_num);
+	free_irq(data->irq_num, dev);
+
+	/* Discard the RX ring. */
+
+	while (data->rxfree) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		dev_kfree_skb(skb);
+	}
+
+	dma_free_coherent(0,
+			    TSI108_RXRING_LEN * sizeof(rx_desc),
+			    data->rxring, data->rxdma);
+	dma_free_coherent(0,
+			    TSI108_TXRING_LEN * sizeof(tx_desc),
+			    data->txring, data->txdma);
+
+	return 0;
+}
+
+/* One-time MAC/engine configuration: frame options, TX FIFO
+ * thresholds, statistics-carry masks, RX config, queue and buffer
+ * descriptor settings (byte/word swap, switch-fabric port), with all
+ * interrupts left masked (tsi108_open() unmasks them).
+ */
+static void tsi108_init_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
+			     TSI108_MAC_CFG2_PADCRC);
+
+	TSI_WRITE(TSI108_EC_TXTHRESH,
+			     (192 << TSI108_EC_TXTHRESH_STARTFILL) |
+			     (192 << TSI108_EC_TXTHRESH_STOPFILL));
+
+	/* Enable carry interrupts only for the counters the driver
+	 * tracks (mask is active-low: ~ of the tracked bits).
+	 */
+	TSI_WRITE(TSI108_STAT_CARRYMASK1,
+			     ~(TSI108_STAT_CARRY1_RXBYTES |
+			       TSI108_STAT_CARRY1_RXPKTS |
+			       TSI108_STAT_CARRY1_RXFCS |
+			       TSI108_STAT_CARRY1_RXMCAST |
+			       TSI108_STAT_CARRY1_RXALIGN |
+			       TSI108_STAT_CARRY1_RXLENGTH |
+			       TSI108_STAT_CARRY1_RXRUNT |
+			       TSI108_STAT_CARRY1_RXJUMBO |
+			       TSI108_STAT_CARRY1_RXFRAG |
+			       TSI108_STAT_CARRY1_RXJABBER |
+			       TSI108_STAT_CARRY1_RXDROP));
+
+	TSI_WRITE(TSI108_STAT_CARRYMASK2,
+			     ~(TSI108_STAT_CARRY2_TXBYTES |
+			       TSI108_STAT_CARRY2_TXPKTS |
+			       TSI108_STAT_CARRY2_TXEXDEF |
+			       TSI108_STAT_CARRY2_TXEXCOL |
+			       TSI108_STAT_CARRY2_TXTCOL |
+			       TSI108_STAT_CARRY2_TXPAUSE));
+
+	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	TSI_WRITE(TSI108_EC_RXCFG,
+			     TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
+
+	TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
+			     TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_CFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
+			     TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_CFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
+			     TSI108_EC_TXQ_BUFCFG_BURST256 |
+			     TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_BUFCFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
+			     TSI108_EC_RXQ_BUFCFG_BURST256 |
+			     TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_BUFCFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_INTMASK, ~0);
+}
+
+/* net_device ->do_ioctl hook: forward SIOCGMIIPHY/SIOCGMIIREG/
+ * SIOCSMIIREG to the generic MII ioctl helper using this interface's
+ * mii_if state.
+ */
+static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
+}
+
+/* Platform-bus probe: allocate the net_device, map the two register
+ * windows (controller regs and PHY-access regs), wire up the MII
+ * interface and netdev hooks, read the MAC address, and register the
+ * interface.  Returns 0 on success or a negative errno, unwinding
+ * everything acquired so far on failure.
+ */
+static int
+tsi108_init_one(struct platform_device *pdev)
+{
+	struct net_device *dev = NULL;
+	struct tsi108_prv_data *data = NULL;
+	hw_info *einfo;
+	int err = 0;
+	
+	einfo = pdev->dev.platform_data;
+	
+	if (NULL == einfo) {
+		printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
+		       pdev->id);
+		return -ENODEV;
+	}
+
+	/* Create an ethernet device instance */
+
+	dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
+	if (!dev) {
+		printk(KERN_ERR
+		       "tsi108_eth: Could not allocate a device structure\n");
+		return -ENOMEM;
+	}
+
+	printk(KERN_INFO "tsi108_eth%d: probe...\n", pdev->id);
+	data = netdev_priv(dev);
+
+	pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
+			pdev->id, einfo->regs, einfo->phyregs,
+			einfo->phy, einfo->irq_num);
+	
+	data->regs = ioremap(einfo->regs, 0x400);
+	if (NULL == data->regs) {
+		err = -ENOMEM;
+		goto regs_fail;
+	}
+	
+	data->phyregs = ioremap(einfo->phyregs, 0x400);
+	if (NULL == data->phyregs) {
+		err = -ENOMEM;
+		/* data->regs is already mapped; unmap it on the way out. */
+		goto phyregs_fail;
+	}
+/* MII setup */
+	data->mii_if.dev = dev;
+	data->mii_if.mdio_read = tsi108_mdio_read;
+	data->mii_if.mdio_write = tsi108_mdio_write;
+	data->mii_if.phy_id = einfo->phy;
+	data->mii_if.phy_id_mask = 0x1f;
+	data->mii_if.reg_num_mask = 0x1f;
+	data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
+	
+	data->phy = einfo->phy;
+	data->irq_num = einfo->irq_num;
+	data->id = pdev->id;
+	dev->open = tsi108_open;
+	dev->stop = tsi108_close;
+	dev->hard_start_xmit = tsi108_send_packet;
+	dev->set_mac_address = tsi108_set_mac;
+	dev->set_multicast_list = tsi108_set_rx_mode;
+	dev->get_stats = tsi108_get_stats;
+	dev->poll = tsi108_poll;
+	dev->do_ioctl = tsi108_do_ioctl;
+	dev->weight = 64;  /* 64 is more suitable for GigE interface - klai */
+
+	/* Apparently, the Linux networking code won't use scatter-gather
+	 * if the hardware doesn't do checksums.  However, it's faster
+	 * to checksum in place and use SG, as (among other reasons)
+	 * the cache won't be dirtied (which then has to be flushed
+	 * before DMA).  The checksumming is done by the driver (via
+	 * a new function skb_csum_dev() in net/core/skbuff.c).
+	 */
+
+	dev->features = NETIF_F_HIGHDMA;
+	SET_MODULE_OWNER(dev);
+
+	spin_lock_init(&data->txlock);
+	spin_lock_init(&data->misclock);
+
+	tsi108_reset_ether(data);
+	tsi108_kill_phy(dev);
+
+	if ((err = tsi108_get_mac(dev)) != 0) {
+		printk(KERN_ERR "%s: Invalid MAC address.  Please correct.\n",
+		       dev->name);
+		goto register_fail;
+	}
+
+	tsi108_init_mac(dev);
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+				dev->name);
+		goto register_fail;
+	}
+
+	printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
+	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+#ifdef DEBUG
+	data->msg_enable = DEBUG;
+	dump_eth_one(dev);
+#endif
+
+	return 0;
+	
+register_fail:
+	iounmap(data->phyregs);
+	
+phyregs_fail:
+	iounmap(data->regs);
+	
+regs_fail:
+	free_netdev(dev);
+	return err;
+}
+
+/* There's no way to either get interrupts from the PHY when
+ * something changes, or to have the Tsi108 automatically communicate
+ * with the PHY to reconfigure itself.
+ *
+ * Thus, we have to do it using a timer. 
+ */
+
+/* Periodic timer callback: poll PHY link state and the RX ring, then
+ * re-arm for CHECK_PHY_INTERVAL jiffies.  The timer is started by
+ * tsi108_open() and stopped in tsi108_close().
+ */
+static void tsi108_timed_checker(unsigned long dev_ptr)
+{
+	struct net_device *dev = (struct net_device *)dev_ptr;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	tsi108_check_phy(dev);
+	tsi108_check_rxring(dev);
+	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+}
+
+/* Module init: register the platform driver.  Returns 0 on success or
+ * the negative errno from platform_driver_register().
+ */
+static int tsi108_ether_init(void)
+{
+	int ret;
+	ret = platform_driver_register (&tsi_eth_driver);
+	if (ret < 0){
+		/* Use an explicit log level per kernel convention. */
+		printk(KERN_ERR "tsi108_ether_init: error initializing ethernet "
+		       "device\n");
+		return ret;
+	}
+	return 0;
+}
+
+/* Platform-bus remove: unregister the interface, stop the hardware,
+ * and release the register mappings and the net_device.  Mirrors the
+ * acquisitions made in tsi108_init_one().
+ */
+static int tsi108_ether_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct tsi108_prv_data *priv = netdev_priv(dev);
+	
+	unregister_netdev(dev);
+	tsi108_stop_ethernet(dev);
+	platform_set_drvdata(pdev, NULL);
+	iounmap(priv->regs);
+	iounmap(priv->phyregs);
+	free_netdev(dev);
+	
+	return 0;
+}
+/* Module exit: unregister the platform driver (the core then invokes
+ * tsi108_ether_remove() for each bound device). */
+static void tsi108_ether_exit(void)
+{
+	platform_driver_unregister(&tsi_eth_driver);
+}
+
+module_init(tsi108_ether_init);
+module_exit(tsi108_ether_exit);
+
+MODULE_AUTHOR("Tundra Semiconductor Corporation");
+MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");


	


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-09-21  4:46           ` Jeff Garzik
  2006-09-29  7:36             ` Zang Roy-r61911
  2006-10-17  7:18             ` Zang Roy-r61911
@ 2006-10-23  2:09             ` Zang Roy-r61911
  2006-10-26  2:50               ` Zang Roy-r61911
  2 siblings, 1 reply; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-10-23  2:09 UTC (permalink / raw)
  To: Jeff Garzik
  Cc: Roland Dreier, Andrew Morton, netdev, linux-kernel, Alexandre.Bounine

On Thu, 2006-09-21 at 12:46, Jeff Garzik wrote:

> you should have a chip structure, that contains two structs (one for 
> each interface/port)
> 
Jeff,

I updated the code according to all your feedback and post it here

http://www.spinics.net/lists/netdev/msg17120.html

Any comment?

Roy


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [patch 3/3] Add tsi108 On Chip Ethernet device driver support
  2006-10-23  2:09             ` [patch 3/3] Add tsi108 " Zang Roy-r61911
@ 2006-10-26  2:50               ` Zang Roy-r61911
  0 siblings, 0 replies; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-10-26  2:50 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: Andrew Morton, netdev, linux-kernel, Alexandre.Bounine

On Mon, 2006-10-23 at 10:09, Zang Roy-r61911 wrote:
> On Thu, 2006-09-21 at 12:46, Jeff Garzik wrote:
> 
> > you should have a chip structure, that contains two structs (one for 
> > each interface/port)
> > 
> Jeff,
> 
> I updated the code according to all your feedback and post it here
> 
> http://www.spinics.net/lists/netdev/msg17120.html
> 
> Any comment?
> 
> Roy

Jeff
	How about it? What's your comment?
Roy


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Add tsi108/9 On Chip Ethernet device driver support
  2006-10-17  7:18             ` Zang Roy-r61911
@ 2006-10-30  5:19               ` Zang Roy-r61911
  2006-10-31  2:26                 ` [PATCH] " Zang Roy-r61911
  0 siblings, 1 reply; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-10-30  5:19 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: Andrew Morton, netdev, linux-kernel, Alexandre.Bounine

The following patch adds tsi108/9 on-chip Ethernet controller driver
support.

The driver code incorporates the feedback from the previous posting on
the mailing list and provides an updated version.

MPC7448HPC2 platform in arch/powerpc uses tsi108 bridge.

The following is a brief description of the Ethernet controller:

The Tsi108/9 Ethernet Controller connects Switch Fabric to two
independent Gigabit Ethernet ports,E0 and E1. It uses a single
Management interface to manage the two physical connection devices
(PHYs). Each Ethernet port has its own statistics monitor that tracks
and reports key interface statistics. Each port supports a 256-entry
hash table for address filtering. In addition, each port is bridged to
the Switch Fabric through a 2-Kbyte transmit FIFO and a 4-Kbyte Receive
FIFO.

Each Ethernet port also has a pair of internal Ethernet DMA channels to
support the transmit and receive data flows. The Ethernet DMA channels
use descriptors set up in memory, the memory map of the device, and
access via the Switch Fabric. The Ethernet Controller’s DMA arbiter
handles arbitration for the Switch Fabric. The Controller also has a
register bus interface for register accesses and status monitor control.

The PMD (Physical Media Device) interface operates in MII, GMII, or TBI
modes. The MII mode is used for connecting with 10 or 100 Mbit/s PMDs.
The GMII and TBI modes are used to connect with Gigabit PMDs. 
Internal data flows to and from the Ethernet Controller through the
Switch Fabric.

Each Ethernet port uses its transmit and receive DMA channels to manage data
flows through buffer descriptors that are predefined by the system (the
descriptors can exist anywhere in the system memory map). These
descriptors are data structures that point to buffers filled with data
ready to transmit over Ethernet, or they point to empty buffers ready to
receive data from Ethernet.


Signed-off-by: Alexandre Bounine<alexandreb@tundra.com>
Signed-off-by: Roy Zang	<tie-fei.zang@freescale.com> 

---
 drivers/net/Kconfig      |    8 
 drivers/net/Makefile     |    1 
 drivers/net/tsi108_eth.c | 1708 ++++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/tsi108_eth.h |  365 ++++++++++
 4 files changed, 2082 insertions(+), 0 deletions(-)

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e2ed249..74a2d93 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2251,6 +2251,14 @@ config SPIDER_NET
 	  This driver supports the Gigabit Ethernet chips present on the
 	  Cell Processor-Based Blades from IBM.
 
+config TSI108_ETH
+	   tristate "Tundra TSI108 gigabit Ethernet support"
+	   depends on TSI108_BRIDGE
+	   help
+	     This driver supports Tundra TSI108 gigabit Ethernet ports.
+	     To compile this driver as a module, choose M here: the module
+	     will be called tsi108_eth.
+
 config GIANFAR
 	tristate "Gianfar Ethernet"
 	depends on 85xx || 83xx || PPC_86xx
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f270bc4..b1f713e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -109,6 +109,7 @@ obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
+obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
new file mode 100644
index 0000000..82213b4
--- /dev/null
+++ b/drivers/net/tsi108_eth.c
@@ -0,0 +1,1708 @@
+/*******************************************************************************
+  
+  Copyright(c) 2006 Tundra Semiconductor Corporation.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+*******************************************************************************/
+
+/* This driver is based on the driver code originally developed	
+ * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
+ * scott.wood@timesys.com  * Copyright (C) 2003 TimeSys Corporation
+ *
+ * Currently changes from original version are:
+ * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
+ * - modifications to handle two ports independently and support for
+ *   additional PHY devices (alexandre.bounine@tundra.com)
+ * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/tsi108.h>
+
+#include "tsi108_eth.h"
+
+#define MII_READ_DELAY 10000	/* max link wait time in msec */
+
+#define TSI108_RXRING_LEN     256
+
+/* NOTE: The driver currently does not support receiving packets
+ * larger than the buffer size, so don't decrease this (unless you
+ * want to add such support).
+ */
+#define TSI108_RXBUF_SIZE     1536
+
+#define TSI108_TXRING_LEN     256
+
+#define TSI108_TX_INT_FREQ    64
+
+/* Check the phy status every half a second. */
+#define CHECK_PHY_INTERVAL (HZ/2)
+
+static int tsi108_init_one(struct platform_device *pdev);
+static int tsi108_ether_remove(struct platform_device *pdev);
+
+/* Per-interface private state, stored in netdev_priv().  Holds the
+ * mapped register windows, TX/RX descriptor ring bookkeeping, cached
+ * link state, and software-extended statistics counters.
+ */
+struct tsi108_prv_data {
+	void  __iomem *regs;	/* Base of normal regs */
+	void  __iomem *phyregs;	/* Base of register bank used for PHY access */
+	
+	unsigned int phy;		/* Index of PHY for this interface */
+	unsigned int irq_num;
+	unsigned int id;
+
+	struct timer_list timer;/* Timer that triggers the check phy function */
+	unsigned int rxtail;	/* Next entry in rxring to read */
+	unsigned int rxhead;	/* Next entry in rxring to give a new buffer */
+	unsigned int rxfree;	/* Number of free, allocated RX buffers */
+
+	unsigned int rxpending;	/* Non-zero if there are still descriptors
+				 * to be processed from a previous descriptor
+				 * interrupt condition that has been cleared */
+
+	unsigned int txtail;	/* Next TX descriptor to check status on */
+	unsigned int txhead;	/* Next TX descriptor to use */
+
+	/* Number of free TX descriptors.  This could be calculated from
+	 * rxhead and rxtail if one descriptor were left unused to disambiguate
+	 * full and empty conditions, but it's simpler to just keep track
+	 * explicitly. */
+
+	unsigned int txfree;
+
+	unsigned int phy_ok;		/* The PHY is currently powered on. */
+
+	/* PHY status (duplex is 1 for half, 2 for full,
+	 * so that the default 0 indicates that neither has
+	 * yet been configured). */
+
+	unsigned int link_up;
+	unsigned int speed;
+	unsigned int duplex;
+
+	tx_desc *txring;
+	rx_desc *rxring;
+	struct sk_buff *txskbs[TSI108_TXRING_LEN];
+	struct sk_buff *rxskbs[TSI108_RXRING_LEN];
+
+	dma_addr_t txdma, rxdma;
+
+	/* txlock nests in misclock and phy_lock */
+
+	spinlock_t txlock, misclock;
+
+	/* stats is used to hold the upper bits of each hardware counter,
+	 * and tmpstats is used to hold the full values for returning
+	 * to the caller of get_stats().  They must be separate in case
+	 * an overflow interrupt occurs before the stats are consumed.
+	 */
+
+	struct net_device_stats stats;
+	struct net_device_stats tmpstats;
+
+	/* These stats are kept separate in hardware, thus require individual
+	 * fields for handling carry.  They are combined in get_stats.
+	 */
+
+	unsigned long rx_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_short_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_long_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_underruns;	/* Add to rx_length_errors */
+	unsigned long rx_overruns;	/* Add to rx_length_errors */
+
+	unsigned long tx_coll_abort;	/* Add to tx_aborted_errors/collisions */
+	unsigned long tx_pause_drop;	/* Add to tx_aborted_errors */
+
+	unsigned long mc_hash[16];
+	u32 msg_enable;			/* debug message level */
+	struct mii_if_info mii_if;
+	unsigned int init_media;
+};
+
+/* Platform driver description: binds to "tsi-ethernet" platform
+ * devices registered by the board setup code. */
+
+static struct platform_driver tsi_eth_driver = {
+	.probe = tsi108_init_one,
+	.remove = tsi108_ether_remove,
+	.driver	= {
+		.name = "tsi-ethernet",
+	},
+};
+
+static void tsi108_timed_checker(unsigned long dev_ptr);
+
+/* Debug helper: dump interrupt, link, and TX/RX ring/queue state for
+ * one interface to the console.  Only called from the DEBUG path in
+ * tsi108_init_one().
+ */
+static void dump_eth_one(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	printk("Dumping %s...\n", dev->name);
+	printk("intstat %x intmask %x phy_ok %d"
+	       " link %d speed %d duplex %d\n",
+	       TSI_READ(TSI108_EC_INTSTAT),
+	       TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
+	       data->link_up, data->speed, data->duplex);
+
+	printk("TX: head %d, tail %d, free %d, stat %x, estat %x, err %x\n",
+	       data->txhead, data->txtail, data->txfree,
+	       TSI_READ(TSI108_EC_TXSTAT),
+	       TSI_READ(TSI108_EC_TXESTAT),
+	       TSI_READ(TSI108_EC_TXERR));
+
+	printk("RX: head %d, tail %d, free %d, stat %x,"
+	       " estat %x, err %x, pending %d\n\n",
+	       data->rxhead, data->rxtail, data->rxfree,
+	       TSI_READ(TSI108_EC_RXSTAT),
+	       TSI_READ(TSI108_EC_RXESTAT),
+	       TSI_READ(TSI108_EC_RXERR), data->rxpending);
+}
+
+/* Synchronization is needed between the thread and up/down events.
+ * Note that the PHY is accessed through the same registers for both
+ * interfaces, so this can't be made interface-specific.
+ */
+
+static DEFINE_SPINLOCK(phy_lock);
+
+/* Read one register of this interface's PHY through the shared MII
+ * management interface.  Busy-waits up to 100 * 10us for the access to
+ * complete; on timeout returns 0xffff, the value an MDIO read of a
+ * missing device would yield.  Caller must hold phy_lock.
+ */
+static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
+{
+	unsigned i;
+
+	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	/* Pulse the read command: clear, then set READ. */
+	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
+	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
+	for (i = 0; i < 100; i++) {
+		if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+		      (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
+			break;
+		udelay(10);
+	}
+
+	if (i == 100) 
+		return 0xffff;
+	else
+		return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN));
+}
+
+/* Write one register of this interface's PHY via the MII management
+ * interface, then busy-wait up to 100 * 10us for completion.
+ * NOTE(review): a timeout here is silent -- no error is reported to
+ * the caller or logged.  Caller must hold phy_lock.
+ */
+static void tsi108_write_mii(struct tsi108_prv_data *data, 
+				int reg, u16 val)
+{
+	unsigned i = 100;
+	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
+	while (i--) {
+		if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) & 
+			TSI108_MAC_MII_IND_BUSY))
+			break;
+		udelay(10);
+	}
+}
+
+/* mii_if_info ->mdio_read hook.  NOTE(review): the addr argument is
+ * ignored and the read always targets data->phy -- presumably
+ * intentional since each interface owns exactly one PHY, but confirm
+ * against callers of generic_mii_ioctl(). */
+static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	return tsi108_read_mii(data, reg);
+}
+
+/* mii_if_info ->mdio_write hook.  Like tsi108_mdio_read(), the addr
+ * argument is ignored; writes always go to data->phy. */
+static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	tsi108_write_mii(data, reg, val);
+}
+
+/* Write a register of the on-chip TBI (ten-bit interface), which sits
+ * at fixed management address 0x1e.  Busy-waits up to 1000 * 10us and
+ * logs an error on timeout.  Note this uses TSI_WRITE/TSI_READ (the
+ * normal register window), not the PHY window.
+ */
+static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
+					int reg, u16 val)
+{
+	unsigned i = 1000;
+	TSI_WRITE(TSI108_MAC_MII_ADDR,
+			     (0x1e << TSI108_MAC_MII_ADDR_PHY)
+			     | (reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
+	while(i--) {
+		if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
+			return;
+		udelay(10);
+	}
+	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+/* Determine the negotiated link speed from the PHY: returns 1000, 100,
+ * or 10 (Mbit/s), or 0 if the link is down or autonegotiation has not
+ * completed.  Gigabit is detected from the link partner's 1000BASE-T
+ * status when the PHY supports GMII.
+ */
+static int mii_speed(struct mii_if_info *mii)
+{
+	int advert, lpa, val, media;
+	int lpa2 = 0;
+	int speed;
+
+	if (!mii_link_ok(mii))
+		return 0;
+
+	val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
+	if ((val & BMSR_ANEGCOMPLETE) == 0)
+		return 0;
+
+	/* Resolve the common subset of our advertisement and the link
+	 * partner's ability. */
+	advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
+	lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
+	media = mii_nway_result(advert & lpa);
+
+	if (mii->supports_gmii)
+		lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
+
+	speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+			(media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
+	return speed;
+}
+
+/* Poll the PHY (from the periodic timer, or at open) and reprogram the
+ * MAC's speed/duplex configuration when the negotiated link parameters
+ * change.  Also starts/stops the TX queue as the link comes up or goes
+ * down.  Takes phy_lock, which serializes PHY access for both ports.
+ */
+static void tsi108_check_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 mac_cfg2_reg, portctrl_reg;
+	u32 duplex;
+	u32 speed;
+	unsigned long flags;
+
+	/* Do a dummy read, as for some reason the first read
+	 * after a link becomes up returns link down, even if
+	 * it's been a while since the link came up.
+	 */
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	if (!data->phy_ok)
+		goto out;
+
+	/* The dummy read referred to above. */
+	tsi108_read_mii(data, MII_BMSR);
+
+	duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
+	data->init_media = 0;
+
+	if (netif_carrier_ok(dev)) {
+
+		speed = mii_speed(&data->mii_if);
+
+		/* Reconfigure the MAC if the speed changed or
+		 * mii_check_media() reported a duplex change. */
+		if ((speed != data->speed) || duplex) {
+
+			mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
+			portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
+
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+
+			if (speed == 1000) {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
+				portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
+			} else {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+				portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+			}
+
+			data->speed = speed;
+
+			if (data->mii_if.full_duplex) {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
+				portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
+				data->duplex = 2;
+			} else {
+				mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
+				portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
+				data->duplex = 1;
+			}
+
+			TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
+			TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
+
+			if (data->link_up == 0) {
+				/* The manual says it can take 3-4 usecs for the speed change
+				 * to take effect.
+				 */
+				udelay(5);
+
+				spin_lock(&data->txlock);
+				if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
+					netif_wake_queue(dev);
+
+				data->link_up = 1;
+				spin_unlock(&data->txlock);
+			}
+		}
+
+	} else {
+		if (data->link_up == 1) {
+			netif_stop_queue(dev);
+			data->link_up = 0;
+			printk(KERN_NOTICE "%s : link is down\n", dev->name);
+		}
+
+		/* Redundant (falls through to out anyway) but harmless. */
+		goto out;
+	}
+
+
+out:
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* If the given carry bit is set in the latched carry register value,
+ * add the counter's wrap quantum (carry_shift, e.g. 1 << counter
+ * width) to the software-maintained upper accumulator. */
+static inline void
+tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+		      unsigned long *upper)
+{
+	if (carry & carry_bit)
+		*upper += carry_shift;
+}
+
+/* Statistics-carry interrupt service: read and acknowledge both carry
+ * registers, then fold each counter's overflow into its software upper
+ * half so that get_stats() can report full-width values.  Serialized
+ * against get_stats() by data->misclock.
+ */
+static void tsi108_stat_carry(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 carry1, carry2;
+
+	spin_lock_irq(&data->misclock);
+
+	carry1 = TSI_READ(TSI108_STAT_CARRY1);
+	carry2 = TSI_READ(TSI108_STAT_CARRY2);
+
+	/* Write-one-to-clear the latched carry bits. */
+	TSI_WRITE(TSI108_STAT_CARRY1, carry1);
+	TSI_WRITE(TSI108_STAT_CARRY2, carry2);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
+			      TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
+			      TSI108_STAT_RXPKTS_CARRY,
+			      &data->stats.rx_packets);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
+			      TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
+			      TSI108_STAT_RXMCAST_CARRY,
+			      &data->stats.multicast);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
+			      TSI108_STAT_RXALIGN_CARRY,
+			      &data->stats.rx_frame_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
+			      TSI108_STAT_RXLENGTH_CARRY,
+			      &data->stats.rx_length_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
+			      TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
+			      TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
+			      TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
+			      TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
+			      TSI108_STAT_RXDROP_CARRY,
+			      &data->stats.rx_missed_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
+			      TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
+			      TSI108_STAT_TXPKTS_CARRY,
+			      &data->stats.tx_packets);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
+			      TSI108_STAT_TXEXDEF_CARRY,
+			      &data->stats.tx_aborted_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
+			      TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
+			      TSI108_STAT_TXTCOL_CARRY,
+			      &data->stats.collisions);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
+			      TSI108_STAT_TXPAUSEDROP_CARRY,
+			      &data->tx_pause_drop);
+
+	spin_unlock_irq(&data->misclock);
+}
+
+/* Read a stat counter atomically with respect to carries.
+ * data->misclock must be held.
+ */
+static inline unsigned long
+tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
+		 int carry_shift, unsigned long *upper)
+{
+	int carryreg;
+	unsigned long val;
+
+	/* Registers below offset 0xb0 overflow into CARRY1, the rest
+	 * into CARRY2. */
+	if (reg < 0xb0)
+		carryreg = TSI108_STAT_CARRY1;
+	else
+		carryreg = TSI108_STAT_CARRY2;
+
+      again:
+	val = TSI_READ(reg) | *upper;
+
+	/* Check to see if it overflowed, but the interrupt hasn't
+	 * been serviced yet.  If so, handle the carry here, and
+	 * try again.
+	 */
+
+	if (unlikely(TSI_READ(carryreg) & carry_bit)) {
+		*upper += carry_shift;
+		TSI_WRITE(carryreg, carry_bit);
+		goto again;
+	}
+
+	return val;
+}
+
+/* net_device ->get_stats hook: snapshot every hardware counter (with
+ * carry handling via tsi108_read_stat), combine the counters that map
+ * onto the same net_device_stats field, and return the result from
+ * tmpstats.  tmpstats is separate from stats so a carry interrupt
+ * cannot corrupt a snapshot in progress; misclock serializes both.
+ */
+static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
+{
+	unsigned long excol;
+
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	spin_lock_irq(&data->misclock);
+
+	data->tmpstats.rx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_RXPKTS,
+			     TSI108_STAT_CARRY1_RXPKTS,
+			     TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
+
+	data->tmpstats.tx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_TXPKTS,
+			     TSI108_STAT_CARRY2_TXPKTS,
+			     TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
+
+	data->tmpstats.rx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_RXBYTES,
+			     TSI108_STAT_CARRY1_RXBYTES,
+			     TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	data->tmpstats.tx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_TXBYTES,
+			     TSI108_STAT_CARRY2_TXBYTES,
+			     TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	data->tmpstats.multicast =
+	    tsi108_read_stat(data, TSI108_STAT_RXMCAST,
+			     TSI108_STAT_CARRY1_RXMCAST,
+			     TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
+
+	/* Excessive collisions count toward both collisions and
+	 * tx_aborted_errors below. */
+	excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
+				 TSI108_STAT_CARRY2_TXEXCOL,
+				 TSI108_STAT_TXEXCOL_CARRY,
+				 &data->tx_coll_abort);
+
+	data->tmpstats.collisions =
+	    tsi108_read_stat(data, TSI108_STAT_TXTCOL,
+			     TSI108_STAT_CARRY2_TXTCOL,
+			     TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
+
+	data->tmpstats.collisions += excol;
+
+	data->tmpstats.rx_length_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
+			     TSI108_STAT_CARRY1_RXLENGTH,
+			     TSI108_STAT_RXLENGTH_CARRY,
+			     &data->stats.rx_length_errors);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXRUNT,
+			     TSI108_STAT_CARRY1_RXRUNT,
+			     TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
+			     TSI108_STAT_CARRY1_RXJUMBO,
+			     TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	data->tmpstats.rx_frame_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXALIGN,
+			     TSI108_STAT_CARRY1_RXALIGN,
+			     TSI108_STAT_RXALIGN_CARRY,
+			     &data->stats.rx_frame_errors);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFCS,
+			     TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
+			     &data->rx_fcs);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFRAG,
+			     TSI108_STAT_CARRY1_RXFRAG,
+			     TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	data->tmpstats.rx_missed_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXDROP,
+			     TSI108_STAT_CARRY1_RXDROP,
+			     TSI108_STAT_RXDROP_CARRY,
+			     &data->stats.rx_missed_errors);
+
+	/* These three are maintained by software. */
+	data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
+	data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
+
+	data->tmpstats.tx_aborted_errors =
+	    tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
+			     TSI108_STAT_CARRY2_TXEXDEF,
+			     TSI108_STAT_TXEXDEF_CARRY,
+			     &data->stats.tx_aborted_errors);
+
+	data->tmpstats.tx_aborted_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
+			     TSI108_STAT_CARRY2_TXPAUSE,
+			     TSI108_STAT_TXPAUSEDROP_CARRY,
+			     &data->tx_pause_drop);
+
+	data->tmpstats.tx_aborted_errors += excol;
+
+	data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
+	data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
+	    data->tmpstats.rx_crc_errors +
+	    data->tmpstats.rx_frame_errors +
+	    data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
+
+	spin_unlock_irq(&data->misclock);
+	return &data->tmpstats;
+}
+
+/* Kick the RX DMA engine: mark the queue-pointer high word valid and
+ * issue GO on RX queue 0. */
+static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
+{
+	TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
+			     TSI108_EC_RXQ_PTRHIGH_VALID);
+
+	TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
+			     | TSI108_EC_RXCTRL_QUEUE0);
+}
+
+/* Kick the TX DMA engine: mark the queue-pointer high word valid and
+ * issue GO (with idle interrupt enabled) on TX queue 0. */
+static void tsi108_restart_tx(struct tsi108_prv_data * data)
+{
+	TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
+			     TSI108_EC_TXQ_PTRHIGH_VALID);
+
+	TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
+			     TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
+}
+
+/* txlock must be held by caller, with IRQs disabled, and
+ * with permission to re-enable them when the lock is dropped.
+ */
+/* Reclaim completed TX descriptors: walk from txtail until a
+ * still-owned descriptor is found, freeing each packet's skb only at
+ * its EOF fragment (all fragments of one skb share the same txskbs
+ * pointer).  Wakes the queue if anything was released and the link is
+ * up.  Caller holds txlock with IRQs disabled (see comment above).
+ */
+static void tsi108_complete_tx(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int tx;
+	struct sk_buff *skb;
+	int release = 0;
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		tx = data->txtail;
+
+		/* Descriptor still owned by hardware: stop reclaiming. */
+		if (data->txring[tx].misc & TSI108_TX_OWN)
+			break;
+
+		skb = data->txskbs[tx];
+
+		/* NOTE(review): printk without a KERN_ level -- should
+		 * probably be KERN_ERR. */
+		if (!(data->txring[tx].misc & TSI108_TX_OK))
+			printk("%s: bad tx packet, misc %x\n",
+			       dev->name, data->txring[tx].misc);
+
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+
+		if (data->txring[tx].misc & TSI108_TX_EOF) {
+			dev_kfree_skb_any(skb);
+			release++;
+		}
+	}
+
+	if (release) {
+		if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
+			netif_wake_queue(dev);
+	}
+}
+
+/* net_device hard_start_xmit: place the skb (head fragment plus any
+ * page fragments) on the TX descriptor ring and kick the queue if the
+ * hardware had stopped.  Returns NETDEV_TX_OK on success, or
+ * NETDEV_TX_BUSY (with the queue stopped) when the link is down or
+ * the ring lacks room.
+ *
+ * NOTE(review): the link-up and ring-space checks run before txlock
+ * is taken -- presumably serialized by the core transmit locking;
+ * confirm.
+ */
+static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int frags = skb_shinfo(skb)->nr_frags + 1;
+	int i;
+
+	if (!data->phy_ok && net_ratelimit())
+		printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
+
+	if (!data->link_up) {
+		printk(KERN_ERR "%s: Transmit while link is down!\n",
+		       dev->name);
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (data->txfree < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+
+		if (net_ratelimit())
+			printk(KERN_ERR "%s: Transmit with full tx ring!\n",
+			       dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Stop early if a worst-case (fully fragmented) packet would no
+	 * longer fit after this one.
+	 */
+	if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+	}
+
+	spin_lock_irq(&data->txlock);
+
+	for (i = 0; i < frags; i++) {
+		int misc = 0;
+		int tx = data->txhead;
+
+		/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
+		 * the interrupt bit.  TX descriptor-complete interrupts are
+		 * enabled when the queue fills up, and masked when there is
+		 * still free space.  This way, when saturating the outbound
+		 * link, the tx interrupts are kept to a reasonable level. 
+		 * When the queue is not full, reclamation of skbs still occurs
+		 * as new packets are transmitted, or on a queue-empty
+		 * interrupt.
+		 */
+
+		if ((tx % TSI108_TX_INT_FREQ == 0) &&
+		    ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
+			misc = TSI108_TX_INT;
+
+		/* Record the skb in every slot it spans; it is freed once,
+		 * on the EOF descriptor, in tsi108_complete_tx().
+		 */
+		data->txskbs[tx] = skb;
+
+		if (i == 0) {
+			/* Fragment 0 is the linear head of the skb. */
+			data->txring[tx].buf0 = dma_map_single(NULL, skb->data, 
+					skb->len - skb->data_len, DMA_TO_DEVICE);
+			data->txring[tx].len = skb->len - skb->data_len;
+			misc |= TSI108_TX_SOF;
+		} else {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+			data->txring[tx].buf0 =
+			    dma_map_page(NULL, frag->page, frag->page_offset,
+					    frag->size, DMA_TO_DEVICE);
+			data->txring[tx].len = frag->size;
+		}
+
+		if (i == frags - 1)
+			misc |= TSI108_TX_EOF;
+
+		if (netif_msg_pktdata(data)) {
+			int i;
+			printk("%s: Tx Frame contents (%d)\n", dev->name,
+			       skb->len);
+			for (i = 0; i < skb->len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+		/* Ownership is handed to the hardware last. */
+		data->txring[tx].misc = misc | TSI108_TX_OWN;
+
+		data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
+		data->txfree--;
+	}
+
+	tsi108_complete_tx(dev);
+
+	/* This must be done after the check for completed tx descriptors,
+	 * so that the tail pointer is correct.
+	 */
+
+	if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
+		tsi108_restart_tx(data);
+
+	spin_unlock_irq(&data->txlock);
+	return NETDEV_TX_OK;
+}
+
+/* Reap up to "budget" completed RX descriptors from the ring tail.
+ * Bad frames (CRC error / FIFO overrun) update the error counters and
+ * are dropped; good frames are handed up via netif_receive_skb().
+ * Returns the number of descriptors processed.
+ *
+ * NOTE(review): the RX buffer is not dma_unmap'd before the CPU reads
+ * skb->data -- verify against the platform's DMA coherency rules.
+ */
+static int tsi108_complete_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree && done != budget) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		if (data->rxring[rx].misc & TSI108_RX_OWN)
+			break;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		done++;
+
+		if (data->rxring[rx].misc & TSI108_RX_BAD) {
+			/* misclock guards the stats counters against the
+			 * stats-carry interrupt path.
+			 */
+			spin_lock_irq(&data->misclock);
+
+			if (data->rxring[rx].misc & TSI108_RX_CRC)
+				data->stats.rx_crc_errors++;
+			if (data->rxring[rx].misc & TSI108_RX_OVER)
+				data->stats.rx_fifo_errors++;
+
+			spin_unlock_irq(&data->misclock);
+
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+		if (netif_msg_pktdata(data)) {
+			int i;
+			printk("%s: Rx Frame contents (%d)\n",
+			       dev->name, data->rxring[rx].len);
+			for (i = 0; i < data->rxring[rx].len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+
+		skb->dev = dev;
+		skb_put(skb, data->rxring[rx].len);
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_receive_skb(skb);
+		dev->last_rx = jiffies;
+	}
+
+	return done;
+}
+
+/* Allocate fresh skbs for up to "budget" empty RX ring slots, hand
+ * them to the hardware, and restart the RX queue if it had stopped
+ * and at least one buffer was posted.  Returns the number of slots
+ * filled (may be short on allocation failure).
+ *
+ * NOTE(review): the skb is allocated TSI108_RXBUF_SIZE + 2 bytes but
+ * mapped/advertised as TSI108_RX_SKB_SIZE -- confirm these constants
+ * agree once the 2-byte alignment reserve is accounted for.
+ */
+static int tsi108_refill_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
+		int rx = data->rxhead;
+		struct sk_buff *skb;
+
+		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
+
+		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, 
+							TSI108_RX_SKB_SIZE,
+							DMA_FROM_DEVICE);
+
+		/* Sometimes the hardware sets blen to zero after packet
+		 * reception, even though the manual says that it's only ever
+		 * modified by the driver.
+		 */
+
+		data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
+		data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
+
+		data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
+		data->rxfree++;
+		done++;
+	}
+
+	if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
+			   TSI108_EC_RXSTAT_QUEUE0))
+		tsi108_restart_rx(data, dev);
+
+	return done;
+}
+
+/* Old-style NAPI poll: acknowledge the RX status bits, reap completed
+ * packets, refill the ring (two refills are counted as one budget
+ * unit -- see the comment below), account RX error/overrun events,
+ * and unmask RX interrupts once the budget is no longer exhausted.
+ * Returns 0 when done, 1 to be polled again.
+ */
+static int tsi108_poll(struct net_device *dev, int *budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI_READ(TSI108_EC_RXESTAT);
+	u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
+	int total_budget = min(*budget, dev->quota);
+	int num_received = 0, num_filled = 0, budget_used;
+
+	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
+
+	/* Ack the events we are about to service. */
+	TSI_WRITE(TSI108_EC_RXESTAT, estat);
+	TSI_WRITE(TSI108_EC_INTSTAT, intstat);
+
+	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
+		num_received = tsi108_complete_rx(dev, total_budget);
+
+	/* This should normally fill no more slots than the number of
+	 * packets received in tsi108_complete_rx().  The exception
+	 * is when we previously ran out of memory for RX SKBs.  In that
+	 * case, it's helpful to obey the budget, not only so that the
+	 * CPU isn't hogged, but so that memory (which may still be low)
+	 * is not hogged by one device.
+	 *
+	 * A work unit is considered to be two SKBs to allow us to catch
+	 * up when the ring has shrunk due to out-of-memory but we're
+	 * still removing the full budget's worth of packets each time.
+	 */
+
+	if (data->rxfree < TSI108_RXRING_LEN)
+		num_filled = tsi108_refill_rx(dev, total_budget * 2);
+
+	if (intstat & TSI108_INT_RXERROR) {
+		u32 err = TSI_READ(TSI108_EC_RXERR);
+		TSI_WRITE(TSI108_EC_RXERR, err);
+
+		if (err) {
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: RX error %x\n",
+				       dev->name, err);
+
+			if (!(TSI_READ(TSI108_EC_RXSTAT) &
+			      TSI108_EC_RXSTAT_QUEUE0))
+				tsi108_restart_rx(data, dev);
+		}
+	}
+
+	if (intstat & TSI108_INT_RXOVERRUN) {
+		spin_lock_irq(&data->misclock);
+		data->stats.rx_fifo_errors++;
+		spin_unlock_irq(&data->misclock);
+	}
+
+	budget_used = max(num_received, num_filled / 2);
+
+	*budget -= budget_used;
+	dev->quota -= budget_used;
+
+	if (budget_used != total_budget) {
+		/* Budget not exhausted: leave polling mode and unmask the
+		 * RX interrupt sources again.
+		 */
+		data->rxpending = 0;
+		netif_rx_complete(dev);
+
+		TSI_WRITE(TSI108_EC_INTMASK,
+				     TSI_READ(TSI108_EC_INTMASK)
+				     & ~(TSI108_INT_RXQUEUE0
+					 | TSI108_INT_RXTHRESH |
+					 TSI108_INT_RXOVERRUN |
+					 TSI108_INT_RXERROR |
+					 TSI108_INT_RXWAIT));
+
+		/* IRQs are level-triggered, so no need to re-check */
+		return 0;
+	} else {
+		data->rxpending = 1;
+	}
+
+	return 1;
+}
+
+/* RX interrupt handling: mask (rather than ack) the RX interrupt
+ * sources and schedule the poll routine, which does the real work.
+ */
+static void tsi108_rx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A race could cause dev to already be scheduled, so it's not an
+	 * error if that happens (and interrupts shouldn't be re-masked,
+	 * because that can cause harmful races, if poll has already
+	 * unmasked them but not cleared LINK_STATE_SCHED).  
+	 *
+	 * This can happen if this code races with tsi108_poll(), which masks
+	 * the interrupts after tsi108_irq_one() read the mask, but before
+	 * netif_rx_schedule is called.  It could also happen due to calls
+	 * from tsi108_check_rxring().
+	 */
+
+	if (netif_rx_schedule_prep(dev)) {
+		/* Mask, rather than ack, the receive interrupts.  The ack
+		 * will happen in tsi108_poll().
+		 */
+
+		TSI_WRITE(TSI108_EC_INTMASK,
+				     TSI_READ(TSI108_EC_INTMASK) |
+				     TSI108_INT_RXQUEUE0
+				     | TSI108_INT_RXTHRESH |
+				     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
+				     TSI108_INT_RXWAIT);
+		__netif_rx_schedule(dev);
+	} else {
+		if (!netif_running(dev)) {
+			/* This can happen if an interrupt occurs while the
+			 * interface is being brought down, as the START
+			 * bit is cleared before the stop function is called.
+			 *
+			 * In this case, the interrupts must be masked, or
+			 * they will continue indefinitely.
+			 *
+			 * There's a race here if the interface is brought down
+			 * and then up in rapid succession, as the device could
+			 * be made running after the above check and before
+			 * the masking below.  This will only happen if the IRQ
+			 * thread has a lower priority than the task bringing
+			 * up the interface.  Fixing this race would likely
+			 * require changes in generic code.
+			 */
+
+			TSI_WRITE(TSI108_EC_INTMASK,
+					     TSI_READ
+					     (TSI108_EC_INTMASK) |
+					     TSI108_INT_RXQUEUE0 |
+					     TSI108_INT_RXTHRESH |
+					     TSI108_INT_RXOVERRUN |
+					     TSI108_INT_RXERROR |
+					     TSI108_INT_RXWAIT);
+		}
+	}
+}
+
+/* If the RX ring has run out of memory, try periodically
+ * to allocate some more, as otherwise poll would never
+ * get called (apart from the initial end-of-queue condition).
+ *
+ * This is called once per second (by default) from the thread.
+ */
+
+static void tsi108_check_rxring(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A poll is scheduled, as opposed to calling tsi108_refill_rx
+	 * directly, so as to keep the receive path single-threaded
+	 * (and thus not needing a lock).
+	 */
+
+	if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
+		tsi108_rx_int(dev);
+}
+
+/* TX interrupt handling: acknowledge the TX events, log any queue-0
+ * error, and reap finished descriptors under txlock.
+ */
+static void tsi108_tx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI_READ(TSI108_EC_TXESTAT);
+
+	TSI_WRITE(TSI108_EC_TXESTAT, estat);
+	TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
+			     TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
+	if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
+		u32 err = TSI_READ(TSI108_EC_TXERR);
+		TSI_WRITE(TSI108_EC_TXERR, err);
+
+		if (err && net_ratelimit())
+			printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
+	}
+
+	if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
+		/* Called from IRQ context, so plain spin_lock suffices. */
+		spin_lock(&data->txlock);
+		tsi108_complete_tx(dev);
+		spin_unlock(&data->txlock);
+	}
+}
+
+
+/* Top-level interrupt handler: dispatch the unmasked status bits to
+ * the TX and RX handlers; SFN and statistics-carry events are
+ * acknowledged in place.  Returns IRQ_NONE if no bit of ours is set
+ * (the line may be shared).
+ */
+static irqreturn_t tsi108_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 stat = TSI_READ(TSI108_EC_INTSTAT);
+
+	if (!(stat & TSI108_INT_ANY))
+		return IRQ_NONE;	/* Not our interrupt */
+
+	/* Only service sources that are currently unmasked. */
+	stat &= ~TSI_READ(TSI108_EC_INTMASK);
+
+	if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
+		    TSI108_INT_TXERROR))
+		tsi108_tx_int(dev);
+	if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+		    TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
+		    TSI108_INT_RXERROR))
+		tsi108_rx_int(dev);
+
+	if (stat & TSI108_INT_SFN) {
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: SFN error\n", dev->name);
+		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
+	}
+
+	if (stat & TSI108_INT_STATCARRY) {
+		tsi108_stat_carry(dev);
+		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Disable both DMA queues and busy-wait (up to 1000 * 10us each) for
+ * TX and then RX to go idle.
+ *
+ * NOTE(review): the timeout message is only printed when RX fails to
+ * go idle; a TX timeout falls through silently into the RX wait.
+ */
+static void tsi108_stop_ethernet(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int i = 1000;
+	/* Disable all TX and RX queues ... */
+	TSI_WRITE(TSI108_EC_TXCTRL, 0);
+	TSI_WRITE(TSI108_EC_RXCTRL, 0);
+
+	/* ...and wait for them to become idle */
+	while(i--) {
+		if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
+			break;
+		udelay(10);
+	}
+	i = 1000;
+	while(i--){
+		if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
+			return;
+		udelay(10);
+	}
+	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+/* Pulse the soft-reset bit of each block in turn -- MAC, statistics,
+ * TX data path, RX data path, MII management -- then clear the reset
+ * and set the low bits of the MII management config to 0x07
+ * (presumably the management clock divider; confirm against the
+ * Tsi108 manual).
+ */
+static void tsi108_reset_ether(struct tsi108_prv_data * data)
+{
+	TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
+	udelay(100);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_PORTCTRL,
+			     TSI_READ(TSI108_EC_PORTCTRL) &
+			     ~TSI108_EC_PORTCTRL_STATRST);
+
+	TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_TXCFG,
+			     TSI_READ(TSI108_EC_TXCFG) &
+			     ~TSI108_EC_TXCFG_RST);
+
+	TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_RXCFG,
+			     TSI_READ(TSI108_EC_RXCFG) &
+			     ~TSI108_EC_RXCFG_RST);
+
+	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+			     TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
+			     TSI108_MAC_MII_MGMT_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+			     (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
+			     ~(TSI108_MAC_MII_MGMT_RST |
+			       TSI108_MAC_MII_MGMT_CLK)) | 0x07);
+}
+
+/* Read the station address out of the Tsi108 address registers (the
+ * octet order is reversed relative to the manual).  If both registers
+ * are blank, program a fixed default address whose last octet depends
+ * on which PHY address this port uses.
+ *
+ * Returns 0 on success, -EINVAL if the resulting address is invalid.
+ */
+static int tsi108_get_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
+	u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+
+	/* Note that the octets are reversed from what the manual says,
+	 * producing an even weirder ordering...
+	 */
+	if (word2 == 0 && word1 == 0) {
+		dev->dev_addr[0] = 0x00;
+		dev->dev_addr[1] = 0x06;
+		dev->dev_addr[2] = 0xd2;
+		dev->dev_addr[3] = 0x00;
+		dev->dev_addr[4] = 0x00;
+		if (0x8 == data->phy)
+			dev->dev_addr[5] = 0x01;
+		else
+			dev->dev_addr[5] = 0x02;
+
+		word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+		word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+		    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+		TSI_WRITE(TSI108_MAC_ADDR1, word1);
+		TSI_WRITE(TSI108_MAC_ADDR2, word2);
+	} else {
+		dev->dev_addr[0] = (word2 >> 16) & 0xff;
+		dev->dev_addr[1] = (word2 >> 24) & 0xff;
+		dev->dev_addr[2] = (word1 >> 0) & 0xff;
+		dev->dev_addr[3] = (word1 >> 8) & 0xff;
+		dev->dev_addr[4] = (word1 >> 16) & 0xff;
+		dev->dev_addr[5] = (word1 >> 24) & 0xff;
+	}
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		/* The log level must be a printk prefix, not part of the
+		 * message text ("KERN_ERR:" was previously inside the
+		 * format string).
+		 */
+		printk(KERN_ERR "%s: Invalid MAC address. word1: %08x, "
+		       "word2: %08x\n", dev->name, word1, word2);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* net_device set_mac_address: addr points at a struct sockaddr, so
+ * the hardware address itself starts at offset 2 (sa_data), as the
+ * copy loop below shows.  Validate those bytes -- the previous code
+ * ran is_valid_ether_addr() on the start of the sockaddr, i.e. on the
+ * sa_family field plus only four address octets.
+ *
+ * Returns 0 on success, -EINVAL for an invalid address.
+ */
+static int tsi108_set_mac(struct net_device *dev, void *addr)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1, word2;
+	int i;
+
+	if (!is_valid_ether_addr(((unsigned char *)addr) + 2))
+		return -EINVAL;
+
+	for (i = 0; i < 6; i++)
+		/* +2 is for the offset of the HW addr type */
+		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+
+	/* Same reversed octet packing as tsi108_get_mac(). */
+	word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+	word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+	    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+	spin_lock_irq(&data->misclock);
+	TSI_WRITE(TSI108_MAC_ADDR1, word1);
+	TSI_WRITE(TSI108_MAC_ADDR2, word2);
+	spin_lock(&data->txlock);
+
+	/* The queue may have been stopped while the address was invalid. */
+	if (data->txfree && data->link_up)
+		netif_wake_queue(dev);
+
+	spin_unlock(&data->txlock);
+	spin_unlock_irq(&data->misclock);
+	return 0;
+}
+
+/* Protected by dev->xmit_lock. */
+/* Program RX filtering.  Promiscuous mode disables both hash filters
+ * and passes all unicast/multicast.  Otherwise, build the 512-bit
+ * multicast hash table (indexed by 9 bits of the frame CRC) from the
+ * device's multicast list and write it out 32 bits at a time.
+ */
+static void tsi108_set_rx_mode(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
+
+	if (dev->flags & IFF_PROMISC) {
+		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
+		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
+		goto out;
+	}
+
+	rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
+
+	if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+		int i;
+		struct dev_mc_list *mc = dev->mc_list;
+		rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
+
+		memset(data->mc_hash, 0, sizeof(data->mc_hash));
+
+		while (mc) {
+			u32 hash, crc;
+
+			if (mc->dmi_addrlen == 6) {
+				crc = ether_crc(6, mc->dmi_addr);
+				/* Top 9 bits of the CRC index the hash. */
+				hash = crc >> 23;
+
+				__set_bit(hash, &data->mc_hash[0]);
+			} else {
+				printk(KERN_ERR
+				       "%s: got multicast address of length %d "
+				       "instead of 6.\n", dev->name,
+				       mc->dmi_addrlen);
+			}
+
+			mc = mc->next;
+		}
+
+		TSI_WRITE(TSI108_EC_HASHADDR,
+				     TSI108_EC_HASHADDR_AUTOINC |
+				     TSI108_EC_HASHADDR_MCAST);
+
+		for (i = 0; i < 16; i++) {
+			/* The manual says that the hardware may drop
+			 * back-to-back writes to the data register.
+			 */
+			udelay(1);
+			TSI_WRITE(TSI108_EC_HASHDATA,
+					     data->mc_hash[i]);
+		}
+	}
+
+      out:
+	TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
+}
+
+/* Reset and configure the PHY, start autonegotiation, and wait (up to
+ * MII_READ_DELAY) for link-up, setting data->link_up / phy_ok
+ * accordingly.  Called from tsi108_open() with the device quiescent.
+ */
+static void tsi108_init_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 i;
+	u16 phyval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	/* Poll for the reset bit to self-clear.  The old code started
+	 * with i == 0, so "while (i--)" never polled at all and left i
+	 * at 0xffffffff, which also made the link-status timeout below
+	 * trigger on its first iteration.
+	 */
+	tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
+	for (i = 1000; i > 0; i--) {
+		if (!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
+			break;
+		udelay(10);
+	}
+	if (i == 0)
+		printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+
+#if (TSI108_PHY_TYPE == PHY_BCM54XX)	/* Broadcom BCM54xx PHY */
+	tsi108_write_mii(data, 0x09, 0x0300);
+	tsi108_write_mii(data, 0x10, 0x1020);
+	tsi108_write_mii(data, 0x1c, 0x8c00);
+#endif
+
+	tsi108_write_mii(data,
+			 MII_BMCR,
+			 BMCR_ANENABLE | BMCR_ANRESTART);
+	while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
+		cpu_relax();
+
+	/* Set G/MII mode and receive clock select in TBI control #2.  The
+	 * second port won't work if this isn't done, even though we don't
+	 * use TBI mode.
+	 */
+
+	tsi108_write_tbi(data, 0x11, 0x30);
+
+	/* FIXME: It seems to take more than 2 back-to-back reads to the
+	 * PHY_STAT register before the link up status bit is set.
+	 */
+
+	data->link_up = 1;
+
+	/* Restart the counter for the link-status timeout. */
+	i = 0;
+	while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
+		 BMSR_LSTATUS)) {
+		if (i++ > (MII_READ_DELAY / 10)) {
+			data->link_up = 0;
+			break;
+		}
+		/* Drop the lock while sleeping; MII access re-acquires. */
+		spin_unlock_irqrestore(&phy_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(&phy_lock, flags);
+	}
+
+	printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
+	data->phy_ok = 1;
+	data->init_media = 1;
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* Power the PHY down and mark it unusable.  All MII register access
+ * is serialized by phy_lock.
+ */
+static void tsi108_kill_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&phy_lock, irqflags);
+
+	tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
+	data->phy_ok = 0;
+
+	spin_unlock_irqrestore(&phy_lock, irqflags);
+}
+
+/* net_device open: request the IRQ, allocate both descriptor rings,
+ * populate the RX ring with skbs, bring up the PHY, arm the periodic
+ * checker timer, unmask interrupts and enable the MAC.
+ * Returns 0 on success or a negative errno; all resources acquired
+ * before a failure are released again.
+ */
+static int tsi108_open(struct net_device *dev)
+{
+	int i;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
+	unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
+
+	i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
+	if (i != 0) {
+		printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
+		       data->id, data->irq_num);
+		return i;
+	} else {
+		dev->irq = data->irq_num;
+		printk(KERN_NOTICE
+		       "tsi108_open : Port %d Assigned IRQ %d to %s\n",
+		       data->id, dev->irq, dev->name);
+	}
+
+	data->rxring = dma_alloc_coherent(NULL, rxring_size, 
+			&data->rxdma, GFP_KERNEL);
+
+	if (!data->rxring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for rxring!\n");
+		/* Don't leak the IRQ requested above. */
+		free_irq(data->irq_num, dev);
+		return -ENOMEM;
+	} else {
+		memset(data->rxring, 0, rxring_size);
+	}
+
+	data->txring = dma_alloc_coherent(NULL, txring_size, 
+			&data->txdma, GFP_KERNEL);
+
+	if (!data->txring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for txring!\n");
+		/* Free with the same API that allocated (was the mismatched
+		 * pci_free_consistent()), and don't leak the IRQ.
+		 */
+		dma_free_coherent(NULL, rxring_size, data->rxring, data->rxdma);
+		free_irq(data->irq_num, dev);
+		return -ENOMEM;
+	} else {
+		memset(data->txring, 0, txring_size);
+	}
+
+	/* Chain the RX descriptors into a ring. */
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
+		data->rxring[i].blen = TSI108_RXBUF_SIZE;
+		data->rxring[i].vlan = 0;
+	}
+
+	data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
+
+	data->rxtail = 0;
+	data->rxhead = 0;
+
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+
+		if (!skb) {
+			/* Bah.  No memory for now, but maybe we'll get
+			 * some more later.
+			 * For now, we'll live with the smaller ring.
+			 */
+			printk(KERN_WARNING
+			       "%s: Could only allocate %d receive skb(s).\n",
+			       dev->name, i);
+			data->rxhead = i;
+			break;
+		}
+
+		data->rxskbs[i] = skb;
+		/* Align the payload on a 4-byte boundary; this matches the
+		 * NET_IP_ALIGN slack allocated above (the old code reserved
+		 * a literal 2, and stored rxskbs[i] twice).
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
+		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
+	}
+
+	data->rxfree = i;
+	TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
+
+	/* Chain the TX descriptors into a ring. */
+	for (i = 0; i < TSI108_TXRING_LEN; i++) {
+		data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
+		data->txring[i].misc = 0;
+	}
+
+	data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
+	data->txtail = 0;
+	data->txhead = 0;
+	data->txfree = TSI108_TXRING_LEN;
+	TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
+	tsi108_init_phy(dev);
+
+	setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+	mod_timer(&data->timer, jiffies + 1);
+
+	tsi108_restart_rx(data, dev);
+
+	/* Ack any stale events, then unmask the ones we service. */
+	TSI_WRITE(TSI108_EC_INTSTAT, ~0);
+
+	TSI_WRITE(TSI108_EC_INTMASK,
+			     ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
+			       TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
+			       TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
+			       TSI108_INT_SFN | TSI108_INT_STATCARRY));
+
+	TSI_WRITE(TSI108_MAC_CFG1,
+			     TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* net_device stop: quiesce the hardware, drop any queued TX/RX skbs,
+ * release the IRQ and free both descriptor rings.  Always returns 0.
+ *
+ * NOTE(review): RX buffers mapped with dma_map_single() are freed
+ * without dma_unmap_single() -- confirm this is safe on the target
+ * platform.
+ */
+static int tsi108_close(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	del_timer_sync(&data->timer);
+
+	tsi108_stop_ethernet(dev);
+	tsi108_kill_phy(dev);
+	TSI_WRITE(TSI108_EC_INTMASK, ~0);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	/* Check for any pending TX packets, and drop them. */
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		int tx = data->txtail;
+		struct sk_buff *skb;
+		skb = data->txskbs[tx];
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+		dev_kfree_skb(skb);
+	}
+
+	synchronize_irq(data->irq_num);
+	free_irq(data->irq_num, dev);
+
+	/* Discard the RX ring. */
+
+	while (data->rxfree) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		dev_kfree_skb(skb);
+	}
+
+	dma_free_coherent(0,
+			    TSI108_RXRING_LEN * sizeof(rx_desc),
+			    data->rxring, data->rxdma);
+	dma_free_coherent(0,
+			    TSI108_TXRING_LEN * sizeof(tx_desc),
+			    data->txring, data->txdma);
+
+	return 0;
+}
+
+/* One-time MAC and queue configuration: preamble/CRC padding, TX FIFO
+ * fill thresholds, statistics-carry masks, word/byte-swap and port
+ * routing for both queues.  All interrupts are left masked; they are
+ * unmasked in tsi108_open().
+ */
+static void tsi108_init_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
+			     TSI108_MAC_CFG2_PADCRC);
+
+	TSI_WRITE(TSI108_EC_TXTHRESH,
+			     (192 << TSI108_EC_TXTHRESH_STARTFILL) |
+			     (192 << TSI108_EC_TXTHRESH_STOPFILL));
+
+	/* Only the statistics counters we fold into net_device_stats
+	 * generate carry interrupts.
+	 */
+	TSI_WRITE(TSI108_STAT_CARRYMASK1,
+			     ~(TSI108_STAT_CARRY1_RXBYTES |
+			       TSI108_STAT_CARRY1_RXPKTS |
+			       TSI108_STAT_CARRY1_RXFCS |
+			       TSI108_STAT_CARRY1_RXMCAST |
+			       TSI108_STAT_CARRY1_RXALIGN |
+			       TSI108_STAT_CARRY1_RXLENGTH |
+			       TSI108_STAT_CARRY1_RXRUNT |
+			       TSI108_STAT_CARRY1_RXJUMBO |
+			       TSI108_STAT_CARRY1_RXFRAG |
+			       TSI108_STAT_CARRY1_RXJABBER |
+			       TSI108_STAT_CARRY1_RXDROP));
+
+	TSI_WRITE(TSI108_STAT_CARRYMASK2,
+			     ~(TSI108_STAT_CARRY2_TXBYTES |
+			       TSI108_STAT_CARRY2_TXPKTS |
+			       TSI108_STAT_CARRY2_TXEXDEF |
+			       TSI108_STAT_CARRY2_TXEXCOL |
+			       TSI108_STAT_CARRY2_TXTCOL |
+			       TSI108_STAT_CARRY2_TXPAUSE));
+
+	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	TSI_WRITE(TSI108_EC_RXCFG,
+			     TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
+
+	TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
+			     TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_CFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
+			     TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_CFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
+			     TSI108_EC_TXQ_BUFCFG_BURST256 |
+			     TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_BUFCFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
+			     TSI108_EC_RXQ_BUFCFG_BURST256 |
+			     TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_BUFCFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_INTMASK, ~0);
+}
+
+/* Standard MII ioctl handler: delegate to the generic MII layer. */
+static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	struct mii_ioctl_data *mii_data = if_mii(rq);
+
+	return generic_mii_ioctl(&data->mii_if, mii_data, cmd, NULL);
+}
+
+/* Platform-driver probe: map the MAC and PHY register windows, set up
+ * the MII interface and net_device operations, reset the hardware,
+ * read the MAC address and register the interface.
+ * Returns 0 on success or a negative errno; everything acquired
+ * before a failure is released again.
+ */
+static int
+tsi108_init_one(struct platform_device *pdev)
+{
+	struct net_device *dev = NULL;
+	struct tsi108_prv_data *data = NULL;
+	hw_info *einfo;
+	int err = 0;
+	
+	einfo = pdev->dev.platform_data;
+	
+	if (NULL == einfo) {
+		printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
+		       pdev->id);
+		return -ENODEV;
+	}
+
+	/* Create an ethernet device instance */
+
+	dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
+	if (!dev) {
+		printk(KERN_ERR
+		       "tsi108_eth: Could not allocate a device structure\n");
+		return -ENOMEM;
+	}
+
+	printk("tsi108_eth%d: probe...\n", pdev->id);
+	data = netdev_priv(dev);
+
+	pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
+			pdev->id, einfo->regs, einfo->phyregs,
+			einfo->phy, einfo->irq_num);
+	
+	data->regs = ioremap(einfo->regs, 0x400);
+	if (NULL == data->regs) {
+		err = -ENOMEM;
+		goto regs_fail;
+	}
+	
+	data->phyregs = ioremap(einfo->phyregs, 0x400);
+	if (NULL == data->phyregs) {
+		err = -ENOMEM;
+		/* The old code jumped straight to regs_fail here and
+		 * leaked the data->regs mapping.
+		 */
+		goto phyregs_fail;
+	}
+/* MII setup */
+	data->mii_if.dev = dev;
+	data->mii_if.mdio_read = tsi108_mdio_read;
+	data->mii_if.mdio_write = tsi108_mdio_write;
+	data->mii_if.phy_id = einfo->phy;
+	data->mii_if.phy_id_mask = 0x1f;
+	data->mii_if.reg_num_mask = 0x1f;
+	data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
+	
+	data->phy = einfo->phy;
+	data->irq_num = einfo->irq_num;
+	data->id = pdev->id;
+	dev->open = tsi108_open;
+	dev->stop = tsi108_close;
+	dev->hard_start_xmit = tsi108_send_packet;
+	dev->set_mac_address = tsi108_set_mac;
+	dev->set_multicast_list = tsi108_set_rx_mode;
+	dev->get_stats = tsi108_get_stats;
+	dev->poll = tsi108_poll;
+	dev->do_ioctl = tsi108_do_ioctl;
+	dev->weight = 64;  /* 64 is more suitable for GigE interface - klai */
+
+	/* Apparently, the Linux networking code won't use scatter-gather
+	 * if the hardware doesn't do checksums.  However, it's faster
+	 * to checksum in place and use SG, as (among other reasons)
+	 * the cache won't be dirtied (which then has to be flushed
+	 * before DMA).  The checksumming is done by the driver (via
+	 * a new function skb_csum_dev() in net/core/skbuff.c).
+	 */
+
+	dev->features = NETIF_F_HIGHDMA;
+	SET_MODULE_OWNER(dev);
+
+	spin_lock_init(&data->txlock);
+	spin_lock_init(&data->misclock);
+
+	tsi108_reset_ether(data);
+	tsi108_kill_phy(dev);
+
+	if ((err = tsi108_get_mac(dev)) != 0) {
+		printk(KERN_ERR "%s: Invalid MAC address.  Please correct.\n",
+		       dev->name);
+		goto register_fail;
+	}
+
+	tsi108_init_mac(dev);
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+				dev->name);
+		goto register_fail;
+	}
+
+	printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
+	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+#ifdef DEBUG
+	data->msg_enable = DEBUG;
+	dump_eth_one(dev);
+#endif
+
+	return 0;
+	
+register_fail:
+	iounmap(data->phyregs);
+	
+phyregs_fail:
+	iounmap(data->regs);
+	
+regs_fail:
+	free_netdev(dev);
+	return err;
+}
+
+/* There's no way to either get interrupts from the PHY when
+ * something changes, or to have the Tsi108 automatically communicate
+ * with the PHY to reconfigure itself.
+ *
+ * Thus, we have to do it using a timer. 
+ */
+
+/* Timer callback: poll the PHY and the RX ring, then re-arm for the
+ * next CHECK_PHY_INTERVAL.
+ */
+static void tsi108_timed_checker(unsigned long dev_ptr)
+{
+	struct net_device *dev = (struct net_device *)dev_ptr;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	tsi108_check_phy(dev);
+	tsi108_check_rxring(dev);
+	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+}
+
+/* Module init: register the platform driver.
+ * Returns 0 on success or the negative error from the driver core.
+ */
+static int tsi108_ether_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register (&tsi_eth_driver);
+	/* Log with an explicit level (the old printk had none). */
+	if (ret < 0)
+		printk(KERN_ERR "tsi108_ether_init: error initializing ethernet "
+		       "device\n");
+	return ret;
+}
+
+/* Platform-driver remove: unregister the interface, stop the
+ * hardware, and release the register mappings and device structure.
+ * Always returns 0.
+ */
+static int tsi108_ether_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct tsi108_prv_data *priv = netdev_priv(dev);
+	
+	unregister_netdev(dev);
+	tsi108_stop_ethernet(dev);
+	platform_set_drvdata(pdev, NULL);
+	iounmap(priv->regs);
+	iounmap(priv->phyregs);
+	free_netdev(dev);
+	
+	return 0;
+}
+/* Module exit: unregister the platform driver. */
+static void tsi108_ether_exit(void)
+{
+	platform_driver_unregister(&tsi_eth_driver);
+}
+
+/* Module entry points and metadata. */
+module_init(tsi108_ether_init);
+module_exit(tsi108_ether_exit);
+
+MODULE_AUTHOR("Tundra Semiconductor Corporation");
+MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tsi108_eth.h b/drivers/net/tsi108_eth.h
new file mode 100644
index 0000000..77a769d
--- /dev/null
+++ b/drivers/net/tsi108_eth.h
@@ -0,0 +1,365 @@
+/*
+ * (C) Copyright 2005 Tundra Semiconductor Corp.
+ * Kong Lai, <kong.lai@tundra.com).
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * net/tsi108_eth.h - definitions for Tsi108 GIGE network controller.
+ */
+
+#ifndef __TSI108_ETH_H
+#define __TSI108_ETH_H
+
+#include <linux/types.h>
+
+#define TSI_WRITE(offset, val) \
+	out_be32((data->regs + (offset)), val)
+
+#define TSI_READ(offset) \
+	in_be32((data->regs + (offset)))
+
+#define TSI_WRITE_PHY(offset, val) \
+	out_be32((data->phyregs + (offset)), val)
+
+#define TSI_READ_PHY(offset) \
+	in_be32((data->phyregs + (offset)))
+
+/*
+ * PHY Configuration Options
+ *
+ * NOTE: Enable set of definitions corresponding to your board type
+ */
+#define PHY_MV88E	1	/* Marvel 88Exxxx PHY */
+#define PHY_BCM54XX	2	/* Broardcom BCM54xx PHY */
+#define TSI108_PHY_TYPE	PHY_MV88E
+
+/*
+ * TSI108 GIGE port registers
+ */
+
+#define TSI108_ETH_PORT_NUM		2
+#define TSI108_PBM_PORT			2
+#define TSI108_SDRAM_PORT		4
+
+#define TSI108_MAC_CFG1			(0x000)
+#define TSI108_MAC_CFG1_SOFTRST		(1 << 31)
+#define TSI108_MAC_CFG1_LOOPBACK	(1 << 8)
+#define TSI108_MAC_CFG1_RXEN		(1 << 2)
+#define TSI108_MAC_CFG1_TXEN		(1 << 0)
+
+#define TSI108_MAC_CFG2			(0x004)
+#define TSI108_MAC_CFG2_DFLT_PREAMBLE	(7 << 12)
+#define TSI108_MAC_CFG2_IFACE_MASK	(3 << 8)
+#define TSI108_MAC_CFG2_NOGIG		(1 << 8)
+#define TSI108_MAC_CFG2_GIG		(2 << 8)
+#define TSI108_MAC_CFG2_PADCRC		(1 << 2)
+#define TSI108_MAC_CFG2_FULLDUPLEX	(1 << 0)
+
+#define TSI108_MAC_MII_MGMT_CFG		(0x020)
+#define TSI108_MAC_MII_MGMT_CLK		(7 << 0)
+#define TSI108_MAC_MII_MGMT_RST		(1 << 31)
+
+#define TSI108_MAC_MII_CMD		(0x024)
+#define TSI108_MAC_MII_CMD_READ		(1 << 0)
+
+#define TSI108_MAC_MII_ADDR		(0x028)
+#define TSI108_MAC_MII_ADDR_REG		0
+#define TSI108_MAC_MII_ADDR_PHY		8
+
+#define TSI108_MAC_MII_DATAOUT		(0x02c)
+#define TSI108_MAC_MII_DATAIN		(0x030)
+
+#define TSI108_MAC_MII_IND		(0x034)
+#define TSI108_MAC_MII_IND_NOTVALID	(1 << 2)
+#define TSI108_MAC_MII_IND_SCANNING	(1 << 1)
+#define TSI108_MAC_MII_IND_BUSY		(1 << 0)
+
+#define TSI108_MAC_IFCTRL		(0x038)
+#define TSI108_MAC_IFCTRL_PHYMODE	(1 << 24)
+
+#define TSI108_MAC_ADDR1		(0x040)
+#define TSI108_MAC_ADDR2		(0x044)
+
+#define TSI108_STAT_RXBYTES		(0x06c)
+#define TSI108_STAT_RXBYTES_CARRY	(1 << 24)
+
+#define TSI108_STAT_RXPKTS		(0x070)
+#define TSI108_STAT_RXPKTS_CARRY	(1 << 18)
+
+#define TSI108_STAT_RXFCS		(0x074)
+#define TSI108_STAT_RXFCS_CARRY		(1 << 12)
+
+#define TSI108_STAT_RXMCAST		(0x078)
+#define TSI108_STAT_RXMCAST_CARRY	(1 << 18)
+
+#define TSI108_STAT_RXALIGN		(0x08c)
+#define TSI108_STAT_RXALIGN_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXLENGTH		(0x090)
+#define TSI108_STAT_RXLENGTH_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXRUNT		(0x09c)
+#define TSI108_STAT_RXRUNT_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXJUMBO		(0x0a0)
+#define TSI108_STAT_RXJUMBO_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXFRAG		(0x0a4)
+#define TSI108_STAT_RXFRAG_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXJABBER		(0x0a8)
+#define TSI108_STAT_RXJABBER_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXDROP		(0x0ac)
+#define TSI108_STAT_RXDROP_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXBYTES		(0x0b0)
+#define TSI108_STAT_TXBYTES_CARRY	(1 << 24)
+
+#define TSI108_STAT_TXPKTS		(0x0b4)
+#define TSI108_STAT_TXPKTS_CARRY	(1 << 18)
+
+#define TSI108_STAT_TXEXDEF		(0x0c8)
+#define TSI108_STAT_TXEXDEF_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXEXCOL		(0x0d8)
+#define TSI108_STAT_TXEXCOL_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXTCOL		(0x0dc)
+#define TSI108_STAT_TXTCOL_CARRY	(1 << 13)
+
+#define TSI108_STAT_TXPAUSEDROP		(0x0e4)
+#define TSI108_STAT_TXPAUSEDROP_CARRY	(1 << 12)
+
+#define TSI108_STAT_CARRY1		(0x100)
+#define TSI108_STAT_CARRY1_RXBYTES	(1 << 16)
+#define TSI108_STAT_CARRY1_RXPKTS	(1 << 15)
+#define TSI108_STAT_CARRY1_RXFCS	(1 << 14)
+#define TSI108_STAT_CARRY1_RXMCAST	(1 << 13)
+#define TSI108_STAT_CARRY1_RXALIGN	(1 << 8)
+#define TSI108_STAT_CARRY1_RXLENGTH	(1 << 7)
+#define TSI108_STAT_CARRY1_RXRUNT	(1 << 4)
+#define TSI108_STAT_CARRY1_RXJUMBO	(1 << 3)
+#define TSI108_STAT_CARRY1_RXFRAG	(1 << 2)
+#define TSI108_STAT_CARRY1_RXJABBER	(1 << 1)
+#define TSI108_STAT_CARRY1_RXDROP	(1 << 0)
+
+#define TSI108_STAT_CARRY2		(0x104)
+#define TSI108_STAT_CARRY2_TXBYTES	(1 << 13)
+#define TSI108_STAT_CARRY2_TXPKTS	(1 << 12)
+#define TSI108_STAT_CARRY2_TXEXDEF	(1 << 7)
+#define TSI108_STAT_CARRY2_TXEXCOL	(1 << 3)
+#define TSI108_STAT_CARRY2_TXTCOL	(1 << 2)
+#define TSI108_STAT_CARRY2_TXPAUSE	(1 << 0)
+
+#define TSI108_STAT_CARRYMASK1		(0x108)
+#define TSI108_STAT_CARRYMASK2		(0x10c)
+
+#define TSI108_EC_PORTCTRL		(0x200)
+#define TSI108_EC_PORTCTRL_STATRST	(1 << 31)
+#define TSI108_EC_PORTCTRL_STATEN	(1 << 28)
+#define TSI108_EC_PORTCTRL_NOGIG	(1 << 18)
+#define TSI108_EC_PORTCTRL_HALFDUPLEX	(1 << 16)
+
+#define TSI108_EC_INTSTAT		(0x204)
+#define TSI108_EC_INTMASK		(0x208)
+
+#define TSI108_INT_ANY			(1 << 31)
+#define TSI108_INT_SFN			(1 << 30)
+#define TSI108_INT_RXIDLE		(1 << 29)
+#define TSI108_INT_RXABORT		(1 << 28)
+#define TSI108_INT_RXERROR		(1 << 27)
+#define TSI108_INT_RXOVERRUN		(1 << 26)
+#define TSI108_INT_RXTHRESH		(1 << 25)
+#define TSI108_INT_RXWAIT		(1 << 24)
+#define TSI108_INT_RXQUEUE0		(1 << 16)
+#define TSI108_INT_STATCARRY		(1 << 15)
+#define TSI108_INT_TXIDLE		(1 << 13)
+#define TSI108_INT_TXABORT		(1 << 12)
+#define TSI108_INT_TXERROR		(1 << 11)
+#define TSI108_INT_TXUNDERRUN		(1 << 10)
+#define TSI108_INT_TXTHRESH		(1 <<  9)
+#define TSI108_INT_TXWAIT		(1 <<  8)
+#define TSI108_INT_TXQUEUE0		(1 <<  0)
+
+#define TSI108_EC_TXCFG			(0x220)
+#define TSI108_EC_TXCFG_RST		(1 << 31)
+
+#define TSI108_EC_TXCTRL		(0x224)
+#define TSI108_EC_TXCTRL_IDLEINT	(1 << 31)
+#define TSI108_EC_TXCTRL_ABORT		(1 << 30)
+#define TSI108_EC_TXCTRL_GO		(1 << 15)
+#define TSI108_EC_TXCTRL_QUEUE0		(1 <<  0)
+
+#define TSI108_EC_TXSTAT		(0x228)
+#define TSI108_EC_TXSTAT_ACTIVE		(1 << 15)
+#define TSI108_EC_TXSTAT_QUEUE0		(1 << 0)
+
+#define TSI108_EC_TXESTAT		(0x22c)
+#define TSI108_EC_TXESTAT_Q0_ERR	(1 << 24)
+#define TSI108_EC_TXESTAT_Q0_DESCINT	(1 << 16)
+#define TSI108_EC_TXESTAT_Q0_EOF	(1 <<  8)
+#define TSI108_EC_TXESTAT_Q0_EOQ	(1 <<  0)
+
+#define TSI108_EC_TXERR			(0x278)
+
+#define TSI108_EC_TXQ_CFG		(0x280)
+#define TSI108_EC_TXQ_CFG_DESC_INT	(1 << 20)
+#define TSI108_EC_TXQ_CFG_EOQ_OWN_INT	(1 << 19)
+#define TSI108_EC_TXQ_CFG_WSWP		(1 << 11)
+#define TSI108_EC_TXQ_CFG_BSWP		(1 << 10)
+#define TSI108_EC_TXQ_CFG_SFNPORT	0
+
+#define TSI108_EC_TXQ_BUFCFG		(0x284)
+#define TSI108_EC_TXQ_BUFCFG_BURST8	(0 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST32	(1 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST128	(2 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST256	(3 << 8)
+#define TSI108_EC_TXQ_BUFCFG_WSWP	(1 << 11)
+#define TSI108_EC_TXQ_BUFCFG_BSWP	(1 << 10)
+#define TSI108_EC_TXQ_BUFCFG_SFNPORT	0
+
+#define TSI108_EC_TXQ_PTRLOW		(0x288)
+
+#define TSI108_EC_TXQ_PTRHIGH		(0x28c)
+#define TSI108_EC_TXQ_PTRHIGH_VALID	(1 << 31)
+
+#define TSI108_EC_TXTHRESH		(0x230)
+#define TSI108_EC_TXTHRESH_STARTFILL	0
+#define TSI108_EC_TXTHRESH_STOPFILL	16
+
+#define TSI108_EC_RXCFG			(0x320)
+#define TSI108_EC_RXCFG_RST		(1 << 31)
+
+#define TSI108_EC_RXSTAT		(0x328)
+#define TSI108_EC_RXSTAT_ACTIVE		(1 << 15)
+#define TSI108_EC_RXSTAT_QUEUE0		(1 << 0)
+
+#define TSI108_EC_RXESTAT		(0x32c)
+#define TSI108_EC_RXESTAT_Q0_ERR	(1 << 24)
+#define TSI108_EC_RXESTAT_Q0_DESCINT	(1 << 16)
+#define TSI108_EC_RXESTAT_Q0_EOF	(1 <<  8)
+#define TSI108_EC_RXESTAT_Q0_EOQ	(1 <<  0)
+
+#define TSI108_EC_HASHADDR		(0x360)
+#define TSI108_EC_HASHADDR_AUTOINC	(1 << 31)
+#define TSI108_EC_HASHADDR_DO1STREAD	(1 << 30)
+#define TSI108_EC_HASHADDR_UNICAST	(0 <<  4)
+#define TSI108_EC_HASHADDR_MCAST	(1 <<  4)
+
+#define TSI108_EC_HASHDATA		(0x364)
+
+#define TSI108_EC_RXQ_PTRLOW		(0x388)
+
+#define TSI108_EC_RXQ_PTRHIGH		(0x38c)
+#define TSI108_EC_RXQ_PTRHIGH_VALID	(1 << 31)
+
+/* Station Enable -- accept packets destined for us */
+#define TSI108_EC_RXCFG_SE		(1 << 13)
+/* Unicast Frame Enable -- for packets not destined for us */
+#define TSI108_EC_RXCFG_UFE		(1 << 12)
+/* Multicast Frame Enable */
+#define TSI108_EC_RXCFG_MFE		(1 << 11)
+/* Broadcast Frame Enable */
+#define TSI108_EC_RXCFG_BFE		(1 << 10)
+#define TSI108_EC_RXCFG_UC_HASH		(1 <<  9)
+#define TSI108_EC_RXCFG_MC_HASH		(1 <<  8)
+
+#define TSI108_EC_RXQ_CFG		(0x380)
+#define TSI108_EC_RXQ_CFG_DESC_INT	(1 << 20)
+#define TSI108_EC_RXQ_CFG_EOQ_OWN_INT	(1 << 19)
+#define TSI108_EC_RXQ_CFG_WSWP		(1 << 11)
+#define TSI108_EC_RXQ_CFG_BSWP		(1 << 10)
+#define TSI108_EC_RXQ_CFG_SFNPORT	0
+
+#define TSI108_EC_RXQ_BUFCFG		(0x384)
+#define TSI108_EC_RXQ_BUFCFG_BURST8	(0 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST32	(1 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST128	(2 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST256	(3 << 8)
+#define TSI108_EC_RXQ_BUFCFG_WSWP	(1 << 11)
+#define TSI108_EC_RXQ_BUFCFG_BSWP	(1 << 10)
+#define TSI108_EC_RXQ_BUFCFG_SFNPORT	0
+
+#define TSI108_EC_RXCTRL		(0x324)
+#define TSI108_EC_RXCTRL_ABORT		(1 << 30)
+#define TSI108_EC_RXCTRL_GO		(1 << 15)
+#define TSI108_EC_RXCTRL_QUEUE0		(1 << 0)
+
+#define TSI108_EC_RXERR			(0x378)
+
+#define TSI108_TX_EOF	(1 << 0)	/* End of frame; last fragment of packet */
+#define TSI108_TX_SOF	(1 << 1)	/* Start of frame; first frag. of packet */
+#define TSI108_TX_VLAN	(1 << 2)	/* Per-frame VLAN: enables VLAN override */
+#define TSI108_TX_HUGE	(1 << 3)	/* Huge frame enable */
+#define TSI108_TX_PAD	(1 << 4)	/* Pad the packet if too short */
+#define TSI108_TX_CRC	(1 << 5)	/* Generate CRC for this packet */
+#define TSI108_TX_INT	(1 << 14)	/* Generate an IRQ after frag. processed */
+#define TSI108_TX_RETRY	(0xf << 16)	/* 4 bit field indicating num. of retries */
+#define TSI108_TX_COL	(1 << 20)	/* Set if a collision occured */
+#define TSI108_TX_LCOL	(1 << 24)	/* Set if a late collision occured */
+#define TSI108_TX_UNDER	(1 << 25)	/* Set if a FIFO underrun occured */
+#define TSI108_TX_RLIM	(1 << 26)	/* Set if the retry limit was reached */
+#define TSI108_TX_OK	(1 << 30)	/* Set if the frame TX was successful */
+#define TSI108_TX_OWN	(1 << 31)	/* Set if the device owns the descriptor */
+
+/* Note: the descriptor layouts assume big-endian byte order. */
+typedef struct {
+	u32 buf0;		/* NOTE(review): buf0/buf1 and next0/next1 look
+				 * like high/low words of 64-bit addresses --
+				 * confirm against the Tsi108 manual */
+	u32 buf1;		/* Base address of buffer */
+	u32 next0;		/* Address of next descriptor, if any */
+	u32 next1;
+	u16 vlan;		/* VLAN, if override enabled for this packet */
+	u16 len;		/* Length of buffer in bytes */
+	u32 misc;		/* See TSI108_TX_* above */
+	u32 reserved0;		/*reserved0 and reserved1 are added to make the desc */
+	u32 reserved1;		/* 32-byte aligned */
+} __attribute__ ((aligned(32))) tx_desc;
+
+#define TSI108_RX_EOF	(1 << 0)	/* End of frame; last fragment of packet */
+#define TSI108_RX_SOF	(1 << 1)	/* Start of frame; first frag. of packet */
+#define TSI108_RX_VLAN	(1 << 2)	/* Set on SOF if packet has a VLAN */
+#define TSI108_RX_FTYPE	(1 << 3)	/* Length/Type field is type, not length */
+#define TSI108_RX_RUNT	(1 << 4)/* Packet is less than minimum size */
+#define TSI108_RX_HASH	(1 << 7)/* Hash table match */
+#define TSI108_RX_BAD	(1 << 8)	/* Bad frame */
+#define TSI108_RX_OVER	(1 << 9)	/* FIFO overrun occured */
+#define TSI108_RX_TRUNC	(1 << 11)	/* Packet truncated due to excess length */
+#define TSI108_RX_CRC	(1 << 12)	/* Packet had a CRC error */
+#define TSI108_RX_INT	(1 << 13)	/* Generate an IRQ after frag. processed */
+#define TSI108_RX_OWN	(1 << 15)	/* Set if the device owns the descriptor */
+
+#define TSI108_RX_SKB_SIZE 1536		/* The RX skb length */
+
+/* RX descriptor; same 32-byte-aligned layout convention as tx_desc. */
+typedef struct {
+	u32 buf0;		/* Base address of buffer */
+	u32 buf1;		/* Base address of buffer */
+	u32 next0;		/* Address of next descriptor, if any */
+	u32 next1;		/* Address of next descriptor, if any */
+	u16 vlan;		/* VLAN of received packet, first frag only */
+	u16 len;		/* Length of received fragment in bytes */
+	u16 blen;		/* Length of buffer in bytes */
+	u16 misc;		/* See TSI108_RX_* above */
+	u32 reserved0;		/* reserved0 and reserved1 are added to make the desc */
+	u32 reserved1;		/* 32-byte aligned */
+} __attribute__ ((aligned(32))) rx_desc;
+
+#endif				/* __TSI108_ETH_H */
-- 
1.4.0





^ permalink raw reply	[flat|nested] 23+ messages in thread

* [PATCH] Add tsi108/9 On Chip Ethernet device driver support
  2006-10-30  5:19               ` Add tsi108/9 " Zang Roy-r61911
@ 2006-10-31  2:26                 ` Zang Roy-r61911
  0 siblings, 0 replies; 23+ messages in thread
From: Zang Roy-r61911 @ 2006-10-31  2:26 UTC (permalink / raw)
  To: Jeff Garzik; +Cc: Andrew Morton, netdev, linux-kernel, Alexandre.Bounine

The last e-mail missed the [PATCH] subject. Sorry for that.

The following patch adds tsi108/9 on chip Ethernet controller driver
support.

The driver code incorporates the feedback from previous postings on the
mailing list and provides an update.

MPC7448HPC2 platform in arch/powerpc uses tsi108 bridge.

The following is a brief description of the Ethernet controller:

The Tsi108/9 Ethernet Controller connects Switch Fabric to two
independent Gigabit Ethernet ports,E0 and E1. It uses a single
Management interface to manage the two physical connection devices
(PHYs). Each Ethernet port has its own statistics monitor that tracks
and reports key interface statistics. Each port supports a 256-entry
hash table for address filtering. In addition, each port is bridged to
the Switch Fabric through a 2-Kbyte transmit FIFO and a 4-Kbyte Receive
FIFO.

Each Ethernet port also has a pair of internal Ethernet DMA channels to
support the transmit and receive data flows. The Ethernet DMA channels
use descriptors set up in memory, the memory map of the device, and
access via the Switch Fabric. The Ethernet Controller’s DMA arbiter
handles arbitration for the Switch Fabric. The Controller also has a
register bus interface for register accesses and status monitor control.

The PMD (Physical Media Device) interface operates in MII, GMII, or TBI
modes. The MII mode is used for connecting with 10 or 100 Mbit/s PMDs.
The GMII and TBI modes are used to connect with Gigabit PMDs. 
Internal data flows to and from the Ethernet Controller through the
Switch Fabric. Each

Ethernet port uses its transmit and receive DMA channels to manage data
flows through buffer descriptors that are predefined by the system (the
descriptors can exist anywhere in the system memory map). These
descriptors are data structures that point to buffers filled with data
ready to transmit over Ethernet, or they point to empty buffers ready to
receive data from Ethernet.


Signed-off-by: Alexandre Bounine <Alexandre.Bounine@tundra.com>
Signed-off-by: Roy Zang	<tie-fei.zang@freescale.com> 

---
 drivers/net/Kconfig      |    8 
 drivers/net/Makefile     |    1 
 drivers/net/tsi108_eth.c | 1708 ++++++++++++++++++++++++++++++++++++++++++++++
 drivers/net/tsi108_eth.h |  365 ++++++++++
 4 files changed, 2082 insertions(+), 0 deletions(-)

diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e2ed249..74a2d93 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2251,6 +2251,14 @@ config SPIDER_NET
 	  This driver supports the Gigabit Ethernet chips present on the
 	  Cell Processor-Based Blades from IBM.
 
+config TSI108_ETH
+	   tristate "Tundra TSI108 gigabit Ethernet support"
+	   depends on TSI108_BRIDGE
+	   help
+	     This driver supports Tundra TSI108 gigabit Ethernet ports.
+	     To compile this driver as a module, choose M here: the module
+	     will be called tsi108_eth.
+
 config GIANFAR
 	tristate "Gianfar Ethernet"
 	depends on 85xx || 83xx || PPC_86xx
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f270bc4..b1f713e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -109,6 +109,7 @@ obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
+obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
new file mode 100644
index 0000000..82213b4
--- /dev/null
+++ b/drivers/net/tsi108_eth.c
@@ -0,0 +1,1708 @@
+/*******************************************************************************
+  
+  Copyright(c) 2006 Tundra Semiconductor Corporation.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+*******************************************************************************/
+
+/* This driver is based on the driver code originally developed	
+ * for the Intel IOC80314 (ForestLake) Gigabit Ethernet by
+ * scott.wood@timesys.com  * Copyright (C) 2003 TimeSys Corporation
+ *
+ * Currently changes from original version are:
+ * - porting to Tsi108-based platform and kernel 2.6 (kong.lai@tundra.com)
+ * - modifications to handle two ports independently and support for
+ *   additional PHY devices (alexandre.bounine@tundra.com)
+ * - Get hardware information from platform device. (tie-fei.zang@freescale.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/tsi108.h>
+
+#include "tsi108_eth.h"
+
+#define MII_READ_DELAY 10000	/* max link wait time in msec */
+
+#define TSI108_RXRING_LEN     256
+
+/* NOTE: The driver currently does not support receiving packets
+ * larger than the buffer size, so don't decrease this (unless you
+ * want to add such support).
+ */
+#define TSI108_RXBUF_SIZE     1536
+
+#define TSI108_TXRING_LEN     256
+
+#define TSI108_TX_INT_FREQ    64
+
+/* Check the phy status every half a second. */
+#define CHECK_PHY_INTERVAL (HZ/2)
+
+static int tsi108_init_one(struct platform_device *pdev);
+static int tsi108_ether_remove(struct platform_device *pdev);
+
+/* Per-port driver private state, allocated as netdev_priv() of the
+ * port's net_device (one instance per Ethernet port).
+ */
+struct tsi108_prv_data {
+	void  __iomem *regs;	/* Base of normal regs */
+	void  __iomem *phyregs;	/* Base of register bank used for PHY access */
+	
+	unsigned int phy;		/* Index of PHY for this interface */
+	unsigned int irq_num;
+	unsigned int id;
+
+	struct timer_list timer;/* Timer that triggers the check phy function */
+	unsigned int rxtail;	/* Next entry in rxring to read */
+	unsigned int rxhead;	/* Next entry in rxring to give a new buffer */
+	unsigned int rxfree;	/* Number of free, allocated RX buffers */
+
+	unsigned int rxpending;	/* Non-zero if there are still descriptors
+				 * to be processed from a previous descriptor
+				 * interrupt condition that has been cleared */
+
+	unsigned int txtail;	/* Next TX descriptor to check status on */
+	unsigned int txhead;	/* Next TX descriptor to use */
+
+	/* Number of free TX descriptors.  This could be calculated from
+	 * rxhead and rxtail if one descriptor were left unused to disambiguate
+	 * full and empty conditions, but it's simpler to just keep track
+	 * explicitly. */
+
+	unsigned int txfree;
+
+	unsigned int phy_ok;		/* The PHY is currently powered on. */
+
+	/* PHY status (duplex is 1 for half, 2 for full,
+	 * so that the default 0 indicates that neither has
+	 * yet been configured). */
+
+	unsigned int link_up;
+	unsigned int speed;
+	unsigned int duplex;
+
+	tx_desc *txring;
+	rx_desc *rxring;
+	struct sk_buff *txskbs[TSI108_TXRING_LEN];
+	struct sk_buff *rxskbs[TSI108_RXRING_LEN];
+
+	dma_addr_t txdma, rxdma;
+
+	/* txlock nests in misclock and phy_lock */
+
+	spinlock_t txlock, misclock;
+
+	/* stats is used to hold the upper bits of each hardware counter,
+	 * and tmpstats is used to hold the full values for returning
+	 * to the caller of get_stats().  They must be separate in case
+	 * an overflow interrupt occurs before the stats are consumed.
+	 */
+
+	struct net_device_stats stats;
+	struct net_device_stats tmpstats;
+
+	/* These stats are kept separate in hardware, thus require individual
+	 * fields for handling carry.  They are combined in get_stats.
+	 */
+
+	unsigned long rx_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_short_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_long_fcs;	/* Add to rx_frame_errors */
+	unsigned long rx_underruns;	/* Add to rx_length_errors */
+	unsigned long rx_overruns;	/* Add to rx_length_errors */
+
+	unsigned long tx_coll_abort;	/* Add to tx_aborted_errors/collisions */
+	unsigned long tx_pause_drop;	/* Add to tx_aborted_errors */
+
+	unsigned long mc_hash[16];
+	u32 msg_enable;			/* debug message level */
+	struct mii_if_info mii_if;
+	unsigned int init_media;
+};
+
+/* Structure for a device driver */
+
+static struct platform_driver tsi_eth_driver = {
+	.probe = tsi108_init_one,
+	.remove = tsi108_ether_remove,
+	.driver	= {
+		.name = "tsi-ethernet",	/* matched against the platform
+					 * device registered by board code */
+	},
+};
+
+static void tsi108_timed_checker(unsigned long dev_ptr);
+
+/* Debug helper: dump interrupt, link, TX-ring and RX-ring state for
+ * one port.  Only reached from the #ifdef DEBUG path in
+ * tsi108_init_one().
+ */
+static void dump_eth_one(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* These printks previously had no log level; debug output
+	 * belongs at KERN_DEBUG. */
+	printk(KERN_DEBUG "Dumping %s...\n", dev->name);
+	printk(KERN_DEBUG "intstat %x intmask %x phy_ok %d"
+	       " link %d speed %d duplex %d\n",
+	       TSI_READ(TSI108_EC_INTSTAT),
+	       TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
+	       data->link_up, data->speed, data->duplex);
+
+	printk(KERN_DEBUG "TX: head %d, tail %d, free %d, stat %x,"
+	       " estat %x, err %x\n",
+	       data->txhead, data->txtail, data->txfree,
+	       TSI_READ(TSI108_EC_TXSTAT),
+	       TSI_READ(TSI108_EC_TXESTAT),
+	       TSI_READ(TSI108_EC_TXERR));
+
+	printk(KERN_DEBUG "RX: head %d, tail %d, free %d, stat %x,"
+	       " estat %x, err %x, pending %d\n\n",
+	       data->rxhead, data->rxtail, data->rxfree,
+	       TSI_READ(TSI108_EC_RXSTAT),
+	       TSI_READ(TSI108_EC_RXESTAT),
+	       TSI_READ(TSI108_EC_RXERR), data->rxpending);
+}
+
+/* Synchronization is needed between the thread and up/down events.
+ * Note that the PHY is accessed through the same registers for both
+ * interfaces, so this can't be made interface-specific.
+ */
+
+static DEFINE_SPINLOCK(phy_lock);
+
+/* Read one MII register from this port's PHY (data->phy).
+ * Callers serialize access through phy_lock (see tsi108_check_phy).
+ * Polls up to 100 x 10us for the management interface to report
+ * valid+idle; returns 0xffff on timeout -- the same value a
+ * floating MII bus would read, so callers need no special case.
+ */
+static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
+{
+	unsigned i;
+
+	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	/* Pulse the READ command bit low then high to start a new cycle */
+	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, 0);
+	TSI_WRITE_PHY(TSI108_MAC_MII_CMD, TSI108_MAC_MII_CMD_READ);
+	for (i = 0; i < 100; i++) {
+		if (!(TSI_READ_PHY(TSI108_MAC_MII_IND) &
+		      (TSI108_MAC_MII_IND_NOTVALID | TSI108_MAC_MII_IND_BUSY)))
+			break;
+		udelay(10);
+	}
+
+	if (i == 100) 
+		return 0xffff;
+	else
+		return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN));
+}
+
+/* Write one MII register on this port's PHY.  Waits up to
+ * 100 x 10us for BUSY to clear afterwards; a timeout is silently
+ * ignored (the write may or may not have completed).
+ */
+static void tsi108_write_mii(struct tsi108_prv_data *data, 
+				int reg, u16 val)
+{
+	unsigned i = 100;
+	TSI_WRITE_PHY(TSI108_MAC_MII_ADDR,
+				(data->phy << TSI108_MAC_MII_ADDR_PHY) |
+				(reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE_PHY(TSI108_MAC_MII_DATAOUT, val);
+	while (i--) {
+		if(!(TSI_READ_PHY(TSI108_MAC_MII_IND) & 
+			TSI108_MAC_MII_IND_BUSY))
+			break;
+		udelay(10);
+	}
+}
+
+/* mii_if_info ->mdio_read hook.  The addr argument is ignored:
+ * the PHY address is fixed per port in data->phy.
+ */
+static int tsi108_mdio_read(struct net_device *dev, int addr, int reg)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	return tsi108_read_mii(data, reg);
+}
+
+/* mii_if_info ->mdio_write hook; addr is ignored as in
+ * tsi108_mdio_read().
+ */
+static void tsi108_mdio_write(struct net_device *dev, int addr, int reg, int val)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	tsi108_write_mii(data, reg, val);
+}
+
+/* Write a TBI (ten-bit interface) register, reached at fixed
+ * management address 0x1e through this port's own MII registers --
+ * note TSI_WRITE (data->regs), not TSI_WRITE_PHY (data->phyregs).
+ * Gives up after 1000 x 10us with an error message.
+ */
+static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
+					int reg, u16 val)
+{
+	unsigned i = 1000;
+	TSI_WRITE(TSI108_MAC_MII_ADDR,
+			     (0x1e << TSI108_MAC_MII_ADDR_PHY)
+			     | (reg << TSI108_MAC_MII_ADDR_REG));
+	TSI_WRITE(TSI108_MAC_MII_DATAOUT, val);
+	while(i--) {
+		if(!(TSI_READ(TSI108_MAC_MII_IND) & TSI108_MAC_MII_IND_BUSY))
+			return;
+		udelay(10);
+	}
+	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+/* Derive the negotiated link speed from the MII registers.
+ * Returns 1000, 100 or 10; returns 0 if the link is down or
+ * autonegotiation has not completed.
+ */
+static int mii_speed(struct mii_if_info *mii)
+{
+	int advert, lpa, val, media;
+	int lpa2 = 0;
+	int speed;
+
+	if (!mii_link_ok(mii))
+		return 0;
+
+	val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR);
+	if ((val & BMSR_ANEGCOMPLETE) == 0)
+		return 0;
+
+	/* Intersect our advertisement with the link partner's ability */
+	advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE);
+	lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA);
+	media = mii_nway_result(advert & lpa);
+
+	/* Gigabit ability is negotiated separately via MII_STAT1000 */
+	if (mii->supports_gmii)
+		lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000);
+
+	speed = lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 :
+			(media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10);
+	return speed;
+}
+
+/* Track PHY state and reprogram the MAC's speed/duplex to match.
+ * Called from the periodic timer (tsi108_timed_checker).  Takes
+ * phy_lock for the whole body and nests data->txlock inside it
+ * when waking the TX queue (see the nesting note on the struct).
+ */
+static void tsi108_check_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 mac_cfg2_reg, portctrl_reg;
+	u32 duplex;
+	u32 speed;
+	unsigned long flags;
+
+	/* Do a dummy read, as for some reason the first read
+	 * after a link becomes up returns link down, even if
+	 * it's been a while since the link came up.
+	 */
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	if (!data->phy_ok)
+		goto out;
+
+	/* The dummy read described above */
+	tsi108_read_mii(data, MII_BMSR);
+
+	/* NOTE(review): mii_check_media() returns nonzero when the media
+	 * state *changed*, so "duplex" is a misleading name; the actual
+	 * duplex setting is read from data->mii_if.full_duplex below.
+	 */
+	duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
+	data->init_media = 0;
+
+	if (netif_carrier_ok(dev)) {
+
+		speed = mii_speed(&data->mii_if);
+
+		/* Reprogram only if speed or media state changed */
+		if ((speed != data->speed) || duplex) {
+
+			mac_cfg2_reg = TSI_READ(TSI108_MAC_CFG2);
+			portctrl_reg = TSI_READ(TSI108_EC_PORTCTRL);
+
+			mac_cfg2_reg &= ~TSI108_MAC_CFG2_IFACE_MASK;
+
+			if (speed == 1000) {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_GIG;
+				portctrl_reg &= ~TSI108_EC_PORTCTRL_NOGIG;
+			} else {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_NOGIG;
+				portctrl_reg |= TSI108_EC_PORTCTRL_NOGIG;
+			}
+
+			data->speed = speed;
+
+			if (data->mii_if.full_duplex) {
+				mac_cfg2_reg |= TSI108_MAC_CFG2_FULLDUPLEX;
+				portctrl_reg &= ~TSI108_EC_PORTCTRL_HALFDUPLEX;
+				data->duplex = 2;
+			} else {
+				mac_cfg2_reg &= ~TSI108_MAC_CFG2_FULLDUPLEX;
+				portctrl_reg |= TSI108_EC_PORTCTRL_HALFDUPLEX;
+				data->duplex = 1;
+			}
+
+			TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
+			TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
+
+			if (data->link_up == 0) {
+				/* The manual says it can take 3-4 usecs for the speed change
+				 * to take effect.
+				 */
+				udelay(5);
+
+				spin_lock(&data->txlock);
+				if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
+					netif_wake_queue(dev);
+
+				data->link_up = 1;
+				spin_unlock(&data->txlock);
+			}
+		}
+
+	} else {
+		/* Carrier lost: stop the TX queue until link returns */
+		if (data->link_up == 1) {
+			netif_stop_queue(dev);
+			data->link_up = 0;
+			printk(KERN_NOTICE "%s : link is down\n", dev->name);
+		}
+
+		goto out;
+	}
+
+
+out:
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* If the hardware flagged a rollover for one counter (carry_bit set
+ * in the carry register value), add that counter's modulus
+ * (carry_shift, a TSI108_STAT_*_CARRY constant of the form 1 << width)
+ * into the software-maintained upper bits.
+ */
+static inline void
+tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+		      unsigned long *upper)
+{
+	if (carry & carry_bit)
+		*upper += carry_shift;
+}
+
+/* Statistics-carry interrupt handler: read and acknowledge both carry
+ * registers (write-one-to-clear), then fold each flagged rollover into
+ * the software upper bits held in data->stats and the separate rx/tx
+ * carry fields.  Serialized against get_stats() by data->misclock.
+ */
+static void tsi108_stat_carry(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 carry1, carry2;
+
+	spin_lock_irq(&data->misclock);
+
+	carry1 = TSI_READ(TSI108_STAT_CARRY1);
+	carry2 = TSI_READ(TSI108_STAT_CARRY2);
+
+	TSI_WRITE(TSI108_STAT_CARRY1, carry1);
+	TSI_WRITE(TSI108_STAT_CARRY2, carry2);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXBYTES,
+			      TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXPKTS,
+			      TSI108_STAT_RXPKTS_CARRY,
+			      &data->stats.rx_packets);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFCS,
+			      TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXMCAST,
+			      TSI108_STAT_RXMCAST_CARRY,
+			      &data->stats.multicast);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXALIGN,
+			      TSI108_STAT_RXALIGN_CARRY,
+			      &data->stats.rx_frame_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXLENGTH,
+			      TSI108_STAT_RXLENGTH_CARRY,
+			      &data->stats.rx_length_errors);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXRUNT,
+			      TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJUMBO,
+			      TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXFRAG,
+			      TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXJABBER,
+			      TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
+
+	tsi108_stat_carry_one(carry1, TSI108_STAT_CARRY1_RXDROP,
+			      TSI108_STAT_RXDROP_CARRY,
+			      &data->stats.rx_missed_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXBYTES,
+			      TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPKTS,
+			      TSI108_STAT_TXPKTS_CARRY,
+			      &data->stats.tx_packets);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXDEF,
+			      TSI108_STAT_TXEXDEF_CARRY,
+			      &data->stats.tx_aborted_errors);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXEXCOL,
+			      TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXTCOL,
+			      TSI108_STAT_TXTCOL_CARRY,
+			      &data->stats.collisions);
+
+	tsi108_stat_carry_one(carry2, TSI108_STAT_CARRY2_TXPAUSE,
+			      TSI108_STAT_TXPAUSEDROP_CARRY,
+			      &data->tx_pause_drop);
+
+	spin_unlock_irq(&data->misclock);
+}
+
+/* Read a stat counter atomically with respect to carries.
+ * data->misclock must be held.
+ */
+static inline unsigned long
+tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
+		 int carry_shift, unsigned long *upper)
+{
+	int carryreg;
+	unsigned long val;
+
+	/* Counters below offset 0xb0 carry into CARRY1, the rest into
+	 * CARRY2 (matches the register map in tsi108_eth.h). */
+	if (reg < 0xb0)
+		carryreg = TSI108_STAT_CARRY1;
+	else
+		carryreg = TSI108_STAT_CARRY2;
+
+      again:
+	/* *upper holds the software-maintained high-order bits; OR (not
+	 * add) combines them with the live low-order hardware count. */
+	val = TSI_READ(reg) | *upper;
+
+	/* Check to see if it overflowed, but the interrupt hasn't
+	 * been serviced yet.  If so, handle the carry here, and
+	 * try again.
+	 */
+
+	if (unlikely(TSI_READ(carryreg) & carry_bit)) {
+		*upper += carry_shift;
+		TSI_WRITE(carryreg, carry_bit);
+		goto again;
+	}
+
+	return val;
+}
+
+/* net_device get_stats hook.  Gathers the hardware statistics counters
+ * (via tsi108_read_stat, which folds in the software-maintained carry
+ * upper halves) into the persistent tmpstats buffer and returns it.
+ * misclock serializes this against the carry interrupt handler.
+ */
+static struct net_device_stats *tsi108_get_stats(struct net_device *dev)
+{
+	unsigned long excol;
+
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	spin_lock_irq(&data->misclock);
+
+	data->tmpstats.rx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_RXPKTS,
+			     TSI108_STAT_CARRY1_RXPKTS,
+			     TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
+
+	data->tmpstats.tx_packets =
+	    tsi108_read_stat(data, TSI108_STAT_TXPKTS,
+			     TSI108_STAT_CARRY2_TXPKTS,
+			     TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
+
+	data->tmpstats.rx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_RXBYTES,
+			     TSI108_STAT_CARRY1_RXBYTES,
+			     TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
+
+	data->tmpstats.tx_bytes =
+	    tsi108_read_stat(data, TSI108_STAT_TXBYTES,
+			     TSI108_STAT_CARRY2_TXBYTES,
+			     TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
+
+	data->tmpstats.multicast =
+	    tsi108_read_stat(data, TSI108_STAT_RXMCAST,
+			     TSI108_STAT_CARRY1_RXMCAST,
+			     TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
+
+	excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
+				 TSI108_STAT_CARRY2_TXEXCOL,
+				 TSI108_STAT_TXEXCOL_CARRY,
+				 &data->tx_coll_abort);
+
+	data->tmpstats.collisions =
+	    tsi108_read_stat(data, TSI108_STAT_TXTCOL,
+			     TSI108_STAT_CARRY2_TXTCOL,
+			     TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
+
+	/* Excessive collisions count both as collisions and (below)
+	 * as aborted transmits.
+	 */
+	data->tmpstats.collisions += excol;
+
+	data->tmpstats.rx_length_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
+			     TSI108_STAT_CARRY1_RXLENGTH,
+			     TSI108_STAT_RXLENGTH_CARRY,
+			     &data->stats.rx_length_errors);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXRUNT,
+			     TSI108_STAT_CARRY1_RXRUNT,
+			     TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
+
+	data->tmpstats.rx_length_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
+			     TSI108_STAT_CARRY1_RXJUMBO,
+			     TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
+
+	data->tmpstats.rx_frame_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXALIGN,
+			     TSI108_STAT_CARRY1_RXALIGN,
+			     TSI108_STAT_RXALIGN_CARRY,
+			     &data->stats.rx_frame_errors);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFCS,
+			     TSI108_STAT_CARRY1_RXFCS, TSI108_STAT_RXFCS_CARRY,
+			     &data->rx_fcs);
+
+	data->tmpstats.rx_frame_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_RXFRAG,
+			     TSI108_STAT_CARRY1_RXFRAG,
+			     TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
+
+	data->tmpstats.rx_missed_errors =
+	    tsi108_read_stat(data, TSI108_STAT_RXDROP,
+			     TSI108_STAT_CARRY1_RXDROP,
+			     TSI108_STAT_RXDROP_CARRY,
+			     &data->stats.rx_missed_errors);
+
+	/* These are maintained by software (bumped in the RX path). */
+	data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
+	data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
+
+	data->tmpstats.tx_aborted_errors =
+	    tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
+			     TSI108_STAT_CARRY2_TXEXDEF,
+			     TSI108_STAT_TXEXDEF_CARRY,
+			     &data->stats.tx_aborted_errors);
+
+	data->tmpstats.tx_aborted_errors +=
+	    tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
+			     TSI108_STAT_CARRY2_TXPAUSE,
+			     TSI108_STAT_TXPAUSEDROP_CARRY,
+			     &data->tx_pause_drop);
+
+	data->tmpstats.tx_aborted_errors += excol;
+
+	data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
+	data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
+	    data->tmpstats.rx_crc_errors +
+	    data->tmpstats.rx_frame_errors +
+	    data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
+
+	spin_unlock_irq(&data->misclock);
+	return &data->tmpstats;
+}
+
+/* Re-arm RX queue 0: mark the queue pointer valid, then set the GO bit.
+ * Called whenever the hardware has gone idle (RXSTAT_QUEUE0 clear) but
+ * descriptors are available again.
+ */
+static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
+{
+	TSI_WRITE(TSI108_EC_RXQ_PTRHIGH,
+			     TSI108_EC_RXQ_PTRHIGH_VALID);
+
+	TSI_WRITE(TSI108_EC_RXCTRL, TSI108_EC_RXCTRL_GO
+			     | TSI108_EC_RXCTRL_QUEUE0);
+}
+
+/* Re-arm TX queue 0 (pointer valid, then GO), also requesting an
+ * interrupt when the queue next goes idle (IDLEINT).
+ */
+static void tsi108_restart_tx(struct tsi108_prv_data * data)
+{
+	TSI_WRITE(TSI108_EC_TXQ_PTRHIGH,
+			     TSI108_EC_TXQ_PTRHIGH_VALID);
+
+	TSI_WRITE(TSI108_EC_TXCTRL, TSI108_EC_TXCTRL_IDLEINT |
+			     TSI108_EC_TXCTRL_GO | TSI108_EC_TXCTRL_QUEUE0);
+}
+
+/* Reclaim completed TX descriptors from the tail of the ring, freeing
+ * each skb on its end-of-frame descriptor and waking the queue if any
+ * frame was released.
+ *
+ * txlock must be held by caller, with IRQs disabled, and
+ * with permission to re-enable them when the lock is dropped.
+ */
+static void tsi108_complete_tx(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int tx;
+	struct sk_buff *skb;
+	int release = 0;
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		tx = data->txtail;
+
+		/* Stop at the first descriptor still owned by hardware. */
+		if (data->txring[tx].misc & TSI108_TX_OWN)
+			break;
+
+		skb = data->txskbs[tx];
+
+		/* Fix: give the diagnostic printk an explicit log level. */
+		if (!(data->txring[tx].misc & TSI108_TX_OK))
+			printk(KERN_ERR "%s: bad tx packet, misc %x\n",
+			       dev->name, data->txring[tx].misc);
+
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+
+		/* The skb pointer is stored in every fragment's slot, but
+		 * the skb itself is freed only once, on end-of-frame.
+		 */
+		if (data->txring[tx].misc & TSI108_TX_EOF) {
+			dev_kfree_skb_any(skb);
+			release++;
+		}
+	}
+
+	if (release) {
+		if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
+			netif_wake_queue(dev);
+	}
+}
+
+/* hard_start_xmit hook: queue an skb (head + page fragments) on TX
+ * queue 0.  Returns NETDEV_TX_BUSY (skb not consumed) when the link is
+ * down or the ring is too full, NETDEV_TX_OK once all fragments have
+ * been handed to the hardware.
+ */
+static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int frags = skb_shinfo(skb)->nr_frags + 1;
+	int i;
+
+	if (!data->phy_ok && net_ratelimit())
+		printk(KERN_ERR "%s: Transmit while PHY is down!\n", dev->name);
+
+	if (!data->link_up) {
+		printk(KERN_ERR "%s: Transmit while link is down!\n",
+		       dev->name);
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Always keep room for a maximally-fragmented skb. */
+	if (data->txfree < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+
+		if (net_ratelimit())
+			printk(KERN_ERR "%s: Transmit with full tx ring!\n",
+			       dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* Stop the queue now if this packet uses up the headroom; the
+	 * packet itself is still accepted below.
+	 */
+	if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
+		netif_stop_queue(dev);
+	}
+
+	spin_lock_irq(&data->txlock);
+
+	for (i = 0; i < frags; i++) {
+		int misc = 0;
+		int tx = data->txhead;
+
+		/* This is done to mark every TSI108_TX_INT_FREQ tx buffers with
+		 * the interrupt bit.  TX descriptor-complete interrupts are
+		 * enabled when the queue fills up, and masked when there is
+		 * still free space.  This way, when saturating the outbound
+		 * link, the tx interrupts are kept to a reasonable level. 
+		 * When the queue is not full, reclamation of skbs still occurs
+		 * as new packets are transmitted, or on a queue-empty
+		 * interrupt.
+		 */
+
+		if ((tx % TSI108_TX_INT_FREQ == 0) &&
+		    ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
+			misc = TSI108_TX_INT;
+
+		/* Stored per fragment; freed once on EOF in complete_tx. */
+		data->txskbs[tx] = skb;
+
+		if (i == 0) {
+			data->txring[tx].buf0 = dma_map_single(NULL, skb->data, 
+					skb->len - skb->data_len, DMA_TO_DEVICE);
+			data->txring[tx].len = skb->len - skb->data_len;
+			misc |= TSI108_TX_SOF;
+		} else {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+			data->txring[tx].buf0 =
+			    dma_map_page(NULL, frag->page, frag->page_offset,
+					    frag->size, DMA_TO_DEVICE);
+			data->txring[tx].len = frag->size;
+		}
+
+		if (i == frags - 1)
+			misc |= TSI108_TX_EOF;
+
+		if (netif_msg_pktdata(data)) {
+			int i;
+			/* NOTE(review): indexes skb->data over the whole
+			 * skb->len, which over-reads the linear area for
+			 * fragmented skbs -- debug-only path, but verify.
+			 */
+			printk("%s: Tx Frame contents (%d)\n", dev->name,
+			       skb->len);
+			for (i = 0; i < skb->len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+		/* Ownership handoff to hardware must be the last field write. */
+		data->txring[tx].misc = misc | TSI108_TX_OWN;
+
+		data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
+		data->txfree--;
+	}
+
+	tsi108_complete_tx(dev);
+
+	/* This must be done after the check for completed tx descriptors,
+	 * so that the tail pointer is correct.
+	 */
+
+	if (!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_QUEUE0))
+		tsi108_restart_tx(data);
+
+	spin_unlock_irq(&data->txlock);
+	return NETDEV_TX_OK;
+}
+
+/* Hand up to 'budget' completed RX frames to the network stack.
+ * Bad frames (CRC/overrun) are counted and dropped.  Returns the
+ * number of descriptors consumed.  Runs only from the poll path,
+ * so the RX ring needs no lock.
+ */
+static int tsi108_complete_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree && done != budget) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		/* Stop at the first descriptor still owned by hardware. */
+		if (data->rxring[rx].misc & TSI108_RX_OWN)
+			break;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		done++;
+
+		if (data->rxring[rx].misc & TSI108_RX_BAD) {
+			spin_lock_irq(&data->misclock);
+
+			if (data->rxring[rx].misc & TSI108_RX_CRC)
+				data->stats.rx_crc_errors++;
+			if (data->rxring[rx].misc & TSI108_RX_OVER)
+				data->stats.rx_fifo_errors++;
+
+			spin_unlock_irq(&data->misclock);
+
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+		if (netif_msg_pktdata(data)) {
+			int i;
+			printk("%s: Rx Frame contents (%d)\n",
+			       dev->name, data->rxring[rx].len);
+			for (i = 0; i < data->rxring[rx].len; i++)
+				printk(" %2.2x", skb->data[i]);
+			printk(".\n");
+		}
+
+		skb->dev = dev;
+		skb_put(skb, data->rxring[rx].len);
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_receive_skb(skb);
+		dev->last_rx = jiffies;
+	}
+
+	return done;
+}
+
+/* Replenish up to 'budget' RX descriptors with freshly allocated skbs,
+ * then restart the RX queue if the hardware had gone idle.  Returns
+ * the number of slots filled (may stop early on allocation failure).
+ */
+static int tsi108_refill_rx(struct net_device *dev, int budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int done = 0;
+
+	while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
+		int rx = data->rxhead;
+		struct sk_buff *skb;
+
+		data->rxskbs[rx] = skb = dev_alloc_skb(TSI108_RXBUF_SIZE + 2);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
+
+		data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, 
+							TSI108_RX_SKB_SIZE,
+							DMA_FROM_DEVICE);
+
+		/* Sometimes the hardware sets blen to zero after packet
+		 * reception, even though the manual says that it's only ever
+		 * modified by the driver.
+		 */
+
+		data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
+		data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
+
+		data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
+		data->rxfree++;
+		done++;
+	}
+
+	if (done != 0 && !(TSI_READ(TSI108_EC_RXSTAT) &
+			   TSI108_EC_RXSTAT_QUEUE0))
+		tsi108_restart_rx(data, dev);
+
+	return done;
+}
+
+/* Old-style (pre-2.6.24) NAPI poll hook: acks the RX events, passes up
+ * completed frames, refills the ring, and accounts the work against
+ * *budget and dev->quota.  Returns 1 to stay on the poll list, 0 after
+ * completing and unmasking RX interrupts.
+ */
+static int tsi108_poll(struct net_device *dev, int *budget)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI_READ(TSI108_EC_RXESTAT);
+	u32 intstat = TSI_READ(TSI108_EC_INTSTAT);
+	int total_budget = min(*budget, dev->quota);
+	int num_received = 0, num_filled = 0, budget_used;
+
+	intstat &= TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+	    TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | TSI108_INT_RXWAIT;
+
+	/* Ack the events we are about to service. */
+	TSI_WRITE(TSI108_EC_RXESTAT, estat);
+	TSI_WRITE(TSI108_EC_INTSTAT, intstat);
+
+	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
+		num_received = tsi108_complete_rx(dev, total_budget);
+
+	/* This should normally fill no more slots than the number of
+	 * packets received in tsi108_complete_rx().  The exception
+	 * is when we previously ran out of memory for RX SKBs.  In that
+	 * case, it's helpful to obey the budget, not only so that the
+	 * CPU isn't hogged, but so that memory (which may still be low)
+	 * is not hogged by one device.
+	 *
+	 * A work unit is considered to be two SKBs to allow us to catch
+	 * up when the ring has shrunk due to out-of-memory but we're
+	 * still removing the full budget's worth of packets each time.
+	 */
+
+	if (data->rxfree < TSI108_RXRING_LEN)
+		num_filled = tsi108_refill_rx(dev, total_budget * 2);
+
+	if (intstat & TSI108_INT_RXERROR) {
+		u32 err = TSI_READ(TSI108_EC_RXERR);
+		TSI_WRITE(TSI108_EC_RXERR, err);
+
+		if (err) {
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: RX error %x\n",
+				       dev->name, err);
+
+			if (!(TSI_READ(TSI108_EC_RXSTAT) &
+			      TSI108_EC_RXSTAT_QUEUE0))
+				tsi108_restart_rx(data, dev);
+		}
+	}
+
+	if (intstat & TSI108_INT_RXOVERRUN) {
+		spin_lock_irq(&data->misclock);
+		data->stats.rx_fifo_errors++;
+		spin_unlock_irq(&data->misclock);
+	}
+
+	/* Refills count as half a work unit each (see comment above). */
+	budget_used = max(num_received, num_filled / 2);
+
+	*budget -= budget_used;
+	dev->quota -= budget_used;
+
+	if (budget_used != total_budget) {
+		data->rxpending = 0;
+		netif_rx_complete(dev);
+
+		/* Done for now: unmask the RX interrupt sources. */
+		TSI_WRITE(TSI108_EC_INTMASK,
+				     TSI_READ(TSI108_EC_INTMASK)
+				     & ~(TSI108_INT_RXQUEUE0
+					 | TSI108_INT_RXTHRESH |
+					 TSI108_INT_RXOVERRUN |
+					 TSI108_INT_RXERROR |
+					 TSI108_INT_RXWAIT));
+
+		/* IRQs are level-triggered, so no need to re-check */
+		return 0;
+	} else {
+		data->rxpending = 1;
+	}
+
+	return 1;
+}
+
+/* RX interrupt dispatch: mask RX interrupt sources and schedule the
+ * poll routine (which will ack and service them).
+ */
+static void tsi108_rx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A race could cause dev to already be scheduled, so it's not an
+	 * error if that happens (and interrupts shouldn't be re-masked,
+	 * because that can cause harmful races, if poll has already
+	 * unmasked them but not cleared LINK_STATE_SCHED).  
+	 *
+	 * This can happen if this code races with tsi108_poll(), which masks
+	 * the interrupts after tsi108_irq_one() read the mask, but before
+	 * netif_rx_schedule is called.  It could also happen due to calls
+	 * from tsi108_check_rxring().
+	 */
+
+	if (netif_rx_schedule_prep(dev)) {
+		/* Mask, rather than ack, the receive interrupts.  The ack
+		 * will happen in tsi108_poll().
+		 */
+
+		TSI_WRITE(TSI108_EC_INTMASK,
+				     TSI_READ(TSI108_EC_INTMASK) |
+				     TSI108_INT_RXQUEUE0
+				     | TSI108_INT_RXTHRESH |
+				     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
+				     TSI108_INT_RXWAIT);
+		__netif_rx_schedule(dev);
+	} else {
+		if (!netif_running(dev)) {
+			/* This can happen if an interrupt occurs while the
+			 * interface is being brought down, as the START
+			 * bit is cleared before the stop function is called.
+			 *
+			 * In this case, the interrupts must be masked, or
+			 * they will continue indefinitely.
+			 *
+			 * There's a race here if the interface is brought down
+			 * and then up in rapid succession, as the device could
+			 * be made running after the above check and before
+			 * the masking below.  This will only happen if the IRQ
+			 * thread has a lower priority than the task bringing
+			 * up the interface.  Fixing this race would likely
+			 * require changes in generic code.
+			 */
+
+			TSI_WRITE(TSI108_EC_INTMASK,
+					     TSI_READ
+					     (TSI108_EC_INTMASK) |
+					     TSI108_INT_RXQUEUE0 |
+					     TSI108_INT_RXTHRESH |
+					     TSI108_INT_RXOVERRUN |
+					     TSI108_INT_RXERROR |
+					     TSI108_INT_RXWAIT);
+		}
+	}
+}
+
+/* If the RX ring has run out of memory, try periodically
+ * to allocate some more, as otherwise poll would never
+ * get called (apart from the initial end-of-queue condition).
+ *
+ * This is called once per second (by default) from the thread.
+ */
+
+static void tsi108_check_rxring(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	/* A poll is scheduled, as opposed to calling tsi108_refill_rx 
+	 * directly, so as to keep the receive path single-threaded
+	 * (and thus not needing a lock).
+	 */
+
+	if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
+		tsi108_rx_int(dev);
+}
+
+/* TX interrupt service: ack TX events, report queue errors, and
+ * reclaim completed descriptors under txlock.
+ */
+static void tsi108_tx_int(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 estat = TSI_READ(TSI108_EC_TXESTAT);
+
+	TSI_WRITE(TSI108_EC_TXESTAT, estat);
+	TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
+			     TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
+	if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
+		u32 err = TSI_READ(TSI108_EC_TXERR);
+		TSI_WRITE(TSI108_EC_TXERR, err);
+
+		if (err && net_ratelimit())
+			printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
+	}
+
+	if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
+		/* Called from hard IRQ context, so plain spin_lock suffices. */
+		spin_lock(&data->txlock);
+		tsi108_complete_tx(dev);
+		spin_unlock(&data->txlock);
+	}
+}
+
+
+/* Top-level interrupt handler.  Filters out masked sources, then
+ * dispatches TX, RX, switch-fabric (SFN) and statistics-carry events.
+ * Returns IRQ_NONE when no event bit is pending (shared IRQ line).
+ */
+static irqreturn_t tsi108_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 stat = TSI_READ(TSI108_EC_INTSTAT);
+
+	if (!(stat & TSI108_INT_ANY))
+		return IRQ_NONE;	/* Not our interrupt */
+
+	/* Ignore sources that are currently masked. */
+	stat &= ~TSI_READ(TSI108_EC_INTMASK);
+
+	if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
+		    TSI108_INT_TXERROR))
+		tsi108_tx_int(dev);
+	if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
+		    TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
+		    TSI108_INT_RXERROR))
+		tsi108_rx_int(dev);
+
+	if (stat & TSI108_INT_SFN) {
+		if (net_ratelimit())
+			printk(KERN_DEBUG "%s: SFN error\n", dev->name);
+		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
+	}
+
+	if (stat & TSI108_INT_STATCARRY) {
+		tsi108_stat_carry(dev);
+		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Disable both DMA queues and busy-wait (up to ~10ms each) for the
+ * TX then RX engines to report idle; warn on timeout.
+ */
+static void tsi108_stop_ethernet(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	int i = 1000;
+	/* Disable all TX and RX queues ... */
+	TSI_WRITE(TSI108_EC_TXCTRL, 0);
+	TSI_WRITE(TSI108_EC_RXCTRL, 0);
+
+	/* ...and wait for them to become idle */
+	while(i--) {
+		if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
+			break;
+		udelay(10);
+	}
+	i = 1000;
+	while(i--){
+		if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
+			return;
+		udelay(10);
+	}
+	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+}
+
+/* Pulse the reset bits of the MAC, statistics block, TX/RX engines and
+ * MII management interface in sequence (100us settle time each), then
+ * program the MII management clock divider.  The ordering of these
+ * register writes is deliberate; do not reorder.
+ */
+static void tsi108_reset_ether(struct tsi108_prv_data * data)
+{
+	TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
+	udelay(100);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_PORTCTRL,
+			     TSI_READ(TSI108_EC_PORTCTRL) &
+			     ~TSI108_EC_PORTCTRL_STATRST);
+
+	TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_TXCFG,
+			     TSI_READ(TSI108_EC_TXCFG) &
+			     ~TSI108_EC_TXCFG_RST);
+
+	TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_EC_RXCFG,
+			     TSI_READ(TSI108_EC_RXCFG) &
+			     ~TSI108_EC_RXCFG_RST);
+
+	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+			     TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
+			     TSI108_MAC_MII_MGMT_RST);
+	udelay(100);
+	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
+			     (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
+			     ~(TSI108_MAC_MII_MGMT_RST |
+			       TSI108_MAC_MII_MGMT_CLK)) | 0x07);
+}
+
+/* Read the MAC address out of the chip's address registers into
+ * dev->dev_addr.  If the registers are blank (e.g. firmware didn't
+ * program them), fall back to a fixed Tundra-prefixed address keyed
+ * off the PHY number and write it back.  Returns 0 on success,
+ * -EINVAL if the resulting address is invalid.
+ */
+static int tsi108_get_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
+	u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+
+	/* Note that the octets are reversed from what the manual says,
+	 * producing an even weirder ordering...
+	 */
+	if (word2 == 0 && word1 == 0) {
+		dev->dev_addr[0] = 0x00;
+		dev->dev_addr[1] = 0x06;
+		dev->dev_addr[2] = 0xd2;
+		dev->dev_addr[3] = 0x00;
+		dev->dev_addr[4] = 0x00;
+		if (0x8 == data->phy)
+			dev->dev_addr[5] = 0x01;
+		else
+			dev->dev_addr[5] = 0x02;
+
+		word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+		word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+		    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+		TSI_WRITE(TSI108_MAC_ADDR1, word1);
+		TSI_WRITE(TSI108_MAC_ADDR2, word2);
+	} else {
+		dev->dev_addr[0] = (word2 >> 16) & 0xff;
+		dev->dev_addr[1] = (word2 >> 24) & 0xff;
+		dev->dev_addr[2] = (word1 >> 0) & 0xff;
+		dev->dev_addr[3] = (word1 >> 8) & 0xff;
+		dev->dev_addr[4] = (word1 >> 16) & 0xff;
+		dev->dev_addr[5] = (word1 >> 24) & 0xff;
+	}
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		/* Fix: the log level macro belongs outside the format string. */
+		printk(KERN_ERR "word1: %08x, word2: %08x\n", word1, word2);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* net_device set_mac_address hook.  'addr' is a struct sockaddr; the
+ * MAC itself lives in sa_data, i.e. at byte offset 2.  Returns 0 on
+ * success or -EINVAL for an invalid address.
+ */
+static int tsi108_set_mac(struct net_device *dev, void *addr)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 word1, word2;
+	int i;
+
+	/* Fix: validate the MAC bytes at sa_data (offset 2), not the
+	 * start of the sockaddr, which begins with sa_family.
+	 */
+	if (!is_valid_ether_addr(((unsigned char *)addr) + 2))
+		return -EINVAL;
+
+	for (i = 0; i < 6; i++)
+		/* +2 is for the offset of the HW addr type */
+		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+
+	/* Same odd octet ordering as tsi108_get_mac. */
+	word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
+
+	word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |
+	    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);
+
+	spin_lock_irq(&data->misclock);
+	TSI_WRITE(TSI108_MAC_ADDR1, word1);
+	TSI_WRITE(TSI108_MAC_ADDR2, word2);
+	spin_lock(&data->txlock);
+
+	if (data->txfree && data->link_up)
+		netif_wake_queue(dev);
+
+	spin_unlock(&data->txlock);
+	spin_unlock_irq(&data->misclock);
+	return 0;
+}
+
+/* Protected by dev->xmit_lock. */
+/* Program RX filtering: promiscuous passes everything; otherwise a
+ * 512-bit multicast hash table (16 x 32-bit words, indexed by the top
+ * 9 bits of the Ethernet CRC) is rebuilt and loaded into the chip.
+ */
+static void tsi108_set_rx_mode(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);
+
+	if (dev->flags & IFF_PROMISC) {
+		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
+		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
+		goto out;
+	}
+
+	rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
+
+	if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+		int i;
+		struct dev_mc_list *mc = dev->mc_list;
+		rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
+
+		memset(data->mc_hash, 0, sizeof(data->mc_hash));
+
+		while (mc) {
+			u32 hash, crc;
+
+			if (mc->dmi_addrlen == 6) {
+				crc = ether_crc(6, mc->dmi_addr);
+				hash = crc >> 23;
+
+				__set_bit(hash, &data->mc_hash[0]);
+			} else {
+				printk(KERN_ERR
+				       "%s: got multicast address of length %d "
+				       "instead of 6.\n", dev->name,
+				       mc->dmi_addrlen);
+			}
+
+			mc = mc->next;
+		}
+
+		/* Auto-incrementing writes load the whole table. */
+		TSI_WRITE(TSI108_EC_HASHADDR,
+				     TSI108_EC_HASHADDR_AUTOINC |
+				     TSI108_EC_HASHADDR_MCAST);
+
+		for (i = 0; i < 16; i++) {
+			/* The manual says that the hardware may drop
+			 * back-to-back writes to the data register.
+			 */
+			udelay(1);
+			TSI_WRITE(TSI108_EC_HASHDATA,
+					     data->mc_hash[i]);
+		}
+	}
+
+      out:
+	TSI_WRITE(TSI108_EC_RXCFG, rxcfg);
+}
+
+/* Reset and bring up the PHY: wait for reset completion, apply any
+ * board-specific PHY fixups, kick off autonegotiation, configure the
+ * TBI, and poll for link-up.  Sets data->link_up/phy_ok/init_media.
+ */
+static void tsi108_init_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	u32 i = 0;
+	u16 phyval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+
+	tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
+	/* Fix: the counter was initialized to 0, so the old "while (i--)"
+	 * never polled for reset completion and left i underflowed, which
+	 * then falsely tripped the link-timeout counter below.
+	 */
+	for (i = 0; i < 1000; i++) {
+		if (!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
+			break;
+		udelay(10);
+	}
+	if (i == 1000)
+		printk(KERN_ERR "%s function time out \n", __FUNCTION__);
+
+#if (TSI108_PHY_TYPE == PHY_BCM54XX)	/* Broadcom BCM54xx PHY */
+	tsi108_write_mii(data, 0x09, 0x0300);
+	tsi108_write_mii(data, 0x10, 0x1020);
+	tsi108_write_mii(data, 0x1c, 0x8c00);
+#endif
+
+	tsi108_write_mii(data,
+			 MII_BMCR,
+			 BMCR_ANENABLE | BMCR_ANRESTART);
+	while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
+		cpu_relax();
+
+	/* Set G/MII mode and receive clock select in TBI control #2.  The
+	 * second port won't work if this isn't done, even though we don't
+	 * use TBI mode.
+	 */
+
+	tsi108_write_tbi(data, 0x11, 0x30);
+
+	/* FIXME: It seems to take more than 2 back-to-back reads to the
+	 * PHY_STAT register before the link up status bit is set.
+	 */
+
+	data->link_up = 1;
+
+	i = 0;	/* re-use as the link-status poll counter */
+	while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
+		 BMSR_LSTATUS)) {
+		if (i++ > (MII_READ_DELAY / 10)) {
+			data->link_up = 0;
+			break;
+		}
+		/* Drop the lock while sleeping between polls. */
+		spin_unlock_irqrestore(&phy_lock, flags);
+		msleep(10);
+		spin_lock_irqsave(&phy_lock, flags);
+	}
+
+	printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
+	data->phy_ok = 1;
+	data->init_media = 1;
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* Power the PHY down and mark it unusable (clears data->phy_ok). */
+static void tsi108_kill_phy(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&phy_lock, flags);
+	tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
+	data->phy_ok = 0;
+	spin_unlock_irqrestore(&phy_lock, flags);
+}
+
+/* net_device open hook: claim the IRQ, allocate and populate the RX/TX
+ * descriptor rings, bring up the PHY, start the watchdog timer, and
+ * enable the MAC.  Returns 0 or a negative errno; on failure all
+ * resources acquired so far are released.
+ */
+static int tsi108_open(struct net_device *dev)
+{
+	int i;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned int rxring_size = TSI108_RXRING_LEN * sizeof(rx_desc);
+	unsigned int txring_size = TSI108_TXRING_LEN * sizeof(tx_desc);
+
+	i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
+	if (i != 0) {
+		printk(KERN_ERR "tsi108_eth%d: Could not allocate IRQ%d.\n",
+		       data->id, data->irq_num);
+		return i;
+	} else {
+		dev->irq = data->irq_num;
+		printk(KERN_NOTICE
+		       "tsi108_open : Port %d Assigned IRQ %d to %s\n",
+		       data->id, dev->irq, dev->name);
+	}
+
+	data->rxring = dma_alloc_coherent(NULL, rxring_size, 
+			&data->rxdma, GFP_KERNEL);
+
+	if (!data->rxring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for rxring!\n");
+		/* Fix: don't leak the IRQ on the failure path. */
+		free_irq(data->irq_num, dev);
+		return -ENOMEM;
+	} else {
+		memset(data->rxring, 0, rxring_size);
+	}
+
+	data->txring = dma_alloc_coherent(NULL, txring_size, 
+			&data->txdma, GFP_KERNEL);
+
+	if (!data->txring) {
+		printk(KERN_DEBUG
+		       "TSI108_ETH: failed to allocate memory for txring!\n");
+		/* Fix: free the IRQ, and release the RX ring with the same
+		 * DMA API that allocated it (was pci_free_consistent).
+		 */
+		free_irq(data->irq_num, dev);
+		dma_free_coherent(NULL, rxring_size, data->rxring,
+				  data->rxdma);
+		return -ENOMEM;
+	} else {
+		memset(data->txring, 0, txring_size);
+	}
+
+	/* Link the RX descriptors into a ring. */
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
+		data->rxring[i].blen = TSI108_RXBUF_SIZE;
+		data->rxring[i].vlan = 0;
+	}
+
+	data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
+
+	data->rxtail = 0;
+	data->rxhead = 0;
+
+	for (i = 0; i < TSI108_RXRING_LEN; i++) {
+		struct sk_buff *skb = dev_alloc_skb(TSI108_RXBUF_SIZE + NET_IP_ALIGN);
+
+		if (!skb) {
+			/* Bah.  No memory for now, but maybe we'll get
+			 * some more later.
+			 * For now, we'll live with the smaller ring.
+			 */
+			printk(KERN_WARNING
+			       "%s: Could only allocate %d receive skb(s).\n",
+			       dev->name, i);
+			data->rxhead = i;
+			break;
+		}
+
+		/* Align the payload on a 4-byte boundary */
+		skb_reserve(skb, 2);
+		data->rxskbs[i] = skb;	/* Fix: was assigned twice */
+		data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
+		data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
+	}
+
+	data->rxfree = i;
+	TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
+
+	/* Link the TX descriptors into a ring. */
+	for (i = 0; i < TSI108_TXRING_LEN; i++) {
+		data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
+		data->txring[i].misc = 0;
+	}
+
+	data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
+	data->txtail = 0;
+	data->txhead = 0;
+	data->txfree = TSI108_TXRING_LEN;
+	TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
+	tsi108_init_phy(dev);
+
+	setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev);
+	mod_timer(&data->timer, jiffies + 1);
+
+	tsi108_restart_rx(data, dev);
+
+	/* Ack everything, then unmask the interrupts we service. */
+	TSI_WRITE(TSI108_EC_INTSTAT, ~0);
+
+	TSI_WRITE(TSI108_EC_INTMASK,
+			     ~(TSI108_INT_TXQUEUE0 | TSI108_INT_RXERROR |
+			       TSI108_INT_RXTHRESH | TSI108_INT_RXQUEUE0 |
+			       TSI108_INT_RXOVERRUN | TSI108_INT_RXWAIT |
+			       TSI108_INT_SFN | TSI108_INT_STATCARRY));
+
+	TSI_WRITE(TSI108_MAC_CFG1,
+			     TSI108_MAC_CFG1_RXEN | TSI108_MAC_CFG1_TXEN);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* net_device stop hook: quiesce the hardware, drop the PHY, drain and
+ * free both descriptor rings, and release the IRQ.  Always returns 0.
+ */
+static int tsi108_close(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	del_timer_sync(&data->timer);
+
+	tsi108_stop_ethernet(dev);
+	tsi108_kill_phy(dev);
+	/* Mask all interrupts and disable the MAC before freeing. */
+	TSI_WRITE(TSI108_EC_INTMASK, ~0);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	/* Check for any pending TX packets, and drop them. */
+
+	while (!data->txfree || data->txhead != data->txtail) {
+		int tx = data->txtail;
+		struct sk_buff *skb;
+		skb = data->txskbs[tx];
+		data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
+		data->txfree++;
+		dev_kfree_skb(skb);
+	}
+
+	synchronize_irq(data->irq_num);
+	free_irq(data->irq_num, dev);
+
+	/* Discard the RX ring. */
+
+	while (data->rxfree) {
+		int rx = data->rxtail;
+		struct sk_buff *skb;
+
+		skb = data->rxskbs[rx];
+		data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
+		data->rxfree--;
+		dev_kfree_skb(skb);
+	}
+
+	dma_free_coherent(0,
+			    TSI108_RXRING_LEN * sizeof(rx_desc),
+			    data->rxring, data->rxdma);
+	dma_free_coherent(0,
+			    TSI108_TXRING_LEN * sizeof(tx_desc),
+			    data->txring, data->txdma);
+
+	return 0;
+}
+
+/* One-time MAC/engine setup after reset: frame format, TX FIFO
+ * thresholds, statistics carry masks, byte/word swapping and port
+ * routing for the DMA queues.  Leaves all interrupts masked; they are
+ * unmasked in tsi108_open().
+ */
+static void tsi108_init_mac(struct net_device *dev)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	TSI_WRITE(TSI108_MAC_CFG2, TSI108_MAC_CFG2_DFLT_PREAMBLE |
+			     TSI108_MAC_CFG2_PADCRC);
+
+	TSI_WRITE(TSI108_EC_TXTHRESH,
+			     (192 << TSI108_EC_TXTHRESH_STARTFILL) |
+			     (192 << TSI108_EC_TXTHRESH_STOPFILL));
+
+	/* Enable carry interrupts only for counters we actually track. */
+	TSI_WRITE(TSI108_STAT_CARRYMASK1,
+			     ~(TSI108_STAT_CARRY1_RXBYTES |
+			       TSI108_STAT_CARRY1_RXPKTS |
+			       TSI108_STAT_CARRY1_RXFCS |
+			       TSI108_STAT_CARRY1_RXMCAST |
+			       TSI108_STAT_CARRY1_RXALIGN |
+			       TSI108_STAT_CARRY1_RXLENGTH |
+			       TSI108_STAT_CARRY1_RXRUNT |
+			       TSI108_STAT_CARRY1_RXJUMBO |
+			       TSI108_STAT_CARRY1_RXFRAG |
+			       TSI108_STAT_CARRY1_RXJABBER |
+			       TSI108_STAT_CARRY1_RXDROP));
+
+	TSI_WRITE(TSI108_STAT_CARRYMASK2,
+			     ~(TSI108_STAT_CARRY2_TXBYTES |
+			       TSI108_STAT_CARRY2_TXPKTS |
+			       TSI108_STAT_CARRY2_TXEXDEF |
+			       TSI108_STAT_CARRY2_TXEXCOL |
+			       TSI108_STAT_CARRY2_TXTCOL |
+			       TSI108_STAT_CARRY2_TXPAUSE));
+
+	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATEN);
+	TSI_WRITE(TSI108_MAC_CFG1, 0);
+
+	TSI_WRITE(TSI108_EC_RXCFG,
+			     TSI108_EC_RXCFG_SE | TSI108_EC_RXCFG_BFE);
+
+	TSI_WRITE(TSI108_EC_TXQ_CFG, TSI108_EC_TXQ_CFG_DESC_INT |
+			     TSI108_EC_TXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_TXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_CFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_RXQ_CFG, TSI108_EC_RXQ_CFG_DESC_INT |
+			     TSI108_EC_RXQ_CFG_EOQ_OWN_INT |
+			     TSI108_EC_RXQ_CFG_WSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_CFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_TXQ_BUFCFG,
+			     TSI108_EC_TXQ_BUFCFG_BURST256 |
+			     TSI108_EC_TXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_TXQ_BUFCFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_RXQ_BUFCFG,
+			     TSI108_EC_RXQ_BUFCFG_BURST256 |
+			     TSI108_EC_RXQ_BUFCFG_BSWP | (TSI108_PBM_PORT <<
+						TSI108_EC_RXQ_BUFCFG_SFNPORT));
+
+	TSI_WRITE(TSI108_EC_INTMASK, ~0);
+}
+
+/* Delegate MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) to the
+ * generic MII layer.
+ */
+static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct tsi108_prv_data *data = netdev_priv(dev);
+	return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
+}
+
+/* Platform-device probe: allocate the net_device, map the MAC and PHY
+ * register windows, set up the MII interface and netdev ops, reset the
+ * hardware, read the MAC address, and register the interface.
+ * Returns 0 or a negative errno; all partially-acquired resources are
+ * released on failure.
+ */
+static int
+tsi108_init_one(struct platform_device *pdev)
+{
+	struct net_device *dev = NULL;
+	struct tsi108_prv_data *data = NULL;
+	hw_info *einfo;
+	int err = 0;
+	
+	einfo = pdev->dev.platform_data;
+	
+	if (NULL == einfo) {
+		printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
+		       pdev->id);
+		return -ENODEV;
+	}
+
+	/* Create an ethernet device instance */
+
+	dev = alloc_etherdev(sizeof(struct tsi108_prv_data));
+	if (!dev) {
+		printk("tsi108_eth: Could not allocate a device structure\n");
+		return -ENOMEM;
+	}
+
+	printk("tsi108_eth%d: probe...\n", pdev->id);
+	data = netdev_priv(dev);
+
+	pr_debug("tsi108_eth%d:regs:phyresgs:phy:irq_num=0x%x:0x%x:0x%x:0x%x\n",
+			pdev->id, einfo->regs, einfo->phyregs,
+			einfo->phy, einfo->irq_num);
+	
+	data->regs = ioremap(einfo->regs, 0x400);
+	if (NULL == data->regs) {
+		err = -ENOMEM;
+		goto regs_fail;
+	}
+	
+	data->phyregs = ioremap(einfo->phyregs, 0x400);
+	if (NULL == data->phyregs) {
+		err = -ENOMEM;
+		/* Fix: unmap data->regs too (was leaked via regs_fail). */
+		goto phyregs_fail;
+	}
+/* MII setup */
+	data->mii_if.dev = dev;
+	data->mii_if.mdio_read = tsi108_mdio_read;
+	data->mii_if.mdio_write = tsi108_mdio_write;
+	data->mii_if.phy_id = einfo->phy;
+	data->mii_if.phy_id_mask = 0x1f;
+	data->mii_if.reg_num_mask = 0x1f;
+	data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
+	
+	data->phy = einfo->phy;
+	data->irq_num = einfo->irq_num;
+	data->id = pdev->id;
+	dev->open = tsi108_open;
+	dev->stop = tsi108_close;
+	dev->hard_start_xmit = tsi108_send_packet;
+	dev->set_mac_address = tsi108_set_mac;
+	dev->set_multicast_list = tsi108_set_rx_mode;
+	dev->get_stats = tsi108_get_stats;
+	dev->poll = tsi108_poll;
+	dev->do_ioctl = tsi108_do_ioctl;
+	dev->weight = 64;  /* 64 is more suitable for GigE interface - klai */
+
+	/* Apparently, the Linux networking code won't use scatter-gather
+	 * if the hardware doesn't do checksums.  However, it's faster
+	 * to checksum in place and use SG, as (among other reasons)
+	 * the cache won't be dirtied (which then has to be flushed
+	 * before DMA).  The checksumming is done by the driver (via
+	 * a new function skb_csum_dev() in net/core/skbuff.c).
+	 */
+
+	dev->features = NETIF_F_HIGHDMA;
+	SET_MODULE_OWNER(dev);
+
+	spin_lock_init(&data->txlock);
+	spin_lock_init(&data->misclock);
+
+	tsi108_reset_ether(data);
+	tsi108_kill_phy(dev);
+
+	if ((err = tsi108_get_mac(dev)) != 0) {
+		printk(KERN_ERR "%s: Invalid MAC address.  Please correct.\n",
+		       dev->name);
+		goto register_fail;
+	}
+
+	tsi108_init_mac(dev);
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
+				dev->name);
+		goto register_fail;
+	}
+
+	printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: "
+	       "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+#ifdef DEBUG
+	data->msg_enable = DEBUG;
+	dump_eth_one(dev);
+#endif
+
+	return 0;
+	
+/* Unwind in reverse order of acquisition. */
+register_fail:
+	iounmap(data->phyregs);
+	
+phyregs_fail:
+	iounmap(data->regs);
+	
+regs_fail:
+	free_netdev(dev);
+	return err;
+}
+
+/* There's no way to either get interrupts from the PHY when
+ * something changes, or to have the Tsi108 automatically communicate
+ * with the PHY to reconfigure itself.
+ *
+ * Thus, we have to do it using a timer. 
+ */
+
+/* Timer callback: poll PHY/link state and the RX ring, then re-arm. */
+static void tsi108_timed_checker(unsigned long dev_ptr)
+{
+	struct net_device *dev = (struct net_device *)dev_ptr;
+	struct tsi108_prv_data *data = netdev_priv(dev);
+
+	tsi108_check_phy(dev);
+	tsi108_check_rxring(dev);
+	mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
+}
+
+/* Module init: register the platform driver.  Returns 0 or the
+ * negative errno from platform_driver_register().
+ */
+static int tsi108_ether_init(void)
+{
+	int ret;
+	ret = platform_driver_register (&tsi_eth_driver);
+	if (ret < 0){
+		printk("tsi108_ether_init: error initializing ethernet "
+		       "device\n");
+		return ret;
+	}
+	return 0;
+}
+
+/* Platform-device remove: unregister the netdev, stop the hardware,
+ * unmap both register windows and free the device.
+ */
+static int tsi108_ether_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct tsi108_prv_data *priv = netdev_priv(dev);
+	
+	unregister_netdev(dev);
+	tsi108_stop_ethernet(dev);
+	platform_set_drvdata(pdev, NULL);
+	iounmap(priv->regs);
+	iounmap(priv->phyregs);
+	free_netdev(dev);
+	
+	return 0;
+}
+/* Module exit: unregister the platform driver. */
+static void tsi108_ether_exit(void)
+{
+	platform_driver_unregister(&tsi_eth_driver);
+}
+
+module_init(tsi108_ether_init);
+module_exit(tsi108_ether_exit);
+
+MODULE_AUTHOR("Tundra Semiconductor Corporation");
+MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/tsi108_eth.h b/drivers/net/tsi108_eth.h
new file mode 100644
index 0000000..77a769d
--- /dev/null
+++ b/drivers/net/tsi108_eth.h
@@ -0,0 +1,365 @@
+/*
+ * (C) Copyright 2005 Tundra Semiconductor Corp.
+ * Kong Lai, <kong.lai@tundra.com>.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/*
+ * net/tsi108_eth.h - definitions for Tsi108 GIGE network controller.
+ */
+
+#ifndef __TSI108_ETH_H
+#define __TSI108_ETH_H
+
+#include <linux/types.h>
+
+#define TSI_WRITE(offset, val) \
+	out_be32((data->regs + (offset)), val)
+
+#define TSI_READ(offset) \
+	in_be32((data->regs + (offset)))
+
+#define TSI_WRITE_PHY(offset, val) \
+	out_be32((data->phyregs + (offset)), val)
+
+#define TSI_READ_PHY(offset) \
+	in_be32((data->phyregs + (offset)))
+
+/*
+ * PHY Configuration Options
+ *
+ * NOTE: Enable set of definitions corresponding to your board type
+ */
+#define PHY_MV88E	1	/* Marvell 88Exxxx PHY */
+#define PHY_BCM54XX	2	/* Broadcom BCM54xx PHY */
+#define TSI108_PHY_TYPE	PHY_MV88E
+
+/*
+ * TSI108 GIGE port registers
+ */
+
+#define TSI108_ETH_PORT_NUM		2
+#define TSI108_PBM_PORT			2
+#define TSI108_SDRAM_PORT		4
+
+#define TSI108_MAC_CFG1			(0x000)
+#define TSI108_MAC_CFG1_SOFTRST		(1 << 31)
+#define TSI108_MAC_CFG1_LOOPBACK	(1 << 8)
+#define TSI108_MAC_CFG1_RXEN		(1 << 2)
+#define TSI108_MAC_CFG1_TXEN		(1 << 0)
+
+#define TSI108_MAC_CFG2			(0x004)
+#define TSI108_MAC_CFG2_DFLT_PREAMBLE	(7 << 12)
+#define TSI108_MAC_CFG2_IFACE_MASK	(3 << 8)
+#define TSI108_MAC_CFG2_NOGIG		(1 << 8)
+#define TSI108_MAC_CFG2_GIG		(2 << 8)
+#define TSI108_MAC_CFG2_PADCRC		(1 << 2)
+#define TSI108_MAC_CFG2_FULLDUPLEX	(1 << 0)
+
+#define TSI108_MAC_MII_MGMT_CFG		(0x020)
+#define TSI108_MAC_MII_MGMT_CLK		(7 << 0)
+#define TSI108_MAC_MII_MGMT_RST		(1 << 31)
+
+#define TSI108_MAC_MII_CMD		(0x024)
+#define TSI108_MAC_MII_CMD_READ		(1 << 0)
+
+#define TSI108_MAC_MII_ADDR		(0x028)
+#define TSI108_MAC_MII_ADDR_REG		0
+#define TSI108_MAC_MII_ADDR_PHY		8
+
+#define TSI108_MAC_MII_DATAOUT		(0x02c)
+#define TSI108_MAC_MII_DATAIN		(0x030)
+
+#define TSI108_MAC_MII_IND		(0x034)
+#define TSI108_MAC_MII_IND_NOTVALID	(1 << 2)
+#define TSI108_MAC_MII_IND_SCANNING	(1 << 1)
+#define TSI108_MAC_MII_IND_BUSY		(1 << 0)
+
+#define TSI108_MAC_IFCTRL		(0x038)
+#define TSI108_MAC_IFCTRL_PHYMODE	(1 << 24)
+
+#define TSI108_MAC_ADDR1		(0x040)
+#define TSI108_MAC_ADDR2		(0x044)
+
+#define TSI108_STAT_RXBYTES		(0x06c)
+#define TSI108_STAT_RXBYTES_CARRY	(1 << 24)
+
+#define TSI108_STAT_RXPKTS		(0x070)
+#define TSI108_STAT_RXPKTS_CARRY	(1 << 18)
+
+#define TSI108_STAT_RXFCS		(0x074)
+#define TSI108_STAT_RXFCS_CARRY		(1 << 12)
+
+#define TSI108_STAT_RXMCAST		(0x078)
+#define TSI108_STAT_RXMCAST_CARRY	(1 << 18)
+
+#define TSI108_STAT_RXALIGN		(0x08c)
+#define TSI108_STAT_RXALIGN_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXLENGTH		(0x090)
+#define TSI108_STAT_RXLENGTH_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXRUNT		(0x09c)
+#define TSI108_STAT_RXRUNT_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXJUMBO		(0x0a0)
+#define TSI108_STAT_RXJUMBO_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXFRAG		(0x0a4)
+#define TSI108_STAT_RXFRAG_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXJABBER		(0x0a8)
+#define TSI108_STAT_RXJABBER_CARRY	(1 << 12)
+
+#define TSI108_STAT_RXDROP		(0x0ac)
+#define TSI108_STAT_RXDROP_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXBYTES		(0x0b0)
+#define TSI108_STAT_TXBYTES_CARRY	(1 << 24)
+
+#define TSI108_STAT_TXPKTS		(0x0b4)
+#define TSI108_STAT_TXPKTS_CARRY	(1 << 18)
+
+#define TSI108_STAT_TXEXDEF		(0x0c8)
+#define TSI108_STAT_TXEXDEF_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXEXCOL		(0x0d8)
+#define TSI108_STAT_TXEXCOL_CARRY	(1 << 12)
+
+#define TSI108_STAT_TXTCOL		(0x0dc)
+#define TSI108_STAT_TXTCOL_CARRY	(1 << 13)
+
+#define TSI108_STAT_TXPAUSEDROP		(0x0e4)
+#define TSI108_STAT_TXPAUSEDROP_CARRY	(1 << 12)
+
+#define TSI108_STAT_CARRY1		(0x100)
+#define TSI108_STAT_CARRY1_RXBYTES	(1 << 16)
+#define TSI108_STAT_CARRY1_RXPKTS	(1 << 15)
+#define TSI108_STAT_CARRY1_RXFCS	(1 << 14)
+#define TSI108_STAT_CARRY1_RXMCAST	(1 << 13)
+#define TSI108_STAT_CARRY1_RXALIGN	(1 << 8)
+#define TSI108_STAT_CARRY1_RXLENGTH	(1 << 7)
+#define TSI108_STAT_CARRY1_RXRUNT	(1 << 4)
+#define TSI108_STAT_CARRY1_RXJUMBO	(1 << 3)
+#define TSI108_STAT_CARRY1_RXFRAG	(1 << 2)
+#define TSI108_STAT_CARRY1_RXJABBER	(1 << 1)
+#define TSI108_STAT_CARRY1_RXDROP	(1 << 0)
+
+#define TSI108_STAT_CARRY2		(0x104)
+#define TSI108_STAT_CARRY2_TXBYTES	(1 << 13)
+#define TSI108_STAT_CARRY2_TXPKTS	(1 << 12)
+#define TSI108_STAT_CARRY2_TXEXDEF	(1 << 7)
+#define TSI108_STAT_CARRY2_TXEXCOL	(1 << 3)
+#define TSI108_STAT_CARRY2_TXTCOL	(1 << 2)
+#define TSI108_STAT_CARRY2_TXPAUSE	(1 << 0)
+
+#define TSI108_STAT_CARRYMASK1		(0x108)
+#define TSI108_STAT_CARRYMASK2		(0x10c)
+
+#define TSI108_EC_PORTCTRL		(0x200)
+#define TSI108_EC_PORTCTRL_STATRST	(1 << 31)
+#define TSI108_EC_PORTCTRL_STATEN	(1 << 28)
+#define TSI108_EC_PORTCTRL_NOGIG	(1 << 18)
+#define TSI108_EC_PORTCTRL_HALFDUPLEX	(1 << 16)
+
+#define TSI108_EC_INTSTAT		(0x204)
+#define TSI108_EC_INTMASK		(0x208)
+
+#define TSI108_INT_ANY			(1 << 31)
+#define TSI108_INT_SFN			(1 << 30)
+#define TSI108_INT_RXIDLE		(1 << 29)
+#define TSI108_INT_RXABORT		(1 << 28)
+#define TSI108_INT_RXERROR		(1 << 27)
+#define TSI108_INT_RXOVERRUN		(1 << 26)
+#define TSI108_INT_RXTHRESH		(1 << 25)
+#define TSI108_INT_RXWAIT		(1 << 24)
+#define TSI108_INT_RXQUEUE0		(1 << 16)
+#define TSI108_INT_STATCARRY		(1 << 15)
+#define TSI108_INT_TXIDLE		(1 << 13)
+#define TSI108_INT_TXABORT		(1 << 12)
+#define TSI108_INT_TXERROR		(1 << 11)
+#define TSI108_INT_TXUNDERRUN		(1 << 10)
+#define TSI108_INT_TXTHRESH		(1 <<  9)
+#define TSI108_INT_TXWAIT		(1 <<  8)
+#define TSI108_INT_TXQUEUE0		(1 <<  0)
+
+#define TSI108_EC_TXCFG			(0x220)
+#define TSI108_EC_TXCFG_RST		(1 << 31)
+
+#define TSI108_EC_TXCTRL		(0x224)
+#define TSI108_EC_TXCTRL_IDLEINT	(1 << 31)
+#define TSI108_EC_TXCTRL_ABORT		(1 << 30)
+#define TSI108_EC_TXCTRL_GO		(1 << 15)
+#define TSI108_EC_TXCTRL_QUEUE0		(1 <<  0)
+
+#define TSI108_EC_TXSTAT		(0x228)
+#define TSI108_EC_TXSTAT_ACTIVE		(1 << 15)
+#define TSI108_EC_TXSTAT_QUEUE0		(1 << 0)
+
+#define TSI108_EC_TXESTAT		(0x22c)
+#define TSI108_EC_TXESTAT_Q0_ERR	(1 << 24)
+#define TSI108_EC_TXESTAT_Q0_DESCINT	(1 << 16)
+#define TSI108_EC_TXESTAT_Q0_EOF	(1 <<  8)
+#define TSI108_EC_TXESTAT_Q0_EOQ	(1 <<  0)
+
+#define TSI108_EC_TXERR			(0x278)
+
+#define TSI108_EC_TXQ_CFG		(0x280)
+#define TSI108_EC_TXQ_CFG_DESC_INT	(1 << 20)
+#define TSI108_EC_TXQ_CFG_EOQ_OWN_INT	(1 << 19)
+#define TSI108_EC_TXQ_CFG_WSWP		(1 << 11)
+#define TSI108_EC_TXQ_CFG_BSWP		(1 << 10)
+#define TSI108_EC_TXQ_CFG_SFNPORT	0
+
+#define TSI108_EC_TXQ_BUFCFG		(0x284)
+#define TSI108_EC_TXQ_BUFCFG_BURST8	(0 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST32	(1 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST128	(2 << 8)
+#define TSI108_EC_TXQ_BUFCFG_BURST256	(3 << 8)
+#define TSI108_EC_TXQ_BUFCFG_WSWP	(1 << 11)
+#define TSI108_EC_TXQ_BUFCFG_BSWP	(1 << 10)
+#define TSI108_EC_TXQ_BUFCFG_SFNPORT	0
+
+#define TSI108_EC_TXQ_PTRLOW		(0x288)
+
+#define TSI108_EC_TXQ_PTRHIGH		(0x28c)
+#define TSI108_EC_TXQ_PTRHIGH_VALID	(1 << 31)
+
+#define TSI108_EC_TXTHRESH		(0x230)
+#define TSI108_EC_TXTHRESH_STARTFILL	0
+#define TSI108_EC_TXTHRESH_STOPFILL	16
+
+#define TSI108_EC_RXCFG			(0x320)
+#define TSI108_EC_RXCFG_RST		(1 << 31)
+
+#define TSI108_EC_RXSTAT		(0x328)
+#define TSI108_EC_RXSTAT_ACTIVE		(1 << 15)
+#define TSI108_EC_RXSTAT_QUEUE0		(1 << 0)
+
+#define TSI108_EC_RXESTAT		(0x32c)
+#define TSI108_EC_RXESTAT_Q0_ERR	(1 << 24)
+#define TSI108_EC_RXESTAT_Q0_DESCINT	(1 << 16)
+#define TSI108_EC_RXESTAT_Q0_EOF	(1 <<  8)
+#define TSI108_EC_RXESTAT_Q0_EOQ	(1 <<  0)
+
+#define TSI108_EC_HASHADDR		(0x360)
+#define TSI108_EC_HASHADDR_AUTOINC	(1 << 31)
+#define TSI108_EC_HASHADDR_DO1STREAD	(1 << 30)
+#define TSI108_EC_HASHADDR_UNICAST	(0 <<  4)
+#define TSI108_EC_HASHADDR_MCAST	(1 <<  4)
+
+#define TSI108_EC_HASHDATA		(0x364)
+
+#define TSI108_EC_RXQ_PTRLOW		(0x388)
+
+#define TSI108_EC_RXQ_PTRHIGH		(0x38c)
+#define TSI108_EC_RXQ_PTRHIGH_VALID	(1 << 31)
+
+/* Station Enable -- accept packets destined for us */
+#define TSI108_EC_RXCFG_SE		(1 << 13)
+/* Unicast Frame Enable -- for packets not destined for us */
+#define TSI108_EC_RXCFG_UFE		(1 << 12)
+/* Multicast Frame Enable */
+#define TSI108_EC_RXCFG_MFE		(1 << 11)
+/* Broadcast Frame Enable */
+#define TSI108_EC_RXCFG_BFE		(1 << 10)
+#define TSI108_EC_RXCFG_UC_HASH		(1 <<  9)
+#define TSI108_EC_RXCFG_MC_HASH		(1 <<  8)
+
+#define TSI108_EC_RXQ_CFG		(0x380)
+#define TSI108_EC_RXQ_CFG_DESC_INT	(1 << 20)
+#define TSI108_EC_RXQ_CFG_EOQ_OWN_INT	(1 << 19)
+#define TSI108_EC_RXQ_CFG_WSWP		(1 << 11)
+#define TSI108_EC_RXQ_CFG_BSWP		(1 << 10)
+#define TSI108_EC_RXQ_CFG_SFNPORT	0
+
+#define TSI108_EC_RXQ_BUFCFG		(0x384)
+#define TSI108_EC_RXQ_BUFCFG_BURST8	(0 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST32	(1 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST128	(2 << 8)
+#define TSI108_EC_RXQ_BUFCFG_BURST256	(3 << 8)
+#define TSI108_EC_RXQ_BUFCFG_WSWP	(1 << 11)
+#define TSI108_EC_RXQ_BUFCFG_BSWP	(1 << 10)
+#define TSI108_EC_RXQ_BUFCFG_SFNPORT	0
+
+#define TSI108_EC_RXCTRL		(0x324)
+#define TSI108_EC_RXCTRL_ABORT		(1 << 30)
+#define TSI108_EC_RXCTRL_GO		(1 << 15)
+#define TSI108_EC_RXCTRL_QUEUE0		(1 << 0)
+
+#define TSI108_EC_RXERR			(0x378)
+
+#define TSI108_TX_EOF	(1 << 0)	/* End of frame; last fragment of packet */
+#define TSI108_TX_SOF	(1 << 1)	/* Start of frame; first frag. of packet */
+#define TSI108_TX_VLAN	(1 << 2)	/* Per-frame VLAN: enables VLAN override */
+#define TSI108_TX_HUGE	(1 << 3)	/* Huge frame enable */
+#define TSI108_TX_PAD	(1 << 4)	/* Pad the packet if too short */
+#define TSI108_TX_CRC	(1 << 5)	/* Generate CRC for this packet */
+#define TSI108_TX_INT	(1 << 14)	/* Generate an IRQ after frag. processed */
+#define TSI108_TX_RETRY	(0xf << 16)	/* 4 bit field indicating num. of retries */
+#define TSI108_TX_COL	(1 << 20)	/* Set if a collision occurred */
+#define TSI108_TX_LCOL	(1 << 24)	/* Set if a late collision occurred */
+#define TSI108_TX_UNDER	(1 << 25)	/* Set if a FIFO underrun occurred */
+#define TSI108_TX_RLIM	(1 << 26)	/* Set if the retry limit was reached */
+#define TSI108_TX_OK	(1 << 30)	/* Set if the frame TX was successful */
+#define TSI108_TX_OWN	(1 << 31)	/* Set if the device owns the descriptor */
+
+/* Note: the descriptor layouts assume big-endian byte order. */
+typedef struct {
+	u32 buf0;
+	u32 buf1;		/* Base address of buffer */
+	u32 next0;		/* Address of next descriptor, if any */
+	u32 next1;
+	u16 vlan;		/* VLAN, if override enabled for this packet */
+	u16 len;		/* Length of buffer in bytes */
+	u32 misc;		/* See TSI108_TX_* above */
+	u32 reserved0;		/*reserved0 and reserved1 are added to make the desc */
+	u32 reserved1;		/* 32-byte aligned */
+} __attribute__ ((aligned(32))) tx_desc;
+
+#define TSI108_RX_EOF	(1 << 0)	/* End of frame; last fragment of packet */
+#define TSI108_RX_SOF	(1 << 1)	/* Start of frame; first frag. of packet */
+#define TSI108_RX_VLAN	(1 << 2)	/* Set on SOF if packet has a VLAN */
+#define TSI108_RX_FTYPE	(1 << 3)	/* Length/Type field is type, not length */
+#define TSI108_RX_RUNT	(1 << 4)/* Packet is less than minimum size */
+#define TSI108_RX_HASH	(1 << 7)/* Hash table match */
+#define TSI108_RX_BAD	(1 << 8)	/* Bad frame */
+#define TSI108_RX_OVER	(1 << 9)	/* FIFO overrun occurred */
+#define TSI108_RX_TRUNC	(1 << 11)	/* Packet truncated due to excess length */
+#define TSI108_RX_CRC	(1 << 12)	/* Packet had a CRC error */
+#define TSI108_RX_INT	(1 << 13)	/* Generate an IRQ after frag. processed */
+#define TSI108_RX_OWN	(1 << 15)	/* Set if the device owns the descriptor */
+
+#define TSI108_RX_SKB_SIZE 1536		/* The RX skb length */
+
+typedef struct {
+	u32 buf0;		/* Base address of buffer */
+	u32 buf1;		/* Base address of buffer */
+	u32 next0;		/* Address of next descriptor, if any */
+	u32 next1;		/* Address of next descriptor, if any */
+	u16 vlan;		/* VLAN of received packet, first frag only */
+	u16 len;		/* Length of received fragment in bytes */
+	u16 blen;		/* Length of buffer in bytes */
+	u16 misc;		/* See TSI108_RX_* above */
+	u32 reserved0;		/* reserved0 and reserved1 are added to make the desc */
+	u32 reserved1;		/* 32-byte aligned */
+} __attribute__ ((aligned(32))) rx_desc;
+
+#endif				/* __TSI108_ETH_H */
-- 
1.4.0





^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2006-10-31  2:25 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <A0CDBA58F226D911B202000BDBAD46730A1B1410@zch01exm23.fsl.freescale.net>
     [not found] ` <1157962200.10526.10.camel@localhost.localdomain>
2006-09-12  8:55   ` [patch 0/3] Add tsi108 On chip Ethernet device driver support Zang Roy-r61911
2006-09-21  4:04     ` [patch 0/3 v2] " Zang Roy-r61911
2006-09-12  8:55   ` [patch 1/3] Add tsi108 on Chip " Zang Roy-r61911
2006-09-21  4:04     ` [patch 1/3 v2] Add tsi108 On " Zang Roy-r61911
2006-09-12  8:55   ` [patch 3/3] " Zang Roy-r61911
2006-09-12 10:07     ` Arjan van de Ven
2006-09-19  7:39       ` Zang Roy-r61911
2006-09-20  8:45         ` Arjan van de Ven
2006-09-12 14:33     ` Roland Dreier
2006-09-12 14:43       ` Jeff Garzik
2006-09-14  2:51         ` Zang Roy-r61911
     [not found]         ` <1158749825.7973.9.camel@localhost.localdomain>
2006-09-21  4:26           ` Jeff Garzik
2006-09-21  5:46             ` Zang Roy-r61911
2006-09-21  6:30               ` Jeff Garzik
2006-09-21  4:46           ` Jeff Garzik
2006-09-29  7:36             ` Zang Roy-r61911
2006-09-29  7:47               ` Jeff Garzik
2006-10-17  7:18             ` Zang Roy-r61911
2006-10-30  5:19               ` Add tsi108/9 " Zang Roy-r61911
2006-10-31  2:26                 ` [PATCH] " Zang Roy-r61911
2006-10-23  2:09             ` [patch 3/3] Add tsi108 " Zang Roy-r61911
2006-10-26  2:50               ` Zang Roy-r61911
2006-09-21  4:05     ` [patch 3/3 v2] " Zang Roy-r61911

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).