patch-2.2.13 linux/drivers/net/sunqe.c

diff -u --recursive --new-file v2.2.12/linux/drivers/net/sunqe.c linux/drivers/net/sunqe.c
@@ -3,11 +3,11 @@
  *          controller out there can be most efficiently programmed
  *          if you make it look like a LANCE.
  *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
  */
 
 static char *version =
-        "sunqe.c:v1.1 8/Nov/96 David S. Miller (davem@caipfs.rutgers.edu)\n";
+        "sunqe.c:v2.0 9/9/99 David S. Miller (davem@redhat.com)\n";
 
 #include <linux/module.h>
 
@@ -49,6 +49,8 @@
 static struct sunqec *root_qec_dev = NULL;
 #endif
 
+static void qe_set_multicast(struct device *dev);
+
 #define QEC_RESET_TRIES 200
 
 static inline int qec_global_reset(struct qe_globreg *gregs)
@@ -109,82 +111,21 @@
 	return 0;
 }
 
-static inline void qe_clean_rings(struct sunqe *qep)
-{
-	int i;
-
-	for(i = 0; i < RX_RING_SIZE; i++) {
-		if(qep->rx_skbs[i] != NULL) {
-			dev_kfree_skb(qep->rx_skbs[i]);
-			qep->rx_skbs[i] = NULL;
-		}
-	}
-
-	for(i = 0; i < TX_RING_SIZE; i++) {
-		if(qep->tx_skbs[i] != NULL) {
-			dev_kfree_skb(qep->tx_skbs[i]);
-			qep->tx_skbs[i] = NULL;
-		}
-	}
-}
-
-static void qe_init_rings(struct sunqe *qep, int from_irq)
+static void qe_init_rings(struct sunqe *qep)
 {
 	struct qe_init_block *qb = qep->qe_block;
-	struct device *dev = qep->dev;
-	int i, gfp_flags = GFP_KERNEL;
-
-	if(from_irq || in_interrupt())
-		gfp_flags = GFP_ATOMIC;
-
-	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
-
-	qe_clean_rings(qep);
-
-	for(i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb;
-
-		skb = qe_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags | GFP_DMA);
-		if(!skb)
-			continue;
-
-		qep->rx_skbs[i] = skb;
-		skb->dev = dev;
-
-		skb_put(skb, ETH_FRAME_LEN);
-		skb_reserve(skb, 34);
-
-		qb->qe_rxd[i].rx_addr = sbus_dvma_addr(skb->data);
-		qb->qe_rxd[i].rx_flags =
-			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
-	}
-
-	for(i = 0; i < TX_RING_SIZE; i++)
-		qb->qe_txd[i].tx_flags = qb->qe_txd[i].tx_addr = 0;
-}
-
-static void sun4c_qe_init_rings(struct sunqe *qep)
-{
-	struct qe_init_block *qb = qep->qe_block;
-	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
-	__u32 qbufs_dvma = qep->s4c_buf_dvma;
+	struct sunqe_buffers *qbufs = qep->buffers;
+	__u32 qbufs_dvma = qep->buffers_dvma;
 	int i;
 
 	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
-
+	memset(qb, 0, sizeof(struct qe_init_block));
 	memset(qbufs, 0, sizeof(struct sunqe_buffers));
-
-	for(i = 0; i < RX_RING_SIZE; i++)
-		qb->qe_rxd[i].rx_flags = qb->qe_rxd[i].rx_addr = 0;
-
-	for(i = 0; i < SUN4C_RX_RING_SIZE; i++) {
+	for(i = 0; i < RX_RING_SIZE; i++) {
 		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
 		qb->qe_rxd[i].rx_flags =
-			(RXD_OWN | ((SUN4C_RX_BUFF_SIZE) & RXD_LENGTH));
+			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
 	}
-
-	for(i = 0; i < TX_RING_SIZE; i++)
-		qb->qe_txd[i].tx_flags = qb->qe_txd[i].tx_addr = 0;
 }
 
 static int qe_init(struct sunqe *qep, int from_irq)
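
The hunk above folds the driver's two ring-setup paths (skb-per-slot for sun4m and later, fixed buffers for sun4c) into a single qe_init_rings() that points every RX descriptor at a slot in one statically allocated DVMA block. A minimal sketch of that pattern, with stand-in names and types (init_rx_ring is hypothetical; the real struct qe_rxd, RXD_OWN, and buffer sizes live in sunqe.h):

    struct rxd { unsigned int addr, flags; };

    static void init_rx_ring(struct rxd *ring, unsigned int bufs_dvma,
                             int nslots, int bufsz)
    {
            int i;

            for (i = 0; i < nslots; i++) {
                    /* Each slot permanently owns one DMA buffer... */
                    ring[i].addr = bufs_dvma + (i * bufsz);
                    /* ...and is handed to the chip (OWN bit) at full size. */
                    ring[i].flags = 0x80000000u | (unsigned int)bufsz;
            }
    }

Because the buffers never move, the descriptor addresses are computed once and simply recycled after each packet.
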
@@ -205,9 +146,10 @@
 	cregs->rxds = qep->qblock_dvma + qib_offset(qe_rxd, 0);
 	cregs->txds = qep->qblock_dvma + qib_offset(qe_txd, 0);
 
-	/* Enable the various irq's. */
+	/* Enable/mask the various irq's. */
 	cregs->rimask = 0;
-	cregs->timask = 0;
+	cregs->timask = 1;
+
 	cregs->qmask = 0;
 	cregs->mmask = CREG_MMASK_RXCOLL;
 
@@ -222,6 +164,7 @@
 	cregs->pipg = 0;
 
 	/* Now dork with the AMD MACE. */
+	mregs->phyconfig = MREGS_PHYCONFIG_AUTO;
 	mregs->txfcntl = MREGS_TXFCNTL_AUTOPAD; /* Save us some tx work. */
 	mregs->rxfcntl = 0;
 
@@ -240,6 +183,8 @@
 
 	/* Tell MACE we are changing the ether address. */
 	mregs->iaconfig = (MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET);
+	while ((mregs->iaconfig & MREGS_IACONFIG_ACHNGE) != 0)
+		barrier();
 	mregs->ethaddr = e[0];
 	mregs->ethaddr = e[1];
 	mregs->ethaddr = e[2];
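
The busy-waits added here (and the matching ones in qe_set_multicast further down) spin until the MACE drops MREGS_IACONFIG_ACHNGE, because writing the station address or the filter before the chip has latched the change request can lose the write. A bounded variant of the same poll, as a sketch (wait_bit_clear is a hypothetical helper; barrier() is the kernel's compiler barrier, as used in the patch):

    /* Returns 0 once `bit` clears in *reg, -1 if the chip never responds. */
    static int wait_bit_clear(volatile unsigned char *reg,
                              unsigned char bit, int max_spins)
    {
            while (max_spins-- > 0) {
                    if ((*reg & bit) == 0)
                            return 0;       /* change request latched */
                    barrier();              /* keep the compiler honest */
            }
            return -1;
    }
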
@@ -249,28 +194,38 @@
 
 	/* Clear out the address filter. */
 	mregs->iaconfig = (MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET);
-	for(i = 0; i < 8; i++) mregs->filter = 0;
+	while ((mregs->iaconfig & MREGS_IACONFIG_ACHNGE) != 0)
+		barrier();
+	for(i = 0; i < 8; i++)
+		mregs->filter = 0;
 
 	/* Address changes are now complete. */
 	mregs->iaconfig = 0;
 
-	if(sparc_cpu_model == sun4c)
-		sun4c_qe_init_rings(qep);
-	else
-		qe_init_rings(qep, from_irq);
+	qe_init_rings(qep);
 
 	/* Wait a little bit for the link to come up... */
+	mdelay(5);
 	if(!(mregs->phyconfig & MREGS_PHYCONFIG_LTESTDIS)) {
-		mdelay(5);
-		if(!(mregs->phyconfig & MREGS_PHYCONFIG_LSTAT))
+		int tries = 50;
+
+		while (tries--) {
+			mdelay(5);
+			barrier();
+			if((mregs->phyconfig & MREGS_PHYCONFIG_LSTAT) != 0)
+				break;
+		}
+		if (tries == 0)
 			printk("%s: Warning, link state is down.\n", qep->dev->name);
 	}
 
 	/* Missed packet counter is cleared on a read. */
 	garbage = mregs->mpcnt;
 
-	/* Turn on the MACE receiver and transmitter. */
-	mregs->mconfig = (MREGS_MCONFIG_TXENAB | MREGS_MCONFIG_RXENAB);
+	/* Reload multicast information, this will enable the receiver
+	 * and transmitter.  But set the base mconfig value right now.
+	 */
+	qe_set_multicast(qep->dev);
 
 	/* QEC should now start to show interrupts. */
 	return 0;
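
One quirk in the new link poll above: `while (tries--)` leaves tries at -1 when all 50 passes are exhausted, so the `if (tries == 0)` warning cannot fire on a genuine timeout; it fires only if the link happened to come up on the very last pass. The intended logic is presumably closer to this sketch (qe_wait_link_up is a hypothetical helper; mdelay() and MREGS_PHYCONFIG_LSTAT are as used above):

    static int qe_wait_link_up(volatile unsigned char *phyconfig)
    {
            int tries = 50;

            while (tries-- > 0) {
                    mdelay(5);              /* give the link time to settle */
                    if (*phyconfig & MREGS_PHYCONFIG_LSTAT)
                            return 1;       /* link is up */
            }
            return 0;                       /* genuinely timed out */
    }

A caller would print the "link state is down" warning only when this returns 0.
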
@@ -429,159 +384,27 @@
 	return mace_hwbug_workaround;
 }
 
-/* Per-QE transmit complete interrupt service routine. */
-static inline void qe_tx(struct sunqe *qep)
-{
-	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
-	struct qe_txd *this;
-	int elem = qep->tx_old;
-
-	while(elem != qep->tx_new) {
-		struct sk_buff *skb;
-
-		this = &txbase[elem];
-		if(this->tx_flags & TXD_OWN)
-			break;
-		skb = qep->tx_skbs[elem];
-		qep->tx_skbs[elem] = NULL;
-		qep->net_stats.tx_bytes+=skb->len;
-		dev_kfree_skb(skb);
-
-		qep->net_stats.tx_packets++;
-		elem = NEXT_TX(elem);
-	}
-	qep->tx_old = elem;
-}
-
-static inline void sun4c_qe_tx(struct sunqe *qep)
-{
-	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
-	struct qe_txd *this;
-	int elem = qep->tx_old;
-
-	while(elem != qep->tx_new) {
-		this = &txbase[elem];
-		if(this->tx_flags & TXD_OWN)
-			break;
-		qep->net_stats.tx_packets++;
-		elem = NEXT_TX(elem);
-	}
-	qep->tx_old = elem;
-}
-
 /* Per-QE receive interrupt service routine.  Just like on the happy meal
  * we receive directly into skb's with a small packet copy water mark.
  */
-static inline void qe_rx(struct sunqe *qep)
+static void qe_rx(struct sunqe *qep)
 {
 	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
 	struct qe_rxd *this;
+	struct sunqe_buffers *qbufs = qep->buffers;
+	__u32 qbufs_dvma = qep->buffers_dvma;
 	int elem = qep->rx_new, drops = 0;
+	unsigned int flags;
 
 	this = &rxbase[elem];
-	while(!(this->rx_flags & RXD_OWN)) {
-		struct sk_buff *skb;
-		unsigned int flags = this->rx_flags;
-		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */
-
-		/* Check for errors. */
-		if(len < ETH_ZLEN) {
-			qep->net_stats.rx_errors++;
-			qep->net_stats.rx_length_errors++;
-
-	drop_it:
-			/* Return it to the QE. */
-			qep->net_stats.rx_dropped++;
-			this->rx_addr = sbus_dvma_addr(qep->rx_skbs[elem]->data);
-			this->rx_flags =
-				(RXD_OWN | (RX_BUF_ALLOC_SIZE & RXD_LENGTH));
-			goto next;
-		}
-		skb = qep->rx_skbs[elem];
-#ifdef NEED_DMA_SYNCHRONIZATION
-#ifdef __sparc_v9__
-		if ((unsigned long) (skb->data + skb->len) >= MAX_DMA_ADDRESS) {
-			printk("sunqe: Bogus DMA buffer address "
-			       "[%016lx]\n", ((unsigned long) skb->data));
-			panic("DMA address too large, tell DaveM");
-		}
-#endif
-		mmu_sync_dma(sbus_dvma_addr(skb->data),
-			     skb->len, qep->qe_sbusdev->my_bus);
-#endif
-		if(len > RX_COPY_THRESHOLD) {
-			struct sk_buff *new_skb;
-
-			/* Now refill the entry, if we can. */
-			new_skb = qe_alloc_skb(RX_BUF_ALLOC_SIZE, (GFP_DMA|GFP_ATOMIC));
-			if(!new_skb) {
-				drops++;
-				goto drop_it;
-			}
-
-			qep->rx_skbs[elem] = new_skb;
-			new_skb->dev = qep->dev;
-			skb_put(new_skb, ETH_FRAME_LEN);
-			skb_reserve(new_skb, 34);
-
-			rxbase[elem].rx_addr = sbus_dvma_addr(new_skb->data);
-			rxbase[elem].rx_flags =
-				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
-
-			/* Trim the original skb for the netif. */
-			skb_trim(skb, len);
-		} else {
-			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
-
-			if(!copy_skb) {
-				drops++;
-				goto drop_it;
-			}
-
-			copy_skb->dev = qep->dev;
-			skb_reserve(copy_skb, 2);
-			skb_put(copy_skb, len);
-			eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0);
-
-			/* Reuse original ring buffer. */
-			rxbase[elem].rx_addr = sbus_dvma_addr(skb->data);
-			rxbase[elem].rx_flags =
-				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
-
-			skb = copy_skb;
-		}
-
-		/* No checksums are done by this card ;-( */
-		skb->protocol = eth_type_trans(skb, qep->dev);
-		netif_rx(skb);
-		qep->net_stats.rx_packets++;
-	next:
-		elem = NEXT_RX(elem);
-		this = &rxbase[elem];
-	}
-	qep->rx_new = elem;
-	if(drops)
-		printk("%s: Memory squeeze, deferring packet.\n", qep->dev->name);
-}
-
-static inline void sun4c_qe_rx(struct sunqe *qep)
-{
-	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
-	struct qe_rxd *this;
-	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
-	__u32 qbufs_dvma = qep->s4c_buf_dvma;
-	int elem = qep->rx_new, drops = 0;
-
-	this = &rxbase[elem];
-	while(!(this->rx_flags & RXD_OWN)) {
+	while(!((flags = this->rx_flags) & RXD_OWN)) {
 		struct sk_buff *skb;
 		unsigned char *this_qbuf =
-			qbufs->rx_buf[elem & (SUN4C_RX_RING_SIZE - 1)];
+			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
 		__u32 this_qbuf_dvma = qbufs_dvma +
-			qebuf_offset(rx_buf, (elem & (SUN4C_RX_RING_SIZE - 1)));
+			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
 		struct qe_rxd *end_rxd =
-			&rxbase[(elem+SUN4C_RX_RING_SIZE)&(RX_RING_SIZE-1)];
-		unsigned int flags = this->rx_flags;
+			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
 		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */
 
 		/* Check for errors. */
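
With the sun4c path promoted to the only path, every received frame is now copied out of its fixed DVMA buffer into a freshly allocated skb; receive buffers are never remapped or replaced. The heart of the loop, condensed from the surviving context (error and allocation-failure handling omitted):

    skb = dev_alloc_skb(len + 2);
    if (skb != NULL) {
            skb->dev = qep->dev;
            skb_reserve(skb, 2);                      /* align IP header */
            skb_put(skb, len);
            eth_copy_and_sum(skb, this_qbuf, len, 0); /* copy out of DMA area */
            skb->protocol = eth_type_trans(skb, qep->dev);
            netif_rx(skb);
    }
    /* Either way, the fixed buffer goes straight back to the chip. */
    end_rxd->rx_addr = this_qbuf_dvma;
    end_rxd->rx_flags = (RXD_OWN | (RXD_PKT_SZ & RXD_LENGTH));
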
@@ -603,10 +426,11 @@
 				skb->protocol = eth_type_trans(skb, qep->dev);
 				netif_rx(skb);
 				qep->net_stats.rx_packets++;
+				qep->net_stats.rx_bytes+=len;
 			}
 		}
 		end_rxd->rx_addr = this_qbuf_dvma;
-		end_rxd->rx_flags = (RXD_OWN | (SUN4C_RX_BUFF_SIZE & RXD_LENGTH));
+		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
 		
 		elem = NEXT_RX(elem);
 		this = &rxbase[elem];
@@ -643,55 +467,6 @@
 
 			if(qe_status & CREG_STAT_RXIRQ)
 				qe_rx(qep);
-
-			if(qe_status & CREG_STAT_TXIRQ)
-				qe_tx(qep);
-
-			if(dev->tbusy && (TX_BUFFS_AVAIL(qep) >= 0)) {
-				dev->tbusy = 0;
-				mark_bh(NET_BH);
-			}
-
-	next:
-			dev->interrupt = 0;
-		}
-		qec_status >>= 4;
-		channel++;
-	}
-}
-
-static void sun4c_qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	struct sunqec *qecp = (struct sunqec *) dev_id;
-	unsigned int qec_status;
-	int channel = 0;
-
-	/* Latch the status now. */
-	qec_status = qecp->gregs->stat;
-	while(channel < 4) {
-		if(qec_status & 0xf) {
-			struct sunqe *qep = qecp->qes[channel];
-			struct device *dev = qep->dev;
-			unsigned int qe_status;
-
-			dev->interrupt = 1;
-
-			qe_status = qep->qcregs->stat;
-			if(qe_status & CREG_STAT_ERRORS)
-				if(qe_is_bolixed(qep, qe_status))
-					goto next;
-
-			if(qe_status & CREG_STAT_RXIRQ)
-				sun4c_qe_rx(qep);
-
-			if(qe_status & CREG_STAT_TXIRQ)
-				sun4c_qe_tx(qep);
-
-			if(dev->tbusy && (SUN4C_TX_BUFFS_AVAIL(qep) >= 0)) {
-				dev->tbusy = 0;
-				mark_bh(NET_BH);
-			}
-
 	next:
 			dev->interrupt = 0;
 		}
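
For readers tracking the surviving handler: the QEC global status register packs one status nibble per QE channel, which is why the loop masks with 0xf and shifts right by four on each pass. In outline (identifiers as in the driver):

    unsigned int qec_status = qecp->gregs->stat;    /* latch status once */
    int channel;

    for (channel = 0; channel < 4; channel++) {
            if (qec_status & 0xf) {
                    /* bits 0-3 describe this channel: service it */
            }
            qec_status >>= 4;
    }
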
@@ -705,10 +480,13 @@
 	struct sunqe *qep = (struct sunqe *) dev->priv;
 	int res;
 
+	qep->mconfig = (MREGS_MCONFIG_TXENAB |
+			MREGS_MCONFIG_RXENAB |
+			MREGS_MCONFIG_MBAENAB);
 	res = qe_init(qep, 0);
-	if(!res) {
+	if(!res)
 		MOD_INC_USE_COUNT;
-	}
+
 	return res;
 }
 
@@ -717,88 +495,66 @@
 	struct sunqe *qep = (struct sunqe *) dev->priv;
 
 	qe_stop(qep);
-	qe_clean_rings(qep);
 	MOD_DEC_USE_COUNT;
 	return 0;
 }
 
-/* Get a packet queued to go onto the wire. */
-static int qe_start_xmit(struct sk_buff *skb, struct device *dev)
+/* Reclaim TX'd frames from the ring. */
+static void qe_tx_reclaim(struct sunqe *qep)
 {
-	struct sunqe *qep = (struct sunqe *) dev->priv;
-	int len, entry;
-
-	if(dev->tbusy)
-		return 1;
-
-	if(test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
-		printk("%s: Transmitter access conflict.\n", dev->name);
-		return 1;
-	}
+	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
+	struct device *dev = qep->dev;
+	int elem = qep->tx_old;
 
-	if(!TX_BUFFS_AVAIL(qep))
-		return 1;
+	while(elem != qep->tx_new) {
+		unsigned int flags = txbase[elem].tx_flags;
 
-#ifdef NEED_DMA_SYNCHRONIZATION
-#ifdef __sparc_v9__
-	if ((unsigned long) (skb->data + skb->len) >= MAX_DMA_ADDRESS) {
-		struct sk_buff *new_skb = skb_copy(skb, GFP_DMA | GFP_ATOMIC);
-		if(!new_skb)
-			return 1;
-		dev_kfree_skb(skb);
-		skb = new_skb;
+		if (flags & TXD_OWN)
+			break;
+		qep->net_stats.tx_packets++;
+		qep->net_stats.tx_bytes+=(flags & TXD_LENGTH);
+		elem = NEXT_TX(elem);
 	}
-#endif
-	mmu_sync_dma(sbus_dvma_addr(skb->data),
-		     skb->len, qep->qe_sbusdev->my_bus);
-#endif
-	len = skb->len;
-	entry = qep->tx_new;
-
-	/* Avoid a race... */
-	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
-
-	qep->tx_skbs[entry] = skb;
-
-	qep->qe_block->qe_txd[entry].tx_addr = sbus_dvma_addr(skb->data);
-	qep->qe_block->qe_txd[entry].tx_flags =
-		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
-	qep->tx_new = NEXT_TX(entry);
-
-	/* Get it going. */
-	qep->qcregs->ctrl = CREG_CTRL_TWAKEUP;
+	qep->tx_old = elem;
 
-	if(TX_BUFFS_AVAIL(qep))
+	if(dev->tbusy && (TX_BUFFS_AVAIL(qep) > 0)) {
 		dev->tbusy = 0;
-
-	return 0;
+		mark_bh(NET_BH);
+	}
 }
 
-static int sun4c_qe_start_xmit(struct sk_buff *skb, struct device *dev)
+/* Get a packet queued to go onto the wire. */
+static int qe_start_xmit(struct sk_buff *skb, struct device *dev)
 {
 	struct sunqe *qep = (struct sunqe *) dev->priv;
-	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
-	__u32 txbuf_dvma, qbufs_dvma = qep->s4c_buf_dvma;
+	struct sunqe_buffers *qbufs = qep->buffers;
+	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
 	unsigned char *txbuf;
 	int len, entry;
 
-	if(dev->tbusy)
-		return 1;
+	qe_tx_reclaim(qep);
 
 	if(test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
-		printk("%s: Transmitter access conflict.\n", dev->name);
+		long tickssofar = jiffies - dev->trans_start;
+
+		if (tickssofar >= 40) {
+			printk("%s: transmit timed out, resetting\n", dev->name);
+			qe_init(qep, 1);
+			dev->tbusy = 0;
+			dev->trans_start = jiffies;
+		}
 		return 1;
 	}
 
-	if(!SUN4C_TX_BUFFS_AVAIL(qep))
+	if(!TX_BUFFS_AVAIL(qep))
 		return 1;
 
 	len = skb->len;
 	entry = qep->tx_new;
 
-	txbuf = &qbufs->tx_buf[entry & (SUN4C_TX_RING_SIZE - 1)][0];
+	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
 	txbuf_dvma = qbufs_dvma +
-		qebuf_offset(tx_buf, (entry & (SUN4C_TX_RING_SIZE - 1)));
+		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
 
 	/* Avoid a race... */
 	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
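
Two mechanics are worth noting in the rewritten transmit path above. First, because this patch masks TX-completion interrupts (timask = 1 in qe_init), finished descriptors are reclaimed lazily by qe_tx_reclaim() at the top of qe_start_xmit() rather than in the interrupt handler. Second, the tbusy branch now doubles as a software watchdog: if the transmitter has been stuck for 40 jiffies (about 0.4 seconds at the sparc HZ of 100), the chip is re-initialized. The watchdog, condensed from the hunk (printk omitted):

    if (test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
            long tickssofar = jiffies - dev->trans_start;

            if (tickssofar >= 40) {         /* ~0.4s at HZ=100 */
                    qe_init(qep, 1);        /* full chip re-init */
                    dev->tbusy = 0;
                    dev->trans_start = jiffies;
            }
            return 1;                       /* net core will retry later */
    }
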
@@ -811,13 +567,12 @@
 	qep->tx_new = NEXT_TX(entry);
 
 	/* Get it going. */
+	dev->trans_start = jiffies;
 	qep->qcregs->ctrl = CREG_CTRL_TWAKEUP;
 
-	qep->net_stats.tx_bytes+=skb->len;
-	
 	dev_kfree_skb(skb);
 
-	if(SUN4C_TX_BUFFS_AVAIL(qep))
+	if(TX_BUFFS_AVAIL(qep))
 		dev->tbusy = 0;
 
 	return 0;
@@ -837,7 +592,7 @@
 {
 	struct sunqe *qep = (struct sunqe *) dev->priv;
 	struct dev_mc_list *dmi = dev->mc_list;
-	unsigned char new_mconfig = (MREGS_MCONFIG_TXENAB | MREGS_MCONFIG_RXENAB);
+	unsigned char new_mconfig = qep->mconfig;
 	char *addrs;
 	int i, j, bit, byte;
 	u32 crc, poly = CRC_POLYNOMIAL_LE;
@@ -847,6 +602,8 @@
 
 	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
 		qep->mregs->iaconfig = MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET;
+		while ((qep->mregs->iaconfig & MREGS_IACONFIG_ACHNGE) != 0)
+			barrier();
 		for(i = 0; i < 8; i++)
 			qep->mregs->filter = 0xff;
 		qep->mregs->iaconfig = 0;
@@ -882,6 +639,8 @@
 		}
 		/* Program the qe with the new filter value. */
 		qep->mregs->iaconfig = MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET;
+		while ((qep->mregs->iaconfig & MREGS_IACONFIG_ACHNGE) != 0)
+			barrier();
 		for(i = 0; i < 8; i++)
 			qep->mregs->filter = *hbytes++;
 		qep->mregs->iaconfig = 0;
@@ -893,7 +652,8 @@
 	 * refuses to listen to anything on the network.  Sheesh, took
 	 * me a day or two to find this bug.
 	 */
-	qep->mregs->mconfig = new_mconfig;
+	qep->mconfig = new_mconfig;
+	qep->mregs->mconfig = qep->mconfig;
 
 	/* Let us get going again. */
 	dev->tbusy = 0;
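
The multicast rework caches the baseline MACE config bits (TX/RX enable plus MBAENAB, set in qe_open) in qep->mconfig, so qe_init() can finish by simply calling qe_set_multicast() to reload the filter and switch the transceiver on. The hash computation elided from this hunk is the usual LANCE-style logical-address filter, roughly as follows (all identifiers are the function's own, with hash_table the 4 x 16-bit filter image and poly = CRC_POLYNOMIAL_LE, 0xedb88320):

    crc = 0xffffffffU;
    for (byte = 0; byte < 6; byte++) {
            for (bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) {
                    int test = (bit ^ crc) & 0x01;

                    crc >>= 1;
                    if (test)
                            crc = crc ^ poly;
            }
    }
    crc >>= 26;                             /* top 6 CRC bits pick 1 of 64 */
    hash_table[crc >> 4] |= 1 << (crc & 0xf);
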
@@ -904,10 +664,16 @@
 {
 	unsigned char bsizes = qecp->qec_bursts;
 
-	if(bsizes & DMA_BURST32)
+#ifdef __sparc_v9__
+	if (bsizes & DMA_BURST64) {
+		qecp->gregs->ctrl = GLOB_CTRL_B64;
+	} else
+#endif
+	if(bsizes & DMA_BURST32) {
 		qecp->gregs->ctrl = GLOB_CTRL_B32;
-	else
+	} else {
 		qecp->gregs->ctrl = GLOB_CTRL_B16;
+	}
 
 	/* Packetsize only used in 100baseT BigMAC configurations,
 	 * set it to zero just to be on the safe side.
@@ -1001,28 +767,30 @@
 	num_qranges = (i / sizeof(struct linux_prom_ranges));
 
 	/* Now, apply all the ranges, QEC ranges then the SBUS ones for each QE. */
-	for(i = 0; i < 4; i++) {
-		for(j = 0; j < 2; j++) {
-			int k;
+	if (sdev->ranges_applied == 0) {
+		for(i = 0; i < 4; i++) {
+			for(j = 0; j < 2; j++) {
+				int k;
 
-			for(k = 0; k < num_qranges; k++)
-				if(qesdevs[i]->reg_addrs[j].which_io ==
-				   qranges[k].ot_child_space)
-					break;
-			if(k >= num_qranges)
-				printk("QuadEther: Aieee, bogus QEC range for "
-				       "space %08x\n",qesdevs[i]->reg_addrs[j].which_io);
-			qesdevs[i]->reg_addrs[j].which_io = qranges[k].ot_parent_space;
-			qesdevs[i]->reg_addrs[j].phys_addr += qranges[k].ot_parent_base;
-		}
+				for(k = 0; k < num_qranges; k++)
+					if(qesdevs[i]->reg_addrs[j].which_io ==
+					   qranges[k].ot_child_space)
+						break;
+				if(k >= num_qranges)
+					printk("QuadEther: Aieee, bogus QEC range for "
+					       "space %08x\n",qesdevs[i]->reg_addrs[j].which_io);
+				qesdevs[i]->reg_addrs[j].which_io = qranges[k].ot_parent_space;
+				qesdevs[i]->reg_addrs[j].phys_addr += qranges[k].ot_parent_base;
+			}
 
-		prom_apply_sbus_ranges(qesdevs[i]->my_bus, &qesdevs[i]->reg_addrs[0],
-				       2, qesdevs[i]);
+			prom_apply_sbus_ranges(qesdevs[i]->my_bus, &qesdevs[i]->reg_addrs[0],
+					       2, qesdevs[i]);
+		}
+		prom_apply_sbus_ranges(sdev->my_bus, &sdev->reg_addrs[0],
+				       sdev->num_registers, sdev);
 	}
 
 	/* Now map in the registers, QEC globals first. */
-	prom_apply_sbus_ranges(sdev->my_bus, &sdev->reg_addrs[0],
-			       sdev->num_registers, sdev);
 	qecp->gregs = sparc_alloc_io(sdev->reg_addrs[0].phys_addr, 0,
 				     sizeof(struct qe_globreg),
 				     "QEC Global Registers",
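
The restructuring above also wraps all PROM range application in an `if (sdev->ranges_applied == 0)` guard. Applying ranges rewrites each reg_addrs entry in place (both which_io and phys_addr), so translating an already-translated address would corrupt it. A sketch of the guard, assuming the SBUS layer sets ranges_applied for devices it has already translated during probing:

    /* Range application mutates reg_addrs[], so run it at most once. */
    if (sdev->ranges_applied == 0) {
            prom_apply_sbus_ranges(sdev->my_bus, &sdev->reg_addrs[0],
                                   sdev->num_registers, sdev);
    }
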
@@ -1093,13 +861,10 @@
 			sparc_dvma_malloc(PAGE_SIZE, "QE Init Block",
 					  &qeps[i]->qblock_dvma);
 
-		if(sparc_cpu_model == sun4c)
-			qeps[i]->sun4c_buffers = (struct sunqe_buffers *)
-				sparc_dvma_malloc(sizeof(struct sunqe_buffers),
-						  "QE RX/TX Buffers",
-						  &qeps[i]->s4c_buf_dvma);
-		else
-			qeps[i]->sun4c_buffers = 0;
+		qeps[i]->buffers = (struct sunqe_buffers *)
+			sparc_dvma_malloc(sizeof(struct sunqe_buffers),
+					  "QE RX/TX Buffers",
+					  &qeps[i]->buffers_dvma);
 
 		/* Stop this QE. */
 		qe_stop(qeps[i]);
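
With the sun4c special case gone, every QE channel now gets one contiguous DVMA block holding all of its TX and RX packet buffers, and the descriptors index into it via qebuf_offset(). That helper presumably reduces to an offsetof() into the shared block, along these lines (the actual layout, padding, and PKT_BUF_SZ are in sunqe.h):

    struct sunqe_buffers {
            u8      tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
            u8      rx_buf[RX_RING_SIZE][PKT_BUF_SZ];
    };

    #define qebuf_offset(mem, elem) \
            ((__u32)((unsigned long)(&(((struct sunqe_buffers *)0)->mem[elem][0]))))
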
@@ -1108,10 +873,7 @@
 	for(i = 0; i < 4; i++) {
 		qe_devs[i]->open = qe_open;
 		qe_devs[i]->stop = qe_close;
-		if(sparc_cpu_model == sun4c)
-			qe_devs[i]->hard_start_xmit = sun4c_qe_start_xmit;
-		else
-			qe_devs[i]->hard_start_xmit = qe_start_xmit;
+		qe_devs[i]->hard_start_xmit = qe_start_xmit;
 		qe_devs[i]->get_stats = qe_get_stats;
 		qe_devs[i]->set_multicast_list = qe_set_multicast;
 		qe_devs[i]->irq = sdev->irqs[0];
@@ -1119,25 +881,16 @@
 		ether_setup(qe_devs[i]);
 	}
 
-	/* QEC receives interrupts from each QE, then it send the actual
+	/* QEC receives interrupts from each QE, then it sends the actual
 	 * IRQ to the cpu itself.  Since QEC is the single point of
 	 * interrupt for all QE channels we register the IRQ handler
 	 * for it now.
 	 */
-	if(sparc_cpu_model == sun4c) {
-		if(request_irq(sdev->irqs[0], &sun4c_qec_interrupt,
-			       SA_SHIRQ, "QuadEther", (void *) qecp)) {
-			printk("QuadEther: Can't register QEC master irq handler.\n");
-			res = EAGAIN;
-			goto qec_free_devs;
-		}
-	} else {
-		if(request_irq(sdev->irqs[0], &qec_interrupt,
-			       SA_SHIRQ, "QuadEther", (void *) qecp)) {
-			printk("QuadEther: Can't register QEC master irq handler.\n");
-			res = EAGAIN;
-			goto qec_free_devs;
-		}
+	if(request_irq(sdev->irqs[0], &qec_interrupt,
+		       SA_SHIRQ, "QuadEther", (void *) qecp)) {
+		printk("QuadEther: Can't register QEC master irq handler.\n");
+		res = EAGAIN;
+		goto qec_free_devs;
 	}
 
 	/* Report the QE channels. */
@@ -1232,9 +985,12 @@
 		/* Release all four QE channels, then the QEC itself. */
 		for(i = 0; i < 4; i++) {
 			unregister_netdev(root_qec_dev->qes[i]->dev);
-			kfree(root_qec_dev->qes[i]);
+			sparc_free_io(root_qec_dev->qes[i]->qcregs, sizeof(struct qe_creg));
+			sparc_free_io(root_qec_dev->qes[i]->mregs, sizeof(struct qe_mregs));
+			kfree(root_qec_dev->qes[i]->dev);
 		}
 		free_irq(root_qec_dev->qec_sbus_dev->irqs[0], (void *)root_qec_dev);
+		sparc_free_io(root_qec_dev->gregs, sizeof(struct qe_globreg));
 		kfree(root_qec_dev);
 		root_qec_dev = next_qec;
 	}
