/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include <linux/sungem_phy.h>
#include "sungem.h"

/* Stripping FCS is causing problems, disabled for now */
#undef STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"David S. Miller <davem@redhat.com>"

static char version[] =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"

static const struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);

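/* Low-level MII access goes through the MIF "frame" register: we
 * compose a clause-22 management frame (start bits, opcode -- 2 for
 * read, 1 for write -- PHY address, register address, turnaround MSB)
 * and poll for the turnaround LSB the PHY drives back when it has
 * answered.  A timed-out read returns 0xffff, which looks just like
 * an absent PHY.
 */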
static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __sungem_phy_read(gp, mii_id, reg);
}

static inline u16 sungem_phy_read(struct gem *gp, int reg)
{
	return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
}

static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__sungem_phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
{
	__sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
}

static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
	(void)readl(gp->regs + GREG_IMASK); /* write posting */
}

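/* Turn on the chip's clock.  The cell enable is refcounted; on PowerMac
 * the GMAC cell is actually clock-gated through pmac_call_feature(),
 * elsewhere only the refcount is maintained.
 */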
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

static inline void gem_netif_stop(struct gem *gp)
{
	gp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&gp->napi);
	netif_tx_disable(gp->dev);
}

static inline void gem_netif_start(struct gem *gp)
{
	/* NOTE: unconditional netif_wake_queue is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots.
	 */
	netif_wake_queue(gp->dev);
	napi_enable(&gp->napi);
}

static void gem_schedule_reset(struct gem *gp)
{
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		netdev_err(dev, "PCS irq but no link status change???\n");
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
		else
			netdev_info(dev, "PCS AutoNEG complete\n");
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		netdev_info(dev, "PCS link is now up\n");
		netif_carrier_on(gp->dev);
	} else {
		netdev_info(dev, "PCS link is now down\n");
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		dev->stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		netdev_err(dev, "TX MAC max packet size error\n");
		dev->stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		dev->stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
		dev->stats.rx_over_errors++;
		dev->stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		dev->stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		dev->stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		dev->stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking.  It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		netdev_err(dev, "PCI error [%04x]", pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			pr_cont(" <No ACK64# during ABS64 cycle>");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			pr_cont(" <Delayed transaction timeout>");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			pr_cont(" <other>");
		pr_cont("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		netdev_err(dev, "PCI error\n");
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		u16 pci_cfg_stat;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(gp->pdev, PCI_STATUS,
				     &pci_cfg_stat);
		netdev_err(dev, "Read PCI cfg space status [%04x]\n",
			   pci_cfg_stat);
		if (pci_cfg_stat & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");

		/* Write the error bits back to clear them. */
		pci_cfg_stat &= (PCI_STATUS_PARITY |
				 PCI_STATUS_SIG_TARGET_ABORT |
				 PCI_STATUS_REC_TARGET_ABORT |
				 PCI_STATUS_REC_MASTER_ABORT |
				 PCI_STATUS_SIG_SYSTEM_ERROR |
				 PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(gp->pdev,
				      PCI_STATUS, pci_cfg_stat);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		dev->stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		dev->stats.rx_errors++;

		return 1;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			return 1;
	}

	return 0;
}

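/* TX completion: walk the ring from tx_old to the chip's completion
 * index (the TXNR field of the status we were handed), unmapping each
 * fragment and freeing the skb.  A multi-fragment skb is only retired
 * once all of its descriptors are past the completion point, which is
 * what the "incomplete" walk below checks for.
 */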
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		dev->stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
			entry = NEXT_TX(entry);
		}

		dev->stats.tx_packets++;
		dev_consume_skb_any(skb);
	}
	gp->tx_old = entry;

	/* Need to make the tx_old update visible to gem_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that gem_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(dev) &&
		    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
		__netif_tx_unlock(txq);
	}
}

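/* Give RX descriptors back to the chip in aligned clusters of four:
 * each full cluster is re-marked as chip-owned (RXDCTRL_FRESH) and
 * RXDMA_KICK is written once at the end with the last refreshed index.
 */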
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}

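/* RX skbs are over-allocated by 64 bytes so that the data pointer can
 * be pushed up to the next 64-byte boundary -- presumably to keep the
 * RX DMA engine aligned on a cache-line-sized boundary.
 */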
#define ALIGNED_RX_SKB_ADDR(addr) \
        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
						gfp_t gfp_flags)
{
	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);

	if (likely(skb)) {
		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
		skb_reserve(skb, offset);
	}
	return skb;
}

static int gem_rx(struct gem *gp, int work_to_do)
{
	struct net_device *dev = gp->dev;
	int entry, drops, work_done = 0;
	u32 done;
	__sum16 csum;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address.  We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			dev->stats.rx_errors++;
			if (len < ETH_ZLEN)
				dev->stats.rx_length_errors++;
			if (status & RXDCTRL_BAD)
				dev->stats.rx_crc_errors++;

			/* We'll just return it to GEM. */
		drop_it:
			dev->stats.rx_dropped++;
			goto next;
		}

		dma_addr = le64_to_cpu(rxd->buffer);
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			gp->rx_skbs[entry] = new_skb;
			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
							       virt_to_page(new_skb->data),
							       offset_in_page(new_skb->data),
							       RX_BUF_ALLOC_SIZE(gp),
							       PCI_DMA_FROMDEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
		skb->csum = csum_unfold(csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->protocol = eth_type_trans(skb, gp->dev);

		napi_gro_receive(&gp->napi, skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

	next:
		entry = NEXT_RX(entry);
	}

	gem_post_rxds(gp, entry);

	gp->rx_new = entry;

	if (drops)
		netdev_info(gp->dev, "Memory squeeze, deferring packet\n");

	return work_done;
}

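/* NAPI poll loop: abnormal (error) status is handled first under the
 * tx queue lock, then TX completion, then RX up to the remaining
 * budget.  We re-read GREG_STAT and loop until no NAPI-relevant bits
 * are left, and only then complete NAPI and re-enable interrupts.
 */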
static int gem_poll(struct napi_struct *napi, int budget)
{
	struct gem *gp = container_of(napi, struct gem, napi);
	struct net_device *dev = gp->dev;
	int work_done;

	work_done = 0;
	do {
		/* Handle anomalies */
		if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
			int reset;

			/* We run the abnormal interrupt handling code with
			 * the Tx lock. It only resets the Rx portion of the
			 * chip, but we need to guard it against DMA being
			 * restarted by the link poll timer
			 */
			__netif_tx_lock(txq, smp_processor_id());
			reset = gem_abnormal_irq(dev, gp, gp->status);
			__netif_tx_unlock(txq);
			if (reset) {
				gem_schedule_reset(gp);
				napi_complete(napi);
				return work_done;
			}
		}

		/* Run TX completion thread */
		gem_tx(dev, gp, gp->status);

		/* Run RX thread. We don't use any locking here,
		 * code willing to do bad things - like cleaning the
		 * rx ring - must call napi_disable(), which
		 * schedule_timeout()'s if polling is already disabled.
		 */
		work_done += gem_rx(gp, budget - work_done);

		if (work_done >= budget)
			return work_done;

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

	napi_complete(napi);
	gem_enable_ints(gp);

	return work_done;
}

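/* Interrupt handler: all the real work happens in gem_poll().  Here we
 * just latch the status, mask further interrupts and schedule NAPI; if
 * the status reads back as zero the interrupt wasn't ours and the
 * prepared NAPI schedule is undone with napi_enable().
 */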
static irqreturn_t gem_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gem *gp = netdev_priv(dev);

	if (napi_schedule_prep(&gp->napi)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

		if (unlikely(gem_status == 0)) {
			napi_enable(&gp->napi);
			return IRQ_NONE;
		}
		if (netif_msg_intr(gp))
			printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
			       gp->dev->name, gem_status);

		gp->status = gem_status;
		gem_disable_ints(gp);
		__napi_schedule(&gp->napi);
	}

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void gem_poll_controller(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	disable_irq(gp->pdev->irq);
	gem_interrupt(gp->pdev->irq, dev);
	enable_irq(gp->pdev->irq);
}
#endif

static void gem_tx_timeout(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + TXDMA_CFG),
		   readl(gp->regs + MAC_TXSTAT),
		   readl(gp->regs + MAC_TXCFG));
	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
		   readl(gp->regs + RXDMA_CFG),
		   readl(gp->regs + MAC_RXSTAT),
		   readl(gp->regs + MAC_RXCFG));

	gem_schedule_reset(gp);
}

static __inline__ int gem_intme(int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_RING_SIZE>>1)-1)))
		return 1;

	return 0;
}

static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int entry;
	u64 ctrl;

	ctrl = 0;
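	/* Hardware checksum offload: the descriptor control word carries
	 * the checksum start offset at bit 15 and the stuff (insertion)
	 * offset at bit 21, with TXDCTRL_CENAB turning the engine on.
	 */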
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = (TXDCTRL_CENAB |
			(csum_start_off << 15) |
			(csum_stuff_off << 21));
	}

	if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		/* This is a hard error, log it. */
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = gp->tx_new;
	gp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		struct gem_txd *txd = &gp->init_block->txd[entry];
		dma_addr_t mapping;
		u32 len;

		len = skb->len;
		mapping = pci_map_page(gp->pdev,
				       virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       len, PCI_DMA_TODEVICE);
		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
		if (gem_intme(entry))
			ctrl |= TXDCTRL_INTME;
		txd->buffer = cpu_to_le64(mapping);
		wmb();
		txd->control_word = cpu_to_le64(ctrl);
		entry = NEXT_TX(entry);
	} else {
		struct gem_txd *txd;
		u32 first_len;
		u64 intme;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;

		intme = 0;
		if (gem_intme(entry))
			intme |= TXDCTRL_INTME;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
					     offset_in_page(skb->data),
					     first_len, PCI_DMA_TODEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			dma_addr_t mapping;
			u64 this_ctrl;

			len = skb_frag_size(this_frag);
			mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
						   0, len, DMA_TO_DEVICE);
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();
			txd->control_word = cpu_to_le64(this_ctrl | len);

			if (gem_intme(entry))
				intme |= TXDCTRL_INTME;

			entry = NEXT_TX(entry);
		}
		txd = &gp->init_block->txd[first_entry];
		txd->buffer = cpu_to_le64(first_mapping);
		wmb();
		txd->control_word =
			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
	}

	gp->tx_new = entry;
	if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);

		/* netif_stop_queue() must be done before checking
		 * the tx index in TX_BUFFS_AVAIL() below, because
		 * in gem_tx(), we update tx_old before checking for
		 * netif_queue_stopped().
		 */
		smp_mb();
		if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
	}
	if (netif_msg_tx_queued(gp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	mb();
	writel(gp->tx_new, gp->regs + TXDMA_KICK);

	return NETDEV_TX_OK;
}

static void gem_pcs_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Reset PCS unit. */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= PCS_MIICTRL_RST;
	writel(val, gp->regs + PCS_MIICTRL);

	limit = 32;
	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
		udelay(100);
		if (limit-- <= 0)
			break;
	}
	if (limit < 0)
		netdev_warn(gp->dev, "PCS reset bit would not clear\n");
}

static void gem_pcs_reinit_adv(struct gem *gp)
{
	u32 val;

	/* Make sure PCS is disabled while changing advertisement
	 * configuration.
	 */
	val = readl(gp->regs + PCS_CFG);
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	writel(val, gp->regs + PCS_CFG);

	/* Advertise all capabilities except asymmetric
	 * pause.
	 */
	val = readl(gp->regs + PCS_MIIADV);
	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
		PCS_MIIADV_SP | PCS_MIIADV_AP);
	writel(val, gp->regs + PCS_MIIADV);

	/* Enable and restart auto-negotiation, disable wrapback/loopback,
	 * and re-enable PCS.
	 */
	val = readl(gp->regs + PCS_MIICTRL);
	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
	val &= ~PCS_MIICTRL_WB;
	writel(val, gp->regs + PCS_MIICTRL);

	val = readl(gp->regs + PCS_CFG);
	val |= PCS_CFG_ENABLE;
	writel(val, gp->regs + PCS_CFG);

	/* Make sure serialink loopback is off.  The meaning
	 * of this bit is logically inverted based upon whether
	 * you are in Serialink or SERDES mode.
	 */
	val = readl(gp->regs + PCS_SCTRL);
	if (gp->phy_type == phy_serialink)
		val &= ~PCS_SCTRL_LOOP;
	else
		val |= PCS_SCTRL_LOOP;
	writel(val, gp->regs + PCS_SCTRL);
}

#define STOP_TRIES 32

static void gem_reset(struct gem *gp)
{
	int limit;
	u32 val;

	/* Make sure we won't get any more interrupts */
	writel(0xffffffff, gp->regs + GREG_IMASK);

	/* Reset the chip */
	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);

	limit = STOP_TRIES;

	do {
		udelay(20);
		val = readl(gp->regs + GREG_SWRST);
		if (limit-- <= 0)
			break;
	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));

	if (limit < 0)
		netdev_err(gp->dev, "SW reset is ghetto\n");

	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
		gem_pcs_reinit_adv(gp);
}

static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);
	udelay(100);

	gem_enable_ints(gp);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
}

/* DMA won't be actually stopped before about 4ms tho ...
 */
static void gem_stop_dma(struct gem *gp)
{
	u32 val;

	/* We are done rocking, turn everything off. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	val = readl(gp->regs + MAC_TXCFG);
	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	(void) readl(gp->regs + MAC_RXCFG);

	/* Need to wait a bit ... done by the caller */
}


// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
	u32 advertise, features;
	int autoneg;
	int speed;
	int duplex;

	if (gp->phy_type != phy_mii_mdio0 &&
	    gp->phy_type != phy_mii_mdio1)
		goto non_mii;

	/* Setup advertise */
	if (found_mii_phy(gp))
		features = gp->phy_mii.def->features;
	else
		features = 0;

	advertise = features & ADVERTISE_MASK;
	if (gp->phy_mii.advertising != 0)
		advertise &= gp->phy_mii.advertising;

	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	if (ep->autoneg == AUTONEG_ENABLE) {
		advertise = ep->advertising;
		autoneg = 1;
	} else {
		autoneg = 0;
		speed = ethtool_cmd_speed(ep);
		duplex = ep->duplex;
	}

start_aneg:
	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		autoneg = 0;
	if (speed == SPEED_1000 &&
	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
		speed = SPEED_100;
	if (speed == SPEED_100 &&
	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
		speed = SPEED_10;
	if (duplex == DUPLEX_FULL &&
	    !(features & (SUPPORTED_1000baseT_Full |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Full)))
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
	if (!netif_device_present(gp->dev)) {
		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
		gp->phy_mii.speed = speed;
		gp->phy_mii.duplex = duplex;
		return;
	}

	/* Configure PHY & start aneg */
	gp->want_autoneg = autoneg;
	if (autoneg) {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
		gp->lstate = link_aneg;
	} else {
		if (found_mii_phy(gp))
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
		gp->lstate = link_force_ok;
	}

non_mii:
	gp->timer_ticks = 0;
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 */
static int gem_set_link_modes(struct gem *gp)
{
	struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
	int full_duplex, speed, pause;
	u32 val;

	full_duplex = 0;
	speed = SPEED_10;
	pause = 0;

	if (found_mii_phy(gp)) {
		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
			return 1;
		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
		speed = gp->phy_mii.speed;
		pause = gp->phy_mii.pause;
	} else if (gp->phy_type == phy_serialink ||
		   gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
			full_duplex = 1;
		speed = SPEED_1000;
	}

	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
		   speed, (full_duplex ? "full" : "half"));


	/* We take the tx queue lock to avoid collisions between
	 * this code, the tx path and the NAPI-driven error path
	 */
	__netif_tx_lock(txq, smp_processor_id());

	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
	if (full_duplex) {
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
	if (!full_duplex &&
	    (gp->phy_type == phy_mii_mdio0 ||
	     gp->phy_type == phy_mii_mdio1)) {
		val |= MAC_XIFCFG_DISE;
	} else if (full_duplex) {
		val |= MAC_XIFCFG_FLED;
	}

	if (speed == SPEED_1000)
		val |= (MAC_XIFCFG_GMII);

	writel(val, gp->regs + MAC_XIFCFG);

	/* If gigabit and half-duplex, enable carrier extension
	 * mode.  Else, disable it.
	 */
	if (speed == SPEED_1000 && !full_duplex) {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	} else {
		val = readl(gp->regs + MAC_TXCFG);
		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);

		val = readl(gp->regs + MAC_RXCFG);
		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
	}

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);

		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
			pause = 1;
	}

	if (!full_duplex)
		writel(512, gp->regs + MAC_STIME);
	else
		writel(64, gp->regs + MAC_STIME);
	val = readl(gp->regs + MAC_MCCFG);
	if (pause)
		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	else
		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
	writel(val, gp->regs + MAC_MCCFG);

	gem_start_dma(gp);

	__netif_tx_unlock(txq);

	if (netif_msg_link(gp)) {
		if (pause) {
			netdev_info(gp->dev,
				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    gp->rx_fifo_sz,
				    gp->rx_pause_off,
				    gp->rx_pause_on);
		} else {
			netdev_info(gp->dev, "Pause is disabled\n");
		}
	}

	return 0;
}

static int gem_mdio_link_not_up(struct gem *gp)
{
	switch (gp->lstate) {
	case link_force_ret:
		netif_info(gp, link, gp->dev,
			   "Autoneg failed again, keeping forced mode\n");
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
			gp->last_forced_speed, DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_ok;
		return 0;
	case link_aneg:
		/* We try forced modes after a failed aneg only on PHYs that don't
		 * have "magic_aneg" bit set, which means they internally do the
		 * whole forced-mode thingy. On these, we just restart aneg
		 */
		if (gp->phy_mii.def->magic_aneg)
			return 1;
		netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
		/* Try forced modes. */
		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
			DUPLEX_HALF);
		gp->timer_ticks = 5;
		gp->lstate = link_force_try;
		return 0;
	case link_force_try:
		/* Downgrade from 100 to 10 Mbps if necessary.
		 * If already at 10Mbps, warn user about the
		 * situation every 10 ticks.
		 */
		if (gp->phy_mii.speed == SPEED_100) {
			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
				DUPLEX_HALF);
			gp->timer_ticks = 5;
			netif_info(gp, link, gp->dev,
				   "switching to forced 10bt\n");
			return 0;
		} else
			return 1;
	default:
		return 0;
	}
}

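/* Link poll timer, re-armed every 1.2 s (12*HZ/10).  For PCS links we
 * sample the latching MII status register directly; for MII PHYs we
 * drive the autoneg / forced-mode fallback state machine above.
 */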
static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	struct net_device *dev = gp->dev;
	int restart_aneg = 0;

	/* There's no point doing anything if we're going to be reset */
	if (gp->reset_task_pending)
		return;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);

		if (!(val & PCS_MIISTAT_LS))
			val = readl(gp->regs + PCS_MIISTAT);

		if ((val & PCS_MIISTAT_LS) != 0) {
			if (gp->lstate == link_up)
				goto restart;

			gp->lstate = link_up;
			netif_carrier_on(dev);
			(void)gem_set_link_modes(gp);
		}
		goto restart;
	}
	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we do
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if (gp->lstate == link_force_try && gp->want_autoneg) {
			gp->lstate = link_force_ret;
			gp->last_forced_speed = gp->phy_mii.speed;
			gp->timer_ticks = 5;
			if (netif_msg_link(gp))
				netdev_info(dev,
					    "Got link after fallback, retrying autoneg once...\n");
			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
		} else if (gp->lstate != link_up) {
			gp->lstate = link_up;
			netif_carrier_on(dev);
			if (gem_set_link_modes(gp))
				restart_aneg = 1;
		}
	} else {
		/* If the link was previously up, we restart the
		 * whole process
		 */
		if (gp->lstate == link_up) {
			gp->lstate = link_down;
			netif_info(gp, link, dev, "Link down\n");
			netif_carrier_off(dev);
			gem_schedule_reset(gp);
			/* The reset task will restart the timer */
			return;
		} else if (++gp->timer_ticks > 10) {
			if (found_mii_phy(gp))
				restart_aneg = gem_mdio_link_not_up(gp);
			else
				restart_aneg = 1;
		}
	}
	if (restart_aneg) {
		gem_begin_auto_negotiation(gp, NULL);
		return;
	}
restart:
	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
}

static void gem_clean_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct sk_buff *skb;
	int i;
	dma_addr_t dma_addr;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd;

		rxd = &gb->rxd[i];
		if (gp->rx_skbs[i] != NULL) {
			skb = gp->rx_skbs[i];
			dma_addr = le64_to_cpu(rxd->buffer);
			pci_unmap_page(gp->pdev, dma_addr,
				       RX_BUF_ALLOC_SIZE(gp),
				       PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			gp->rx_skbs[i] = NULL;
		}
		rxd->status_word = 0;
		wmb();
		rxd->buffer = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (gp->tx_skbs[i] != NULL) {
			struct gem_txd *txd;
			int frag;

			skb = gp->tx_skbs[i];
			gp->tx_skbs[i] = NULL;

			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				int ent = i & (TX_RING_SIZE - 1);

				txd = &gb->txd[ent];
				dma_addr = le64_to_cpu(txd->buffer);
				pci_unmap_page(gp->pdev, dma_addr,
					       le64_to_cpu(txd->control_word) &
					       TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}
			dev_kfree_skb_any(skb);
		}
	}
}

static void gem_init_rings(struct gem *gp)
{
	struct gem_init_block *gb = gp->init_block;
	struct net_device *dev = gp->dev;
	int i;
	dma_addr_t dma_addr;

	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;

	gem_clean_rings(gp);

	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
			    (unsigned)VLAN_ETH_FRAME_LEN);

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		struct gem_rxd *rxd = &gb->rxd[i];

		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
		if (!skb) {
			rxd->buffer = 0;
			rxd->status_word = 0;
			continue;
		}

		gp->rx_skbs[i] = skb;
		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
		dma_addr = pci_map_page(gp->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					RX_BUF_ALLOC_SIZE(gp),
					PCI_DMA_FROMDEVICE);
		rxd->buffer = cpu_to_le64(dma_addr);
		wmb();
		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
		skb_reserve(skb, RX_OFFSET);
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct gem_txd *txd = &gb->txd[i];

		txd->control_word = 0;
		wmb();
		txd->buffer = 0;
	}
	wmb();
}

/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
	u32 mifcfg;

	/* Revert MIF CFG setting done on stop_phy */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

		/* Those delays suck, the HW seems to love them though, I'll
		 * seriously consider breaking some locks here to be able
		 * to schedule instead
		 */
		for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
			msleep(20);
#endif
			/* Some PHYs used by Apple have problems getting back to us,
			 * so we do an additional reset here
			 */
			sungem_phy_write(gp, MII_BMCR, BMCR_RESET);
			msleep(20);
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
			if (i == 2)
				netdev_warn(gp->dev, "GMAC PHY not responding !\n");
		}
	}

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		u32 val;

		/* Init datapath mode register. */
		if (gp->phy_type == phy_mii_mdio0 ||
		    gp->phy_type == phy_mii_mdio1) {
			val = PCS_DMODE_MGM;
		} else if (gp->phy_type == phy_serialink) {
			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
		} else {
			val = PCS_DMODE_ESM;
		}

		writel(val, gp->regs + PCS_DMODE);
	}

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		/* Reset and detect MII PHY */
		sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);

		/* Init PHY */
		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
			gp->phy_mii.def->ops->init(&gp->phy_mii);
	} else {
		gem_pcs_reset(gp);
		gem_pcs_reinit_adv(gp);
	}

	/* Default aneg parameters */
	gp->timer_ticks = 0;
	gp->lstate = link_down;
	netif_carrier_off(gp->dev);

	/* Print things out */
	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		netdev_info(gp->dev, "Found %s PHY\n",
			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	gem_begin_auto_negotiation(gp, NULL);
}

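/* Program both DMA engines.  The init block lays out the TX ring
 * first, so the RX descriptor base is the block's DMA address plus
 * the TX ring size.  The RXDMA_BLANK values are interrupt blanking:
 * interrupt after 5 packets or after a small time threshold, scaled
 * up on a 66 MHz bus (8 ticks) versus 33 MHz (4 ticks).
 */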
static void gem_init_dma(struct gem *gp)
{
	u64 desc_dma = (u64) gp->gblock_dvma;
	u32 val;

	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
	writel(val, gp->regs + TXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));

	writel(0, gp->regs + TXDMA_KICK);

	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);

	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);

	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);

	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);

	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
}

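/* Build the RX MAC filter config.  Multicast filtering uses the
 * 256-bit hash table: the top 8 bits of the little-endian CRC-32 of
 * each address pick one bit across the sixteen 16-bit MAC_HASHn
 * registers.
 */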
static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(gp->dev) > 256)) {
		for (i = 0; i < 16; i++)
			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	} else if (gp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RXCFG_PROM;
	} else {
		u16 hash_table[16];
		u32 crc;
		struct netdev_hw_addr *ha;
		int i;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, gp->dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
		rxcfg |= MAC_RXCFG_HFE;
	}

	return rxcfg;
}

static void gem_init_mac(struct gem *gp)
{
	unsigned char *e = &gp->dev->dev_addr[0];

	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);

	writel(0x00, gp->regs + MAC_IPG0);
	writel(0x08, gp->regs + MAC_IPG1);
	writel(0x04, gp->regs + MAC_IPG2);
	writel(0x40, gp->regs + MAC_STIME);
	writel(0x40, gp->regs + MAC_MINFSZ);

	/* Ethernet payload + header + FCS + optional VLAN tag. */
	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);

	writel(0x07, gp->regs + MAC_PASIZE);
	writel(0x04, gp->regs + MAC_JAMSIZE);
	writel(0x10, gp->regs + MAC_ATTLIM);
	writel(0x8808, gp->regs + MAC_MCTYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	writel(0, gp->regs + MAC_ADDR3);
	writel(0, gp->regs + MAC_ADDR4);
	writel(0, gp->regs + MAC_ADDR5);

	writel(0x0001, gp->regs + MAC_ADDR6);
	writel(0xc200, gp->regs + MAC_ADDR7);
	writel(0x0180, gp->regs + MAC_ADDR8);

	writel(0, gp->regs + MAC_AFILT0);
	writel(0, gp->regs + MAC_AFILT1);
	writel(0, gp->regs + MAC_AFILT2);
	writel(0, gp->regs + MAC_AF21MSK);
	writel(0, gp->regs + MAC_AF0MSK);

	gp->mac_rx_cfg = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
#endif
	writel(0, gp->regs + MAC_NCOLL);
	writel(0, gp->regs + MAC_FASUCC);
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
	writel(0, gp->regs + MAC_DTIMER);
	writel(0, gp->regs + MAC_PATMPS);
	writel(0, gp->regs + MAC_RFCTR);
	writel(0, gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_RXCVERR);

	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
	 * them once a link is established.
	 */
	writel(0, gp->regs + MAC_TXCFG);
	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
	writel(0, gp->regs + MAC_MCCFG);
	writel(0, gp->regs + MAC_XIFCFG);

	/* Setup MAC interrupts.  We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, gp->regs + MAC_MCMASK);

	/* Don't enable GEM's WOL in normal operations
	 */
	if (gp->has_wol)
		writel(0, gp->regs + WOL_WAKECSR);
}

static void gem_init_pause_thresholds(struct gem *gp)
{
	u32 cfg;

	/* Calculate pause thresholds.  Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation which
	 * is what we do for 10/100 only GEMs which have FIFOs too small
	 * to make real gains from PAUSE.
	 */
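	/* Worked example (illustrative numbers only): with the standard
	 * GEM 20 kB RX fifo and rx_buf_sz = 1518 (1500 MTU), max_frame
	 * below is (1518 + 4 + 64) & ~63 = 1536, so pause-off sits at
	 * 20480 - 2*1536 = 17408 bytes and pause-on at 17408 - 1536 =
	 * 15872.
	 */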
	if (gp->rx_fifo_sz <= (2 * 1024)) {
		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
	} else {
		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
		int off = (gp->rx_fifo_sz - (max_frame * 2));
		int on = off - max_frame;

		gp->rx_pause_off = off;
		gp->rx_pause_on = on;
	}


	/* Configure the chip "burst" DMA mode & enable some
	 * HW bug fixes on Apple version
	 */
	cfg  = 0;
	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	cfg |= GREG_CFG_IBURST;
#endif
	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
	writel(cfg, gp->regs + GREG_CFG);

	/* If Infinite Burst didn't stick, then use different
	 * thresholds (and Apple bug fixes don't exist)
	 */
	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}

static int gem_check_invariants(struct gem *gp)
{
	struct pci_dev *pdev = gp->pdev;
	u32 mif_cfg;

	/* On Apple's sungem, we can't rely on registers as the chip
	 * has been powered down by the firmware. The PHY is looked
	 * up later on.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
		gp->phy_type = phy_mii_mdio0;
		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
		gp->swrst_base = 0;

		mif_cfg = readl(gp->regs + MIF_CFG);
		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
		mif_cfg |= MIF_CFG_MDI0;
		writel(mif_cfg, gp->regs + MIF_CFG);
		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);

		/* We hard-code the PHY address so we can properly bring it out of
		 * reset later on, we can't really probe it at this point, though
		 * that isn't an issue.
		 */
		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
			gp->mii_phy_addr = 1;
		else
			gp->mii_phy_addr = 0;

		return 0;
	}

	mif_cfg = readl(gp->regs + MIF_CFG);

	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
		/* One of the MII PHYs _must_ be present
		 * as this chip has no gigabit PHY.
		 */
		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
			       mif_cfg);
			return -1;
		}
	}

	/* Take an initial guess at the PHY interface type.  MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */
	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else if (mif_cfg & MIF_CFG_MDI0) {
		gp->phy_type = phy_mii_mdio0;
		mif_cfg &= ~MIF_CFG_PSELECT;
		writel(mif_cfg, gp->regs + MIF_CFG);
	} else {
#ifdef CONFIG_SPARC
		const char *p;

		p = of_get_property(gp->of_node, "shared-pins", NULL);
		if (p && !strcmp(p, "serdes"))
			gp->phy_type = phy_serdes;
		else
#endif
			gp->phy_type = phy_serialink;
	}
	if (gp->phy_type == phy_mii_mdio1 ||
	    gp->phy_type == phy_mii_mdio0) {
		int i;

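		/* Probe all 32 MDIO addresses; a read of BMCR returns
		 * 0xffff when nothing responds at that address.
		 */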
		for (i = 0; i < 32; i++) {
			gp->mii_phy_addr = i;
			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
				break;
		}
		if (i == 32) {
			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
				pr_err("RIO MII phy will not respond\n");
				return -1;
			}
			gp->phy_type = phy_serdes;
		}
	}

	/* Fetch the FIFO configurations now too. */
	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;

	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
			if (gp->tx_fifo_sz != (9 * 1024) ||
			    gp->rx_fifo_sz != (20 * 1024)) {
				pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
			gp->swrst_base = 0;
		} else {
			if (gp->tx_fifo_sz != (2 * 1024) ||
			    gp->rx_fifo_sz != (2 * 1024)) {
				pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
				       gp->tx_fifo_sz, gp->rx_fifo_sz);
				return -1;
			}
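			/* RIO GEM apparently wants the cache line size,
			 * in 32-bit words, in the top of the software
			 * reset register (64-byte lines here).
			 */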
			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
		}
	}

	return 0;
}

static void gem_reinit_chip(struct gem *gp)
{
	/* Reset the chip */
	gem_reset(gp);

	/* Make sure ints are disabled */
	gem_disable_ints(gp);

	/* Allocate & setup ring buffers */
	gem_init_rings(gp);

	/* Configure pause thresholds */
	gem_init_pause_thresholds(gp);

	/* Init DMA & MAC engines */
	gem_init_dma(gp);
	gem_init_mac(gp);
}


static void gem_stop_phy(struct gem *gp, int wol)
{
	u32 mifcfg;

	/* Let the chip settle down a bit; that seems to help with
	 * sleep mode on some models
	 */
	msleep(10);

	/* Make sure we aren't polling PHY status change. We
	 * don't currently use that feature though
	 */
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_POLL;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (wol && gp->has_wol) {
		unsigned char *e = &gp->dev->dev_addr[0];
		u32 csr;

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
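		/* The WOL match registers use the same layout as
		 * MAC_ADDR0-2: 16 bits of station address each, least
		 * significant word first.
		 */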
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);

		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
		csr = WOL_WAKECSR_ENABLE;
		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
			csr |= WOL_WAKECSR_MII;
		writel(csr, gp->regs + WOL_WAKECSR);
	} else {
		writel(0, gp->regs + MAC_RXCFG);
		(void)readl(gp->regs + MAC_RXCFG);
		/* Machine sleep will die in strange ways if we
		 * don't wait a bit here; it looks like the chip takes
		 * some time to really shut down
		 */
		msleep(10);
	}

	writel(0, gp->regs + MAC_TXCFG);
	writel(0, gp->regs + MAC_XIFCFG);
	writel(0, gp->regs + TXDMA_CFG);
	writel(0, gp->regs + RXDMA_CFG);

	if (!wol) {
		gem_reset(gp);
		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);

		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
			gp->phy_mii.def->ops->suspend(&gp->phy_mii);

		/* According to Apple, we must set the MDIO pins to this benign
		 * state or we may 1) eat more current, 2) damage some PHYs
		 */
		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
		writel(0, gp->regs + MIF_BBCLK);
		writel(0, gp->regs + MIF_BBDATA);
		writel(0, gp->regs + MIF_BBOENAB);
		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
		(void) readl(gp->regs + MAC_XIFCFG);
	}
}

static int gem_do_start(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	int rc;

	/* Enable the cell */
	gem_get_cell(gp);

	/* Make sure PCI access and bus master are enabled */
	rc = pci_enable_device(gp->pdev);
	if (rc) {
		netdev_err(dev, "Failed to enable chip on PCI bus !\n");

		/* Put the cell back and forget it for now; it will be
		 * considered still asleep, and a new sleep cycle may
		 * bring it back
		 */
		gem_put_cell(gp);
		return -ENXIO;
	}
	pci_set_master(gp->pdev);

	/* Init & setup chip hardware */
	gem_reinit_chip(gp);

	/* An interrupt might come in handy */
	rc = request_irq(gp->pdev->irq, gem_interrupt,
			 IRQF_SHARED, dev->name, (void *)dev);
	if (rc) {
		netdev_err(dev, "failed to request irq !\n");

		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);
		return rc;
	}

	/* Mark us as attached again if we come from resume(); this has
	 * no effect if we weren't detached and needs to be done now.
	 */
	netif_device_attach(dev);

	/* Restart NAPI & queues */
	gem_netif_start(gp);

	/* Detect & init PHY, start autoneg etc... this will
	 * eventually result in starting DMA operations when
	 * the link is up
	 */
	gem_init_phy(gp);

	return 0;
}

static void gem_do_stop(struct net_device *dev, int wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Stop NAPI and stop tx queue */
	gem_netif_stop(gp);

	/* Make sure ints are disabled. We don't care about
	 * synchronizing as NAPI is disabled, thus a stray
	 * interrupt will do nothing bad (our irq handler
	 * just schedules NAPI)
	 */
	gem_disable_ints(gp);

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* We cannot cancel the reset task while holding the
	 * rtnl lock, we'd get an A->B / B->A deadlock situation
	 * if we did. This is not an issue however as the reset
	 * task is synchronized vs. us (rtnl_lock) and will do
	 * nothing if the device is down or suspended. We do
	 * still clear reset_task_pending to avoid a spurious
	 * reset later on in case we do resume before it gets
	 * scheduled.
	 */
	gp->reset_task_pending = 0;

	/* If we are going to sleep with WOL */
	gem_stop_dma(gp);
	msleep(10);
	if (!wol)
		gem_reset(gp);
	msleep(10);

	/* Get rid of rings */
	gem_clean_rings(gp);

	/* No irq needed anymore */
	free_irq(gp->pdev->irq, (void *) dev);

	/* Shut the PHY down eventually and setup WOL */
	gem_stop_phy(gp, wol);

	/* Make sure bus master is disabled */
	pci_disable_device(gp->pdev);

	/* Cell not needed either if there is no WOL */
	if (!wol)
		gem_put_cell(gp);
}

static void gem_reset_task(struct work_struct *work)
{
	struct gem *gp = container_of(work, struct gem, reset_task);

	/* Lock out the network stack (essentially shield ourselves
	 * against a racing open, close, control call, or suspend)
	 */
	rtnl_lock();

	/* Skip the reset task if suspended or closed, or if it's
	 * been cancelled by gem_do_stop (see comment there)
	 */
	if (!netif_device_present(gp->dev) ||
	    !netif_running(gp->dev) ||
	    !gp->reset_task_pending) {
		rtnl_unlock();
		return;
	}

	/* Stop the link timer */
	del_timer_sync(&gp->link_timer);

	/* Stop NAPI and tx */
	gem_netif_stop(gp);

	/* Reset the chip & rings */
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);

	/* Restart NAPI and Tx */
	gem_netif_start(gp);

	/* We are back ! */
	gp->reset_task_pending = 0;

	/* If the link is not up, restart autoneg, else restart the
	 * polling timer
	 */
	if (gp->lstate != link_up)
		gem_begin_auto_negotiation(gp, NULL);
	else
		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));

	rtnl_unlock();
}

static int gem_open(struct net_device *dev)
{
	/* We allow open while suspended, we just do nothing,
	 * the chip will be initialized in resume()
	 */
	if (netif_device_present(dev))
		return gem_do_start(dev);
	return 0;
}

static int gem_close(struct net_device *dev)
{
	if (netif_device_present(dev))
		gem_do_stop(dev, 0);

	return 0;
}

#ifdef CONFIG_PM
static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* Lock the network stack first to avoid racing with open/close,
	 * reset task and setting calls
	 */
	rtnl_lock();

	/* Not running, mark ourselves non-present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_detach(dev);
		rtnl_unlock();
		return 0;
	}
	netdev_info(dev, "suspending, WakeOnLan %s\n",
		    (gp->wake_on_lan && netif_running(dev)) ?
		    "enabled" : "disabled");

	/* Tell the network stack we're gone. gem_do_stop() below will
	 * synchronize with TX, stop NAPI etc...
	 */
	netif_device_detach(dev);

	/* Switch off chip, remember WOL setting */
	gp->asleep_wol = !!gp->wake_on_lan;
	gem_do_stop(dev, gp->asleep_wol);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}

static int gem_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct gem *gp = netdev_priv(dev);

	/* See locking comment in gem_suspend */
	rtnl_lock();

	/* Not running, mark ourselves present, no need for
	 * a lock here
	 */
	if (!netif_running(dev)) {
		netif_device_attach(dev);
		rtnl_unlock();
		return 0;
	}

	/* Restart chip. If that fails there isn't much we can do, we
	 * leave things stopped.
	 */
	gem_do_start(dev);

	/* If we had WOL enabled, the cell clock was never turned off during
	 * sleep, so we end up being unbalanced. Fix that here
	 */
	if (gp->asleep_wol)
		gem_put_cell(gp);

	/* Unlock the network stack */
	rtnl_unlock();

	return 0;
}
#endif /* CONFIG_PM */

static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	/* I have seen this being called while the PM was in progress,
	 * so we shield against this. Let's also not poke at registers
	 * while the reset task is going on.
	 *
	 * TODO: Move stats collection elsewhere (link timer ?) and
	 * make this a nop to avoid all those synchro issues
	 */
	if (!netif_device_present(dev) || !netif_running(dev))
		goto bail;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		goto bail;

	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
	writel(0, gp->regs + MAC_FCSERR);

	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
	writel(0, gp->regs + MAC_AERR);

	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
	writel(0, gp->regs + MAC_LERR);

	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
	dev->stats.collisions +=
		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
	writel(0, gp->regs + MAC_ECOLL);
	writel(0, gp->regs + MAC_LCOLL);
 bail:
	return &dev->stats;
}

static int gem_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *macaddr = (struct sockaddr *) addr;
	struct gem *gp = netdev_priv(dev);
	unsigned char *e = &dev->dev_addr[0];

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);

	return 0;
}

static void gem_set_multicast(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	u32 rxcfg, rxcfg_new;
	int limit = 10000;

	if (!netif_running(dev) || !netif_device_present(dev))
		return;

	/* Better safe than sorry... */
	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
		return;

	rxcfg = readl(gp->regs + MAC_RXCFG);
	rxcfg_new = gem_setup_multicast(gp);
#ifdef STRIP_FCS
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

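	/* Disable the RX MAC and wait for it to quiesce; worst case
	 * we spin 10000 * 10us = 100ms before giving up.
	 */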
	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
			break;
		udelay(10);
	}

	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
	rxcfg |= rxcfg_new;

	writel(rxcfg, gp->regs + MAC_RXCFG);
}

/* Jumbo-grams don't seem to work :-( */
#define GEM_MIN_MTU	68
#if 1
#define GEM_MAX_MTU	1500
#else
#define GEM_MAX_MTU	9000
#endif

static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
	struct gem *gp = netdev_priv(dev);

	if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;

	/* We'll just catch it later when the device is up'd or resumed */
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* Better safe than sorry... */
	if (WARN_ON(!gp->cell_enabled))
		return 0;

	gem_netif_stop(gp);
	gem_reinit_chip(gp);
	if (gp->lstate == link_up)
		gem_set_link_modes(gp);
	gem_netif_start(gp);

	return 0;
}

static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}

static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1) {
		if (gp->phy_mii.def)
			cmd->supported = gp->phy_mii.def->features;
		else
			cmd->supported = (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full);

		/* XXX hardcoded stuff for now */
		cmd->port = PORT_MII;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->phy_address = 0; /* XXX fixed PHYAD */

		/* Return current PHY settings */
		cmd->autoneg = gp->want_autoneg;
		ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
		cmd->duplex = gp->phy_mii.duplex;
		cmd->advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
		 * advertise set, we need to return something sensible so
		 * userland can re-enable autoneg properly.
		 */
		if (cmd->advertising == 0)
			cmd->advertising = cmd->supported;
	} else { // XXX PCS ?
		cmd->supported =
			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			 SUPPORTED_Autoneg);
		cmd->advertising = cmd->supported;
		ethtool_cmd_speed_set(cmd, 0);
		cmd->duplex = cmd->port = cmd->phy_address =
			cmd->transceiver = cmd->autoneg = 0;

		/* Serdes usually means a Fibre connector, with most settings fixed */
		if (gp->phy_type == phy_serdes) {
			cmd->port = PORT_FIBRE;
			cmd->supported = (SUPPORTED_1000baseT_Half |
				SUPPORTED_1000baseT_Full |
				SUPPORTED_FIBRE | SUPPORTED_Autoneg |
				SUPPORTED_Pause | SUPPORTED_Asym_Pause);
			cmd->advertising = cmd->supported;
			cmd->transceiver = XCVR_INTERNAL;
			if (gp->lstate == link_up)
				ethtool_cmd_speed_set(cmd, SPEED_1000);
			cmd->duplex = DUPLEX_FULL;
			cmd->autoneg = 1;
		}
	}
	cmd->maxtxpkt = cmd->maxrxpkt = 0;

	return 0;
}

static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE &&
	    cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, cmd);
	}

	return 0;
}

static int gem_nway_reset(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->want_autoneg)
		return -EINVAL;

	/* Restart link process */
	if (netif_device_present(gp->dev)) {
		del_timer_sync(&gp->link_timer);
		gem_begin_auto_negotiation(gp, NULL);
	}

	return 0;
}

static u32 gem_get_msglevel(struct net_device *dev)
{
	struct gem *gp = netdev_priv(dev);
	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = netdev_priv(dev);
	gp->msg_enable = value;
}


/* Add more when I understand how to program the chip */
/* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */

#define WOL_SUPPORTED_MASK	(WAKE_MAGIC)

static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	/* Add more when I understand how to program the chip */
	if (gp->has_wol) {
		wol->supported = WOL_SUPPORTED_MASK;
		wol->wolopts = gp->wake_on_lan;
	} else {
		wol->supported = 0;
		wol->wolopts = 0;
	}
}

static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct gem *gp = netdev_priv(dev);

	if (!gp->has_wol)
		return -EOPNOTSUPP;
	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
	return 0;
}

static const struct ethtool_ops gem_ethtool_ops = {
	.get_drvinfo		= gem_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_settings		= gem_get_settings,
	.set_settings		= gem_set_settings,
	.nway_reset		= gem_nway_reset,
	.get_msglevel		= gem_get_msglevel,
	.set_msglevel		= gem_set_msglevel,
	.get_wol		= gem_get_wol,
	.set_wol		= gem_set_wol,
};

static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct gem *gp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);
	int rc = -EOPNOTSUPP;

	/* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
	 * netif_device_present() is true and holds rtnl_lock for us
	 * so we have nothing to worry about
	 */

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = gp->mii_phy_addr;
		/* Fallthrough... */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
						  data->reg_num & 0x1f);
		rc = 0;
		break;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		__sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
				   data->val_in);
		rc = 0;
		break;
	}
	return rc;
}

#if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
/* Fetch MAC address from vital product data of PCI ROM. */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
{
	int this_offset;

	for (this_offset = 0x20; this_offset < len; this_offset++) {
		void __iomem *p = rom_base + this_offset;
		int i;

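		/* Look for what appears to be a VPD read-only data tag
		 * (0x90) immediately followed by an "NA" (network
		 * address) keyword carrying a 6 byte payload.
		 */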
		if (readb(p + 0) != 0x90 ||
		    readb(p + 1) != 0x00 ||
		    readb(p + 2) != 0x09 ||
		    readb(p + 3) != 0x4e ||
		    readb(p + 4) != 0x41 ||
		    readb(p + 5) != 0x06)
			continue;

		this_offset += 6;
		p += 6;

		for (i = 0; i < 6; i++)
			dev_addr[i] = readb(p + i);
		return 1;
	}
	return 0;
}

static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
	size_t size;
	void __iomem *p = pci_map_rom(pdev, &size);

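	/* A valid PCI expansion ROM starts with the 0x55, 0xaa signature. */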
	if (p) {
		int found;

		found = readb(p) == 0x55 &&
			readb(p + 1) == 0xaa &&
			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
		pci_unmap_rom(pdev, p);
		if (found)
			return;
	}

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);
}
#endif /* not Sparc and not PPC */

static int gem_get_device_address(struct gem *gp)
{
#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
	struct net_device *dev = gp->dev;
	const unsigned char *addr;

	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
	if (addr == NULL) {
#ifdef CONFIG_SPARC
		addr = idprom->id_ethaddr;
#else
		printk("\n");
		pr_err("%s: can't get mac-address\n", dev->name);
		return -1;
#endif
	}
	memcpy(dev->dev_addr, addr, ETH_ALEN);
#else
	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
#endif
	return 0;
}

static void gem_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct gem *gp = netdev_priv(dev);

		unregister_netdev(dev);

		/* Ensure reset task is truly gone */
		cancel_work_sync(&gp->reset_task);

		/* Free resources */
		pci_free_consistent(pdev,
				    sizeof(struct gem_init_block),
				    gp->init_block,
				    gp->gblock_dvma);
		iounmap(gp->regs);
		pci_release_regions(pdev);
		free_netdev(dev);
	}
}

static const struct net_device_ops gem_netdev_ops = {
	.ndo_open		= gem_open,
	.ndo_stop		= gem_close,
	.ndo_start_xmit		= gem_start_xmit,
	.ndo_get_stats		= gem_get_stats,
	.ndo_set_rx_mode	= gem_set_multicast,
	.ndo_do_ioctl		= gem_ioctl,
	.ndo_tx_timeout		= gem_tx_timeout,
	.ndo_change_mtu		= gem_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address    = gem_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = gem_poll_controller,
#endif
};

static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long gemreg_base, gemreg_len;
	struct net_device *dev;
	struct gem *gp;
	int err, pci_using_dac;

	printk_once(KERN_INFO "%s", version);

	/* Apple gmac note: during probe, the chip is powered up by
	 * the arch code to allow the code below to work (and to let
	 * the chip be probed on the config space). It won't stay powered
	 * up until the interface is brought up however, so we can't rely
	 * on register configuration done at this point.
	 */
	err = pci_enable_device(pdev);
	if (err) {
		pr_err("Cannot enable MMIO operation, aborting\n");
		return err;
	}
	pci_set_master(pdev);

	/* Configure DMA attributes. */

	/* All of the GEM documentation states that 64-bit DMA addressing
	 * is fully supported and should work just fine.  However the
	 * front end for RIO based GEMs is different and only supports
	 * 32-bit addressing.
	 *
	 * For now we assume the various PPC GEMs are 32-bit only as well.
	 */
	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_disable_device;
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		pr_err("Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_disable_device;
	}

	dev = alloc_etherdev(sizeof(*gp));
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_device;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	gp = netdev_priv(dev);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("Cannot obtain PCI resources, aborting\n");
		goto err_out_free_netdev;
	}

	gp->pdev = pdev;
	gp->dev = dev;

	gp->msg_enable = DEFAULT_MSG;

	init_timer(&gp->link_timer);
	gp->link_timer.function = gem_link_timer;
	gp->link_timer.data = (unsigned long) gp;

	INIT_WORK(&gp->reset_task, gem_reset_task);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);

	gp->regs = ioremap(gemreg_base, gemreg_len);
	if (!gp->regs) {
		pr_err("Cannot map device registers, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	/* On Apple, we want a reference to the Open Firmware device-tree
	 * node. We use it for clock control.
	 */
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
	gp->of_node = pci_device_to_OF_node(pdev);
#endif

	/* Only the Apple version supports WOL afaik */
	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
		gp->has_wol = 1;

	/* Make sure cell is enabled */
	gem_get_cell(gp);

	/* Make sure everything is stopped and in init state */
	gem_reset(gp);

	/* Fill up the mii_phy structure (even if we won't use it) */
	gp->phy_mii.dev = dev;
	gp->phy_mii.mdio_read = _sungem_phy_read;
	gp->phy_mii.mdio_write = _sungem_phy_write;
#ifdef CONFIG_PPC_PMAC
	gp->phy_mii.platform_data = gp->of_node;
#endif
	/* By default, we start with autoneg */
	gp->want_autoneg = 1;

	/* Check fifo sizes, PHY type, etc... */
	if (gem_check_invariants(gp)) {
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* It is guaranteed that the returned buffer will be at least
	 * PAGE_SIZE aligned.
	 */
	gp->init_block = (struct gem_init_block *)
		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
				     &gp->gblock_dvma);
	if (!gp->init_block) {
		pr_err("Cannot allocate init block, aborting\n");
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	err = gem_get_device_address(gp);
	if (err)
		goto err_out_free_consistent;

	dev->netdev_ops = &gem_netdev_ops;
	netif_napi_add(dev, &gp->napi, gem_poll, 64);
	dev->ethtool_ops = &gem_ethtool_ops;
	dev->watchdog_timeo = 5 * HZ;
	dev->dma = 0;

	/* Set that now, in case PM kicks in now */
	pci_set_drvdata(pdev, dev);

	/* We can do scatter/gather and HW checksum */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
	if (pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	/* Register with kernel */
	if (register_netdev(dev)) {
		pr_err("Cannot register net device, aborting\n");
		err = -ENOMEM;
		goto err_out_free_consistent;
	}

	/* Undo the get_cell with appropriate locking (we could use
	 * ndo_init/uninit but that would be even more clumsy imho)
	 */
	rtnl_lock();
	gem_put_cell(gp);
	rtnl_unlock();

	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
		    dev->dev_addr);
	return 0;

err_out_free_consistent:
	gem_remove_one(pdev);
err_out_iounmap:
	gem_put_cell(gp);
	iounmap(gp->regs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_free_netdev:
	free_netdev(dev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static struct pci_driver gem_driver = {
	.name		= GEM_MODULE_NAME,
	.id_table	= gem_pci_tbl,
	.probe		= gem_init_one,
	.remove		= gem_remove_one,
#ifdef CONFIG_PM
	.suspend	= gem_suspend,
	.resume		= gem_resume,
#endif /* CONFIG_PM */
};

module_pci_driver(gem_driver);