/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, see <http://www.gnu.org/licenses/>.            *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/
37
38#include "common.h"
39#include <linux/module.h>
40#include <linux/pci.h>
41#include <linux/netdevice.h>
42#include <linux/etherdevice.h>
43#include <linux/if_vlan.h>
44#include <linux/mii.h>
45#include <linux/sockios.h>
46#include <linux/dma-mapping.h>
47#include <asm/uaccess.h>
48
49#include "cpl5_cmd.h"
50#include "regs.h"
51#include "gmac.h"
52#include "cphy.h"
53#include "sge.h"
54#include "tp.h"
55#include "espi.h"
56#include "elmer0.h"
57
58#include <linux/workqueue.h>
59
60static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
61{
62	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
63}
64
/* Cancel a pending periodic MAC statistics refresh, if one is queued. */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
69
/* Upper/lower bounds for the SGE ring sizes configurable via ethtool. */
#define MAX_CMDQ_ENTRIES	16384
#define MAX_CMDQ1_ENTRIES	1024
#define MAX_RX_BUFFERS		16384
#define MAX_RX_JUMBO_BUFFERS	16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES		32

/* Default netif_msg verbosity bitmap used for adapter->msg_enable. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32
88
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

/* Module parameter: initial netif_msg bitmap for each probed adapter. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
97
/* Core clock selectors passed to t1_clock() for powersave control. */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

/* Set non-zero to force legacy (shared) interrupts instead of MSI. */
static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* PCI bus speed strings in MHz; indexing use is not visible in this file. */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};
115
116/*
117 * Setup MAC to receive the types of packets we want.
118 */
119static void t1_set_rxmode(struct net_device *dev)
120{
121	struct adapter *adapter = dev->ml_priv;
122	struct cmac *mac = adapter->port[dev->if_port].mac;
123	struct t1_rx_mode rm;
124
125	rm.dev = dev;
126	mac->ops->set_rx_mode(mac, &rm);
127}
128
129static void link_report(struct port_info *p)
130{
131	if (!netif_carrier_ok(p->dev))
132		netdev_info(p->dev, "link down\n");
133	else {
134		const char *s = "10Mbps";
135
136		switch (p->link_config.speed) {
137			case SPEED_10000: s = "10Gbps"; break;
138			case SPEED_1000:  s = "1000Mbps"; break;
139			case SPEED_100:   s = "100Mbps"; break;
140		}
141
142		netdev_info(p->dev, "link up, %s, %s-duplex\n",
143			    s, p->link_config.duplex == DUPLEX_FULL
144			    ? "full" : "half");
145	}
146}
147
148void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
149			int speed, int duplex, int pause)
150{
151	struct port_info *p = &adapter->port[port_id];
152
153	if (link_stat != netif_carrier_ok(p->dev)) {
154		if (link_stat)
155			netif_carrier_on(p->dev);
156		else
157			netif_carrier_off(p->dev);
158		link_report(p);
159
160		/* multi-ports: inform toe */
161		if ((speed > 0) && (adapter->params.nports > 1)) {
162			unsigned int sched_speed = 10;
163			switch (speed) {
164			case SPEED_1000:
165				sched_speed = 1000;
166				break;
167			case SPEED_100:
168				sched_speed = 100;
169				break;
170			case SPEED_10:
171				sched_speed = 10;
172				break;
173			}
174			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
175		}
176	}
177}
178
/*
 * Bring up a port's link: reset the MAC, program the station address,
 * restore the RX filtering mode, kick off link negotiation, and enable
 * the MAC last.  Note the sequencing: reset happens first, enable last.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
190
/*
 * Enable TP checksum offload: IP header checksum generation only when TSO
 * is advertised on port 0, TCP checksum offload unconditionally.
 */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
197
/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 *
 * One-time HW module initialization is guarded by FULL_INIT_DONE; the IRQ
 * is (re)acquired on every up transition.  Returns 0 or a negative errno.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* Heavyweight HW bring-up happens only once per adapter lifetime. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Prefer MSI unless disabled by module parameter or unavailable. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		/* Undo the MSI enable if the IRQ could not be acquired. */
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
233
/*
 * Release resources when all the ports have been stopped.
 *
 * Mirrors cxgb_up() in reverse: quiesce the SGE and mask interrupts
 * before freeing the IRQ, then release MSI if it was enabled.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
245
246static int cxgb_open(struct net_device *dev)
247{
248	int err;
249	struct adapter *adapter = dev->ml_priv;
250	int other_ports = adapter->open_device_map & PORT_MASK;
251
252	napi_enable(&adapter->napi);
253	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
254		napi_disable(&adapter->napi);
255		return err;
256	}
257
258	__set_bit(dev->if_port, &adapter->open_device_map);
259	link_start(&adapter->port[dev->if_port]);
260	netif_start_queue(dev);
261	if (!other_ports && adapter->params.stats_update_period)
262		schedule_mac_stats_update(adapter,
263					  adapter->params.stats_update_period);
264
265	t1_vlan_mode(adapter, dev->features);
266	return 0;
267}
268
/*
 * net_device stop handler.  Quiesces the port, synchronizes with the
 * periodic stats task, and tears the adapter down when the last port
 * closes.
 */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_atomic();
		/*
		 * Taking and dropping work_lock ensures any stats task that
		 * observed the old open_device_map finishes its reschedule
		 * decision before we cancel the delayed work (see
		 * mac_stats_task(), which reschedules under this lock).
		 */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
294
/*
 * net_device get_stats handler: trigger a full MAC counter refresh and
 * map the HW counters onto struct net_device_stats.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}
342
343static u32 get_msglevel(struct net_device *dev)
344{
345	struct adapter *adapter = dev->ml_priv;
346
347	return adapter->msg_enable;
348}
349
350static void set_msglevel(struct net_device *dev, u32 val)
351{
352	struct adapter *adapter = dev->ml_priv;
353
354	adapter->msg_enable = val;
355}
356
/*
 * ethtool statistics names.  The entries here must stay in exactly the
 * order in which get_stats() writes the corresponding values.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
423
/* Size in bytes of the register dump exported via ethtool -d. */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: length of the buffer get_regs() fills. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
430
/* ethtool get_drvinfo handler: driver identity and PCI bus location. */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->ml_priv;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}
440
441static int get_sset_count(struct net_device *dev, int sset)
442{
443	switch (sset) {
444	case ETH_SS_STATS:
445		return ARRAY_SIZE(stats_strings);
446	default:
447		return -EOPNOTSUPP;
448	}
449}
450
451static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
452{
453	if (stringset == ETH_SS_STATS)
454		memcpy(data, stats_strings, sizeof(stats_strings));
455}
456
457static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
458		      u64 *data)
459{
460	struct adapter *adapter = dev->ml_priv;
461	struct cmac *mac = adapter->port[dev->if_port].mac;
462	const struct cmac_statistics *s;
463	const struct sge_intr_counts *t;
464	struct sge_port_stats ss;
465
466	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
467	t = t1_sge_get_intr_counts(adapter->sge);
468	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
469
470	*data++ = s->TxOctetsOK;
471	*data++ = s->TxOctetsBad;
472	*data++ = s->TxUnicastFramesOK;
473	*data++ = s->TxMulticastFramesOK;
474	*data++ = s->TxBroadcastFramesOK;
475	*data++ = s->TxPauseFrames;
476	*data++ = s->TxFramesWithDeferredXmissions;
477	*data++ = s->TxLateCollisions;
478	*data++ = s->TxTotalCollisions;
479	*data++ = s->TxFramesAbortedDueToXSCollisions;
480	*data++ = s->TxUnderrun;
481	*data++ = s->TxLengthErrors;
482	*data++ = s->TxInternalMACXmitError;
483	*data++ = s->TxFramesWithExcessiveDeferral;
484	*data++ = s->TxFCSErrors;
485	*data++ = s->TxJumboFramesOK;
486	*data++ = s->TxJumboOctetsOK;
487
488	*data++ = s->RxOctetsOK;
489	*data++ = s->RxOctetsBad;
490	*data++ = s->RxUnicastFramesOK;
491	*data++ = s->RxMulticastFramesOK;
492	*data++ = s->RxBroadcastFramesOK;
493	*data++ = s->RxPauseFrames;
494	*data++ = s->RxFCSErrors;
495	*data++ = s->RxAlignErrors;
496	*data++ = s->RxSymbolErrors;
497	*data++ = s->RxDataErrors;
498	*data++ = s->RxSequenceErrors;
499	*data++ = s->RxRuntErrors;
500	*data++ = s->RxJabberErrors;
501	*data++ = s->RxInternalMACRcvError;
502	*data++ = s->RxInRangeLengthErrors;
503	*data++ = s->RxOutOfRangeLengthField;
504	*data++ = s->RxFrameTooLongErrors;
505	*data++ = s->RxJumboFramesOK;
506	*data++ = s->RxJumboOctetsOK;
507
508	*data++ = ss.rx_cso_good;
509	*data++ = ss.tx_cso;
510	*data++ = ss.tx_tso;
511	*data++ = ss.vlan_xtract;
512	*data++ = ss.vlan_insert;
513	*data++ = ss.tx_need_hdrroom;
514
515	*data++ = t->rx_drops;
516	*data++ = t->pure_rsps;
517	*data++ = t->unhandled_irqs;
518	*data++ = t->respQ_empty;
519	*data++ = t->respQ_overflow;
520	*data++ = t->freelistQ_empty;
521	*data++ = t->pkt_too_big;
522	*data++ = t->pkt_mismatch;
523	*data++ = t->cmdQ_full[0];
524	*data++ = t->cmdQ_full[1];
525
526	if (adapter->espi) {
527		const struct espi_intr_counts *e;
528
529		e = t1_espi_get_intr_counts(adapter->espi);
530		*data++ = e->DIP2_parity_err;
531		*data++ = e->DIP4_err;
532		*data++ = e->rx_drops;
533		*data++ = e->tx_drops;
534		*data++ = e->rx_ovflw;
535		*data++ = e->parity_err;
536	}
537}
538
539static inline void reg_block_dump(struct adapter *ap, void *buf,
540				  unsigned int start, unsigned int end)
541{
542	u32 *p = buf + start;
543
544	for ( ; start <= end; start += sizeof(u32))
545		*p++ = readl(ap->regs + start);
546}
547
/*
 * ethtool get_regs handler: dump the register blocks of the major HW
 * modules into a zeroed T2_REGMAP_SIZE buffer, each at its own offset.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
570
/*
 * ethtool get_settings handler (legacy ethtool_cmd API).  Speed and
 * duplex are reported only while the carrier is up; otherwise they are
 * marked unknown.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, p->link_config.speed);
		cmd->duplex = p->link_config.duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
595
596static int speed_duplex_to_caps(int speed, int duplex)
597{
598	int cap = 0;
599
600	switch (speed) {
601	case SPEED_10:
602		if (duplex == DUPLEX_FULL)
603			cap = SUPPORTED_10baseT_Full;
604		else
605			cap = SUPPORTED_10baseT_Half;
606		break;
607	case SPEED_100:
608		if (duplex == DUPLEX_FULL)
609			cap = SUPPORTED_100baseT_Full;
610		else
611			cap = SUPPORTED_100baseT_Half;
612		break;
613	case SPEED_1000:
614		if (duplex == DUPLEX_FULL)
615			cap = SUPPORTED_1000baseT_Full;
616		else
617			cap = SUPPORTED_1000baseT_Half;
618		break;
619	case SPEED_10000:
620		if (duplex == DUPLEX_FULL)
621			cap = SUPPORTED_10000baseT_Full;
622	}
623	return cap;
624}
625
/* Every advertising bit this driver could possibly honor. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

/*
 * ethtool set_settings handler (legacy ethtool_cmd API).  With autoneg
 * disabled, a single forced speed/duplex is requested (forcing 1Gbps is
 * rejected); with autoneg enabled, the advertising mask is sanitized
 * against what the PHY supports.  Requires SUPPORTED_Autoneg.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(cmd);
		int cap = speed_duplex_to_caps(speed, cmd->duplex);

		/* Forced 1Gbps is not allowed. */
		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* More than one bit set: fall back to everything supported. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
665
666static void get_pauseparam(struct net_device *dev,
667			   struct ethtool_pauseparam *epause)
668{
669	struct adapter *adapter = dev->ml_priv;
670	struct port_info *p = &adapter->port[dev->if_port];
671
672	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
673	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
674	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
675}
676
/*
 * ethtool set_pauseparam handler.  Pause autonegotiation is only allowed
 * when the PHY supports autoneg; otherwise the requested RX/TX pause
 * settings are pushed straight to the MAC (when the device is running).
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* Renegotiate so the link partner learns the new settings. */
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		/* Apply directly, keeping current speed/duplex (-1, -1). */
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
706
707static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
708{
709	struct adapter *adapter = dev->ml_priv;
710	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
711
712	e->rx_max_pending = MAX_RX_BUFFERS;
713	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
714	e->tx_max_pending = MAX_CMDQ_ENTRIES;
715
716	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
717	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
718	e->tx_pending = adapter->params.sge.cmdQ_size[0];
719}
720
721static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
722{
723	struct adapter *adapter = dev->ml_priv;
724	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
725
726	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
727	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
728	    e->tx_pending > MAX_CMDQ_ENTRIES ||
729	    e->rx_pending < MIN_FL_ENTRIES ||
730	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
731	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
732		return -EINVAL;
733
734	if (adapter->flags & FULL_INIT_DONE)
735		return -EBUSY;
736
737	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
738	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
739	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
740	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
741		MAX_CMDQ1_ENTRIES : e->tx_pending;
742	return 0;
743}
744
/*
 * ethtool set_coalesce handler: stage the new interrupt coalescing
 * parameters and push them to the SGE immediately.
 */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}
755
/* ethtool get_coalesce handler: mirror of set_coalesce() above. */
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}
765
766static int get_eeprom_len(struct net_device *dev)
767{
768	struct adapter *adapter = dev->ml_priv;
769
770	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
771}
772
/* Magic cookie identifying this EEPROM: Chelsio vendor ID + chip version. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool get_eeprom handler.  Reads aligned 32-bit words into a bounce
 * buffer, then copies out the requested byte range.
 * NOTE(review): assumes the ethtool core caps e->offset + e->len at
 * get_eeprom_len() (EEPROM_SIZE); otherwise buf would overflow — confirm.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	/* Round the start down to a word boundary and read whole words. */
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
789
/* ethtool entry points (legacy get/set_settings API). */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count	   = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
};
811
812static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
813{
814	struct adapter *adapter = dev->ml_priv;
815	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
816
817	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
818}
819
820static int t1_change_mtu(struct net_device *dev, int new_mtu)
821{
822	int ret;
823	struct adapter *adapter = dev->ml_priv;
824	struct cmac *mac = adapter->port[dev->if_port].mac;
825
826	if (!mac->ops->set_mtu)
827		return -EOPNOTSUPP;
828	if (new_mtu < 68)
829		return -EINVAL;
830	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
831		return ret;
832	dev->mtu = new_mtu;
833	return 0;
834}
835
836static int t1_set_mac_addr(struct net_device *dev, void *p)
837{
838	struct adapter *adapter = dev->ml_priv;
839	struct cmac *mac = adapter->port[dev->if_port].mac;
840	struct sockaddr *addr = p;
841
842	if (!mac->ops->macaddress_set)
843		return -EOPNOTSUPP;
844
845	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
846	mac->ops->macaddress_set(mac, dev->dev_addr);
847	return 0;
848}
849
850static netdev_features_t t1_fix_features(struct net_device *dev,
851	netdev_features_t features)
852{
853	/*
854	 * Since there is no support for separate rx/tx vlan accel
855	 * enable/disable make sure tx flag is always in same state as rx.
856	 */
857	if (features & NETIF_F_HW_VLAN_CTAG_RX)
858		features |= NETIF_F_HW_VLAN_CTAG_TX;
859	else
860		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
861
862	return features;
863}
864
865static int t1_set_features(struct net_device *dev, netdev_features_t features)
866{
867	netdev_features_t changed = dev->features ^ features;
868	struct adapter *adapter = dev->ml_priv;
869
870	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
871		t1_vlan_mode(adapter, features);
872
873	return 0;
874}
875#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry point: invoke the interrupt handler directly with local
 * interrupts disabled.
 */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
885#endif
886
/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	/* Fast (partial) refresh for every port that is currently up. */
	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	/*
	 * work_lock makes the open_device_map check and the reschedule
	 * atomic with respect to cxgb_close(), which takes the same lock
	 * before cancelling the delayed work.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}
912
/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	/*
	 * async_lock protects slow_intr_mask and the PL enable register;
	 * EXT was masked by t1_elmer0_ext_intr() before this task ran.
	 * Ack the cause first, then restore the full enable mask.
	 */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
931
/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	/* Write the reduced mask so no further EXT interrupts fire. */
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
947
/*
 * Handle an unrecoverable HW error: stop the SGE and mask interrupts (if
 * the HW was ever initialized), then log.  The adapter stays suspended.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
957
/* net_device entry points shared by all ports of an adapter. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t1_start_xmit,
	.ndo_get_stats		= t1_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= t1_set_rxmode,
	.ndo_do_ioctl		= t1_ioctl,
	.ndo_change_mtu		= t1_change_mtu,
	.ndo_set_mac_address	= t1_set_mac_addr,
	.ndo_fix_features	= t1_fix_features,
	.ndo_set_features	= t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= t1_netpoll,
#endif
};
974
/*
 * PCI probe: bring up one T1 adapter and register a net_device per port.
 *
 * Resource acquisition order (unwound in reverse on error via the goto
 * labels at the bottom): enable PCI device -> set DMA masks -> request
 * BAR regions -> allocate netdevs (the first one also carries the
 * struct adapter in its private area) -> ioremap registers -> software
 * module init -> register netdevs.
 *
 * Returns 0 if at least one port registered successfully, negative
 * errno otherwise.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR 0 must be a memory BAR; it holds the register file. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA; fall back to a 32-bit streaming mask. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/*
		 * Only the first netdev gets extra private space for the
		 * shared struct adapter; the rest reference it via ml_priv.
		 */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		/* One-time adapter-wide setup on the first iteration. */
		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port netdev setup. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on xmit. */
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		netdev->ethtool_ops = &t1_ethtool_ops;
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warn("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* err still holds the last register_netdev() failure here. */
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
		adapter->name, bi->desc, adapter->params.chip_revision,
		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
		adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		/* Free in reverse; port[0].dev also frees the adapter. */
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}
1171
1172static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1173{
1174	int data;
1175	int i;
1176	u32 val;
1177
1178	enum {
1179		S_CLOCK = 1 << 3,
1180		S_DATA = 1 << 4
1181	};
1182
1183	for (i = (nbits - 1); i > -1; i--) {
1184
1185		udelay(50);
1186
1187		data = ((bitdata >> i) & 0x1);
1188		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1189
1190		if (data)
1191			val |= S_DATA;
1192		else
1193			val &= ~S_DATA;
1194
1195		udelay(50);
1196
1197		/* Set SCLOCK low */
1198		val &= ~S_CLOCK;
1199		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1200
1201		udelay(50);
1202
1203		/* Write SCLOCK high */
1204		val |= S_CLOCK;
1205		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1206
1207	}
1208}
1209
/*
 * t1_clock - reprogram the T1B ASIC core and memory clock synthesizers.
 *
 * @mode: bit 0 selects HCLOCK (full speed) vs LCLOCK (powersave);
 *        bit 1 set means "query only" - leave the clocks untouched.
 *
 * Serially programs the T, N and M dividers of both synthesizers by
 * bit-banging them through ELMER0 GPO (see bit_bang()).  The whole
 * sequence runs under tpi_lock so no other TPI access can interleave
 * with the serial stream.
 *
 * Returns 0 on success, -ENODEV on non-T1B chips (which cannot be
 * re-clocked), -EALREADY if the ASIC is already in the requested mode.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	/* Select the M divider values for the requested speed. */
	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core: pulse S_LOAD_CORE to latch the new dividers. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory: pulse S_LOAD_MEM to latch the new dividers. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1319
/*
 * t1_sw_reset - soft-reset the chip by cycling it through power state
 * D3hot (3) and back to D0 (0) via the PCI power-management CSR.
 * The write order is significant.
 */
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}
1325
/*
 * PCI remove: tear down one adapter in the reverse order of init_one().
 * Unregister the registered ports, release software modules and the
 * register mapping, free every allocated netdev (port 0's netdev also
 * holds the struct adapter, so it must go last), then release PCI
 * resources and soft-reset the chip.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/* for_each_port() above left i == port count; free in reverse. */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	t1_sw_reset(pdev);
}
1349
/* PCI driver glue; module_pci_driver() generates module init/exit. */
static struct pci_driver cxgb_pci_driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
};

module_pci_driver(cxgb_pci_driver);
1358