/* e1000_82575.c — snapshot at revision f1b4d6214b04caed45f0938a1d769b0d8fe79a3b */
1/*******************************************************************************
2
3  Intel(R) Gigabit Ethernet Linux driver
4  Copyright(c) 2007-2013 Intel Corporation.
5
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  more details.
14
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28/* e1000_82575
29 * e1000_82576
30 */
31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34#include <linux/types.h>
35#include <linux/if_ether.h>
36#include <linux/i2c.h>
37
38#include "e1000_mac.h"
39#include "e1000_82575.h"
40#include "e1000_i210.h"
41
42static s32  igb_get_invariants_82575(struct e1000_hw *);
43static s32  igb_acquire_phy_82575(struct e1000_hw *);
44static void igb_release_phy_82575(struct e1000_hw *);
45static s32  igb_acquire_nvm_82575(struct e1000_hw *);
46static void igb_release_nvm_82575(struct e1000_hw *);
47static s32  igb_check_for_link_82575(struct e1000_hw *);
48static s32  igb_get_cfg_done_82575(struct e1000_hw *);
49static s32  igb_init_hw_82575(struct e1000_hw *);
50static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
51static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
52static s32  igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
53static s32  igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
54static s32  igb_reset_hw_82575(struct e1000_hw *);
55static s32  igb_reset_hw_82580(struct e1000_hw *);
56static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
57static s32  igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
58static s32  igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
59static s32  igb_setup_copper_link_82575(struct e1000_hw *);
60static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
61static s32  igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
62static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
63static s32  igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
64static s32  igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
65						 u16 *);
66static s32  igb_get_phy_id_82575(struct e1000_hw *);
67static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
68static bool igb_sgmii_active_82575(struct e1000_hw *);
69static s32  igb_reset_init_script_82575(struct e1000_hw *);
70static s32  igb_read_mac_addr_82575(struct e1000_hw *);
71static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
72static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
73static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
74static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
75static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
76static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
77static const u16 e1000_82580_rxpbs_table[] =
78	{ 36, 72, 144, 1, 2, 4, 8, 16,
79	  35, 70, 140 };
80#define E1000_82580_RXPBS_TABLE_SIZE \
81	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
82
83/**
84 *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
85 *  @hw: pointer to the HW structure
86 *
87 *  Called to determine if the I2C pins are being used for I2C or as an
88 *  external MDIO interface since the two options are mutually exclusive.
89 **/
90static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
91{
92	u32 reg = 0;
93	bool ext_mdio = false;
94
95	switch (hw->mac.type) {
96	case e1000_82575:
97	case e1000_82576:
98		reg = rd32(E1000_MDIC);
99		ext_mdio = !!(reg & E1000_MDIC_DEST);
100		break;
101	case e1000_82580:
102	case e1000_i350:
103	case e1000_i354:
104	case e1000_i210:
105	case e1000_i211:
106		reg = rd32(E1000_MDICNFG);
107		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
108		break;
109	default:
110		break;
111	}
112	return ext_mdio;
113}
114
115/**
116 *  igb_init_phy_params_82575 - Init PHY func ptrs.
117 *  @hw: pointer to the HW structure
118 **/
119static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
120{
121	struct e1000_phy_info *phy = &hw->phy;
122	s32 ret_val = 0;
123	u32 ctrl_ext;
124
125	if (hw->phy.media_type != e1000_media_type_copper) {
126		phy->type = e1000_phy_none;
127		goto out;
128	}
129
130	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
131	phy->reset_delay_us	= 100;
132
133	ctrl_ext = rd32(E1000_CTRL_EXT);
134
135	if (igb_sgmii_active_82575(hw)) {
136		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
137		ctrl_ext |= E1000_CTRL_I2C_ENA;
138	} else {
139		phy->ops.reset = igb_phy_hw_reset;
140		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
141	}
142
143	wr32(E1000_CTRL_EXT, ctrl_ext);
144	igb_reset_mdicnfg_82580(hw);
145
146	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
147		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
148		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
149	} else {
150		switch (hw->mac.type) {
151		case e1000_82580:
152		case e1000_i350:
153		case e1000_i354:
154			phy->ops.read_reg = igb_read_phy_reg_82580;
155			phy->ops.write_reg = igb_write_phy_reg_82580;
156			break;
157		case e1000_i210:
158		case e1000_i211:
159			phy->ops.read_reg = igb_read_phy_reg_gs40g;
160			phy->ops.write_reg = igb_write_phy_reg_gs40g;
161			break;
162		default:
163			phy->ops.read_reg = igb_read_phy_reg_igp;
164			phy->ops.write_reg = igb_write_phy_reg_igp;
165		}
166	}
167
168	/* set lan id */
169	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
170			E1000_STATUS_FUNC_SHIFT;
171
172	/* Set phy->phy_addr and phy->id. */
173	ret_val = igb_get_phy_id_82575(hw);
174	if (ret_val)
175		return ret_val;
176
177	/* Verify phy id and set remaining function pointers */
178	switch (phy->id) {
179	case M88E1543_E_PHY_ID:
180	case I347AT4_E_PHY_ID:
181	case M88E1112_E_PHY_ID:
182	case M88E1111_I_PHY_ID:
183		phy->type		= e1000_phy_m88;
184		phy->ops.check_polarity	= igb_check_polarity_m88;
185		phy->ops.get_phy_info	= igb_get_phy_info_m88;
186		if (phy->id != M88E1111_I_PHY_ID)
187			phy->ops.get_cable_length =
188					 igb_get_cable_length_m88_gen2;
189		else
190			phy->ops.get_cable_length = igb_get_cable_length_m88;
191		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
192		break;
193	case IGP03E1000_E_PHY_ID:
194		phy->type = e1000_phy_igp_3;
195		phy->ops.get_phy_info = igb_get_phy_info_igp;
196		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
197		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
198		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
199		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
200		break;
201	case I82580_I_PHY_ID:
202	case I350_I_PHY_ID:
203		phy->type = e1000_phy_82580;
204		phy->ops.force_speed_duplex =
205					 igb_phy_force_speed_duplex_82580;
206		phy->ops.get_cable_length = igb_get_cable_length_82580;
207		phy->ops.get_phy_info = igb_get_phy_info_82580;
208		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
209		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
210		break;
211	case I210_I_PHY_ID:
212		phy->type		= e1000_phy_i210;
213		phy->ops.check_polarity	= igb_check_polarity_m88;
214		phy->ops.get_phy_info	= igb_get_phy_info_m88;
215		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
216		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
217		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
218		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
219		break;
220	default:
221		ret_val = -E1000_ERR_PHY;
222		goto out;
223	}
224
225out:
226	return ret_val;
227}
228
229/**
230 *  igb_init_nvm_params_82575 - Init NVM func ptrs.
231 *  @hw: pointer to the HW structure
232 **/
233static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
234{
235	struct e1000_nvm_info *nvm = &hw->nvm;
236	u32 eecd = rd32(E1000_EECD);
237	u16 size;
238
239	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
240		     E1000_EECD_SIZE_EX_SHIFT);
241
242	/* Added to a constant, "size" becomes the left-shift value
243	 * for setting word_size.
244	 */
245	size += NVM_WORD_SIZE_BASE_SHIFT;
246
247	/* Just in case size is out of range, cap it to the largest
248	 * EEPROM size supported
249	 */
250	if (size > 15)
251		size = 15;
252
253	nvm->word_size = 1 << size;
254	nvm->opcode_bits = 8;
255	nvm->delay_usec = 1;
256
257	switch (nvm->override) {
258	case e1000_nvm_override_spi_large:
259		nvm->page_size = 32;
260		nvm->address_bits = 16;
261		break;
262	case e1000_nvm_override_spi_small:
263		nvm->page_size = 8;
264		nvm->address_bits = 8;
265		break;
266	default:
267		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
268		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
269				    16 : 8;
270		break;
271	}
272	if (nvm->word_size == (1 << 15))
273		nvm->page_size = 128;
274
275	nvm->type = e1000_nvm_eeprom_spi;
276
277	/* NVM Function Pointers */
278	nvm->ops.acquire = igb_acquire_nvm_82575;
279	nvm->ops.release = igb_release_nvm_82575;
280	nvm->ops.write = igb_write_nvm_spi;
281	nvm->ops.validate = igb_validate_nvm_checksum;
282	nvm->ops.update = igb_update_nvm_checksum;
283	if (nvm->word_size < (1 << 15))
284		nvm->ops.read = igb_read_nvm_eerd;
285	else
286		nvm->ops.read = igb_read_nvm_spi;
287
288	/* override generic family function pointers for specific descendants */
289	switch (hw->mac.type) {
290	case e1000_82580:
291		nvm->ops.validate = igb_validate_nvm_checksum_82580;
292		nvm->ops.update = igb_update_nvm_checksum_82580;
293		break;
294	case e1000_i354:
295	case e1000_i350:
296		nvm->ops.validate = igb_validate_nvm_checksum_i350;
297		nvm->ops.update = igb_update_nvm_checksum_i350;
298		break;
299	default:
300		break;
301	}
302
303	return 0;
304}
305
306/**
307 *  igb_init_mac_params_82575 - Init MAC func ptrs.
308 *  @hw: pointer to the HW structure
309 **/
310static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
311{
312	struct e1000_mac_info *mac = &hw->mac;
313	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
314
315	/* Set mta register count */
316	mac->mta_reg_count = 128;
317	/* Set rar entry count */
318	switch (mac->type) {
319	case e1000_82576:
320		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
321		break;
322	case e1000_82580:
323		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
324		break;
325	case e1000_i350:
326	case e1000_i354:
327		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
328		break;
329	default:
330		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
331		break;
332	}
333	/* reset */
334	if (mac->type >= e1000_82580)
335		mac->ops.reset_hw = igb_reset_hw_82580;
336	else
337		mac->ops.reset_hw = igb_reset_hw_82575;
338
339	if (mac->type >= e1000_i210) {
340		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
341		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
342
343	} else {
344		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
345		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
346	}
347
348	/* Set if part includes ASF firmware */
349	mac->asf_firmware_present = true;
350	/* Set if manageability features are enabled. */
351	mac->arc_subsystem_valid =
352		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
353			? true : false;
354	/* enable EEE on i350 parts and later parts */
355	if (mac->type >= e1000_i350)
356		dev_spec->eee_disable = false;
357	else
358		dev_spec->eee_disable = true;
359	/* Allow a single clear of the SW semaphore on I210 and newer */
360	if (mac->type >= e1000_i210)
361		dev_spec->clear_semaphore_once = true;
362	/* physical interface link setup */
363	mac->ops.setup_physical_interface =
364		(hw->phy.media_type == e1000_media_type_copper)
365			? igb_setup_copper_link_82575
366			: igb_setup_serdes_link_82575;
367
368	return 0;
369}
370
371/**
372 *  igb_set_sfp_media_type_82575 - derives SFP module media type.
373 *  @hw: pointer to the HW structure
374 *
375 *  The media type is chosen based on SFP module.
376 *  compatibility flags retrieved from SFP ID EEPROM.
377 **/
378static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
379{
380	s32 ret_val = E1000_ERR_CONFIG;
381	u32 ctrl_ext = 0;
382	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
383	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
384	u8 tranceiver_type = 0;
385	s32 timeout = 3;
386
387	/* Turn I2C interface ON and power on sfp cage */
388	ctrl_ext = rd32(E1000_CTRL_EXT);
389	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
390	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
391
392	wrfl();
393
394	/* Read SFP module data */
395	while (timeout) {
396		ret_val = igb_read_sfp_data_byte(hw,
397			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
398			&tranceiver_type);
399		if (ret_val == 0)
400			break;
401		msleep(100);
402		timeout--;
403	}
404	if (ret_val != 0)
405		goto out;
406
407	ret_val = igb_read_sfp_data_byte(hw,
408			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
409			(u8 *)eth_flags);
410	if (ret_val != 0)
411		goto out;
412
413	/* Check if there is some SFP module plugged and powered */
414	if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
415	    (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
416		dev_spec->module_plugged = true;
417		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
418			hw->phy.media_type = e1000_media_type_internal_serdes;
419		} else if (eth_flags->e100_base_fx) {
420			dev_spec->sgmii_active = true;
421			hw->phy.media_type = e1000_media_type_internal_serdes;
422		} else if (eth_flags->e1000_base_t) {
423			dev_spec->sgmii_active = true;
424			hw->phy.media_type = e1000_media_type_copper;
425		} else {
426			hw->phy.media_type = e1000_media_type_unknown;
427			hw_dbg("PHY module has not been recognized\n");
428			goto out;
429		}
430	} else {
431		hw->phy.media_type = e1000_media_type_unknown;
432	}
433	ret_val = 0;
434out:
435	/* Restore I2C interface setting */
436	wr32(E1000_CTRL_EXT, ctrl_ext);
437	return ret_val;
438}
439
/**
 *  igb_get_invariants_82575 - Identify MAC type and initialize parameter sets
 *  @hw: pointer to the HW structure
 *
 *  Maps the PCI device ID to a MAC family, determines the media type from
 *  the CTRL_EXT link-mode field (consulting the SFP module EEPROM for
 *  serdes/SGMII-over-I2C links), then initializes the MAC, NVM, mailbox
 *  and PHY parameter sets in that order.
 *
 *  Returns 0 on success, -E1000_ERR_MAC_INIT for an unknown device ID,
 *  or the first error reported by a parameter-init helper.
 **/
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
	s32 ret_val;
	u32 ctrl_ext = 0;
	u32 link_mode = 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_QUAD_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
	case E1000_DEV_ID_DH89XXCC_SGMII:
	case E1000_DEV_ID_DH89XXCC_SERDES:
	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
	case E1000_DEV_ID_DH89XXCC_SFP:
		mac->type = e1000_82580;
		break;
	case E1000_DEV_ID_I350_COPPER:
	case E1000_DEV_ID_I350_FIBER:
	case E1000_DEV_ID_I350_SERDES:
	case E1000_DEV_ID_I350_SGMII:
		mac->type = e1000_i350;
		break;
	case E1000_DEV_ID_I210_COPPER:
	case E1000_DEV_ID_I210_FIBER:
	case E1000_DEV_ID_I210_SERDES:
	case E1000_DEV_ID_I210_SGMII:
	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
		mac->type = e1000_i210;
		break;
	case E1000_DEV_ID_I211_COPPER:
		mac->type = e1000_i211;
		break;
	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
	case E1000_DEV_ID_I354_SGMII:
	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
		mac->type = e1000_i354;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
		break;	/* unreachable after return; kept for byte-identity */
	}

	/* Set media type */
	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
	 * based on the EEPROM. We cannot rely upon device ID. There
	 * is no distinguishable difference between fiber and internal
	 * SerDes mode on the 82575. There can be an external PHY attached
	 * on the SGMII interface. For this, we'll set sgmii_active to true.
	 */
	hw->phy.media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;
	dev_spec->module_plugged = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
	switch (link_mode) {
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* Get phy control interface type set (MDIO vs. I2C)*/
		if (igb_sgmii_uses_mdio_82575(hw)) {
			hw->phy.media_type = e1000_media_type_copper;
			dev_spec->sgmii_active = true;
			break;
		}
		/* fall through for I2C based SGMII */
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		/* read media type from SFP EEPROM */
		ret_val = igb_set_sfp_media_type_82575(hw);
		if ((ret_val != 0) ||
		    (hw->phy.media_type == e1000_media_type_unknown)) {
			/* If media type was not identified then return media
			 * type defined by the CTRL_EXT settings.
			 */
			hw->phy.media_type = e1000_media_type_internal_serdes;

			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
				hw->phy.media_type = e1000_media_type_copper;
				dev_spec->sgmii_active = true;
			}

			break;
		}

		/* do not change link mode for 100BaseFX */
		if (dev_spec->eth_flags.e100_base_fx)
			break;

		/* change current link mode setting */
		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;

		if (hw->phy.media_type == e1000_media_type_copper)
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
		else
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;

		wr32(E1000_CTRL_EXT, ctrl_ext);

		break;
	default:
		break;
	}

	/* mac initialization and operations */
	ret_val = igb_init_mac_params_82575(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igb_init_nvm_params_82575(hw);
	/* i210/i211 replace the generic NVM setup with their own; for other
	 * parts ret_val from the call above is checked below.
	 */
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		ret_val = igb_init_nvm_params_i210(hw);
		break;
	default:
		break;
	}

	if (ret_val)
		goto out;

	/* if part supports SR-IOV then initialize mailbox parameters */
	switch (mac->type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igb_init_phy_params_82575(hw);

out:
	return ret_val;
}
601
602/**
603 *  igb_acquire_phy_82575 - Acquire rights to access PHY
604 *  @hw: pointer to the HW structure
605 *
606 *  Acquire access rights to the correct PHY.  This is a
607 *  function pointer entry point called by the api module.
608 **/
609static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
610{
611	u16 mask = E1000_SWFW_PHY0_SM;
612
613	if (hw->bus.func == E1000_FUNC_1)
614		mask = E1000_SWFW_PHY1_SM;
615	else if (hw->bus.func == E1000_FUNC_2)
616		mask = E1000_SWFW_PHY2_SM;
617	else if (hw->bus.func == E1000_FUNC_3)
618		mask = E1000_SWFW_PHY3_SM;
619
620	return hw->mac.ops.acquire_swfw_sync(hw, mask);
621}
622
623/**
624 *  igb_release_phy_82575 - Release rights to access PHY
625 *  @hw: pointer to the HW structure
626 *
627 *  A wrapper to release access rights to the correct PHY.  This is a
628 *  function pointer entry point called by the api module.
629 **/
630static void igb_release_phy_82575(struct e1000_hw *hw)
631{
632	u16 mask = E1000_SWFW_PHY0_SM;
633
634	if (hw->bus.func == E1000_FUNC_1)
635		mask = E1000_SWFW_PHY1_SM;
636	else if (hw->bus.func == E1000_FUNC_2)
637		mask = E1000_SWFW_PHY2_SM;
638	else if (hw->bus.func == E1000_FUNC_3)
639		mask = E1000_SWFW_PHY3_SM;
640
641	hw->mac.ops.release_swfw_sync(hw, mask);
642}
643
644/**
645 *  igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
646 *  @hw: pointer to the HW structure
647 *  @offset: register offset to be read
648 *  @data: pointer to the read data
649 *
650 *  Reads the PHY register at offset using the serial gigabit media independent
651 *  interface and stores the retrieved information in data.
652 **/
653static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
654					  u16 *data)
655{
656	s32 ret_val = -E1000_ERR_PARAM;
657
658	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
659		hw_dbg("PHY Address %u is out of range\n", offset);
660		goto out;
661	}
662
663	ret_val = hw->phy.ops.acquire(hw);
664	if (ret_val)
665		goto out;
666
667	ret_val = igb_read_phy_reg_i2c(hw, offset, data);
668
669	hw->phy.ops.release(hw);
670
671out:
672	return ret_val;
673}
674
675/**
676 *  igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
677 *  @hw: pointer to the HW structure
678 *  @offset: register offset to write to
679 *  @data: data to write at register offset
680 *
681 *  Writes the data to PHY register at the offset using the serial gigabit
682 *  media independent interface.
683 **/
684static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
685					   u16 data)
686{
687	s32 ret_val = -E1000_ERR_PARAM;
688
689
690	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
691		hw_dbg("PHY Address %d is out of range\n", offset);
692		goto out;
693	}
694
695	ret_val = hw->phy.ops.acquire(hw);
696	if (ret_val)
697		goto out;
698
699	ret_val = igb_write_phy_reg_i2c(hw, offset, data);
700
701	hw->phy.ops.release(hw);
702
703out:
704	return ret_val;
705}
706
707/**
708 *  igb_get_phy_id_82575 - Retrieve PHY addr and id
709 *  @hw: pointer to the HW structure
710 *
711 *  Retrieves the PHY address and ID for both PHY's which do and do not use
712 *  sgmi interface.
713 **/
714static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
715{
716	struct e1000_phy_info *phy = &hw->phy;
717	s32  ret_val = 0;
718	u16 phy_id;
719	u32 ctrl_ext;
720	u32 mdic;
721
722	/* For SGMII PHYs, we try the list of possible addresses until
723	 * we find one that works.  For non-SGMII PHYs
724	 * (e.g. integrated copper PHYs), an address of 1 should
725	 * work.  The result of this function should mean phy->phy_addr
726	 * and phy->id are set correctly.
727	 */
728	if (!(igb_sgmii_active_82575(hw))) {
729		phy->addr = 1;
730		ret_val = igb_get_phy_id(hw);
731		goto out;
732	}
733
734	if (igb_sgmii_uses_mdio_82575(hw)) {
735		switch (hw->mac.type) {
736		case e1000_82575:
737		case e1000_82576:
738			mdic = rd32(E1000_MDIC);
739			mdic &= E1000_MDIC_PHY_MASK;
740			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
741			break;
742		case e1000_82580:
743		case e1000_i350:
744		case e1000_i354:
745		case e1000_i210:
746		case e1000_i211:
747			mdic = rd32(E1000_MDICNFG);
748			mdic &= E1000_MDICNFG_PHY_MASK;
749			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
750			break;
751		default:
752			ret_val = -E1000_ERR_PHY;
753			goto out;
754			break;
755		}
756		ret_val = igb_get_phy_id(hw);
757		goto out;
758	}
759
760	/* Power on sgmii phy if it is disabled */
761	ctrl_ext = rd32(E1000_CTRL_EXT);
762	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
763	wrfl();
764	msleep(300);
765
766	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
767	 * Therefore, we need to test 1-7
768	 */
769	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
770		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
771		if (ret_val == 0) {
772			hw_dbg("Vendor ID 0x%08X read at address %u\n",
773			       phy_id, phy->addr);
774			/* At the time of this writing, The M88 part is
775			 * the only supported SGMII PHY product.
776			 */
777			if (phy_id == M88_VENDOR)
778				break;
779		} else {
780			hw_dbg("PHY address %u was unreadable\n", phy->addr);
781		}
782	}
783
784	/* A valid PHY type couldn't be found. */
785	if (phy->addr == 8) {
786		phy->addr = 0;
787		ret_val = -E1000_ERR_PHY;
788		goto out;
789	} else {
790		ret_val = igb_get_phy_id(hw);
791	}
792
793	/* restore previous sfp cage power state */
794	wr32(E1000_CTRL_EXT, ctrl_ext);
795
796out:
797	return ret_val;
798}
799
800/**
801 *  igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
802 *  @hw: pointer to the HW structure
803 *
804 *  Resets the PHY using the serial gigabit media independent interface.
805 **/
806static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
807{
808	s32 ret_val;
809
810	/* This isn't a true "hard" reset, but is the only reset
811	 * available to us at this time.
812	 */
813
814	hw_dbg("Soft resetting SGMII attached PHY...\n");
815
816	/* SFP documentation requires the following to configure the SPF module
817	 * to work on SGMII.  No further documentation is given.
818	 */
819	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
820	if (ret_val)
821		goto out;
822
823	ret_val = igb_phy_sw_reset(hw);
824
825out:
826	return ret_val;
827}
828
829/**
830 *  igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
831 *  @hw: pointer to the HW structure
832 *  @active: true to enable LPLU, false to disable
833 *
834 *  Sets the LPLU D0 state according to the active flag.  When
835 *  activating LPLU this function also disables smart speed
836 *  and vice versa.  LPLU will not be activated unless the
837 *  device autonegotiation advertisement meets standards of
838 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
839 *  This is a function pointer entry point only called by
840 *  PHY setup routines.
841 **/
842static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
843{
844	struct e1000_phy_info *phy = &hw->phy;
845	s32 ret_val;
846	u16 data;
847
848	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
849	if (ret_val)
850		goto out;
851
852	if (active) {
853		data |= IGP02E1000_PM_D0_LPLU;
854		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
855						 data);
856		if (ret_val)
857			goto out;
858
859		/* When LPLU is enabled, we should disable SmartSpeed */
860		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
861						&data);
862		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
863		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
864						 data);
865		if (ret_val)
866			goto out;
867	} else {
868		data &= ~IGP02E1000_PM_D0_LPLU;
869		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
870						 data);
871		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
872		 * during Dx states where the power conservation is most
873		 * important.  During driver activity we should enable
874		 * SmartSpeed, so performance is maintained.
875		 */
876		if (phy->smart_speed == e1000_smart_speed_on) {
877			ret_val = phy->ops.read_reg(hw,
878					IGP01E1000_PHY_PORT_CONFIG, &data);
879			if (ret_val)
880				goto out;
881
882			data |= IGP01E1000_PSCFR_SMART_SPEED;
883			ret_val = phy->ops.write_reg(hw,
884					IGP01E1000_PHY_PORT_CONFIG, data);
885			if (ret_val)
886				goto out;
887		} else if (phy->smart_speed == e1000_smart_speed_off) {
888			ret_val = phy->ops.read_reg(hw,
889					IGP01E1000_PHY_PORT_CONFIG, &data);
890			if (ret_val)
891				goto out;
892
893			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
894			ret_val = phy->ops.write_reg(hw,
895					IGP01E1000_PHY_PORT_CONFIG, data);
896			if (ret_val)
897				goto out;
898		}
899	}
900
901out:
902	return ret_val;
903}
904
905/**
906 *  igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
907 *  @hw: pointer to the HW structure
908 *  @active: true to enable LPLU, false to disable
909 *
910 *  Sets the LPLU D0 state according to the active flag.  When
911 *  activating LPLU this function also disables smart speed
912 *  and vice versa.  LPLU will not be activated unless the
913 *  device autonegotiation advertisement meets standards of
914 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
915 *  This is a function pointer entry point only called by
916 *  PHY setup routines.
917 **/
918static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
919{
920	struct e1000_phy_info *phy = &hw->phy;
921	s32 ret_val = 0;
922	u16 data;
923
924	data = rd32(E1000_82580_PHY_POWER_MGMT);
925
926	if (active) {
927		data |= E1000_82580_PM_D0_LPLU;
928
929		/* When LPLU is enabled, we should disable SmartSpeed */
930		data &= ~E1000_82580_PM_SPD;
931	} else {
932		data &= ~E1000_82580_PM_D0_LPLU;
933
934		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
935		 * during Dx states where the power conservation is most
936		 * important.  During driver activity we should enable
937		 * SmartSpeed, so performance is maintained.
938		 */
939		if (phy->smart_speed == e1000_smart_speed_on)
940			data |= E1000_82580_PM_SPD;
941		else if (phy->smart_speed == e1000_smart_speed_off)
942			data &= ~E1000_82580_PM_SPD; }
943
944	wr32(E1000_82580_PHY_POWER_MGMT, data);
945	return ret_val;
946}
947
948/**
949 *  igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
950 *  @hw: pointer to the HW structure
951 *  @active: boolean used to enable/disable lplu
952 *
953 *  Success returns 0, Failure returns 1
954 *
955 *  The low power link up (lplu) state is set to the power management level D3
956 *  and SmartSpeed is disabled when active is true, else clear lplu for D3
957 *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
958 *  is used during Dx states where the power conservation is most important.
959 *  During driver activity, SmartSpeed should be enabled so performance is
960 *  maintained.
961 **/
962static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
963{
964	struct e1000_phy_info *phy = &hw->phy;
965	s32 ret_val = 0;
966	u16 data;
967
968	data = rd32(E1000_82580_PHY_POWER_MGMT);
969
970	if (!active) {
971		data &= ~E1000_82580_PM_D3_LPLU;
972		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
973		 * during Dx states where the power conservation is most
974		 * important.  During driver activity we should enable
975		 * SmartSpeed, so performance is maintained.
976		 */
977		if (phy->smart_speed == e1000_smart_speed_on)
978			data |= E1000_82580_PM_SPD;
979		else if (phy->smart_speed == e1000_smart_speed_off)
980			data &= ~E1000_82580_PM_SPD;
981	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
982		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
983		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
984		data |= E1000_82580_PM_D3_LPLU;
985		/* When LPLU is enabled, we should disable SmartSpeed */
986		data &= ~E1000_82580_PM_SPD;
987	}
988
989	wr32(E1000_82580_PHY_POWER_MGMT, data);
990	return ret_val;
991}
992
993/**
994 *  igb_acquire_nvm_82575 - Request for access to EEPROM
995 *  @hw: pointer to the HW structure
996 *
997 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
998 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
999 *  Return successful if access grant bit set, else clear the request for
1000 *  EEPROM access and return -E1000_ERR_NVM (-1).
1001 **/
1002static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
1003{
1004	s32 ret_val;
1005
1006	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
1007	if (ret_val)
1008		goto out;
1009
1010	ret_val = igb_acquire_nvm(hw);
1011
1012	if (ret_val)
1013		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
1014
1015out:
1016	return ret_val;
1017}
1018
1019/**
1020 *  igb_release_nvm_82575 - Release exclusive access to EEPROM
1021 *  @hw: pointer to the HW structure
1022 *
1023 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
1024 *  then release the semaphores acquired.
1025 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	/* Release in the reverse order of igb_acquire_nvm_82575():
	 * first drop EEPROM access, then the SW/FW semaphore.
	 */
	igb_release_nvm(hw);
	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}
1031
1032/**
1033 *  igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
1034 *  @hw: pointer to the HW structure
1035 *  @mask: specifies which semaphore to acquire
1036 *
1037 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
1038 *  will also specify which port we're acquiring the lock for.
1039 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	/* software ownership bits are the low half; firmware's are the
	 * same bits shifted into the upper half
	 */
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		/* The HW semaphore serializes access to SW_FW_SYNC itself */
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* Claim the resource by setting our SW bit, then drop the HW
	 * semaphore - the SW_FW_SYNC bit is what holds the resource.
	 */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}
1080
1081/**
1082 *  igb_release_swfw_sync_82575 - Release SW/FW semaphore
1083 *  @hw: pointer to the HW structure
1084 *  @mask: specifies which semaphore to acquire
1085 *
1086 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
1087 *  will also specify which port we're releasing the lock for.
1088 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* Spin until the HW semaphore guarding SW_FW_SYNC is ours;
	 * release must not fail, so there is no timeout here.
	 */
	while (igb_get_hw_semaphore(hw) != 0);
	/* Empty */

	/* Clear our ownership bit(s) to release the resource */
	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
1102
1103/**
1104 *  igb_get_cfg_done_82575 - Read config done bit
1105 *  @hw: pointer to the HW structure
1106 *
1107 *  Read the management control register for the config done bit for
1108 *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
1109 *  to read the config done bit, so an error is *ONLY* logged and returns
1110 *  0.  If we were to return with error, EEPROM-less silicon
1111 *  would not be able to be reset or change link.
1112 **/
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	s32 ret_val = 0;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	/* Each PCI function polls its own CFG_DONE bit in EEMNGCTL */
	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL) & mask)
			break;
		msleep(1);
		timeout--;
	}
	/* A timeout is only logged: returning an error would prevent
	 * EEPROM-less silicon from ever resetting or changing link
	 * (see function header).
	 */
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3))
		igb_phy_init_script_igp3(hw);

	return ret_val;
}
1142
1143/**
1144 *  igb_check_for_link_82575 - Check for link
1145 *  @hw: pointer to the HW structure
1146 *
1147 *  If sgmii is enabled, then use the pcs register to determine link, otherwise
1148 *  use the generic interface for determining link.
1149 **/
static s32 igb_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	if (hw->phy.media_type != e1000_media_type_copper) {
		/* Serdes/sgmii: the PCS registers are the source of truth.
		 * Note this ret_val is deliberately overwritten below by
		 * the flow-control configuration result.
		 */
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
		                                             &duplex);
		/* Use this flag to determine if link needs to be checked or
		 * not.  If  we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/* Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}
1178
1179/**
1180 *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1181 *  @hw: pointer to the HW structure
1182 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;


	/* Only fiber serdes and sgmii links have a PCS/laser to power up */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	msleep(1);
}
1206
1207/**
1208 *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1209 *  @hw: pointer to the HW structure
1210 *  @speed: stores the current speed
1211 *  @duplex: stores the current duplex
1212 *
1213 *  Using the physical coding sub-layer (PCS), retrieve the current speed and
1214 *  duplex, then store the values in the pointers provided.
1215 **/
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
						u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs, status;

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = false;
	*speed = 0;
	*duplex = 0;

	/* Read the PCS Status register for link state. For non-copper mode,
	 * the status register is not accurate. The PCS status register is
	 * used instead.
	 */
	pcs = rd32(E1000_PCS_LSTAT);

	/* The link up bit determines when link is up on autoneg. The sync ok
	 * gets set once both sides sync up and agree upon link. Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = true;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000)
			*speed = SPEED_1000;
		else if (pcs & E1000_PCS_LSTS_SPEED_100)
			*speed = SPEED_100;
		else
			*speed = SPEED_10;

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
			*duplex = FULL_DUPLEX;
		else
			*duplex = HALF_DUPLEX;

		/* Check if it is an I354 2.5Gb backplane connection; the
		 * 2.5G SKU bits in STATUS override the PCS-derived speed.
		 */
		if (mac->type == e1000_i354) {
			status = rd32(E1000_STATUS);
			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
				*speed = SPEED_2500;
				*duplex = FULL_DUPLEX;
				hw_dbg("2500 Mbs, ");
				hw_dbg("Full Duplex\n");
			}
		}

	}

	return 0;
}
1270
1271/**
1272 *  igb_shutdown_serdes_link_82575 - Remove link during power down
1273 *  @hw: pointer to the HW structure
1274 *
1275 *  In the case of fiber serdes, shut down optics and PCS on driver unload
1276 *  when management pass thru is not enabled.
1277 **/
1278void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1279{
1280	u32 reg;
1281
1282	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
1283	    igb_sgmii_active_82575(hw))
1284		return;
1285
1286	if (!igb_enable_mng_pass_thru(hw)) {
1287		/* Disable PCS to turn off link */
1288		reg = rd32(E1000_PCS_CFG0);
1289		reg &= ~E1000_PCS_CFG_PCS_EN;
1290		wr32(E1000_PCS_CFG0, reg);
1291
1292		/* shutdown the laser */
1293		reg = rd32(E1000_CTRL_EXT);
1294		reg |= E1000_CTRL_EXT_SDP3_DATA;
1295		wr32(E1000_CTRL_EXT, reg);
1296
1297		/* flush the write to verify completion */
1298		wrfl();
1299		msleep(1);
1300	}
1301}
1302
1303/**
1304 *  igb_reset_hw_82575 - Reset hardware
1305 *  @hw: pointer to the HW structure
1306 *
1307 *  This resets the hardware into a known state.  This is a
1308 *  function pointer entry point called by the api module.
1309 **/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igb_set_pcie_completion_timeout(hw);
	if (ret_val) {
		hw_dbg("PCI-E Set completion timeout has failed.\n");
	}

	/* Quiesce the device: mask interrupts, stop RX/TX */
	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);

	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	/* allow in-flight DMA to drain before resetting */
	msleep(10);

	ctrl = rd32(E1000_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}
1364
1365/**
1366 *  igb_init_hw_82575 - Initialize hardware
1367 *  @hw: pointer to the HW structure
1368 *
1369 *  This inits the hardware readying it for operation.
1370 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	/* i350/i354 use a dedicated VFTA-clearing routine */
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);
	return ret_val;
}
1415
1416/**
1417 *  igb_setup_copper_link_82575 - Configure copper link settings
1418 *  @hw: pointer to the HW structure
1419 *
1420 *  Configures the link for auto-neg or forced speed and duplex.  Then we check
1421 *  for link, once link is established calls to configure collision distance
1422 *  and flow control are called.
1423 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32  ret_val;
	u32 phpm_reg;

	/* Set link up and let speed/duplex come from the PHY (no forcing) */
	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit on supported devices */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
		break;
	default:
		break;
	}

	/* sgmii copper still needs the serdes/PCS side configured */
	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* Dispatch to the PHY-specific copper link setup routine */
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case M88E1543_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = igb_copper_link_setup_m88_gen2(hw);
			break;
		default:
			ret_val = igb_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}
1496
1497/**
1498 *  igb_setup_serdes_link_82575 - Setup link for serdes
1499 *  @hw: pointer to the HW structure
1500 *
1501 *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
1502 *  used on copper connections where the serialized gigabit media independent
1503 *  interface (sgmii), or serdes fiber is being used.  Configures the link
1504 *  for auto-negotiation or forces speed/duplex.
1505 **/
1506static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1507{
1508	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1509	bool pcs_autoneg;
1510	s32 ret_val = E1000_SUCCESS;
1511	u16 data;
1512
1513	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1514	    !igb_sgmii_active_82575(hw))
1515		return ret_val;
1516
1517
1518	/* On the 82575, SerDes loopback mode persists until it is
1519	 * explicitly turned off or a power cycle is performed.  A read to
1520	 * the register does not indicate its status.  Therefore, we ensure
1521	 * loopback mode is disabled during initialization.
1522	 */
1523	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1524
1525	/* power on the sfp cage if present and turn on I2C */
1526	ctrl_ext = rd32(E1000_CTRL_EXT);
1527	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1528	ctrl_ext |= E1000_CTRL_I2C_ENA;
1529	wr32(E1000_CTRL_EXT, ctrl_ext);
1530
1531	ctrl_reg = rd32(E1000_CTRL);
1532	ctrl_reg |= E1000_CTRL_SLU;
1533
1534	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1535		/* set both sw defined pins */
1536		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1537
1538		/* Set switch control to serdes energy detect */
1539		reg = rd32(E1000_CONNSW);
1540		reg |= E1000_CONNSW_ENRGSRC;
1541		wr32(E1000_CONNSW, reg);
1542	}
1543
1544	reg = rd32(E1000_PCS_LCTL);
1545
1546	/* default pcs_autoneg to the same setting as mac autoneg */
1547	pcs_autoneg = hw->mac.autoneg;
1548
1549	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1550	case E1000_CTRL_EXT_LINK_MODE_SGMII:
1551		/* sgmii mode lets the phy handle forcing speed/duplex */
1552		pcs_autoneg = true;
1553		/* autoneg time out should be disabled for SGMII mode */
1554		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1555		break;
1556	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1557		/* disable PCS autoneg and support parallel detect only */
1558		pcs_autoneg = false;
1559	default:
1560		if (hw->mac.type == e1000_82575 ||
1561		    hw->mac.type == e1000_82576) {
1562			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1563			if (ret_val) {
1564				printk(KERN_DEBUG "NVM Read Error\n\n");
1565				return ret_val;
1566			}
1567
1568			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1569				pcs_autoneg = false;
1570		}
1571
1572		/* non-SGMII modes only supports a speed of 1000/Full for the
1573		 * link so it is best to just force the MAC and let the pcs
1574		 * link either autoneg or be forced to 1000/Full
1575		 */
1576		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1577		            E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1578
1579		/* set speed of 1000/Full if speed/duplex is forced */
1580		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1581		break;
1582	}
1583
1584	wr32(E1000_CTRL, ctrl_reg);
1585
1586	/* New SerDes mode allows for forcing speed or autonegotiating speed
1587	 * at 1gb. Autoneg should be default set by most drivers. This is the
1588	 * mode that will be compatible with older link partners and switches.
1589	 * However, both are supported by the hardware and some drivers/tools.
1590	 */
1591	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1592		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1593
1594	if (pcs_autoneg) {
1595		/* Set PCS register for autoneg */
1596		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1597		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1598
1599		/* Disable force flow control for autoneg */
1600		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1601
1602		/* Configure flow control advertisement for autoneg */
1603		anadv_reg = rd32(E1000_PCS_ANADV);
1604		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1605		switch (hw->fc.requested_mode) {
1606		case e1000_fc_full:
1607		case e1000_fc_rx_pause:
1608			anadv_reg |= E1000_TXCW_ASM_DIR;
1609			anadv_reg |= E1000_TXCW_PAUSE;
1610			break;
1611		case e1000_fc_tx_pause:
1612			anadv_reg |= E1000_TXCW_ASM_DIR;
1613			break;
1614		default:
1615			break;
1616		}
1617		wr32(E1000_PCS_ANADV, anadv_reg);
1618
1619		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1620	} else {
1621		/* Set PCS register for forced link */
1622		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
1623
1624		/* Force flow control for forced link */
1625		reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1626
1627		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1628	}
1629
1630	wr32(E1000_PCS_LCTL, reg);
1631
1632	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
1633		igb_force_mac_fc(hw);
1634
1635	return ret_val;
1636}
1637
1638/**
1639 *  igb_sgmii_active_82575 - Return sgmii state
1640 *  @hw: pointer to the HW structure
1641 *
1642 *  82575 silicon has a serialized gigabit media independent interface (sgmii)
1643 *  which can be enabled for use in the embedded applications.  Simply
1644 *  return the current state of the sgmii interface.
1645 **/
1646static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1647{
1648	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1649	return dev_spec->sgmii_active;
1650}
1651
1652/**
1653 *  igb_reset_init_script_82575 - Inits HW defaults after reset
1654 *  @hw: pointer to the HW structure
1655 *
1656 *  Inits recommended HW defaults after a reset when there is no EEPROM
1657 *  detected. This is only for the 82575.
1658 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	/* These register/value pairs are Intel-recommended post-reset
	 * defaults for EEPROM-less 82575 parts; later MACs do not need them.
	 */
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}
1687
1688/**
1689 *  igb_read_mac_addr_82575 - Read device MAC address
1690 *  @hw: pointer to the HW structure
1691 **/
1692static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1693{
1694	s32 ret_val = 0;
1695
1696	/* If there's an alternate MAC address place it in RAR0
1697	 * so that it will override the Si installed default perm
1698	 * address.
1699	 */
1700	ret_val = igb_check_alt_mac_addr(hw);
1701	if (ret_val)
1702		goto out;
1703
1704	ret_val = igb_read_mac_addr(hw);
1705
1706out:
1707	return ret_val;
1708}
1709
1710/**
1711 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1712 * @hw: pointer to the HW structure
1713 *
1714 * In the case of a PHY power down to save power, or to turn off link during a
1715 * driver unload, or wake on lan is not enabled, remove the link.
1716 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* Power down only when neither manageability pass thru nor a
	 * PHY reset block requires the link to stay up.
	 */
	if (!igb_enable_mng_pass_thru(hw) && !igb_check_reset_block(hw))
		igb_power_down_phy_copper(hw);
}
1723
1724/**
1725 *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
1726 *  @hw: pointer to the HW structure
1727 *
1728 *  Clears the hardware counters by reading the counter registers.
1729 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	/* Counters are clear-on-read; each rd32 below resets one of them */
	igb_clear_hw_cntrs_base(hw);

	/* packet-size histogram counters (RX then TX) */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* error counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* management packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	/* interrupt-cause counters */
	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	/* host/BMC traffic counters */
	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}
1786
1787/**
1788 *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1789 *  @hw: pointer to the HW structure
1790 *
 *  After rx enable if manageability is enabled then there is likely some
1792 *  bad data at the start of the fifo and possibly in the DMA fifo.  This
1793 *  function clears the fifos and flushes any packets that came in as rx was
1794 *  being enabled.
1795 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* Workaround only applies to 82575 with manageability RX enabled */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msleep(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	msleep(2);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	/* restore the saved max-frame-length and filter settings */
	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}
1859
1860/**
1861 *  igb_set_pcie_completion_timeout - set pci-e completion timeout
1862 *  @hw: pointer to the HW structure
1863 *
1864 *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1865 *  however the hardware default for these parts is 500us to 1ms which is less
1866 *  than the 10ms recommended by the pci-e spec.  To address this we need to
1867 *  increase the value to either 10ms to 200ms for capability version 1 config,
1868 *  or 16ms to 55ms for version 2.
1869 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	                                &pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	                                 &pcie_devctl2);
out:
	/* disable completion timeout resend - done on every path,
	 * including early exits, since gcr is always written below
	 */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}
1908
1909/**
1910 *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1911 *  @hw: pointer to the hardware struct
1912 *  @enable: state to enter, either enabled or disabled
1913 *  @pf: Physical Function pool - do not set anti-spoofing for the PF
1914 *
1915 *  enables/disables L2 switch anti-spoofing functionality.
1916 **/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
	u32 reg_val, reg_offset;

	/* The anti-spoof bits live in different registers per MAC type */
	switch (hw->mac.type) {
	case e1000_82576:
		reg_offset = E1000_DTXSWC;
		break;
	case e1000_i350:
	case e1000_i354:
		reg_offset = E1000_TXSWC;
		break;
	default:
		return;
	}

	reg_val = rd32(reg_offset);
	if (enable) {
		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
			     E1000_DTXSWC_VLAN_SPOOF_MASK);
		/* The PF can spoof - it has to in order to
		 * support emulation mode NICs
		 */
		/* XOR clears the PF's MAC and VLAN bits, which the OR
		 * above has just set, leaving only the VF bits enabled.
		 */
		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
	} else {
		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
			     E1000_DTXSWC_VLAN_SPOOF_MASK);
	}
	wr32(reg_offset, reg_val);
}
1947
1948/**
1949 *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
1950 *  @hw: pointer to the hardware struct
1951 *  @enable: state to enter, either enabled or disabled
1952 *
1953 *  enables/disables L2 switch loopback functionality.
1954 **/
1955void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1956{
1957	u32 dtxswc;
1958
1959	switch (hw->mac.type) {
1960	case e1000_82576:
1961		dtxswc = rd32(E1000_DTXSWC);
1962		if (enable)
1963			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1964		else
1965			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1966		wr32(E1000_DTXSWC, dtxswc);
1967		break;
1968	case e1000_i354:
1969	case e1000_i350:
1970		dtxswc = rd32(E1000_TXSWC);
1971		if (enable)
1972			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1973		else
1974			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1975		wr32(E1000_TXSWC, dtxswc);
1976		break;
1977	default:
1978		/* Currently no other hardware supports loopback */
1979		break;
1980	}
1981
1982}
1983
1984/**
1985 *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
1986 *  @hw: pointer to the hardware struct
1987 *  @enable: state to enter, either enabled or disabled
1988 *
1989 *  enables/disables replication of packets across multiple pools.
1990 **/
1991void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1992{
1993	u32 vt_ctl = rd32(E1000_VT_CTL);
1994
1995	if (enable)
1996		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1997	else
1998		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1999
2000	wr32(E1000_VT_CTL, vt_ctl);
2001}
2002
2003/**
2004 *  igb_read_phy_reg_82580 - Read 82580 MDI control register
2005 *  @hw: pointer to the HW structure
2006 *  @offset: register offset to be read
2007 *  @data: pointer to the read data
2008 *
2009 *  Reads the MDI control register in the PHY at offset and stores the
2010 *  information read to data.
2011 **/
2012static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
2013{
2014	s32 ret_val;
2015
2016	ret_val = hw->phy.ops.acquire(hw);
2017	if (ret_val)
2018		goto out;
2019
2020	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
2021
2022	hw->phy.ops.release(hw);
2023
2024out:
2025	return ret_val;
2026}
2027
2028/**
2029 *  igb_write_phy_reg_82580 - Write 82580 MDI control register
2030 *  @hw: pointer to the HW structure
2031 *  @offset: register offset to write to
2032 *  @data: data to write to register at offset
2033 *
2034 *  Writes data to MDI control register in the PHY at offset.
2035 **/
2036static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
2037{
2038	s32 ret_val;
2039
2040
2041	ret_val = hw->phy.ops.acquire(hw);
2042	if (ret_val)
2043		goto out;
2044
2045	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
2046
2047	hw->phy.ops.release(hw);
2048
2049out:
2050	return ret_val;
2051}
2052
2053/**
2054 *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
2055 *  @hw: pointer to the HW structure
2056 *
 *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
2058 *  the values found in the EEPROM.  This addresses an issue in which these
2059 *  bits are not restored from EEPROM after reset.
2060 **/
2061static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
2062{
2063	s32 ret_val = 0;
2064	u32 mdicnfg;
2065	u16 nvm_data = 0;
2066
2067	if (hw->mac.type != e1000_82580)
2068		goto out;
2069	if (!igb_sgmii_active_82575(hw))
2070		goto out;
2071
2072	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2073				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2074				   &nvm_data);
2075	if (ret_val) {
2076		hw_dbg("NVM Read Error\n");
2077		goto out;
2078	}
2079
2080	mdicnfg = rd32(E1000_MDICNFG);
2081	if (nvm_data & NVM_WORD24_EXT_MDIO)
2082		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
2083	if (nvm_data & NVM_WORD24_COM_MDIO)
2084		mdicnfg |= E1000_MDICNFG_COM_MDIO;
2085	wr32(E1000_MDICNFG, mdicnfg);
2086out:
2087	return ret_val;
2088}
2089
2090/**
2091 *  igb_reset_hw_82580 - Reset hardware
2092 *  @hw: pointer to the HW structure
2093 *
2094 *  This resets function or entire device (all ports, etc.)
2095 *  to a known state.
2096 **/
2097static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2098{
2099	s32 ret_val = 0;
2100	/* BH SW mailbox bit in SW_FW_SYNC */
2101	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
2102	u32 ctrl;
2103	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
2104
2105	hw->dev_spec._82575.global_device_reset = false;
2106
2107	/* due to hw errata, global device reset doesn't always
2108	 * work on 82580
2109	 */
2110	if (hw->mac.type == e1000_82580)
2111		global_device_reset = false;
2112
2113	/* Get current control state. */
2114	ctrl = rd32(E1000_CTRL);
2115
2116	/* Prevent the PCI-E bus from sticking if there is no TLP connection
2117	 * on the last TLP read/write transaction when MAC is reset.
2118	 */
2119	ret_val = igb_disable_pcie_master(hw);
2120	if (ret_val)
2121		hw_dbg("PCI-E Master disable polling has failed.\n");
2122
2123	hw_dbg("Masking off all interrupts\n");
2124	wr32(E1000_IMC, 0xffffffff);
2125	wr32(E1000_RCTL, 0);
2126	wr32(E1000_TCTL, E1000_TCTL_PSP);
2127	wrfl();
2128
2129	msleep(10);
2130
2131	/* Determine whether or not a global dev reset is requested */
2132	if (global_device_reset &&
2133		hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
2134			global_device_reset = false;
2135
2136	if (global_device_reset &&
2137		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
2138		ctrl |= E1000_CTRL_DEV_RST;
2139	else
2140		ctrl |= E1000_CTRL_RST;
2141
2142	wr32(E1000_CTRL, ctrl);
2143	wrfl();
2144
2145	/* Add delay to insure DEV_RST has time to complete */
2146	if (global_device_reset)
2147		msleep(5);
2148
2149	ret_val = igb_get_auto_rd_done(hw);
2150	if (ret_val) {
2151		/* When auto config read does not complete, do not
2152		 * return with an error. This can happen in situations
2153		 * where there is no eeprom and prevents getting link.
2154		 */
2155		hw_dbg("Auto Read Done did not complete\n");
2156	}
2157
2158	/* clear global device reset status bit */
2159	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
2160
2161	/* Clear any pending interrupt events. */
2162	wr32(E1000_IMC, 0xffffffff);
2163	rd32(E1000_ICR);
2164
2165	ret_val = igb_reset_mdicnfg_82580(hw);
2166	if (ret_val)
2167		hw_dbg("Could not reset MDICNFG based on EEPROM\n");
2168
2169	/* Install any alternate MAC address into RAR0 */
2170	ret_val = igb_check_alt_mac_addr(hw);
2171
2172	/* Release semaphore */
2173	if (global_device_reset)
2174		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
2175
2176	return ret_val;
2177}
2178
2179/**
2180 *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
2181 *  @data: data received by reading RXPBS register
2182 *
2183 *  The 82580 uses a table based approach for packet buffer allocation sizes.
2184 *  This function converts the retrieved value into the correct table value
2185 *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2186 *  0x0 36  72 144   1   2   4   8  16
2187 *  0x8 35  70 140 rsv rsv rsv rsv rsv
2188 */
2189u16 igb_rxpbs_adjust_82580(u32 data)
2190{
2191	u16 ret_val = 0;
2192
2193	if (data < E1000_82580_RXPBS_TABLE_SIZE)
2194		ret_val = e1000_82580_rxpbs_table[data];
2195
2196	return ret_val;
2197}
2198
2199/**
2200 *  igb_validate_nvm_checksum_with_offset - Validate EEPROM
2201 *  checksum
2202 *  @hw: pointer to the HW structure
2203 *  @offset: offset in words of the checksum protected region
2204 *
2205 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2206 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
2207 **/
2208static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2209						 u16 offset)
2210{
2211	s32 ret_val = 0;
2212	u16 checksum = 0;
2213	u16 i, nvm_data;
2214
2215	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2216		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2217		if (ret_val) {
2218			hw_dbg("NVM Read Error\n");
2219			goto out;
2220		}
2221		checksum += nvm_data;
2222	}
2223
2224	if (checksum != (u16) NVM_SUM) {
2225		hw_dbg("NVM Checksum Invalid\n");
2226		ret_val = -E1000_ERR_NVM;
2227		goto out;
2228	}
2229
2230out:
2231	return ret_val;
2232}
2233
2234/**
2235 *  igb_update_nvm_checksum_with_offset - Update EEPROM
2236 *  checksum
2237 *  @hw: pointer to the HW structure
2238 *  @offset: offset in words of the checksum protected region
2239 *
2240 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
2241 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
2242 *  value to the EEPROM.
2243 **/
2244static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2245{
2246	s32 ret_val;
2247	u16 checksum = 0;
2248	u16 i, nvm_data;
2249
2250	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2251		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2252		if (ret_val) {
2253			hw_dbg("NVM Read Error while updating checksum.\n");
2254			goto out;
2255		}
2256		checksum += nvm_data;
2257	}
2258	checksum = (u16) NVM_SUM - checksum;
2259	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2260				&checksum);
2261	if (ret_val)
2262		hw_dbg("NVM Write Error while updating checksum.\n");
2263
2264out:
2265	return ret_val;
2266}
2267
2268/**
2269 *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
2270 *  @hw: pointer to the HW structure
2271 *
2272 *  Calculates the EEPROM section checksum by reading/adding each word of
2273 *  the EEPROM and then verifies that the sum of the EEPROM is
2274 *  equal to 0xBABA.
2275 **/
2276static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2277{
2278	s32 ret_val = 0;
2279	u16 eeprom_regions_count = 1;
2280	u16 j, nvm_data;
2281	u16 nvm_offset;
2282
2283	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2284	if (ret_val) {
2285		hw_dbg("NVM Read Error\n");
2286		goto out;
2287	}
2288
2289	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2290		/* if checksums compatibility bit is set validate checksums
2291		 * for all 4 ports.
2292		 */
2293		eeprom_regions_count = 4;
2294	}
2295
2296	for (j = 0; j < eeprom_regions_count; j++) {
2297		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2298		ret_val = igb_validate_nvm_checksum_with_offset(hw,
2299								nvm_offset);
2300		if (ret_val != 0)
2301			goto out;
2302	}
2303
2304out:
2305	return ret_val;
2306}
2307
2308/**
2309 *  igb_update_nvm_checksum_82580 - Update EEPROM checksum
2310 *  @hw: pointer to the HW structure
2311 *
2312 *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2313 *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2314 *  checksum and writes the value to the EEPROM.
2315 **/
2316static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2317{
2318	s32 ret_val;
2319	u16 j, nvm_data;
2320	u16 nvm_offset;
2321
2322	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2323	if (ret_val) {
2324		hw_dbg("NVM Read Error while updating checksum"
2325			" compatibility bit.\n");
2326		goto out;
2327	}
2328
2329	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2330		/* set compatibility bit to validate checksums appropriately */
2331		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2332		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2333					&nvm_data);
2334		if (ret_val) {
2335			hw_dbg("NVM Write Error while updating checksum"
2336				" compatibility bit.\n");
2337			goto out;
2338		}
2339	}
2340
2341	for (j = 0; j < 4; j++) {
2342		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2343		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2344		if (ret_val)
2345			goto out;
2346	}
2347
2348out:
2349	return ret_val;
2350}
2351
2352/**
2353 *  igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
2354 *  @hw: pointer to the HW structure
2355 *
2356 *  Calculates the EEPROM section checksum by reading/adding each word of
2357 *  the EEPROM and then verifies that the sum of the EEPROM is
2358 *  equal to 0xBABA.
2359 **/
2360static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2361{
2362	s32 ret_val = 0;
2363	u16 j;
2364	u16 nvm_offset;
2365
2366	for (j = 0; j < 4; j++) {
2367		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2368		ret_val = igb_validate_nvm_checksum_with_offset(hw,
2369								nvm_offset);
2370		if (ret_val != 0)
2371			goto out;
2372	}
2373
2374out:
2375	return ret_val;
2376}
2377
2378/**
2379 *  igb_update_nvm_checksum_i350 - Update EEPROM checksum
2380 *  @hw: pointer to the HW structure
2381 *
2382 *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2383 *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2384 *  checksum and writes the value to the EEPROM.
2385 **/
2386static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2387{
2388	s32 ret_val = 0;
2389	u16 j;
2390	u16 nvm_offset;
2391
2392	for (j = 0; j < 4; j++) {
2393		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2394		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2395		if (ret_val != 0)
2396			goto out;
2397	}
2398
2399out:
2400	return ret_val;
2401}
2402
2403/**
2404 *  __igb_access_emi_reg - Read/write EMI register
2405 *  @hw: pointer to the HW structure
2406 *  @addr: EMI address to program
2407 *  @data: pointer to value to read/write from/to the EMI address
2408 *  @read: boolean flag to indicate read or write
2409 **/
2410static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2411				  u16 *data, bool read)
2412{
2413	s32 ret_val = E1000_SUCCESS;
2414
2415	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2416	if (ret_val)
2417		return ret_val;
2418
2419	if (read)
2420		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
2421	else
2422		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
2423
2424	return ret_val;
2425}
2426
2427/**
2428 *  igb_read_emi_reg - Read Extended Management Interface register
2429 *  @hw: pointer to the HW structure
2430 *  @addr: EMI address to program
2431 *  @data: value to be read from the EMI address
2432 **/
2433s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2434{
2435	return __igb_access_emi_reg(hw, addr, data, true);
2436}
2437
2438/**
2439 *  igb_set_eee_i350 - Enable/disable EEE support
2440 *  @hw: pointer to the HW structure
2441 *
2442 *  Enable/disable EEE based on setting in dev_spec structure.
2443 *
2444 **/
2445s32 igb_set_eee_i350(struct e1000_hw *hw)
2446{
2447	s32 ret_val = 0;
2448	u32 ipcnfg, eeer;
2449
2450	if ((hw->mac.type < e1000_i350) ||
2451	    (hw->phy.media_type != e1000_media_type_copper))
2452		goto out;
2453	ipcnfg = rd32(E1000_IPCNFG);
2454	eeer = rd32(E1000_EEER);
2455
2456	/* enable or disable per user setting */
2457	if (!(hw->dev_spec._82575.eee_disable)) {
2458		u32 eee_su = rd32(E1000_EEE_SU);
2459
2460		ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2461		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2462			E1000_EEER_LPI_FC);
2463
2464		/* This bit should not be set in normal operation. */
2465		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2466			hw_dbg("LPI Clock Stop Bit should not be set!\n");
2467
2468	} else {
2469		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2470			E1000_IPCNFG_EEE_100M_AN);
2471		eeer &= ~(E1000_EEER_TX_LPI_EN |
2472			E1000_EEER_RX_LPI_EN |
2473			E1000_EEER_LPI_FC);
2474	}
2475	wr32(E1000_IPCNFG, ipcnfg);
2476	wr32(E1000_EEER, eeer);
2477	rd32(E1000_IPCNFG);
2478	rd32(E1000_EEER);
2479out:
2480
2481	return ret_val;
2482}
2483
2484/**
2485 *  igb_set_eee_i354 - Enable/disable EEE support
2486 *  @hw: pointer to the HW structure
2487 *
2488 *  Enable/disable EEE legacy mode based on setting in dev_spec structure.
2489 *
2490 **/
2491s32 igb_set_eee_i354(struct e1000_hw *hw)
2492{
2493	struct e1000_phy_info *phy = &hw->phy;
2494	s32 ret_val = 0;
2495	u16 phy_data;
2496
2497	if ((hw->phy.media_type != e1000_media_type_copper) ||
2498	    (phy->id != M88E1543_E_PHY_ID))
2499		goto out;
2500
2501	if (!hw->dev_spec._82575.eee_disable) {
2502		/* Switch to PHY page 18. */
2503		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
2504		if (ret_val)
2505			goto out;
2506
2507		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2508					    &phy_data);
2509		if (ret_val)
2510			goto out;
2511
2512		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
2513		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2514					     phy_data);
2515		if (ret_val)
2516			goto out;
2517
2518		/* Return the PHY to page 0. */
2519		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
2520		if (ret_val)
2521			goto out;
2522
2523		/* Turn on EEE advertisement. */
2524		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2525					     E1000_EEE_ADV_DEV_I354,
2526					     &phy_data);
2527		if (ret_val)
2528			goto out;
2529
2530		phy_data |= E1000_EEE_ADV_100_SUPPORTED |
2531			    E1000_EEE_ADV_1000_SUPPORTED;
2532		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2533						E1000_EEE_ADV_DEV_I354,
2534						phy_data);
2535	} else {
2536		/* Turn off EEE advertisement. */
2537		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2538					     E1000_EEE_ADV_DEV_I354,
2539					     &phy_data);
2540		if (ret_val)
2541			goto out;
2542
2543		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
2544			      E1000_EEE_ADV_1000_SUPPORTED);
2545		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2546					      E1000_EEE_ADV_DEV_I354,
2547					      phy_data);
2548	}
2549
2550out:
2551	return ret_val;
2552}
2553
2554/**
2555 *  igb_get_eee_status_i354 - Get EEE status
2556 *  @hw: pointer to the HW structure
2557 *  @status: EEE status
2558 *
2559 *  Get EEE status by guessing based on whether Tx or Rx LPI indications have
2560 *  been received.
2561 **/
2562s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2563{
2564	struct e1000_phy_info *phy = &hw->phy;
2565	s32 ret_val = 0;
2566	u16 phy_data;
2567
2568	/* Check if EEE is supported on this device. */
2569	if ((hw->phy.media_type != e1000_media_type_copper) ||
2570	    (phy->id != M88E1543_E_PHY_ID))
2571		goto out;
2572
2573	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
2574				     E1000_PCS_STATUS_DEV_I354,
2575				     &phy_data);
2576	if (ret_val)
2577		goto out;
2578
2579	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
2580			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
2581
2582out:
2583	return ret_val;
2584}
2585
/* EMC register addresses holding each thermal sensor's temperature data,
 * indexed by the sensor index encoded in the NVM ETS words.
 */
static const u8 e1000_emc_temp_data[4] = {
	E1000_EMC_INTERNAL_DATA,
	E1000_EMC_DIODE1_DATA,
	E1000_EMC_DIODE2_DATA,
	E1000_EMC_DIODE3_DATA
};
/* EMC register addresses holding each sensor's thermal limit, indexed the
 * same way as e1000_emc_temp_data above.
 */
static const u8 e1000_emc_therm_limit[4] = {
	E1000_EMC_INTERNAL_THERM_LIMIT,
	E1000_EMC_DIODE1_THERM_LIMIT,
	E1000_EMC_DIODE2_THERM_LIMIT,
	E1000_EMC_DIODE3_THERM_LIMIT
};
2598
2599/**
2600 *  igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
2601 *  @hw: pointer to hardware structure
2602 *
2603 *  Updates the temperatures in mac.thermal_sensor_data
2604 **/
2605s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2606{
2607	s32 status = E1000_SUCCESS;
2608	u16 ets_offset;
2609	u16 ets_cfg;
2610	u16 ets_sensor;
2611	u8  num_sensors;
2612	u8  sensor_index;
2613	u8  sensor_location;
2614	u8  i;
2615	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2616
2617	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2618		return E1000_NOT_IMPLEMENTED;
2619
2620	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
2621
2622	/* Return the internal sensor only if ETS is unsupported */
2623	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2624	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2625		return status;
2626
2627	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2628	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2629	    != NVM_ETS_TYPE_EMC)
2630		return E1000_NOT_IMPLEMENTED;
2631
2632	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2633	if (num_sensors > E1000_MAX_SENSORS)
2634		num_sensors = E1000_MAX_SENSORS;
2635
2636	for (i = 1; i < num_sensors; i++) {
2637		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2638		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2639				NVM_ETS_DATA_INDEX_SHIFT);
2640		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2641				   NVM_ETS_DATA_LOC_SHIFT);
2642
2643		if (sensor_location != 0)
2644			hw->phy.ops.read_i2c_byte(hw,
2645					e1000_emc_temp_data[sensor_index],
2646					E1000_I2C_THERMAL_SENSOR_ADDR,
2647					&data->sensor[i].temp);
2648	}
2649	return status;
2650}
2651
2652/**
2653 *  igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
2654 *  @hw: pointer to hardware structure
2655 *
2656 *  Sets the thermal sensor thresholds according to the NVM map
2657 *  and save off the threshold and location values into mac.thermal_sensor_data
2658 **/
2659s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2660{
2661	s32 status = E1000_SUCCESS;
2662	u16 ets_offset;
2663	u16 ets_cfg;
2664	u16 ets_sensor;
2665	u8  low_thresh_delta;
2666	u8  num_sensors;
2667	u8  sensor_index;
2668	u8  sensor_location;
2669	u8  therm_limit;
2670	u8  i;
2671	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2672
2673	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2674		return E1000_NOT_IMPLEMENTED;
2675
2676	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
2677
2678	data->sensor[0].location = 0x1;
2679	data->sensor[0].caution_thresh =
2680		(rd32(E1000_THHIGHTC) & 0xFF);
2681	data->sensor[0].max_op_thresh =
2682		(rd32(E1000_THLOWTC) & 0xFF);
2683
2684	/* Return the internal sensor only if ETS is unsupported */
2685	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2686	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2687		return status;
2688
2689	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2690	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2691	    != NVM_ETS_TYPE_EMC)
2692		return E1000_NOT_IMPLEMENTED;
2693
2694	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
2695			    NVM_ETS_LTHRES_DELTA_SHIFT);
2696	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2697
2698	for (i = 1; i <= num_sensors; i++) {
2699		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2700		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2701				NVM_ETS_DATA_INDEX_SHIFT);
2702		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2703				   NVM_ETS_DATA_LOC_SHIFT);
2704		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
2705
2706		hw->phy.ops.write_i2c_byte(hw,
2707			e1000_emc_therm_limit[sensor_index],
2708			E1000_I2C_THERMAL_SENSOR_ADDR,
2709			therm_limit);
2710
2711		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
2712			data->sensor[i].location = sensor_location;
2713			data->sensor[i].caution_thresh = therm_limit;
2714			data->sensor[i].max_op_thresh = therm_limit -
2715							low_thresh_delta;
2716		}
2717	}
2718	return status;
2719}
2720
/* MAC operation callbacks common to the 82575 family */
static struct e1000_mac_operations e1000_mac_ops_82575 = {
	.init_hw              = igb_init_hw_82575,
	.check_for_link       = igb_check_for_link_82575,
	.rar_set              = igb_rar_set,
	.read_mac_addr        = igb_read_mac_addr_82575,
	.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
#ifdef CONFIG_IGB_HWMON
	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
#endif
};
2732
/* PHY operation callbacks common to the 82575 family */
static struct e1000_phy_operations e1000_phy_ops_82575 = {
	.acquire              = igb_acquire_phy_82575,
	.get_cfg_done         = igb_get_cfg_done_82575,
	.release              = igb_release_phy_82575,
	.write_i2c_byte       = igb_write_i2c_byte,
	.read_i2c_byte        = igb_read_i2c_byte,
};
2740
/* NVM operation callbacks common to the 82575 family */
static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
	.acquire              = igb_acquire_nvm_82575,
	.read                 = igb_read_nvm_eerd,
	.release              = igb_release_nvm_82575,
	.write                = igb_write_nvm_spi,
};
2747
/* Device info exported to the igb core for 82575-family parts */
const struct e1000_info e1000_82575_info = {
	.get_invariants = igb_get_invariants_82575,
	.mac_ops = &e1000_mac_ops_82575,
	.phy_ops = &e1000_phy_ops_82575,
	.nvm_ops = &e1000_nvm_ops_82575,
};
2754
2755