/* e1000_82575.c — revision a4a07624927743df7f4414e7f368b49ff19271b9 */
1/* Intel(R) Gigabit Ethernet Linux driver 2 * Copyright(c) 2007-2014 Intel Corporation. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, see <http://www.gnu.org/licenses/>. 15 * 16 * The full GNU General Public License is included in this distribution in 17 * the file called "COPYING". 18 * 19 * Contact Information: 20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 22 */ 23 24/* e1000_82575 25 * e1000_82576 26 */ 27 28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29 30#include <linux/types.h> 31#include <linux/if_ether.h> 32#include <linux/i2c.h> 33 34#include "e1000_mac.h" 35#include "e1000_82575.h" 36#include "e1000_i210.h" 37 38static s32 igb_get_invariants_82575(struct e1000_hw *); 39static s32 igb_acquire_phy_82575(struct e1000_hw *); 40static void igb_release_phy_82575(struct e1000_hw *); 41static s32 igb_acquire_nvm_82575(struct e1000_hw *); 42static void igb_release_nvm_82575(struct e1000_hw *); 43static s32 igb_check_for_link_82575(struct e1000_hw *); 44static s32 igb_get_cfg_done_82575(struct e1000_hw *); 45static s32 igb_init_hw_82575(struct e1000_hw *); 46static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); 47static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); 48static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *); 49static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16); 50static s32 
igb_reset_hw_82575(struct e1000_hw *); 51static s32 igb_reset_hw_82580(struct e1000_hw *); 52static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); 53static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool); 54static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool); 55static s32 igb_setup_copper_link_82575(struct e1000_hw *); 56static s32 igb_setup_serdes_link_82575(struct e1000_hw *); 57static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16); 58static void igb_clear_hw_cntrs_82575(struct e1000_hw *); 59static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16); 60static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *, 61 u16 *); 62static s32 igb_get_phy_id_82575(struct e1000_hw *); 63static void igb_release_swfw_sync_82575(struct e1000_hw *, u16); 64static bool igb_sgmii_active_82575(struct e1000_hw *); 65static s32 igb_reset_init_script_82575(struct e1000_hw *); 66static s32 igb_read_mac_addr_82575(struct e1000_hw *); 67static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); 68static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); 69static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); 70static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); 71static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); 72static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); 73static const u16 e1000_82580_rxpbs_table[] = { 74 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; 75 76/** 77 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 78 * @hw: pointer to the HW structure 79 * 80 * Called to determine if the I2C pins are being used for I2C or as an 81 * external MDIO interface since the two options are mutually exclusive. 
82 **/ 83static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw) 84{ 85 u32 reg = 0; 86 bool ext_mdio = false; 87 88 switch (hw->mac.type) { 89 case e1000_82575: 90 case e1000_82576: 91 reg = rd32(E1000_MDIC); 92 ext_mdio = !!(reg & E1000_MDIC_DEST); 93 break; 94 case e1000_82580: 95 case e1000_i350: 96 case e1000_i354: 97 case e1000_i210: 98 case e1000_i211: 99 reg = rd32(E1000_MDICNFG); 100 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); 101 break; 102 default: 103 break; 104 } 105 return ext_mdio; 106} 107 108/** 109 * igb_check_for_link_media_swap - Check which M88E1112 interface linked 110 * @hw: pointer to the HW structure 111 * 112 * Poll the M88E1112 interfaces to see which interface achieved link. 113 */ 114static s32 igb_check_for_link_media_swap(struct e1000_hw *hw) 115{ 116 struct e1000_phy_info *phy = &hw->phy; 117 s32 ret_val; 118 u16 data; 119 u8 port = 0; 120 121 /* Check the copper medium. */ 122 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); 123 if (ret_val) 124 return ret_val; 125 126 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); 127 if (ret_val) 128 return ret_val; 129 130 if (data & E1000_M88E1112_STATUS_LINK) 131 port = E1000_MEDIA_PORT_COPPER; 132 133 /* Check the other medium. */ 134 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); 135 if (ret_val) 136 return ret_val; 137 138 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); 139 if (ret_val) 140 return ret_val; 141 142 /* reset page to 0 */ 143 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); 144 if (ret_val) 145 return ret_val; 146 147 if (data & E1000_M88E1112_STATUS_LINK) 148 port = E1000_MEDIA_PORT_OTHER; 149 150 /* Determine if a swap needs to happen. 
*/ 151 if (port && (hw->dev_spec._82575.media_port != port)) { 152 hw->dev_spec._82575.media_port = port; 153 hw->dev_spec._82575.media_changed = true; 154 } else { 155 ret_val = igb_check_for_link_82575(hw); 156 } 157 158 return 0; 159} 160 161/** 162 * igb_init_phy_params_82575 - Init PHY func ptrs. 163 * @hw: pointer to the HW structure 164 **/ 165static s32 igb_init_phy_params_82575(struct e1000_hw *hw) 166{ 167 struct e1000_phy_info *phy = &hw->phy; 168 s32 ret_val = 0; 169 u32 ctrl_ext; 170 171 if (hw->phy.media_type != e1000_media_type_copper) { 172 phy->type = e1000_phy_none; 173 goto out; 174 } 175 176 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 177 phy->reset_delay_us = 100; 178 179 ctrl_ext = rd32(E1000_CTRL_EXT); 180 181 if (igb_sgmii_active_82575(hw)) { 182 phy->ops.reset = igb_phy_hw_reset_sgmii_82575; 183 ctrl_ext |= E1000_CTRL_I2C_ENA; 184 } else { 185 phy->ops.reset = igb_phy_hw_reset; 186 ctrl_ext &= ~E1000_CTRL_I2C_ENA; 187 } 188 189 wr32(E1000_CTRL_EXT, ctrl_ext); 190 igb_reset_mdicnfg_82580(hw); 191 192 if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) { 193 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; 194 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; 195 } else { 196 switch (hw->mac.type) { 197 case e1000_82580: 198 case e1000_i350: 199 case e1000_i354: 200 phy->ops.read_reg = igb_read_phy_reg_82580; 201 phy->ops.write_reg = igb_write_phy_reg_82580; 202 break; 203 case e1000_i210: 204 case e1000_i211: 205 phy->ops.read_reg = igb_read_phy_reg_gs40g; 206 phy->ops.write_reg = igb_write_phy_reg_gs40g; 207 break; 208 default: 209 phy->ops.read_reg = igb_read_phy_reg_igp; 210 phy->ops.write_reg = igb_write_phy_reg_igp; 211 } 212 } 213 214 /* set lan id */ 215 hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >> 216 E1000_STATUS_FUNC_SHIFT; 217 218 /* Set phy->phy_addr and phy->id. 
*/ 219 ret_val = igb_get_phy_id_82575(hw); 220 if (ret_val) 221 return ret_val; 222 223 /* Verify phy id and set remaining function pointers */ 224 switch (phy->id) { 225 case M88E1543_E_PHY_ID: 226 case I347AT4_E_PHY_ID: 227 case M88E1112_E_PHY_ID: 228 case M88E1111_I_PHY_ID: 229 phy->type = e1000_phy_m88; 230 phy->ops.check_polarity = igb_check_polarity_m88; 231 phy->ops.get_phy_info = igb_get_phy_info_m88; 232 if (phy->id != M88E1111_I_PHY_ID) 233 phy->ops.get_cable_length = 234 igb_get_cable_length_m88_gen2; 235 else 236 phy->ops.get_cable_length = igb_get_cable_length_m88; 237 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; 238 /* Check if this PHY is confgured for media swap. */ 239 if (phy->id == M88E1112_E_PHY_ID) { 240 u16 data; 241 242 ret_val = phy->ops.write_reg(hw, 243 E1000_M88E1112_PAGE_ADDR, 244 2); 245 if (ret_val) 246 goto out; 247 248 ret_val = phy->ops.read_reg(hw, 249 E1000_M88E1112_MAC_CTRL_1, 250 &data); 251 if (ret_val) 252 goto out; 253 254 data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> 255 E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; 256 if (data == E1000_M88E1112_AUTO_COPPER_SGMII || 257 data == E1000_M88E1112_AUTO_COPPER_BASEX) 258 hw->mac.ops.check_for_link = 259 igb_check_for_link_media_swap; 260 } 261 break; 262 case IGP03E1000_E_PHY_ID: 263 phy->type = e1000_phy_igp_3; 264 phy->ops.get_phy_info = igb_get_phy_info_igp; 265 phy->ops.get_cable_length = igb_get_cable_length_igp_2; 266 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp; 267 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; 268 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; 269 break; 270 case I82580_I_PHY_ID: 271 case I350_I_PHY_ID: 272 phy->type = e1000_phy_82580; 273 phy->ops.force_speed_duplex = 274 igb_phy_force_speed_duplex_82580; 275 phy->ops.get_cable_length = igb_get_cable_length_82580; 276 phy->ops.get_phy_info = igb_get_phy_info_82580; 277 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; 278 
phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; 279 break; 280 case I210_I_PHY_ID: 281 phy->type = e1000_phy_i210; 282 phy->ops.check_polarity = igb_check_polarity_m88; 283 phy->ops.get_phy_info = igb_get_phy_info_m88; 284 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2; 285 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580; 286 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; 287 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; 288 break; 289 default: 290 ret_val = -E1000_ERR_PHY; 291 goto out; 292 } 293 294out: 295 return ret_val; 296} 297 298/** 299 * igb_init_nvm_params_82575 - Init NVM func ptrs. 300 * @hw: pointer to the HW structure 301 **/ 302static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) 303{ 304 struct e1000_nvm_info *nvm = &hw->nvm; 305 u32 eecd = rd32(E1000_EECD); 306 u16 size; 307 308 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 309 E1000_EECD_SIZE_EX_SHIFT); 310 311 /* Added to a constant, "size" becomes the left-shift value 312 * for setting word_size. 313 */ 314 size += NVM_WORD_SIZE_BASE_SHIFT; 315 316 /* Just in case size is out of range, cap it to the largest 317 * EEPROM size supported 318 */ 319 if (size > 15) 320 size = 15; 321 322 nvm->word_size = 1 << size; 323 nvm->opcode_bits = 8; 324 nvm->delay_usec = 1; 325 326 switch (nvm->override) { 327 case e1000_nvm_override_spi_large: 328 nvm->page_size = 32; 329 nvm->address_bits = 16; 330 break; 331 case e1000_nvm_override_spi_small: 332 nvm->page_size = 8; 333 nvm->address_bits = 8; 334 break; 335 default: 336 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; 337 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
338 16 : 8; 339 break; 340 } 341 if (nvm->word_size == (1 << 15)) 342 nvm->page_size = 128; 343 344 nvm->type = e1000_nvm_eeprom_spi; 345 346 /* NVM Function Pointers */ 347 nvm->ops.acquire = igb_acquire_nvm_82575; 348 nvm->ops.release = igb_release_nvm_82575; 349 nvm->ops.write = igb_write_nvm_spi; 350 nvm->ops.validate = igb_validate_nvm_checksum; 351 nvm->ops.update = igb_update_nvm_checksum; 352 if (nvm->word_size < (1 << 15)) 353 nvm->ops.read = igb_read_nvm_eerd; 354 else 355 nvm->ops.read = igb_read_nvm_spi; 356 357 /* override generic family function pointers for specific descendants */ 358 switch (hw->mac.type) { 359 case e1000_82580: 360 nvm->ops.validate = igb_validate_nvm_checksum_82580; 361 nvm->ops.update = igb_update_nvm_checksum_82580; 362 break; 363 case e1000_i354: 364 case e1000_i350: 365 nvm->ops.validate = igb_validate_nvm_checksum_i350; 366 nvm->ops.update = igb_update_nvm_checksum_i350; 367 break; 368 default: 369 break; 370 } 371 372 return 0; 373} 374 375/** 376 * igb_init_mac_params_82575 - Init MAC func ptrs. 
377 * @hw: pointer to the HW structure 378 **/ 379static s32 igb_init_mac_params_82575(struct e1000_hw *hw) 380{ 381 struct e1000_mac_info *mac = &hw->mac; 382 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 383 384 /* Set mta register count */ 385 mac->mta_reg_count = 128; 386 /* Set rar entry count */ 387 switch (mac->type) { 388 case e1000_82576: 389 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 390 break; 391 case e1000_82580: 392 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 393 break; 394 case e1000_i350: 395 case e1000_i354: 396 mac->rar_entry_count = E1000_RAR_ENTRIES_I350; 397 break; 398 default: 399 mac->rar_entry_count = E1000_RAR_ENTRIES_82575; 400 break; 401 } 402 /* reset */ 403 if (mac->type >= e1000_82580) 404 mac->ops.reset_hw = igb_reset_hw_82580; 405 else 406 mac->ops.reset_hw = igb_reset_hw_82575; 407 408 if (mac->type >= e1000_i210) { 409 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210; 410 mac->ops.release_swfw_sync = igb_release_swfw_sync_i210; 411 412 } else { 413 mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575; 414 mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; 415 } 416 417 /* Set if part includes ASF firmware */ 418 mac->asf_firmware_present = true; 419 /* Set if manageability features are enabled. */ 420 mac->arc_subsystem_valid = 421 (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) 422 ? true : false; 423 /* enable EEE on i350 parts and later parts */ 424 if (mac->type >= e1000_i350) 425 dev_spec->eee_disable = false; 426 else 427 dev_spec->eee_disable = true; 428 /* Allow a single clear of the SW semaphore on I210 and newer */ 429 if (mac->type >= e1000_i210) 430 dev_spec->clear_semaphore_once = true; 431 /* physical interface link setup */ 432 mac->ops.setup_physical_interface = 433 (hw->phy.media_type == e1000_media_type_copper) 434 ? 
igb_setup_copper_link_82575 435 : igb_setup_serdes_link_82575; 436 437 if (mac->type == e1000_82580) { 438 switch (hw->device_id) { 439 /* feature not supported on these id's */ 440 case E1000_DEV_ID_DH89XXCC_SGMII: 441 case E1000_DEV_ID_DH89XXCC_SERDES: 442 case E1000_DEV_ID_DH89XXCC_BACKPLANE: 443 case E1000_DEV_ID_DH89XXCC_SFP: 444 break; 445 default: 446 hw->dev_spec._82575.mas_capable = true; 447 break; 448 } 449 } 450 return 0; 451} 452 453/** 454 * igb_set_sfp_media_type_82575 - derives SFP module media type. 455 * @hw: pointer to the HW structure 456 * 457 * The media type is chosen based on SFP module. 458 * compatibility flags retrieved from SFP ID EEPROM. 459 **/ 460static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw) 461{ 462 s32 ret_val = E1000_ERR_CONFIG; 463 u32 ctrl_ext = 0; 464 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 465 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags; 466 u8 tranceiver_type = 0; 467 s32 timeout = 3; 468 469 /* Turn I2C interface ON and power on sfp cage */ 470 ctrl_ext = rd32(E1000_CTRL_EXT); 471 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 472 wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); 473 474 wrfl(); 475 476 /* Read SFP module data */ 477 while (timeout) { 478 ret_val = igb_read_sfp_data_byte(hw, 479 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), 480 &tranceiver_type); 481 if (ret_val == 0) 482 break; 483 msleep(100); 484 timeout--; 485 } 486 if (ret_val != 0) 487 goto out; 488 489 ret_val = igb_read_sfp_data_byte(hw, 490 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), 491 (u8 *)eth_flags); 492 if (ret_val != 0) 493 goto out; 494 495 /* Check if there is some SFP module plugged and powered */ 496 if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || 497 (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { 498 dev_spec->module_plugged = true; 499 if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { 500 hw->phy.media_type = e1000_media_type_internal_serdes; 501 } else 
if (eth_flags->e100_base_fx) { 502 dev_spec->sgmii_active = true; 503 hw->phy.media_type = e1000_media_type_internal_serdes; 504 } else if (eth_flags->e1000_base_t) { 505 dev_spec->sgmii_active = true; 506 hw->phy.media_type = e1000_media_type_copper; 507 } else { 508 hw->phy.media_type = e1000_media_type_unknown; 509 hw_dbg("PHY module has not been recognized\n"); 510 goto out; 511 } 512 } else { 513 hw->phy.media_type = e1000_media_type_unknown; 514 } 515 ret_val = 0; 516out: 517 /* Restore I2C interface setting */ 518 wr32(E1000_CTRL_EXT, ctrl_ext); 519 return ret_val; 520} 521 522static s32 igb_get_invariants_82575(struct e1000_hw *hw) 523{ 524 struct e1000_mac_info *mac = &hw->mac; 525 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; 526 s32 ret_val; 527 u32 ctrl_ext = 0; 528 u32 link_mode = 0; 529 530 switch (hw->device_id) { 531 case E1000_DEV_ID_82575EB_COPPER: 532 case E1000_DEV_ID_82575EB_FIBER_SERDES: 533 case E1000_DEV_ID_82575GB_QUAD_COPPER: 534 mac->type = e1000_82575; 535 break; 536 case E1000_DEV_ID_82576: 537 case E1000_DEV_ID_82576_NS: 538 case E1000_DEV_ID_82576_NS_SERDES: 539 case E1000_DEV_ID_82576_FIBER: 540 case E1000_DEV_ID_82576_SERDES: 541 case E1000_DEV_ID_82576_QUAD_COPPER: 542 case E1000_DEV_ID_82576_QUAD_COPPER_ET2: 543 case E1000_DEV_ID_82576_SERDES_QUAD: 544 mac->type = e1000_82576; 545 break; 546 case E1000_DEV_ID_82580_COPPER: 547 case E1000_DEV_ID_82580_FIBER: 548 case E1000_DEV_ID_82580_QUAD_FIBER: 549 case E1000_DEV_ID_82580_SERDES: 550 case E1000_DEV_ID_82580_SGMII: 551 case E1000_DEV_ID_82580_COPPER_DUAL: 552 case E1000_DEV_ID_DH89XXCC_SGMII: 553 case E1000_DEV_ID_DH89XXCC_SERDES: 554 case E1000_DEV_ID_DH89XXCC_BACKPLANE: 555 case E1000_DEV_ID_DH89XXCC_SFP: 556 mac->type = e1000_82580; 557 break; 558 case E1000_DEV_ID_I350_COPPER: 559 case E1000_DEV_ID_I350_FIBER: 560 case E1000_DEV_ID_I350_SERDES: 561 case E1000_DEV_ID_I350_SGMII: 562 mac->type = e1000_i350; 563 break; 564 case E1000_DEV_ID_I210_COPPER: 565 case 
E1000_DEV_ID_I210_FIBER: 566 case E1000_DEV_ID_I210_SERDES: 567 case E1000_DEV_ID_I210_SGMII: 568 case E1000_DEV_ID_I210_COPPER_FLASHLESS: 569 case E1000_DEV_ID_I210_SERDES_FLASHLESS: 570 mac->type = e1000_i210; 571 break; 572 case E1000_DEV_ID_I211_COPPER: 573 mac->type = e1000_i211; 574 break; 575 case E1000_DEV_ID_I354_BACKPLANE_1GBPS: 576 case E1000_DEV_ID_I354_SGMII: 577 case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: 578 mac->type = e1000_i354; 579 break; 580 default: 581 return -E1000_ERR_MAC_INIT; 582 break; 583 } 584 585 /* Set media type */ 586 /* The 82575 uses bits 22:23 for link mode. The mode can be changed 587 * based on the EEPROM. We cannot rely upon device ID. There 588 * is no distinguishable difference between fiber and internal 589 * SerDes mode on the 82575. There can be an external PHY attached 590 * on the SGMII interface. For this, we'll set sgmii_active to true. 591 */ 592 hw->phy.media_type = e1000_media_type_copper; 593 dev_spec->sgmii_active = false; 594 dev_spec->module_plugged = false; 595 596 ctrl_ext = rd32(E1000_CTRL_EXT); 597 598 link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; 599 switch (link_mode) { 600 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 601 hw->phy.media_type = e1000_media_type_internal_serdes; 602 break; 603 case E1000_CTRL_EXT_LINK_MODE_SGMII: 604 /* Get phy control interface type set (MDIO vs. I2C)*/ 605 if (igb_sgmii_uses_mdio_82575(hw)) { 606 hw->phy.media_type = e1000_media_type_copper; 607 dev_spec->sgmii_active = true; 608 break; 609 } 610 /* fall through for I2C based SGMII */ 611 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: 612 /* read media type from SFP EEPROM */ 613 ret_val = igb_set_sfp_media_type_82575(hw); 614 if ((ret_val != 0) || 615 (hw->phy.media_type == e1000_media_type_unknown)) { 616 /* If media type was not identified then return media 617 * type defined by the CTRL_EXT settings. 
618 */ 619 hw->phy.media_type = e1000_media_type_internal_serdes; 620 621 if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { 622 hw->phy.media_type = e1000_media_type_copper; 623 dev_spec->sgmii_active = true; 624 } 625 626 break; 627 } 628 629 /* do not change link mode for 100BaseFX */ 630 if (dev_spec->eth_flags.e100_base_fx) 631 break; 632 633 /* change current link mode setting */ 634 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; 635 636 if (hw->phy.media_type == e1000_media_type_copper) 637 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; 638 else 639 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; 640 641 wr32(E1000_CTRL_EXT, ctrl_ext); 642 643 break; 644 default: 645 break; 646 } 647 648 /* mac initialization and operations */ 649 ret_val = igb_init_mac_params_82575(hw); 650 if (ret_val) 651 goto out; 652 653 /* NVM initialization */ 654 ret_val = igb_init_nvm_params_82575(hw); 655 switch (hw->mac.type) { 656 case e1000_i210: 657 case e1000_i211: 658 ret_val = igb_init_nvm_params_i210(hw); 659 break; 660 default: 661 break; 662 } 663 664 if (ret_val) 665 goto out; 666 667 /* if part supports SR-IOV then initialize mailbox parameters */ 668 switch (mac->type) { 669 case e1000_82576: 670 case e1000_i350: 671 igb_init_mbx_params_pf(hw); 672 break; 673 default: 674 break; 675 } 676 677 /* setup PHY parameters */ 678 ret_val = igb_init_phy_params_82575(hw); 679 680out: 681 return ret_val; 682} 683 684/** 685 * igb_acquire_phy_82575 - Acquire rights to access PHY 686 * @hw: pointer to the HW structure 687 * 688 * Acquire access rights to the correct PHY. This is a 689 * function pointer entry point called by the api module. 
690 **/ 691static s32 igb_acquire_phy_82575(struct e1000_hw *hw) 692{ 693 u16 mask = E1000_SWFW_PHY0_SM; 694 695 if (hw->bus.func == E1000_FUNC_1) 696 mask = E1000_SWFW_PHY1_SM; 697 else if (hw->bus.func == E1000_FUNC_2) 698 mask = E1000_SWFW_PHY2_SM; 699 else if (hw->bus.func == E1000_FUNC_3) 700 mask = E1000_SWFW_PHY3_SM; 701 702 return hw->mac.ops.acquire_swfw_sync(hw, mask); 703} 704 705/** 706 * igb_release_phy_82575 - Release rights to access PHY 707 * @hw: pointer to the HW structure 708 * 709 * A wrapper to release access rights to the correct PHY. This is a 710 * function pointer entry point called by the api module. 711 **/ 712static void igb_release_phy_82575(struct e1000_hw *hw) 713{ 714 u16 mask = E1000_SWFW_PHY0_SM; 715 716 if (hw->bus.func == E1000_FUNC_1) 717 mask = E1000_SWFW_PHY1_SM; 718 else if (hw->bus.func == E1000_FUNC_2) 719 mask = E1000_SWFW_PHY2_SM; 720 else if (hw->bus.func == E1000_FUNC_3) 721 mask = E1000_SWFW_PHY3_SM; 722 723 hw->mac.ops.release_swfw_sync(hw, mask); 724} 725 726/** 727 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii 728 * @hw: pointer to the HW structure 729 * @offset: register offset to be read 730 * @data: pointer to the read data 731 * 732 * Reads the PHY register at offset using the serial gigabit media independent 733 * interface and stores the retrieved information in data. 
734 **/ 735static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 736 u16 *data) 737{ 738 s32 ret_val = -E1000_ERR_PARAM; 739 740 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 741 hw_dbg("PHY Address %u is out of range\n", offset); 742 goto out; 743 } 744 745 ret_val = hw->phy.ops.acquire(hw); 746 if (ret_val) 747 goto out; 748 749 ret_val = igb_read_phy_reg_i2c(hw, offset, data); 750 751 hw->phy.ops.release(hw); 752 753out: 754 return ret_val; 755} 756 757/** 758 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii 759 * @hw: pointer to the HW structure 760 * @offset: register offset to write to 761 * @data: data to write at register offset 762 * 763 * Writes the data to PHY register at the offset using the serial gigabit 764 * media independent interface. 765 **/ 766static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 767 u16 data) 768{ 769 s32 ret_val = -E1000_ERR_PARAM; 770 771 772 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 773 hw_dbg("PHY Address %d is out of range\n", offset); 774 goto out; 775 } 776 777 ret_val = hw->phy.ops.acquire(hw); 778 if (ret_val) 779 goto out; 780 781 ret_val = igb_write_phy_reg_i2c(hw, offset, data); 782 783 hw->phy.ops.release(hw); 784 785out: 786 return ret_val; 787} 788 789/** 790 * igb_get_phy_id_82575 - Retrieve PHY addr and id 791 * @hw: pointer to the HW structure 792 * 793 * Retrieves the PHY address and ID for both PHY's which do and do not use 794 * sgmi interface. 795 **/ 796static s32 igb_get_phy_id_82575(struct e1000_hw *hw) 797{ 798 struct e1000_phy_info *phy = &hw->phy; 799 s32 ret_val = 0; 800 u16 phy_id; 801 u32 ctrl_ext; 802 u32 mdic; 803 804 /* Extra read required for some PHY's on i354 */ 805 if (hw->mac.type == e1000_i354) 806 igb_get_phy_id(hw); 807 808 /* For SGMII PHYs, we try the list of possible addresses until 809 * we find one that works. For non-SGMII PHYs 810 * (e.g. integrated copper PHYs), an address of 1 should 811 * work. 
The result of this function should mean phy->phy_addr 812 * and phy->id are set correctly. 813 */ 814 if (!(igb_sgmii_active_82575(hw))) { 815 phy->addr = 1; 816 ret_val = igb_get_phy_id(hw); 817 goto out; 818 } 819 820 if (igb_sgmii_uses_mdio_82575(hw)) { 821 switch (hw->mac.type) { 822 case e1000_82575: 823 case e1000_82576: 824 mdic = rd32(E1000_MDIC); 825 mdic &= E1000_MDIC_PHY_MASK; 826 phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; 827 break; 828 case e1000_82580: 829 case e1000_i350: 830 case e1000_i354: 831 case e1000_i210: 832 case e1000_i211: 833 mdic = rd32(E1000_MDICNFG); 834 mdic &= E1000_MDICNFG_PHY_MASK; 835 phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; 836 break; 837 default: 838 ret_val = -E1000_ERR_PHY; 839 goto out; 840 } 841 ret_val = igb_get_phy_id(hw); 842 goto out; 843 } 844 845 /* Power on sgmii phy if it is disabled */ 846 ctrl_ext = rd32(E1000_CTRL_EXT); 847 wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); 848 wrfl(); 849 msleep(300); 850 851 /* The address field in the I2CCMD register is 3 bits and 0 is invalid. 852 * Therefore, we need to test 1-7 853 */ 854 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 855 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); 856 if (ret_val == 0) { 857 hw_dbg("Vendor ID 0x%08X read at address %u\n", 858 phy_id, phy->addr); 859 /* At the time of this writing, The M88 part is 860 * the only supported SGMII PHY product. 861 */ 862 if (phy_id == M88_VENDOR) 863 break; 864 } else { 865 hw_dbg("PHY address %u was unreadable\n", phy->addr); 866 } 867 } 868 869 /* A valid PHY type couldn't be found. 
*/ 870 if (phy->addr == 8) { 871 phy->addr = 0; 872 ret_val = -E1000_ERR_PHY; 873 goto out; 874 } else { 875 ret_val = igb_get_phy_id(hw); 876 } 877 878 /* restore previous sfp cage power state */ 879 wr32(E1000_CTRL_EXT, ctrl_ext); 880 881out: 882 return ret_val; 883} 884 885/** 886 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset 887 * @hw: pointer to the HW structure 888 * 889 * Resets the PHY using the serial gigabit media independent interface. 890 **/ 891static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) 892{ 893 s32 ret_val; 894 895 /* This isn't a true "hard" reset, but is the only reset 896 * available to us at this time. 897 */ 898 899 hw_dbg("Soft resetting SGMII attached PHY...\n"); 900 901 /* SFP documentation requires the following to configure the SPF module 902 * to work on SGMII. No further documentation is given. 903 */ 904 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); 905 if (ret_val) 906 goto out; 907 908 ret_val = igb_phy_sw_reset(hw); 909 910out: 911 return ret_val; 912} 913 914/** 915 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state 916 * @hw: pointer to the HW structure 917 * @active: true to enable LPLU, false to disable 918 * 919 * Sets the LPLU D0 state according to the active flag. When 920 * activating LPLU this function also disables smart speed 921 * and vice versa. LPLU will not be activated unless the 922 * device autonegotiation advertisement meets standards of 923 * either 10 or 10/100 or 10/100/1000 at all duplexes. 924 * This is a function pointer entry point only called by 925 * PHY setup routines. 
926 **/ 927static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) 928{ 929 struct e1000_phy_info *phy = &hw->phy; 930 s32 ret_val; 931 u16 data; 932 933 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 934 if (ret_val) 935 goto out; 936 937 if (active) { 938 data |= IGP02E1000_PM_D0_LPLU; 939 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 940 data); 941 if (ret_val) 942 goto out; 943 944 /* When LPLU is enabled, we should disable SmartSpeed */ 945 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 946 &data); 947 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 948 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, 949 data); 950 if (ret_val) 951 goto out; 952 } else { 953 data &= ~IGP02E1000_PM_D0_LPLU; 954 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, 955 data); 956 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 957 * during Dx states where the power conservation is most 958 * important. During driver activity we should enable 959 * SmartSpeed, so performance is maintained. 960 */ 961 if (phy->smart_speed == e1000_smart_speed_on) { 962 ret_val = phy->ops.read_reg(hw, 963 IGP01E1000_PHY_PORT_CONFIG, &data); 964 if (ret_val) 965 goto out; 966 967 data |= IGP01E1000_PSCFR_SMART_SPEED; 968 ret_val = phy->ops.write_reg(hw, 969 IGP01E1000_PHY_PORT_CONFIG, data); 970 if (ret_val) 971 goto out; 972 } else if (phy->smart_speed == e1000_smart_speed_off) { 973 ret_val = phy->ops.read_reg(hw, 974 IGP01E1000_PHY_PORT_CONFIG, &data); 975 if (ret_val) 976 goto out; 977 978 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 979 ret_val = phy->ops.write_reg(hw, 980 IGP01E1000_PHY_PORT_CONFIG, data); 981 if (ret_val) 982 goto out; 983 } 984 } 985 986out: 987 return ret_val; 988} 989 990/** 991 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state 992 * @hw: pointer to the HW structure 993 * @active: true to enable LPLU, false to disable 994 * 995 * Sets the LPLU D0 state according to the active flag. 
When 996 * activating LPLU this function also disables smart speed 997 * and vice versa. LPLU will not be activated unless the 998 * device autonegotiation advertisement meets standards of 999 * either 10 or 10/100 or 10/100/1000 at all duplexes. 1000 * This is a function pointer entry point only called by 1001 * PHY setup routines. 1002 **/ 1003static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) 1004{ 1005 struct e1000_phy_info *phy = &hw->phy; 1006 u16 data; 1007 1008 data = rd32(E1000_82580_PHY_POWER_MGMT); 1009 1010 if (active) { 1011 data |= E1000_82580_PM_D0_LPLU; 1012 1013 /* When LPLU is enabled, we should disable SmartSpeed */ 1014 data &= ~E1000_82580_PM_SPD; 1015 } else { 1016 data &= ~E1000_82580_PM_D0_LPLU; 1017 1018 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 1019 * during Dx states where the power conservation is most 1020 * important. During driver activity we should enable 1021 * SmartSpeed, so performance is maintained. 1022 */ 1023 if (phy->smart_speed == e1000_smart_speed_on) 1024 data |= E1000_82580_PM_SPD; 1025 else if (phy->smart_speed == e1000_smart_speed_off) 1026 data &= ~E1000_82580_PM_SPD; } 1027 1028 wr32(E1000_82580_PHY_POWER_MGMT, data); 1029 return 0; 1030} 1031 1032/** 1033 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3 1034 * @hw: pointer to the HW structure 1035 * @active: boolean used to enable/disable lplu 1036 * 1037 * Success returns 0, Failure returns 1 1038 * 1039 * The low power link up (lplu) state is set to the power management level D3 1040 * and SmartSpeed is disabled when active is true, else clear lplu for D3 1041 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU 1042 * is used during Dx states where the power conservation is most important. 1043 * During driver activity, SmartSpeed should be enabled so performance is 1044 * maintained. 
 **/
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u16 data;

	data = rd32(E1000_82580_PHY_POWER_MGMT);

	if (!active) {
		data &= ~E1000_82580_PM_D3_LPLU;
		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on)
			data |= E1000_82580_PM_SPD;
		else if (phy->smart_speed == e1000_smart_speed_off)
			data &= ~E1000_82580_PM_SPD;
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Only enter D3 LPLU when the advertised speeds meet the
		 * standards noted in the kernel-doc above.
		 */
		data |= E1000_82580_PM_D3_LPLU;
		/* When LPLU is enabled, we should disable SmartSpeed */
		data &= ~E1000_82580_PM_SPD;
	}

	wr32(E1000_82580_PHY_POWER_MGMT, data);
	return 0;
}

/**
 * igb_acquire_nvm_82575 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
{
	s32 ret_val;

	/* SW/FW semaphore must be held for the whole NVM transaction */
	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		goto out;

	ret_val = igb_acquire_nvm(hw);

	/* on failure, release the semaphore so we don't deadlock later */
	if (ret_val)
		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);

out:
	return ret_val;
}

/**
 * igb_release_nvm_82575 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	igb_release_nvm(hw);
	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 **/
static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	/* FW owns the upper 16 bits of SW_FW_SYNC for the same resources */
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		/* The HW semaphore serializes access to SW_FW_SYNC itself */
		if (igb_get_hw_semaphore(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	/* claim the resource; HW semaphore is still held at this point */
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);

out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* spin until the HW semaphore is ours; release must not fail */
	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 * igb_get_cfg_done_82575 - Read config done bit
 * @hw: pointer to the HW structure
 *
 * Read the management control register for the config done bit for
 * completion status.  NOTE: silicon which is EEPROM-less will fail trying
 * to read the config done bit, so an error is *ONLY* logged and returns
 * 0.  If we were to return with error, EEPROM-less silicon
 * would not be able to be reset or change link.
 **/
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	/* each PCI function polls its own config-done bit */
	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_NVM_CFG_DONE_PORT_2;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_NVM_CFG_DONE_PORT_3;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	/* If EEPROM is not marked present, init the PHY manually */
	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
	    (hw->phy.type == e1000_phy_igp_3))
		igb_phy_init_script_igp3(hw);

	return 0;
}

/**
 * igb_get_link_up_info_82575 - Get link speed/duplex info
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * This is a wrapper function, if using the serial gigabit media independent
 * interface, use PCS to retrieve the link speed and duplex information.
 * Otherwise, use the generic function to get the link speed and duplex info.
 **/
static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
				      u16 *duplex)
{
	s32 ret_val;

	/* non-copper media reports speed/duplex through the PCS block */
	if (hw->phy.media_type != e1000_media_type_copper)
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
							     duplex);
	else
		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
							  duplex);

	return ret_val;
}

/**
 * igb_check_for_link_82575 - Check for link
 * @hw: pointer to the HW structure
 *
 * If sgmii is enabled, then use the pcs register to determine link, otherwise
 * use the generic interface for determining link.
 **/
static s32 igb_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	if (hw->phy.media_type != e1000_media_type_copper) {
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
							     &duplex);
		/* Use this flag to determine if link needs to be checked or
		 * not.  If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/* Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}

/**
 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
 * @hw: pointer to the HW structure
 **/
void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	/* only applicable to serdes or sgmii links */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return;

	/* Enable PCS to turn on link */
	reg = rd32(E1000_PCS_CFG0);
	reg |= E1000_PCS_CFG_PCS_EN;
	wr32(E1000_PCS_CFG0, reg);

	/* Power up the laser */
	reg = rd32(E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
	wr32(E1000_CTRL_EXT, reg);

	/* flush the write to verify completion */
	wrfl();
	usleep_range(1000, 2000);
}

/**
 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Using the physical coding sub-layer (PCS), retrieve the current speed and
 * duplex, then store the values in the pointers provided.
 **/
static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 pcs, status;

	/* Set up defaults for the return values of this function */
	mac->serdes_has_link = false;
	*speed = 0;
	*duplex = 0;

	/* Read the PCS Status register for link state.  For non-copper mode,
	 * the status register is not accurate.  The PCS status register is
	 * used instead.
	 */
	pcs = rd32(E1000_PCS_LSTAT);

	/* The link up bit determines when link is up on autoneg.  The sync ok
	 * gets set once both sides sync up and agree upon link.  Stable link
	 * can be determined by checking for both link up and link sync ok
	 */
	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
		mac->serdes_has_link = true;

		/* Detect and store PCS speed */
		if (pcs & E1000_PCS_LSTS_SPEED_1000)
			*speed = SPEED_1000;
		else if (pcs & E1000_PCS_LSTS_SPEED_100)
			*speed = SPEED_100;
		else
			*speed = SPEED_10;

		/* Detect and store PCS duplex */
		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
			*duplex = FULL_DUPLEX;
		else
			*duplex = HALF_DUPLEX;

		/* Check if it is an I354 2.5Gb backplane connection. */
		if (mac->type == e1000_i354) {
			status = rd32(E1000_STATUS);
			if ((status & E1000_STATUS_2P5_SKU) &&
			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
				*speed = SPEED_2500;
				*duplex = FULL_DUPLEX;
				hw_dbg("2500 Mbs, ");
				hw_dbg("Full Duplex\n");
			}
		}

	}

	return 0;
}

/**
 * igb_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of fiber serdes, shut down optics and PCS on driver unload
 * when management pass thru is not enabled.
1384 **/ 1385void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) 1386{ 1387 u32 reg; 1388 1389 if (hw->phy.media_type != e1000_media_type_internal_serdes && 1390 igb_sgmii_active_82575(hw)) 1391 return; 1392 1393 if (!igb_enable_mng_pass_thru(hw)) { 1394 /* Disable PCS to turn off link */ 1395 reg = rd32(E1000_PCS_CFG0); 1396 reg &= ~E1000_PCS_CFG_PCS_EN; 1397 wr32(E1000_PCS_CFG0, reg); 1398 1399 /* shutdown the laser */ 1400 reg = rd32(E1000_CTRL_EXT); 1401 reg |= E1000_CTRL_EXT_SDP3_DATA; 1402 wr32(E1000_CTRL_EXT, reg); 1403 1404 /* flush the write to verify completion */ 1405 wrfl(); 1406 usleep_range(1000, 2000); 1407 } 1408} 1409 1410/** 1411 * igb_reset_hw_82575 - Reset hardware 1412 * @hw: pointer to the HW structure 1413 * 1414 * This resets the hardware into a known state. This is a 1415 * function pointer entry point called by the api module. 1416 **/ 1417static s32 igb_reset_hw_82575(struct e1000_hw *hw) 1418{ 1419 u32 ctrl; 1420 s32 ret_val; 1421 1422 /* Prevent the PCI-E bus from sticking if there is no TLP connection 1423 * on the last TLP read/write transaction when MAC is reset. 1424 */ 1425 ret_val = igb_disable_pcie_master(hw); 1426 if (ret_val) 1427 hw_dbg("PCI-E Master disable polling has failed.\n"); 1428 1429 /* set the completion timeout for interface */ 1430 ret_val = igb_set_pcie_completion_timeout(hw); 1431 if (ret_val) 1432 hw_dbg("PCI-E Set completion timeout has failed.\n"); 1433 1434 hw_dbg("Masking off all interrupts\n"); 1435 wr32(E1000_IMC, 0xffffffff); 1436 1437 wr32(E1000_RCTL, 0); 1438 wr32(E1000_TCTL, E1000_TCTL_PSP); 1439 wrfl(); 1440 1441 usleep_range(10000, 20000); 1442 1443 ctrl = rd32(E1000_CTRL); 1444 1445 hw_dbg("Issuing a global reset to MAC\n"); 1446 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); 1447 1448 ret_val = igb_get_auto_rd_done(hw); 1449 if (ret_val) { 1450 /* When auto config read does not complete, do not 1451 * return with an error. 
This can happen in situations 1452 * where there is no eeprom and prevents getting link. 1453 */ 1454 hw_dbg("Auto Read Done did not complete\n"); 1455 } 1456 1457 /* If EEPROM is not present, run manual init scripts */ 1458 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) 1459 igb_reset_init_script_82575(hw); 1460 1461 /* Clear any pending interrupt events. */ 1462 wr32(E1000_IMC, 0xffffffff); 1463 rd32(E1000_ICR); 1464 1465 /* Install any alternate MAC address into RAR0 */ 1466 ret_val = igb_check_alt_mac_addr(hw); 1467 1468 return ret_val; 1469} 1470 1471/** 1472 * igb_init_hw_82575 - Initialize hardware 1473 * @hw: pointer to the HW structure 1474 * 1475 * This inits the hardware readying it for operation. 1476 **/ 1477static s32 igb_init_hw_82575(struct e1000_hw *hw) 1478{ 1479 struct e1000_mac_info *mac = &hw->mac; 1480 s32 ret_val; 1481 u16 i, rar_count = mac->rar_entry_count; 1482 1483 /* Initialize identification LED */ 1484 ret_val = igb_id_led_init(hw); 1485 if (ret_val) { 1486 hw_dbg("Error initializing identification LED\n"); 1487 /* This is not fatal and we should not stop init due to this */ 1488 } 1489 1490 /* Disabling VLAN filtering */ 1491 hw_dbg("Initializing the IEEE VLAN\n"); 1492 if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) 1493 igb_clear_vfta_i350(hw); 1494 else 1495 igb_clear_vfta(hw); 1496 1497 /* Setup the receive address */ 1498 igb_init_rx_addrs(hw, rar_count); 1499 1500 /* Zero out the Multicast HASH table */ 1501 hw_dbg("Zeroing the MTA\n"); 1502 for (i = 0; i < mac->mta_reg_count; i++) 1503 array_wr32(E1000_MTA, i, 0); 1504 1505 /* Zero out the Unicast HASH table */ 1506 hw_dbg("Zeroing the UTA\n"); 1507 for (i = 0; i < mac->uta_reg_count; i++) 1508 array_wr32(E1000_UTA, i, 0); 1509 1510 /* Setup link and flow control */ 1511 ret_val = igb_setup_link(hw); 1512 1513 /* Clear all of the statistics registers (clear on read). 
It is 1514 * important that we do this after we have tried to establish link 1515 * because the symbol error count will increment wildly if there 1516 * is no link. 1517 */ 1518 igb_clear_hw_cntrs_82575(hw); 1519 return ret_val; 1520} 1521 1522/** 1523 * igb_setup_copper_link_82575 - Configure copper link settings 1524 * @hw: pointer to the HW structure 1525 * 1526 * Configures the link for auto-neg or forced speed and duplex. Then we check 1527 * for link, once link is established calls to configure collision distance 1528 * and flow control are called. 1529 **/ 1530static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) 1531{ 1532 u32 ctrl; 1533 s32 ret_val; 1534 u32 phpm_reg; 1535 1536 ctrl = rd32(E1000_CTRL); 1537 ctrl |= E1000_CTRL_SLU; 1538 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1539 wr32(E1000_CTRL, ctrl); 1540 1541 /* Clear Go Link Disconnect bit on supported devices */ 1542 switch (hw->mac.type) { 1543 case e1000_82580: 1544 case e1000_i350: 1545 case e1000_i210: 1546 case e1000_i211: 1547 phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); 1548 phpm_reg &= ~E1000_82580_PM_GO_LINKD; 1549 wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); 1550 break; 1551 default: 1552 break; 1553 } 1554 1555 ret_val = igb_setup_serdes_link_82575(hw); 1556 if (ret_val) 1557 goto out; 1558 1559 if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { 1560 /* allow time for SFP cage time to power up phy */ 1561 msleep(300); 1562 1563 ret_val = hw->phy.ops.reset(hw); 1564 if (ret_val) { 1565 hw_dbg("Error resetting the PHY.\n"); 1566 goto out; 1567 } 1568 } 1569 switch (hw->phy.type) { 1570 case e1000_phy_i210: 1571 case e1000_phy_m88: 1572 switch (hw->phy.id) { 1573 case I347AT4_E_PHY_ID: 1574 case M88E1112_E_PHY_ID: 1575 case M88E1543_E_PHY_ID: 1576 case I210_I_PHY_ID: 1577 ret_val = igb_copper_link_setup_m88_gen2(hw); 1578 break; 1579 default: 1580 ret_val = igb_copper_link_setup_m88(hw); 1581 break; 1582 } 1583 break; 1584 case e1000_phy_igp_3: 1585 ret_val = 
igb_copper_link_setup_igp(hw); 1586 break; 1587 case e1000_phy_82580: 1588 ret_val = igb_copper_link_setup_82580(hw); 1589 break; 1590 default: 1591 ret_val = -E1000_ERR_PHY; 1592 break; 1593 } 1594 1595 if (ret_val) 1596 goto out; 1597 1598 ret_val = igb_setup_copper_link(hw); 1599out: 1600 return ret_val; 1601} 1602 1603/** 1604 * igb_setup_serdes_link_82575 - Setup link for serdes 1605 * @hw: pointer to the HW structure 1606 * 1607 * Configure the physical coding sub-layer (PCS) link. The PCS link is 1608 * used on copper connections where the serialized gigabit media independent 1609 * interface (sgmii), or serdes fiber is being used. Configures the link 1610 * for auto-negotiation or forces speed/duplex. 1611 **/ 1612static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) 1613{ 1614 u32 ctrl_ext, ctrl_reg, reg, anadv_reg; 1615 bool pcs_autoneg; 1616 s32 ret_val = 0; 1617 u16 data; 1618 1619 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1620 !igb_sgmii_active_82575(hw)) 1621 return ret_val; 1622 1623 1624 /* On the 82575, SerDes loopback mode persists until it is 1625 * explicitly turned off or a power cycle is performed. A read to 1626 * the register does not indicate its status. Therefore, we ensure 1627 * loopback mode is disabled during initialization. 
1628 */ 1629 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1630 1631 /* power on the sfp cage if present and turn on I2C */ 1632 ctrl_ext = rd32(E1000_CTRL_EXT); 1633 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; 1634 ctrl_ext |= E1000_CTRL_I2C_ENA; 1635 wr32(E1000_CTRL_EXT, ctrl_ext); 1636 1637 ctrl_reg = rd32(E1000_CTRL); 1638 ctrl_reg |= E1000_CTRL_SLU; 1639 1640 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { 1641 /* set both sw defined pins */ 1642 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; 1643 1644 /* Set switch control to serdes energy detect */ 1645 reg = rd32(E1000_CONNSW); 1646 reg |= E1000_CONNSW_ENRGSRC; 1647 wr32(E1000_CONNSW, reg); 1648 } 1649 1650 reg = rd32(E1000_PCS_LCTL); 1651 1652 /* default pcs_autoneg to the same setting as mac autoneg */ 1653 pcs_autoneg = hw->mac.autoneg; 1654 1655 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { 1656 case E1000_CTRL_EXT_LINK_MODE_SGMII: 1657 /* sgmii mode lets the phy handle forcing speed/duplex */ 1658 pcs_autoneg = true; 1659 /* autoneg time out should be disabled for SGMII mode */ 1660 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); 1661 break; 1662 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: 1663 /* disable PCS autoneg and support parallel detect only */ 1664 pcs_autoneg = false; 1665 default: 1666 if (hw->mac.type == e1000_82575 || 1667 hw->mac.type == e1000_82576) { 1668 ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); 1669 if (ret_val) { 1670 hw_dbg(KERN_DEBUG "NVM Read Error\n\n"); 1671 return ret_val; 1672 } 1673 1674 if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) 1675 pcs_autoneg = false; 1676 } 1677 1678 /* non-SGMII modes only supports a speed of 1000/Full for the 1679 * link so it is best to just force the MAC and let the pcs 1680 * link either autoneg or be forced to 1000/Full 1681 */ 1682 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1683 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1684 1685 /* set speed of 1000/Full if speed/duplex is forced */ 1686 reg |= 
E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; 1687 break; 1688 } 1689 1690 wr32(E1000_CTRL, ctrl_reg); 1691 1692 /* New SerDes mode allows for forcing speed or autonegotiating speed 1693 * at 1gb. Autoneg should be default set by most drivers. This is the 1694 * mode that will be compatible with older link partners and switches. 1695 * However, both are supported by the hardware and some drivers/tools. 1696 */ 1697 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | 1698 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); 1699 1700 if (pcs_autoneg) { 1701 /* Set PCS register for autoneg */ 1702 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1703 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1704 1705 /* Disable force flow control for autoneg */ 1706 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; 1707 1708 /* Configure flow control advertisement for autoneg */ 1709 anadv_reg = rd32(E1000_PCS_ANADV); 1710 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); 1711 switch (hw->fc.requested_mode) { 1712 case e1000_fc_full: 1713 case e1000_fc_rx_pause: 1714 anadv_reg |= E1000_TXCW_ASM_DIR; 1715 anadv_reg |= E1000_TXCW_PAUSE; 1716 break; 1717 case e1000_fc_tx_pause: 1718 anadv_reg |= E1000_TXCW_ASM_DIR; 1719 break; 1720 default: 1721 break; 1722 } 1723 wr32(E1000_PCS_ANADV, anadv_reg); 1724 1725 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1726 } else { 1727 /* Set PCS register for forced link */ 1728 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ 1729 1730 /* Force flow control for forced link */ 1731 reg |= E1000_PCS_LCTL_FORCE_FCTRL; 1732 1733 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1734 } 1735 1736 wr32(E1000_PCS_LCTL, reg); 1737 1738 if (!pcs_autoneg && !igb_sgmii_active_82575(hw)) 1739 igb_force_mac_fc(hw); 1740 1741 return ret_val; 1742} 1743 1744/** 1745 * igb_sgmii_active_82575 - Return sgmii state 1746 * @hw: pointer to the HW structure 1747 * 1748 * 82575 silicon has a serialized gigabit media independent interface 
(sgmii)
 * which can be enabled for use in the embedded applications.  Simply
 * return the current state of the sgmii interface.
 **/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	return dev_spec->sgmii_active;
}

/**
 * igb_reset_init_script_82575 - Inits HW defaults after reset
 * @hw: pointer to the HW structure
 *
 * Inits recommended HW defaults after a reset when there is no EEPROM
 * detected.  This is only for the 82575.
 **/
static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
{
	if (hw->mac.type == e1000_82575) {
		hw_dbg("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);

		/* PCIe lanes configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);

		/* PCIe PLL Configuration */
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
	}

	return 0;
}

/**
 * igb_read_mac_addr_82575 - Read device MAC address
 * @hw: pointer to the HW structure
 **/
static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = igb_check_alt_mac_addr(hw);
	if (ret_val)
		goto out;

	ret_val = igb_read_mac_addr(hw);

out:
	return ret_val;
}

/**
 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}

/**
 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers.
 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	igb_clear_hw_cntrs_base(hw);

	/* packet size bucket counters (rx then tx) */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* error and transmit status counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* management packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	/* interrupt assertion/cause counters */
	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	/* host/BMC traffic counters */
	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}

/**
 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable if managability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo.  This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* workaround only needed on 82575 with manageability receive on */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = rd32(E1000_RFCTL);
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}

/**
 * igb_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec.  To address this we need to
 * increase the value to either 10ms to 200ms for capability version 1 config,
 * or 16ms to 55ms for version 2.
 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					&pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					 &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}

/**
 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 * @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 * enables/disables L2 switch anti-spoofing functionality.
 **/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
	u32 reg_val, reg_offset;

	/* register location differs per MAC generation */
	switch (hw->mac.type) {
	case e1000_82576:
		reg_offset = E1000_DTXSWC;
		break;
	case e1000_i350:
	case e1000_i354:
		reg_offset = E1000_TXSWC;
		break;
	default:
		return;
	}

	reg_val = rd32(reg_offset);
	if (enable) {
		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
			    E1000_DTXSWC_VLAN_SPOOF_MASK);
		/* The PF can spoof - it has to in order to
		 * support emulation mode NICs.  The XOR clears the PF's
		 * MAC- and VLAN-spoof-check bits that the mask just set.
		 */
		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
	} else {
		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
			     E1000_DTXSWC_VLAN_SPOOF_MASK);
	}
	wr32(reg_offset, reg_val);
}

/**
 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 *
 * enables/disables L2 switch loopback functionality.
 **/
void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
	u32 dtxswc;

	switch (hw->mac.type) {
	case e1000_82576:
		dtxswc = rd32(E1000_DTXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_DTXSWC, dtxswc);
		break;
	case e1000_i354:
	case e1000_i350:
		/* i350/i354 moved the switch control bits to TXSWC */
		dtxswc = rd32(E1000_TXSWC);
		if (enable)
			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		else
			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
		wr32(E1000_TXSWC, dtxswc);
		break;
	default:
		/* Currently no other hardware supports loopback */
		break;
	}

}

/**
 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 *
 * enables/disables replication of packets across multiple pools.
2096 **/ 2097void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) 2098{ 2099 u32 vt_ctl = rd32(E1000_VT_CTL); 2100 2101 if (enable) 2102 vt_ctl |= E1000_VT_CTL_VM_REPL_EN; 2103 else 2104 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; 2105 2106 wr32(E1000_VT_CTL, vt_ctl); 2107} 2108 2109/** 2110 * igb_read_phy_reg_82580 - Read 82580 MDI control register 2111 * @hw: pointer to the HW structure 2112 * @offset: register offset to be read 2113 * @data: pointer to the read data 2114 * 2115 * Reads the MDI control register in the PHY at offset and stores the 2116 * information read to data. 2117 **/ 2118static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) 2119{ 2120 s32 ret_val; 2121 2122 ret_val = hw->phy.ops.acquire(hw); 2123 if (ret_val) 2124 goto out; 2125 2126 ret_val = igb_read_phy_reg_mdic(hw, offset, data); 2127 2128 hw->phy.ops.release(hw); 2129 2130out: 2131 return ret_val; 2132} 2133 2134/** 2135 * igb_write_phy_reg_82580 - Write 82580 MDI control register 2136 * @hw: pointer to the HW structure 2137 * @offset: register offset to write to 2138 * @data: data to write to register at offset 2139 * 2140 * Writes data to MDI control register in the PHY at offset. 2141 **/ 2142static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) 2143{ 2144 s32 ret_val; 2145 2146 2147 ret_val = hw->phy.ops.acquire(hw); 2148 if (ret_val) 2149 goto out; 2150 2151 ret_val = igb_write_phy_reg_mdic(hw, offset, data); 2152 2153 hw->phy.ops.release(hw); 2154 2155out: 2156 return ret_val; 2157} 2158 2159/** 2160 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits 2161 * @hw: pointer to the HW structure 2162 * 2163 * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on 2164 * the values found in the EEPROM. This addresses an issue in which these 2165 * bits are not restored from EEPROM after reset. 
2166 **/ 2167static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw) 2168{ 2169 s32 ret_val = 0; 2170 u32 mdicnfg; 2171 u16 nvm_data = 0; 2172 2173 if (hw->mac.type != e1000_82580) 2174 goto out; 2175 if (!igb_sgmii_active_82575(hw)) 2176 goto out; 2177 2178 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 2179 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 2180 &nvm_data); 2181 if (ret_val) { 2182 hw_dbg("NVM Read Error\n"); 2183 goto out; 2184 } 2185 2186 mdicnfg = rd32(E1000_MDICNFG); 2187 if (nvm_data & NVM_WORD24_EXT_MDIO) 2188 mdicnfg |= E1000_MDICNFG_EXT_MDIO; 2189 if (nvm_data & NVM_WORD24_COM_MDIO) 2190 mdicnfg |= E1000_MDICNFG_COM_MDIO; 2191 wr32(E1000_MDICNFG, mdicnfg); 2192out: 2193 return ret_val; 2194} 2195 2196/** 2197 * igb_reset_hw_82580 - Reset hardware 2198 * @hw: pointer to the HW structure 2199 * 2200 * This resets function or entire device (all ports, etc.) 2201 * to a known state. 2202 **/ 2203static s32 igb_reset_hw_82580(struct e1000_hw *hw) 2204{ 2205 s32 ret_val = 0; 2206 /* BH SW mailbox bit in SW_FW_SYNC */ 2207 u16 swmbsw_mask = E1000_SW_SYNCH_MB; 2208 u32 ctrl; 2209 bool global_device_reset = hw->dev_spec._82575.global_device_reset; 2210 2211 hw->dev_spec._82575.global_device_reset = false; 2212 2213 /* due to hw errata, global device reset doesn't always 2214 * work on 82580 2215 */ 2216 if (hw->mac.type == e1000_82580) 2217 global_device_reset = false; 2218 2219 /* Get current control state. */ 2220 ctrl = rd32(E1000_CTRL); 2221 2222 /* Prevent the PCI-E bus from sticking if there is no TLP connection 2223 * on the last TLP read/write transaction when MAC is reset. 
2224 */ 2225 ret_val = igb_disable_pcie_master(hw); 2226 if (ret_val) 2227 hw_dbg("PCI-E Master disable polling has failed.\n"); 2228 2229 hw_dbg("Masking off all interrupts\n"); 2230 wr32(E1000_IMC, 0xffffffff); 2231 wr32(E1000_RCTL, 0); 2232 wr32(E1000_TCTL, E1000_TCTL_PSP); 2233 wrfl(); 2234 2235 usleep_range(10000, 11000); 2236 2237 /* Determine whether or not a global dev reset is requested */ 2238 if (global_device_reset && 2239 hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) 2240 global_device_reset = false; 2241 2242 if (global_device_reset && 2243 !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET)) 2244 ctrl |= E1000_CTRL_DEV_RST; 2245 else 2246 ctrl |= E1000_CTRL_RST; 2247 2248 wr32(E1000_CTRL, ctrl); 2249 wrfl(); 2250 2251 /* Add delay to insure DEV_RST has time to complete */ 2252 if (global_device_reset) 2253 usleep_range(5000, 6000); 2254 2255 ret_val = igb_get_auto_rd_done(hw); 2256 if (ret_val) { 2257 /* When auto config read does not complete, do not 2258 * return with an error. This can happen in situations 2259 * where there is no eeprom and prevents getting link. 2260 */ 2261 hw_dbg("Auto Read Done did not complete\n"); 2262 } 2263 2264 /* clear global device reset status bit */ 2265 wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET); 2266 2267 /* Clear any pending interrupt events. */ 2268 wr32(E1000_IMC, 0xffffffff); 2269 rd32(E1000_ICR); 2270 2271 ret_val = igb_reset_mdicnfg_82580(hw); 2272 if (ret_val) 2273 hw_dbg("Could not reset MDICNFG based on EEPROM\n"); 2274 2275 /* Install any alternate MAC address into RAR0 */ 2276 ret_val = igb_check_alt_mac_addr(hw); 2277 2278 /* Release semaphore */ 2279 if (global_device_reset) 2280 hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); 2281 2282 return ret_val; 2283} 2284 2285/** 2286 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size 2287 * @data: data received by reading RXPBS register 2288 * 2289 * The 82580 uses a table based approach for packet buffer allocation sizes. 
2290 * This function converts the retrieved value into the correct table value 2291 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 2292 * 0x0 36 72 144 1 2 4 8 16 2293 * 0x8 35 70 140 rsv rsv rsv rsv rsv 2294 */ 2295u16 igb_rxpbs_adjust_82580(u32 data) 2296{ 2297 u16 ret_val = 0; 2298 2299 if (data < ARRAY_SIZE(e1000_82580_rxpbs_table)) 2300 ret_val = e1000_82580_rxpbs_table[data]; 2301 2302 return ret_val; 2303} 2304 2305/** 2306 * igb_validate_nvm_checksum_with_offset - Validate EEPROM 2307 * checksum 2308 * @hw: pointer to the HW structure 2309 * @offset: offset in words of the checksum protected region 2310 * 2311 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 2312 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 2313 **/ 2314static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, 2315 u16 offset) 2316{ 2317 s32 ret_val = 0; 2318 u16 checksum = 0; 2319 u16 i, nvm_data; 2320 2321 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { 2322 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2323 if (ret_val) { 2324 hw_dbg("NVM Read Error\n"); 2325 goto out; 2326 } 2327 checksum += nvm_data; 2328 } 2329 2330 if (checksum != (u16) NVM_SUM) { 2331 hw_dbg("NVM Checksum Invalid\n"); 2332 ret_val = -E1000_ERR_NVM; 2333 goto out; 2334 } 2335 2336out: 2337 return ret_val; 2338} 2339 2340/** 2341 * igb_update_nvm_checksum_with_offset - Update EEPROM 2342 * checksum 2343 * @hw: pointer to the HW structure 2344 * @offset: offset in words of the checksum protected region 2345 * 2346 * Updates the EEPROM checksum by reading/adding each word of the EEPROM 2347 * up to the checksum. Then calculates the EEPROM checksum and writes the 2348 * value to the EEPROM. 
2349 **/ 2350static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) 2351{ 2352 s32 ret_val; 2353 u16 checksum = 0; 2354 u16 i, nvm_data; 2355 2356 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { 2357 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); 2358 if (ret_val) { 2359 hw_dbg("NVM Read Error while updating checksum.\n"); 2360 goto out; 2361 } 2362 checksum += nvm_data; 2363 } 2364 checksum = (u16) NVM_SUM - checksum; 2365 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, 2366 &checksum); 2367 if (ret_val) 2368 hw_dbg("NVM Write Error while updating checksum.\n"); 2369 2370out: 2371 return ret_val; 2372} 2373 2374/** 2375 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum 2376 * @hw: pointer to the HW structure 2377 * 2378 * Calculates the EEPROM section checksum by reading/adding each word of 2379 * the EEPROM and then verifies that the sum of the EEPROM is 2380 * equal to 0xBABA. 2381 **/ 2382static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) 2383{ 2384 s32 ret_val = 0; 2385 u16 eeprom_regions_count = 1; 2386 u16 j, nvm_data; 2387 u16 nvm_offset; 2388 2389 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2390 if (ret_val) { 2391 hw_dbg("NVM Read Error\n"); 2392 goto out; 2393 } 2394 2395 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { 2396 /* if checksums compatibility bit is set validate checksums 2397 * for all 4 ports. 2398 */ 2399 eeprom_regions_count = 4; 2400 } 2401 2402 for (j = 0; j < eeprom_regions_count; j++) { 2403 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2404 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2405 nvm_offset); 2406 if (ret_val != 0) 2407 goto out; 2408 } 2409 2410out: 2411 return ret_val; 2412} 2413 2414/** 2415 * igb_update_nvm_checksum_82580 - Update EEPROM checksum 2416 * @hw: pointer to the HW structure 2417 * 2418 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2419 * each word of the EEPROM up to the checksum. 
Then calculates the EEPROM 2420 * checksum and writes the value to the EEPROM. 2421 **/ 2422static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) 2423{ 2424 s32 ret_val; 2425 u16 j, nvm_data; 2426 u16 nvm_offset; 2427 2428 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); 2429 if (ret_val) { 2430 hw_dbg("NVM Read Error while updating checksum compatibility bit.\n"); 2431 goto out; 2432 } 2433 2434 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { 2435 /* set compatibility bit to validate checksums appropriately */ 2436 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; 2437 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, 2438 &nvm_data); 2439 if (ret_val) { 2440 hw_dbg("NVM Write Error while updating checksum compatibility bit.\n"); 2441 goto out; 2442 } 2443 } 2444 2445 for (j = 0; j < 4; j++) { 2446 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2447 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2448 if (ret_val) 2449 goto out; 2450 } 2451 2452out: 2453 return ret_val; 2454} 2455 2456/** 2457 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum 2458 * @hw: pointer to the HW structure 2459 * 2460 * Calculates the EEPROM section checksum by reading/adding each word of 2461 * the EEPROM and then verifies that the sum of the EEPROM is 2462 * equal to 0xBABA. 2463 **/ 2464static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) 2465{ 2466 s32 ret_val = 0; 2467 u16 j; 2468 u16 nvm_offset; 2469 2470 for (j = 0; j < 4; j++) { 2471 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2472 ret_val = igb_validate_nvm_checksum_with_offset(hw, 2473 nvm_offset); 2474 if (ret_val != 0) 2475 goto out; 2476 } 2477 2478out: 2479 return ret_val; 2480} 2481 2482/** 2483 * igb_update_nvm_checksum_i350 - Update EEPROM checksum 2484 * @hw: pointer to the HW structure 2485 * 2486 * Updates the EEPROM section checksums for all 4 ports by reading/adding 2487 * each word of the EEPROM up to the checksum. 
Then calculates the EEPROM 2488 * checksum and writes the value to the EEPROM. 2489 **/ 2490static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) 2491{ 2492 s32 ret_val = 0; 2493 u16 j; 2494 u16 nvm_offset; 2495 2496 for (j = 0; j < 4; j++) { 2497 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); 2498 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); 2499 if (ret_val != 0) 2500 goto out; 2501 } 2502 2503out: 2504 return ret_val; 2505} 2506 2507/** 2508 * __igb_access_emi_reg - Read/write EMI register 2509 * @hw: pointer to the HW structure 2510 * @addr: EMI address to program 2511 * @data: pointer to value to read/write from/to the EMI address 2512 * @read: boolean flag to indicate read or write 2513 **/ 2514static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address, 2515 u16 *data, bool read) 2516{ 2517 s32 ret_val = 0; 2518 2519 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); 2520 if (ret_val) 2521 return ret_val; 2522 2523 if (read) 2524 ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); 2525 else 2526 ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); 2527 2528 return ret_val; 2529} 2530 2531/** 2532 * igb_read_emi_reg - Read Extended Management Interface register 2533 * @hw: pointer to the HW structure 2534 * @addr: EMI address to program 2535 * @data: value to be read from the EMI address 2536 **/ 2537s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) 2538{ 2539 return __igb_access_emi_reg(hw, addr, data, true); 2540} 2541 2542/** 2543 * igb_set_eee_i350 - Enable/disable EEE support 2544 * @hw: pointer to the HW structure 2545 * 2546 * Enable/disable EEE based on setting in dev_spec structure. 
2547 * 2548 **/ 2549s32 igb_set_eee_i350(struct e1000_hw *hw) 2550{ 2551 u32 ipcnfg, eeer; 2552 2553 if ((hw->mac.type < e1000_i350) || 2554 (hw->phy.media_type != e1000_media_type_copper)) 2555 goto out; 2556 ipcnfg = rd32(E1000_IPCNFG); 2557 eeer = rd32(E1000_EEER); 2558 2559 /* enable or disable per user setting */ 2560 if (!(hw->dev_spec._82575.eee_disable)) { 2561 u32 eee_su = rd32(E1000_EEE_SU); 2562 2563 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); 2564 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | 2565 E1000_EEER_LPI_FC); 2566 2567 /* This bit should not be set in normal operation. */ 2568 if (eee_su & E1000_EEE_SU_LPI_CLK_STP) 2569 hw_dbg("LPI Clock Stop Bit should not be set!\n"); 2570 2571 } else { 2572 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2573 E1000_IPCNFG_EEE_100M_AN); 2574 eeer &= ~(E1000_EEER_TX_LPI_EN | 2575 E1000_EEER_RX_LPI_EN | 2576 E1000_EEER_LPI_FC); 2577 } 2578 wr32(E1000_IPCNFG, ipcnfg); 2579 wr32(E1000_EEER, eeer); 2580 rd32(E1000_IPCNFG); 2581 rd32(E1000_EEER); 2582out: 2583 2584 return 0; 2585} 2586 2587/** 2588 * igb_set_eee_i354 - Enable/disable EEE support 2589 * @hw: pointer to the HW structure 2590 * 2591 * Enable/disable EEE legacy mode based on setting in dev_spec structure. 2592 * 2593 **/ 2594s32 igb_set_eee_i354(struct e1000_hw *hw) 2595{ 2596 struct e1000_phy_info *phy = &hw->phy; 2597 s32 ret_val = 0; 2598 u16 phy_data; 2599 2600 if ((hw->phy.media_type != e1000_media_type_copper) || 2601 (phy->id != M88E1543_E_PHY_ID)) 2602 goto out; 2603 2604 if (!hw->dev_spec._82575.eee_disable) { 2605 /* Switch to PHY page 18. 
*/ 2606 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); 2607 if (ret_val) 2608 goto out; 2609 2610 ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, 2611 &phy_data); 2612 if (ret_val) 2613 goto out; 2614 2615 phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; 2616 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, 2617 phy_data); 2618 if (ret_val) 2619 goto out; 2620 2621 /* Return the PHY to page 0. */ 2622 ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); 2623 if (ret_val) 2624 goto out; 2625 2626 /* Turn on EEE advertisement. */ 2627 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2628 E1000_EEE_ADV_DEV_I354, 2629 &phy_data); 2630 if (ret_val) 2631 goto out; 2632 2633 phy_data |= E1000_EEE_ADV_100_SUPPORTED | 2634 E1000_EEE_ADV_1000_SUPPORTED; 2635 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2636 E1000_EEE_ADV_DEV_I354, 2637 phy_data); 2638 } else { 2639 /* Turn off EEE advertisement. */ 2640 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2641 E1000_EEE_ADV_DEV_I354, 2642 &phy_data); 2643 if (ret_val) 2644 goto out; 2645 2646 phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | 2647 E1000_EEE_ADV_1000_SUPPORTED); 2648 ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, 2649 E1000_EEE_ADV_DEV_I354, 2650 phy_data); 2651 } 2652 2653out: 2654 return ret_val; 2655} 2656 2657/** 2658 * igb_get_eee_status_i354 - Get EEE status 2659 * @hw: pointer to the HW structure 2660 * @status: EEE status 2661 * 2662 * Get EEE status by guessing based on whether Tx or Rx LPI indications have 2663 * been received. 2664 **/ 2665s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) 2666{ 2667 struct e1000_phy_info *phy = &hw->phy; 2668 s32 ret_val = 0; 2669 u16 phy_data; 2670 2671 /* Check if EEE is supported on this device. 
*/ 2672 if ((hw->phy.media_type != e1000_media_type_copper) || 2673 (phy->id != M88E1543_E_PHY_ID)) 2674 goto out; 2675 2676 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, 2677 E1000_PCS_STATUS_DEV_I354, 2678 &phy_data); 2679 if (ret_val) 2680 goto out; 2681 2682 *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | 2683 E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false; 2684 2685out: 2686 return ret_val; 2687} 2688 2689static const u8 e1000_emc_temp_data[4] = { 2690 E1000_EMC_INTERNAL_DATA, 2691 E1000_EMC_DIODE1_DATA, 2692 E1000_EMC_DIODE2_DATA, 2693 E1000_EMC_DIODE3_DATA 2694}; 2695static const u8 e1000_emc_therm_limit[4] = { 2696 E1000_EMC_INTERNAL_THERM_LIMIT, 2697 E1000_EMC_DIODE1_THERM_LIMIT, 2698 E1000_EMC_DIODE2_THERM_LIMIT, 2699 E1000_EMC_DIODE3_THERM_LIMIT 2700}; 2701 2702#ifdef CONFIG_IGB_HWMON 2703/** 2704 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data 2705 * @hw: pointer to hardware structure 2706 * 2707 * Updates the temperatures in mac.thermal_sensor_data 2708 **/ 2709static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw) 2710{ 2711 u16 ets_offset; 2712 u16 ets_cfg; 2713 u16 ets_sensor; 2714 u8 num_sensors; 2715 u8 sensor_index; 2716 u8 sensor_location; 2717 u8 i; 2718 struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 2719 2720 if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) 2721 return E1000_NOT_IMPLEMENTED; 2722 2723 data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF); 2724 2725 /* Return the internal sensor only if ETS is unsupported */ 2726 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2727 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2728 return 0; 2729 2730 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2731 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2732 != NVM_ETS_TYPE_EMC) 2733 return E1000_NOT_IMPLEMENTED; 2734 2735 num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); 2736 if (num_sensors > E1000_MAX_SENSORS) 2737 num_sensors = 
E1000_MAX_SENSORS; 2738 2739 for (i = 1; i < num_sensors; i++) { 2740 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); 2741 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> 2742 NVM_ETS_DATA_INDEX_SHIFT); 2743 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> 2744 NVM_ETS_DATA_LOC_SHIFT); 2745 2746 if (sensor_location != 0) 2747 hw->phy.ops.read_i2c_byte(hw, 2748 e1000_emc_temp_data[sensor_index], 2749 E1000_I2C_THERMAL_SENSOR_ADDR, 2750 &data->sensor[i].temp); 2751 } 2752 return 0; 2753} 2754 2755/** 2756 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds 2757 * @hw: pointer to hardware structure 2758 * 2759 * Sets the thermal sensor thresholds according to the NVM map 2760 * and save off the threshold and location values into mac.thermal_sensor_data 2761 **/ 2762static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw) 2763{ 2764 u16 ets_offset; 2765 u16 ets_cfg; 2766 u16 ets_sensor; 2767 u8 low_thresh_delta; 2768 u8 num_sensors; 2769 u8 sensor_index; 2770 u8 sensor_location; 2771 u8 therm_limit; 2772 u8 i; 2773 struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; 2774 2775 if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0)) 2776 return E1000_NOT_IMPLEMENTED; 2777 2778 memset(data, 0, sizeof(struct e1000_thermal_sensor_data)); 2779 2780 data->sensor[0].location = 0x1; 2781 data->sensor[0].caution_thresh = 2782 (rd32(E1000_THHIGHTC) & 0xFF); 2783 data->sensor[0].max_op_thresh = 2784 (rd32(E1000_THLOWTC) & 0xFF); 2785 2786 /* Return the internal sensor only if ETS is unsupported */ 2787 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset); 2788 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) 2789 return 0; 2790 2791 hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg); 2792 if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) 2793 != NVM_ETS_TYPE_EMC) 2794 return E1000_NOT_IMPLEMENTED; 2795 2796 low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> 2797 
NVM_ETS_LTHRES_DELTA_SHIFT); 2798 num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); 2799 2800 for (i = 1; i <= num_sensors; i++) { 2801 hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor); 2802 sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> 2803 NVM_ETS_DATA_INDEX_SHIFT); 2804 sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> 2805 NVM_ETS_DATA_LOC_SHIFT); 2806 therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; 2807 2808 hw->phy.ops.write_i2c_byte(hw, 2809 e1000_emc_therm_limit[sensor_index], 2810 E1000_I2C_THERMAL_SENSOR_ADDR, 2811 therm_limit); 2812 2813 if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { 2814 data->sensor[i].location = sensor_location; 2815 data->sensor[i].caution_thresh = therm_limit; 2816 data->sensor[i].max_op_thresh = therm_limit - 2817 low_thresh_delta; 2818 } 2819 } 2820 return 0; 2821} 2822 2823#endif 2824static struct e1000_mac_operations e1000_mac_ops_82575 = { 2825 .init_hw = igb_init_hw_82575, 2826 .check_for_link = igb_check_for_link_82575, 2827 .rar_set = igb_rar_set, 2828 .read_mac_addr = igb_read_mac_addr_82575, 2829 .get_speed_and_duplex = igb_get_link_up_info_82575, 2830#ifdef CONFIG_IGB_HWMON 2831 .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic, 2832 .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic, 2833#endif 2834}; 2835 2836static struct e1000_phy_operations e1000_phy_ops_82575 = { 2837 .acquire = igb_acquire_phy_82575, 2838 .get_cfg_done = igb_get_cfg_done_82575, 2839 .release = igb_release_phy_82575, 2840 .write_i2c_byte = igb_write_i2c_byte, 2841 .read_i2c_byte = igb_read_i2c_byte, 2842}; 2843 2844static struct e1000_nvm_operations e1000_nvm_ops_82575 = { 2845 .acquire = igb_acquire_nvm_82575, 2846 .read = igb_read_nvm_eerd, 2847 .release = igb_release_nvm_82575, 2848 .write = igb_write_nvm_spi, 2849}; 2850 2851const struct e1000_info e1000_82575_info = { 2852 .get_invariants = igb_get_invariants_82575, 2853 .mac_ops = &e1000_mac_ops_82575, 2854 .phy_ops = 
&e1000_phy_ops_82575, 2855 .nvm_ops = &e1000_nvm_ops_82575, 2856}; 2857 2858