comparison usr/src/uts/common/io/ixgbe/ixgbe_82599.c @ 13761:229af35d14fb

3014 Intel X540 Support
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Gordon Ross <gwr@nexenta.com>
author Dan McDonald <danmcd@nexenta.com>
date Sat, 28 Jul 2012 22:20:23 -0400
parents 7b5944437191
children e0e5ba2d65b6
13760:527f645bd884 13761:229af35d14fb
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE. 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 ******************************************************************************/ 32 ******************************************************************************/
33 /*$FreeBSD$*/ 33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.8 2012/07/05 20:51:44 jfv Exp $*/
34 34
35 #include "ixgbe_type.h" 35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
36 #include "ixgbe_api.h" 37 #include "ixgbe_api.h"
37 #include "ixgbe_common.h" 38 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h" 39 #include "ixgbe_phy.h"
39 40
40 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
41 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
43 bool *autoneg);
44 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
45 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
46 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
47 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
48 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
49 ixgbe_link_speed speed, bool autoneg,
50 bool autoneg_wait_to_complete);
51 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
52 ixgbe_link_speed speed, bool autoneg,
53 bool autoneg_wait_to_complete);
54 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
55 bool autoneg_wait_to_complete);
56 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
57 ixgbe_link_speed speed,
58 bool autoneg,
59 bool autoneg_wait_to_complete);
60 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
61 ixgbe_link_speed speed, 42 ixgbe_link_speed speed,
62 bool autoneg, 43 bool autoneg,
63 bool autoneg_wait_to_complete); 44 bool autoneg_wait_to_complete);
64 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
65 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
66 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
67 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
68 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
69 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
70 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
71 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
72 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
73 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
74 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 45 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
75 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); 46 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
76 47 u16 offset, u16 *data);
48 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
49 u16 words, u16 *data);
77 50
78 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 51 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
79 { 52 {
80 struct ixgbe_mac_info *mac = &hw->mac; 53 struct ixgbe_mac_info *mac = &hw->mac;
81 54
82 DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); 55 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
83 56
84 /* enable the laser control functions for SFP+ fiber */ 57 /* enable the laser control functions for SFP+ fiber */
85 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { 58 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
86 mac->ops.disable_tx_laser = 59 mac->ops.disable_tx_laser =
87 &ixgbe_disable_tx_laser_multispeed_fiber; 60 &ixgbe_disable_tx_laser_multispeed_fiber;
88 mac->ops.enable_tx_laser = 61 mac->ops.enable_tx_laser =
89 &ixgbe_enable_tx_laser_multispeed_fiber; 62 &ixgbe_enable_tx_laser_multispeed_fiber;
90 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; 63 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
91 64
92 } else { 65 } else {
93 mac->ops.disable_tx_laser = NULL; 66 mac->ops.disable_tx_laser = NULL;
94 mac->ops.enable_tx_laser = NULL; 67 mac->ops.enable_tx_laser = NULL;
139 112
140 /* If copper media, overwrite with copper function pointers */ 113 /* If copper media, overwrite with copper function pointers */
141 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 114 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
142 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 115 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
143 mac->ops.get_link_capabilities = 116 mac->ops.get_link_capabilities =
144 &ixgbe_get_copper_link_capabilities_generic; 117 &ixgbe_get_copper_link_capabilities_generic;
145 } 118 }
146 119
147 /* Set necessary function pointers based on phy type */ 120 /* Set necessary function pointers based on phy type */
148 switch (hw->phy.type) { 121 switch (hw->phy.type) {
149 case ixgbe_phy_tn: 122 case ixgbe_phy_tn:
150 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 123 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
151 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 124 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
152 phy->ops.get_firmware_version = 125 phy->ops.get_firmware_version =
153 &ixgbe_get_phy_firmware_version_tnx; 126 &ixgbe_get_phy_firmware_version_tnx;
154 break;
155 case ixgbe_phy_aq:
156 phy->ops.get_firmware_version =
157 &ixgbe_get_phy_firmware_version_generic;
158 break; 127 break;
159 default: 128 default:
160 break; 129 break;
161 } 130 }
162 init_phy_ops_out: 131 init_phy_ops_out:
176 ixgbe_init_mac_link_ops_82599(hw); 145 ixgbe_init_mac_link_ops_82599(hw);
177 146
178 hw->phy.ops.reset = NULL; 147 hw->phy.ops.reset = NULL;
179 148
180 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 149 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
181 &data_offset); 150 &data_offset);
182 if (ret_val != IXGBE_SUCCESS) 151 if (ret_val != IXGBE_SUCCESS)
183 goto setup_sfp_out; 152 goto setup_sfp_out;
184 153
185 /* PHY config will finish before releasing the semaphore */ 154 /* PHY config will finish before releasing the semaphore */
186 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 155 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
156 IXGBE_GSSR_MAC_CSR_SM);
187 if (ret_val != IXGBE_SUCCESS) { 157 if (ret_val != IXGBE_SUCCESS) {
188 ret_val = IXGBE_ERR_SWFW_SYNC; 158 ret_val = IXGBE_ERR_SWFW_SYNC;
189 goto setup_sfp_out; 159 goto setup_sfp_out;
190 } 160 }
191 161
195 IXGBE_WRITE_FLUSH(hw); 165 IXGBE_WRITE_FLUSH(hw);
196 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 166 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
197 } 167 }
198 168
199 /* Release the semaphore */ 169 /* Release the semaphore */
200 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 170 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
201 /* Delay obtaining semaphore again to allow FW access */ 171 /* Delay obtaining semaphore again to allow FW access */
202 msec_delay(hw->eeprom.semaphore_delay); 172 msec_delay(hw->eeprom.semaphore_delay);
203 173
204 /* Now restart DSP by setting Restart_AN and clearing LMS */ 174 /* Now restart DSP by setting Restart_AN and clearing LMS */
205 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, 175 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
206 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | 176 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
207 IXGBE_AUTOC_AN_RESTART)); 177 IXGBE_AUTOC_AN_RESTART));
208 178
209 /* Wait for AN to leave state 0 */ 179 /* Wait for AN to leave state 0 */
210 for (i = 0; i < 10; i++) { 180 for (i = 0; i < 10; i++) {
211 msec_delay(4); 181 msec_delay(4);
212 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); 182 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
219 goto setup_sfp_out; 189 goto setup_sfp_out;
220 } 190 }
221 191
222 /* Restart DSP by setting Restart_AN and return to SFI mode */ 192 /* Restart DSP by setting Restart_AN and return to SFI mode */
223 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, 193 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
224 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | 194 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
225 IXGBE_AUTOC_AN_RESTART)); 195 IXGBE_AUTOC_AN_RESTART));
226 } 196 }
227 197
228 setup_sfp_out: 198 setup_sfp_out:
229 return ret_val; 199 return ret_val;
230 } 200 }
239 209
240 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) 210 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
241 { 211 {
242 struct ixgbe_mac_info *mac = &hw->mac; 212 struct ixgbe_mac_info *mac = &hw->mac;
243 struct ixgbe_phy_info *phy = &hw->phy; 213 struct ixgbe_phy_info *phy = &hw->phy;
214 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
244 s32 ret_val; 215 s32 ret_val;
245 216
246 DEBUGFUNC("ixgbe_init_ops_82599"); 217 DEBUGFUNC("ixgbe_init_ops_82599");
247 218
248 ret_val = ixgbe_init_phy_ops_generic(hw); 219 ret_val = ixgbe_init_phy_ops_generic(hw);
255 /* MAC */ 226 /* MAC */
256 mac->ops.reset_hw = &ixgbe_reset_hw_82599; 227 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
257 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2; 228 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
258 mac->ops.get_media_type = &ixgbe_get_media_type_82599; 229 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
259 mac->ops.get_supported_physical_layer = 230 mac->ops.get_supported_physical_layer =
260 &ixgbe_get_supported_physical_layer_82599; 231 &ixgbe_get_supported_physical_layer_82599;
232 mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
233 mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
261 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599; 234 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
262 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599; 235 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
263 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599; 236 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
264 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599; 237 mac->ops.start_hw = &ixgbe_start_hw_82599;
265 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic; 238 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
266 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic; 239 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
267 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic; 240 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
268 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic; 241 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
269 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic; 242 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
270 243
271 /* RAR, Multicast, VLAN */ 244 /* RAR, Multicast, VLAN */
272 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic; 245 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
246 mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
273 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic; 247 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
274 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic; 248 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
275 mac->rar_highwater = 1; 249 mac->rar_highwater = 1;
276 mac->ops.set_vfta = &ixgbe_set_vfta_generic; 250 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
251 mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
277 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; 252 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
278 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic; 253 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
279 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599; 254 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
280 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing; 255 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
281 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing; 256 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
282 257
283 /* Link */ 258 /* Link */
284 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599; 259 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
285 mac->ops.check_link = &ixgbe_check_mac_link_generic; 260 mac->ops.check_link = &ixgbe_check_mac_link_generic;
261 mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
286 ixgbe_init_mac_link_ops_82599(hw); 262 ixgbe_init_mac_link_ops_82599(hw);
287 263
288 mac->mcft_size = 128; 264 mac->mcft_size = 128;
289 mac->vft_size = 128; 265 mac->vft_size = 128;
290 mac->num_rar_entries = 128; 266 mac->num_rar_entries = 128;
291 mac->rx_pb_size = 512; 267 mac->rx_pb_size = 512;
292 mac->max_tx_queues = 128; 268 mac->max_tx_queues = 128;
293 mac->max_rx_queues = 128; 269 mac->max_rx_queues = 128;
294 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 270 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
271
272 mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
273 IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
274
275 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
276
277 /* EEPROM */
278 eeprom->ops.read = &ixgbe_read_eeprom_82599;
279 eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
280
281 /* Manageability interface */
282 mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
283
295 284
296 return ret_val; 285 return ret_val;
297 } 286 }
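
The assignments above are the whole point of the init_ops step: shared code never names 82599 routines directly, it dispatches through the mac/phy/eeprom ops tables populated here. A minimal caller sketch (illustrative only; example_bringup is not part of the driver):

	static s32
	example_bringup(struct ixgbe_hw *hw)
	{
		s32 ret_val = ixgbe_init_ops_82599(hw);

		if (ret_val != IXGBE_SUCCESS)
			return (ret_val);

		/* Reach the 82599 SFP setup routine through the table. */
		if (hw->mac.ops.setup_sfp != NULL)
			ret_val = hw->mac.ops.setup_sfp(hw);

		return (ret_val);
	}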
298 287
299 /** 288 /**
303 * @negotiation: TRUE when autoneg or autotry is enabled 292 * @negotiation: TRUE when autoneg or autotry is enabled
304 * 293 *
305 * Determines the link capabilities by reading the AUTOC register. 294 * Determines the link capabilities by reading the AUTOC register.
306 **/ 295 **/
307 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 296 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
308 ixgbe_link_speed *speed, 297 ixgbe_link_speed *speed,
309 bool *negotiation) 298 bool *negotiation)
310 { 299 {
311 s32 status = IXGBE_SUCCESS; 300 s32 status = IXGBE_SUCCESS;
312 u32 autoc = 0; 301 u32 autoc = 0;
313 302
314 DEBUGFUNC("ixgbe_get_link_capabilities_82599"); 303 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
315 304
316 305
317 /* Check if 1G SFP module. */ 306 /* Check if 1G SFP module. */
318 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 307 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
319 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { 308 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
309 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
310 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
320 *speed = IXGBE_LINK_SPEED_1GB_FULL; 311 *speed = IXGBE_LINK_SPEED_1GB_FULL;
321 *negotiation = TRUE; 312 *negotiation = TRUE;
322 goto out; 313 goto out;
323 } 314 }
324 315
386 goto out; 377 goto out;
387 } 378 }
388 379
389 if (hw->phy.multispeed_fiber) { 380 if (hw->phy.multispeed_fiber) {
390 *speed |= IXGBE_LINK_SPEED_10GB_FULL | 381 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
391 IXGBE_LINK_SPEED_1GB_FULL; 382 IXGBE_LINK_SPEED_1GB_FULL;
392 *negotiation = TRUE; 383 *negotiation = TRUE;
393 } 384 }
394 385
395 out: 386 out:
396 return status; 387 return status;
410 401
411 /* Detect if there is a copper PHY attached. */ 402 /* Detect if there is a copper PHY attached. */
412 switch (hw->phy.type) { 403 switch (hw->phy.type) {
413 case ixgbe_phy_cu_unknown: 404 case ixgbe_phy_cu_unknown:
414 case ixgbe_phy_tn: 405 case ixgbe_phy_tn:
415 case ixgbe_phy_aq:
416 media_type = ixgbe_media_type_copper; 406 media_type = ixgbe_media_type_copper;
417 goto out; 407 goto out;
418 default: 408 default:
419 break; 409 break;
420 } 410 }
457 * 447 *
458 * Configures link settings based on values in the ixgbe_hw struct. 448 * Configures link settings based on values in the ixgbe_hw struct.
459 * Restarts the link. Performs autonegotiation if needed. 449 * Restarts the link. Performs autonegotiation if needed.
460 **/ 450 **/
461 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 451 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
462 bool autoneg_wait_to_complete) 452 bool autoneg_wait_to_complete)
463 { 453 {
464 u32 autoc_reg; 454 u32 autoc_reg;
465 u32 links_reg; 455 u32 links_reg;
466 u32 i; 456 u32 i;
467 s32 status = IXGBE_SUCCESS; 457 s32 status = IXGBE_SUCCESS;
571 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 561 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
572 * 562 *
573 * Set the link speed in the AUTOC register and restarts link. 563 * Set the link speed in the AUTOC register and restarts link.
574 **/ 564 **/
575 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 565 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
576 ixgbe_link_speed speed, bool autoneg, 566 ixgbe_link_speed speed, bool autoneg,
577 bool autoneg_wait_to_complete) 567 bool autoneg_wait_to_complete)
578 { 568 {
579 s32 status = IXGBE_SUCCESS; 569 s32 status = IXGBE_SUCCESS;
580 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 570 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
581 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; 571 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
582 u32 speedcnt = 0; 572 u32 speedcnt = 0;
617 607
618 /* Allow module to change analog characteristics (1G->10G) */ 608 /* Allow module to change analog characteristics (1G->10G) */
619 msec_delay(40); 609 msec_delay(40);
620 610
621 status = ixgbe_setup_mac_link_82599(hw, 611 status = ixgbe_setup_mac_link_82599(hw,
622 IXGBE_LINK_SPEED_10GB_FULL, 612 IXGBE_LINK_SPEED_10GB_FULL,
623 autoneg, 613 autoneg,
624 autoneg_wait_to_complete); 614 autoneg_wait_to_complete);
625 if (status != IXGBE_SUCCESS) 615 if (status != IXGBE_SUCCESS)
626 return status; 616 return status;
627 617
628 /* Flap the tx laser if it has not already been done */ 618 /* Flap the tx laser if it has not already been done */
629 ixgbe_flap_tx_laser(hw); 619 ixgbe_flap_tx_laser(hw);
637 /* Wait for the link partner to also set speed */ 627 /* Wait for the link partner to also set speed */
638 msec_delay(100); 628 msec_delay(100);
639 629
640 /* If we have link, just jump out */ 630 /* If we have link, just jump out */
641 status = ixgbe_check_link(hw, &link_speed, 631 status = ixgbe_check_link(hw, &link_speed,
642 &link_up, FALSE); 632 &link_up, FALSE);
643 if (status != IXGBE_SUCCESS) 633 if (status != IXGBE_SUCCESS)
644 return status; 634 return status;
645 635
646 if (link_up) 636 if (link_up)
647 goto out; 637 goto out;
697 * (if there was more than one). We call ourselves back with just the 687 * (if there was more than one). We call ourselves back with just the
698 * single highest speed that the user requested. 688 * single highest speed that the user requested.
699 */ 689 */
700 if (speedcnt > 1) 690 if (speedcnt > 1)
701 status = ixgbe_setup_mac_link_multispeed_fiber(hw, 691 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
702 highest_link_speed, autoneg, autoneg_wait_to_complete); 692 highest_link_speed, autoneg, autoneg_wait_to_complete);
703 693
704 out: 694 out:
705 /* Set autoneg_advertised value based on input link speed */ 695 /* Set autoneg_advertised value based on input link speed */
706 hw->phy.autoneg_advertised = 0; 696 hw->phy.autoneg_advertised = 0;
707 697
722 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 712 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
723 * 713 *
724 * Implements the Intel SmartSpeed algorithm. 714 * Implements the Intel SmartSpeed algorithm.
725 **/ 715 **/
726 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 716 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
727 ixgbe_link_speed speed, bool autoneg, 717 ixgbe_link_speed speed, bool autoneg,
728 bool autoneg_wait_to_complete) 718 bool autoneg_wait_to_complete)
729 { 719 {
730 s32 status = IXGBE_SUCCESS; 720 s32 status = IXGBE_SUCCESS;
731 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 721 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
732 s32 i, j; 722 s32 i, j;
733 bool link_up = FALSE; 723 bool link_up = FALSE;
835 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 825 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
836 * 826 *
837 * Set the link speed in the AUTOC register and restarts link. 827 * Set the link speed in the AUTOC register and restarts link.
838 **/ 828 **/
839 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 829 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
840 ixgbe_link_speed speed, bool autoneg, 830 ixgbe_link_speed speed, bool autoneg,
841 bool autoneg_wait_to_complete) 831 bool autoneg_wait_to_complete)
842 { 832 {
843 s32 status = IXGBE_SUCCESS; 833 s32 status = IXGBE_SUCCESS;
844 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 834 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
845 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 835 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
846 u32 start_autoc = autoc; 836 u32 start_autoc = autoc;
884 (hw->phy.smart_speed_active == FALSE)) 874 (hw->phy.smart_speed_active == FALSE))
885 autoc |= IXGBE_AUTOC_KR_SUPP; 875 autoc |= IXGBE_AUTOC_KR_SUPP;
886 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 876 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
887 autoc |= IXGBE_AUTOC_KX_SUPP; 877 autoc |= IXGBE_AUTOC_KX_SUPP;
888 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 878 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
889 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 879 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
890 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 880 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
891 /* Switch from 1G SFI to 10G SFI if requested */ 881 /* Switch from 1G SFI to 10G SFI if requested */
892 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 882 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
893 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { 883 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
894 autoc &= ~IXGBE_AUTOC_LMS_MASK; 884 autoc &= ~IXGBE_AUTOC_LMS_MASK;
895 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; 885 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
896 } 886 }
897 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && 887 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
898 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 888 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
899 /* Switch from 10G SFI to 1G SFI if requested */ 889 /* Switch from 10G SFI to 1G SFI if requested */
900 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 890 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
901 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { 891 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
902 autoc &= ~IXGBE_AUTOC_LMS_MASK; 892 autoc &= ~IXGBE_AUTOC_LMS_MASK;
903 if (autoneg) 893 if (autoneg)
949 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete 939 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
950 * 940 *
951 * Restarts link on PHY and MAC based on settings passed in. 941 * Restarts link on PHY and MAC based on settings passed in.
952 **/ 942 **/
953 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 943 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
954 ixgbe_link_speed speed, 944 ixgbe_link_speed speed,
955 bool autoneg, 945 bool autoneg,
956 bool autoneg_wait_to_complete) 946 bool autoneg_wait_to_complete)
957 { 947 {
958 s32 status; 948 s32 status;
959 949
960 DEBUGFUNC("ixgbe_setup_copper_link_82599"); 950 DEBUGFUNC("ixgbe_setup_copper_link_82599");
961 951
962 /* Setup the PHY according to input speed */ 952 /* Setup the PHY according to input speed */
963 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 953 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
964 autoneg_wait_to_complete); 954 autoneg_wait_to_complete);
965 /* Set up MAC */ 955 /* Set up MAC */
966 (void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); 956 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
967 957
968 return status; 958 return status;
969 } 959 }
970 960
971 /** 961 /**
976 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) 966 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
977 * reset. 967 * reset.
978 **/ 968 **/
979 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) 969 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
980 { 970 {
981 s32 status = IXGBE_SUCCESS; 971 ixgbe_link_speed link_speed;
982 u32 ctrl; 972 s32 status;
983 u32 i; 973 u32 ctrl, i, autoc, autoc2;
984 u32 autoc; 974 bool link_up = FALSE;
985 u32 autoc2;
986 975
987 DEBUGFUNC("ixgbe_reset_hw_82599"); 976 DEBUGFUNC("ixgbe_reset_hw_82599");
988 977
989 /* Call adapter stop to disable tx/rx and clear interrupts */ 978 /* Call adapter stop to disable tx/rx and clear interrupts */
990 hw->mac.ops.stop_adapter(hw); 979 status = hw->mac.ops.stop_adapter(hw);
980 if (status != IXGBE_SUCCESS)
981 goto reset_hw_out;
982
983 /* flush pending Tx transactions */
984 ixgbe_clear_tx_pending(hw);
991 985
992 /* PHY ops must be identified and initialized prior to reset */ 986 /* PHY ops must be identified and initialized prior to reset */
993 987
994 /* Identify PHY and related function pointers */ 988 /* Identify PHY and related function pointers */
995 status = hw->phy.ops.init(hw); 989 status = hw->phy.ops.init(hw);
1008 1002
1009 /* Reset PHY */ 1003 /* Reset PHY */
1010 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL) 1004 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1011 hw->phy.ops.reset(hw); 1005 hw->phy.ops.reset(hw);
1012 1006
1013 /*
1014 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1015 * access and verify no pending requests before reset
1016 */
1017 (void) ixgbe_disable_pcie_master(hw);
1018
1019 mac_reset_top: 1007 mac_reset_top:
1020 /* 1008 /*
1021 * Issue global reset to the MAC. This needs to be a SW reset. 1009 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1022 * If link reset is used, it might reset the MAC when mng is using it 1010 * If link reset is used when link is up, it might reset the PHY when
1023 */ 1011 * mng is using it. If link is down or the flag to force full link
1024 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 1012 * reset is set, then perform link reset.
1025 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); 1013 */
1014 ctrl = IXGBE_CTRL_LNK_RST;
1015 if (!hw->force_full_reset) {
1016 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1017 if (link_up)
1018 ctrl = IXGBE_CTRL_RST;
1019 }
1020
1021 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1022 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1026 IXGBE_WRITE_FLUSH(hw); 1023 IXGBE_WRITE_FLUSH(hw);
1027 1024
1028 /* Poll for reset bit to self-clear indicating reset is complete */ 1025 /* Poll for reset bit to self-clear indicating reset is complete */
1029 for (i = 0; i < 10; i++) { 1026 for (i = 0; i < 10; i++) {
1030 usec_delay(1); 1027 usec_delay(1);
1031 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 1028 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1032 if (!(ctrl & IXGBE_CTRL_RST)) 1029 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1033 break; 1030 break;
1034 } 1031 }
1035 if (ctrl & IXGBE_CTRL_RST) { 1032
1033 if (ctrl & IXGBE_CTRL_RST_MASK) {
1036 status = IXGBE_ERR_RESET_FAILED; 1034 status = IXGBE_ERR_RESET_FAILED;
1037 DEBUGOUT("Reset polling failed to complete.\n"); 1035 DEBUGOUT("Reset polling failed to complete.\n");
1038 } 1036 }
1039 1037
1038 msec_delay(50);
1039
1040 /* 1040 /*
1041 * Double resets are required for recovery from certain error 1041 * Double resets are required for recovery from certain error
1042 * conditions. Between resets, it is necessary to stall to allow time 1042 * conditions. Between resets, it is necessary to stall to allow time
1043 * for any pending HW events to complete. We use 1usec since that is 1043 * for any pending HW events to complete.
1044 * what is needed for ixgbe_disable_pcie_master(). The second reset
1045 * then clears out any effects of those events.
1046 */ 1044 */
1047 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { 1045 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1048 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 1046 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1049 usec_delay(1);
1050 goto mac_reset_top; 1047 goto mac_reset_top;
1051 } 1048 }
1052
1053 msec_delay(50);
1054 1049
1055 /* 1050 /*
1056 * Store the original AUTOC/AUTOC2 values if they have not been 1051 * Store the original AUTOC/AUTOC2 values if they have not been
1057 * stored off yet. Otherwise restore the stored original 1052 * stored off yet. Otherwise restore the stored original
1058 * values since the reset operation sets back to defaults. 1053 * values since the reset operation sets back to defaults.
1070 1065
1071 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 1066 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1072 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { 1067 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1073 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; 1068 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1074 autoc2 |= (hw->mac.orig_autoc2 & 1069 autoc2 |= (hw->mac.orig_autoc2 &
1075 IXGBE_AUTOC2_UPPER_MASK); 1070 IXGBE_AUTOC2_UPPER_MASK);
1076 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); 1071 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1077 } 1072 }
1078 } 1073 }
1079 1074
1080 /* Store the permanent mac address */ 1075 /* Store the permanent mac address */
1092 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 1087 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1093 1088
1094 /* Add the SAN MAC address to the RAR only if it's a valid address */ 1089 /* Add the SAN MAC address to the RAR only if it's a valid address */
1095 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { 1090 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1096 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 1091 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1097 hw->mac.san_addr, 0, IXGBE_RAH_AV); 1092 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1093
1094 /* Save the SAN MAC RAR index */
1095 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1098 1096
1099 /* Reserve the last RAR for the SAN MAC address */ 1097 /* Reserve the last RAR for the SAN MAC address */
1100 hw->mac.num_rar_entries--; 1098 hw->mac.num_rar_entries--;
1101 } 1099 }
1102 1100
1103 /* Store the alternative WWNN/WWPN prefix */ 1101 /* Store the alternative WWNN/WWPN prefix */
1104 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, 1102 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1105 &hw->mac.wwpn_prefix); 1103 &hw->mac.wwpn_prefix);
1106 1104
1107 reset_hw_out: 1105 reset_hw_out:
1108 return status; 1106 return status;
1109 } 1107 }
1110 1108
1130 break; 1128 break;
1131 usec_delay(10); 1129 usec_delay(10);
1132 } 1130 }
1133 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1131 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1134 DEBUGOUT("Flow Director previous command isn't complete, " 1132 DEBUGOUT("Flow Director previous command isn't complete, "
1135 "aborting table re-initialization. \n"); 1133 "aborting table re-initialization.\n");
1136 return IXGBE_ERR_FDIR_REINIT_FAILED; 1134 return IXGBE_ERR_FDIR_REINIT_FAILED;
1137 } 1135 }
1138 1136
1139 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); 1137 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1140 IXGBE_WRITE_FLUSH(hw); 1138 IXGBE_WRITE_FLUSH(hw);
1144 * before re-writing the FDIRCTRL control register with the same value. 1142 * before re-writing the FDIRCTRL control register with the same value.
1145 * - write 1 to bit 8 of FDIRCMD register & 1143 * - write 1 to bit 8 of FDIRCMD register &
1146 * - write 0 to bit 8 of FDIRCMD register 1144 * - write 0 to bit 8 of FDIRCMD register
1147 */ 1145 */
1148 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1146 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1149 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1147 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1150 IXGBE_FDIRCMD_CLEARHT)); 1148 IXGBE_FDIRCMD_CLEARHT));
1151 IXGBE_WRITE_FLUSH(hw); 1149 IXGBE_WRITE_FLUSH(hw);
1152 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1150 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1153 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1151 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1154 ~IXGBE_FDIRCMD_CLEARHT)); 1152 ~IXGBE_FDIRCMD_CLEARHT));
1155 IXGBE_WRITE_FLUSH(hw); 1153 IXGBE_WRITE_FLUSH(hw);
1156 /* 1154 /*
1157 * Clear FDIR Hash register to clear any leftover hashes 1155 * Clear FDIR Hash register to clear any leftover hashes
1158 * waiting to be programmed. 1156 * waiting to be programmed.
1159 */ 1157 */
1164 IXGBE_WRITE_FLUSH(hw); 1162 IXGBE_WRITE_FLUSH(hw);
1165 1163
1166 /* Poll init-done after we write FDIRCTRL register */ 1164 /* Poll init-done after we write FDIRCTRL register */
1167 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1165 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1168 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1166 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1169 IXGBE_FDIRCTRL_INIT_DONE) 1167 IXGBE_FDIRCTRL_INIT_DONE)
1170 break; 1168 break;
1171 usec_delay(10); 1169 usec_delay(10);
1172 } 1170 }
1173 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { 1171 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1174 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1172 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1175 return IXGBE_ERR_FDIR_REINIT_FAILED; 1173 return IXGBE_ERR_FDIR_REINIT_FAILED;
1176 } 1174 }
1177 1175
1178 /* Clear FDIR statistics registers (read to clear) */ 1176 /* Clear FDIR statistics registers (read to clear) */
1179 (void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); 1177 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1180 (void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); 1178 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1181 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 1179 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1182 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 1180 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1183 (void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN); 1181 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1184 1182
1185 return IXGBE_SUCCESS; 1183 return IXGBE_SUCCESS;
1186 } 1184 }
1187 1185
1188 /** 1186 /**
1189 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters 1187 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1190 * @hw: pointer to hardware structure 1188 * @hw: pointer to hardware structure
1191 * @pballoc: which mode to allocate filters with 1189 * @fdirctrl: value to write to flow director control register
1192 **/ 1190 **/
1193 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) 1191 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1194 { 1192 {
1195 u32 fdirctrl = 0;
1196 u32 pbsize;
1197 int i; 1193 int i;
1198 1194
1199 DEBUGFUNC("ixgbe_init_fdir_signature_82599"); 1195 DEBUGFUNC("ixgbe_fdir_enable_82599");
1200
1201 /*
1202 * Before enabling Flow Director, the Rx Packet Buffer size
1203 * must be reduced. The new value is the current size minus
1204 * flow director memory usage size.
1205 */
1206 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
1207 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
1208 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
1209
1210 /*
1211 * The defaults in the HW for RX PB 1-7 are not zero and so should be
1212 * initialized to zero for non DCB mode otherwise actual total RX PB
1213 * would be bigger than programmed and filter space would run into
1214 * the PB 0 region.
1215 */
1216 for (i = 1; i < 8; i++)
1217 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
1218
1219 /* Send interrupt when 64 filters are left */
1220 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1221
1222 /* Set the maximum length per hash bucket to 0xA filters */
1223 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
1224
1225 switch (pballoc) {
1226 case IXGBE_FDIR_PBALLOC_64K:
1227 /* 8k - 1 signature filters */
1228 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1229 break;
1230 case IXGBE_FDIR_PBALLOC_128K:
1231 /* 16k - 1 signature filters */
1232 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1233 break;
1234 case IXGBE_FDIR_PBALLOC_256K:
1235 /* 32k - 1 signature filters */
1236 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1237 break;
1238 default:
1239 /* bad value */
1240 return IXGBE_ERR_CONFIG;
1241 };
1242
1243 /* Move the flexible bytes to use the ethertype - shift 6 words */
1244 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1245
1246 1196
1247 /* Prime the keys for hashing */ 1197 /* Prime the keys for hashing */
1248 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); 1198 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1249 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); 1199 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1250 1200
1263 */ 1213 */
1264 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1214 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1265 IXGBE_WRITE_FLUSH(hw); 1215 IXGBE_WRITE_FLUSH(hw);
1266 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1216 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1267 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1217 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1268 IXGBE_FDIRCTRL_INIT_DONE) 1218 IXGBE_FDIRCTRL_INIT_DONE)
1269 break; 1219 break;
1270 msec_delay(1); 1220 msec_delay(1);
1271 } 1221 }
1222
1272 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1223 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1273 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1224 DEBUGOUT("Flow Director poll time exceeded!\n");
1225 }
1226
1227 /**
1228 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1229 * @hw: pointer to hardware structure
1230 * @fdirctrl: value to write to flow director control register, initially
1231 * contains just the value of the Rx packet buffer allocation
1232 **/
1233 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1234 {
1235 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1236
1237 /*
1238 * Continue setup of fdirctrl register bits:
1239 * Move the flexible bytes to use the ethertype - shift 6 words
1240 * Set the maximum length per hash bucket to 0xA filters
1241 * Send interrupt when 64 filters are left
1242 */
1243 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1244 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1245 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1246
1247 /* write hashes and fdirctrl register, poll for completion */
1248 ixgbe_fdir_enable_82599(hw, fdirctrl);
1274 1249
1275 return IXGBE_SUCCESS; 1250 return IXGBE_SUCCESS;
1276 } 1251 }
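
With this refactor, translating the packet-buffer allocation into FDIRCTRL bits becomes the caller's job; the function now receives fdirctrl already seeded with that value. Roughly (a sketch; hw is assumed to be an initialized struct ixgbe_hw):

	u32 fdirctrl = IXGBE_FDIRCTRL_PBALLOC_64K;	/* 8k - 1 signature filters */

	(void) ixgbe_init_fdir_signature_82599(hw, fdirctrl);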
1277 1252
1278 /** 1253 /**
1279 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters 1254 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1280 * @hw: pointer to hardware structure 1255 * @hw: pointer to hardware structure
1281 * @pballoc: which mode to allocate filters with 1256 * @fdirctrl: value to write to flow director control register, initially
1282 **/ 1257 * contains just the value of the Rx packet buffer allocation
1283 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) 1258 **/
1284 { 1259 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1285 u32 fdirctrl = 0; 1260 {
1286 u32 pbsize;
1287 int i;
1288
1289 DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); 1261 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1290 1262
1291 /* 1263 /*
1292 * Before enabling Flow Director, the Rx Packet Buffer size 1264 * Continue setup of fdirctrl register bits:
1293 * must be reduced. The new value is the current size minus 1265 * Turn perfect match filtering on
1294 * flow director memory usage size. 1266 * Report hash in RSS field of Rx wb descriptor
1295 */ 1267 * Initialize the drop queue
1296 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); 1268 * Move the flexible bytes to use the ethertype - shift 6 words
1297 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 1269 * Set the maximum length per hash bucket to 0xA filters
1298 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); 1270 * Send interrupt when 64 (0x4 * 16) filters are left
1299 1271 */
1300 /* 1272 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1301 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1273 IXGBE_FDIRCTRL_REPORT_STATUS |
1302 * initialized to zero for non DCB mode otherwise actual total RX PB 1274 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1303 * would be bigger than programmed and filter space would run into 1275 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1304 * the PB 0 region. 1276 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1305 */ 1277 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1306 for (i = 1; i < 8; i++) 1278
1307 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 1279 /* write hashes and fdirctrl register, poll for completion */
1308 1280 ixgbe_fdir_enable_82599(hw, fdirctrl);
1309 /* Send interrupt when 64 filters are left */
1310 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
1311
1312 /* Initialize the drop queue to Rx queue 127 */
1313 fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
1314
1315 switch (pballoc) {
1316 case IXGBE_FDIR_PBALLOC_64K:
1317 /* 2k - 1 perfect filters */
1318 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
1319 break;
1320 case IXGBE_FDIR_PBALLOC_128K:
1321 /* 4k - 1 perfect filters */
1322 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
1323 break;
1324 case IXGBE_FDIR_PBALLOC_256K:
1325 /* 8k - 1 perfect filters */
1326 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
1327 break;
1328 default:
1329 /* bad value */
1330 return IXGBE_ERR_CONFIG;
1331 };
1332
1333 /* Turn perfect match filtering on */
1334 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
1335 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1336
1337 /* Move the flexible bytes to use the ethertype - shift 6 words */
1338 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1339
1340 /* Prime the keys for hashing */
1341 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1342 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1343
1344 /*
1345 * Poll init-done after we write the register. Estimated times:
1346 * 10G: PBALLOC = 11b, timing is 60us
1347 * 1G: PBALLOC = 11b, timing is 600us
1348 * 100M: PBALLOC = 11b, timing is 6ms
1349 *
1350 * Multiply these timings by 4 if under full Rx load
1351 *
1352 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1353 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1354 * this might not finish in our poll time, but we can live with that
1355 * for now.
1356 */
1357
1358 /* Set the maximum length per hash bucket to 0xA filters */
1359 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
1360
1361 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1362 IXGBE_WRITE_FLUSH(hw);
1363 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1364 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1365 IXGBE_FDIRCTRL_INIT_DONE)
1366 break;
1367 msec_delay(1);
1368 }
1369 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1370 DEBUGOUT("Flow Director Perfect poll time exceeded!\n");
1371 1281
1372 return IXGBE_SUCCESS; 1282 return IXGBE_SUCCESS;
1373 }
1374
1375 /**
1376 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
1377 * @stream: input bitstream to compute the hash on
1378 * @key: 32-bit hash key
1379 **/
1380 u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
1381 u32 key)
1382 {
1383 /*
1384 * The algorithm is as follows:
1385 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
1386 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
1387 * and A[n] x B[n] is bitwise AND between same length strings
1388 *
1389 * K[n] is 16 bits, defined as:
1390 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
1391 * for n modulo 32 < 15, K[n] =
1392 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
1393 *
1394 * S[n] is 16 bits, defined as:
1395 * for n >= 15, S[n] = S[n:n - 15]
1396 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
1397 *
1398 * To simplify for programming, the algorithm is implemented
1399 * in software this way:
1400 *
1401 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
1402 *
1403 * for (i = 0; i < 352; i+=32)
1404 * hi_hash_dword[31:0] ^= Stream[(i+31):i];
1405 *
1406 * lo_hash_dword[15:0] ^= Stream[15:0];
1407 * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
1408 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
1409 *
1410 * hi_hash_dword[31:0] ^= Stream[351:320];
1411 *
1412 * if(key[0])
1413 * hash[15:0] ^= Stream[15:0];
1414 *
1415 * for (i = 0; i < 16; i++) {
1416 * if (key[i])
1417 * hash[15:0] ^= lo_hash_dword[(i+15):i];
1418 * if (key[i + 16])
1419 * hash[15:0] ^= hi_hash_dword[(i+15):i];
1420 * }
1421 *
1422 */
1423 __be32 common_hash_dword = 0;
1424 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1425 u32 hash_result = 0;
1426 u8 i;
1427
1428 /* record the flow_vm_vlan bits as they are a key part to the hash */
1429 flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
1430
1431 /* generate common hash dword */
1432 for (i = 10; i; i -= 2)
1433 common_hash_dword ^= atr_input->dword_stream[i] ^
1434 atr_input->dword_stream[i - 1];
1435
1436 hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
1437
1438 /* low dword is word swapped version of common */
1439 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1440
1441 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1442 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1443
1444 /* Process bits 0 and 16 */
1445 if (key & 0x0001) hash_result ^= lo_hash_dword;
1446 if (key & 0x00010000) hash_result ^= hi_hash_dword;
1447
1448 /*
1449 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1450 * delay this because bit 0 of the stream should not be processed
1451 * so we do not add the vlan until after bit 0 was processed
1452 */
1453 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1454
1455
1456 /* process the remaining 30 bits in the key 2 bits at a time */
1457 for (i = 15; i; i--) {
1458 if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
1459 if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
1460 }
1461
1462 return hash_result & IXGBE_ATR_HASH_MASK;
1463 } 1283 }
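
The long comment above reduces the 352-bit hash definition to XOR-folding the stream into two dwords plus a 32-step scan of the key. The kernel of that scan, pulled out as a standalone sketch (not driver code; 0x7FFF mirrors IXGBE_ATR_HASH_MASK):

	static u32
	atr_fold_sketch(u32 key, u32 lo_hash_dword, u32 hi_hash_dword)
	{
		u32 hash = 0;
		u8 i;

		/* Bit i of the key gates the lo dword, bit i+16 the hi dword. */
		for (i = 0; i < 16; i++) {
			if (key & (1u << i))
				hash ^= lo_hash_dword >> i;
			if (key & (0x10000u << i))
				hash ^= hi_hash_dword >> i;
		}

		return (hash & 0x7FFF);
	}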
1464 1284
1465 /* 1285 /*
1466 * These defines allow us to quickly generate all of the necessary instructions 1286 * These defines allow us to quickly generate all of the necessary instructions
1467 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION 1287 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1468 * for values 0 through 15 1288 * for values 0 through 15
1469 */ 1289 */
1470 #define IXGBE_ATR_COMMON_HASH_KEY \ 1290 #define IXGBE_ATR_COMMON_HASH_KEY \
1471 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) 1291 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1472 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ 1292 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1473 { \ 1293 do { \
1474 u32 n = (_n); \ 1294 u32 n = (_n); \
1475 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ 1295 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1476 common_hash ^= lo_hash_dword >> n; \ 1296 common_hash ^= lo_hash_dword >> n; \
1477 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ 1297 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1478 bucket_hash ^= lo_hash_dword >> n; \ 1298 bucket_hash ^= lo_hash_dword >> n; \
1482 common_hash ^= hi_hash_dword >> n; \ 1302 common_hash ^= hi_hash_dword >> n; \
1483 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ 1303 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1484 bucket_hash ^= hi_hash_dword >> n; \ 1304 bucket_hash ^= hi_hash_dword >> n; \
1485 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ 1305 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1486 sig_hash ^= hi_hash_dword << (16 - n); \ 1306 sig_hash ^= hi_hash_dword << (16 - n); \
1487 } 1307 } while (0);
1488 1308
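The switch from a bare block to do { ... } while (0) is the standard idiom for statement-like macros: the expansion behaves as a single statement, so an unbraced if/else around an invocation still parses. (The trailing semicolon these definitions keep after while (0) gives up part of that benefit, since an invocation followed by its own semicolon expands to two statements.) A generic illustration, not driver code:

	#define	SWAP_U32(a, b)			\
	do {					\
		u32 swap_tmp = (a);		\
		(a) = (b);			\
		(b) = swap_tmp;			\
	} while (0)

	static void
	order_pair(u32 *lo, u32 *hi)
	{
		if (*lo > *hi)
			SWAP_U32(*lo, *hi);	/* expands as one statement */
	}
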
1489 /** 1309 /**
1490 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash 1310 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1491 * @stream: input bitstream to compute the hash on 1311 * @stream: input bitstream to compute the hash on
1492 * 1312 *
1494 * several optimizations such as unwinding all of the loops, letting the 1314 * several optimizations such as unwinding all of the loops, letting the
1495 * compiler work out all of the conditional ifs since the keys are static 1315 * compiler work out all of the conditional ifs since the keys are static
1496 * defines, and computing two keys at once since the hashed dword stream 1316 * defines, and computing two keys at once since the hashed dword stream
1497 * will be the same for both keys. 1317 * will be the same for both keys.
1498 **/ 1318 **/
1499 static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, 1319 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1500 union ixgbe_atr_hash_dword common) 1320 union ixgbe_atr_hash_dword common)
1501 { 1321 {
1502 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1322 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1503 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; 1323 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1504 1324
1505 /* record the flow_vm_vlan bits as they are a key part to the hash */ 1325 /* record the flow_vm_vlan bits as they are a key part to the hash */
1553 } 1373 }
1554 1374
1555 /** 1375 /**
1556 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1376 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1557 * @hw: pointer to hardware structure 1377 * @hw: pointer to hardware structure
1558 * @stream: input bitstream 1378 * @input: unique input dword
1379 * @common: compressed common input dword
1559 * @queue: queue index to direct traffic to 1380 * @queue: queue index to direct traffic to
1560 **/ 1381 **/
1561 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1382 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1562 union ixgbe_atr_hash_dword input, 1383 union ixgbe_atr_hash_dword input,
1563 union ixgbe_atr_hash_dword common, 1384 union ixgbe_atr_hash_dword common,
1564 u8 queue) 1385 u8 queue)
1565 { 1386 {
1566 u64 fdirhashcmd; 1387 u64 fdirhashcmd;
1567 u32 fdircmd; 1388 u32 fdircmd;
1568 1389
1569 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); 1390 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1585 return IXGBE_ERR_CONFIG; 1406 return IXGBE_ERR_CONFIG;
1586 } 1407 }
1587 1408
1588 /* configure FDIRCMD register */ 1409 /* configure FDIRCMD register */
1589 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1410 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1590 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1411 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1591 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1412 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1592 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1413 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1593 1414
1594 /* 1415 /*
1595 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1416 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1600 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1421 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1601 1422
1602 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); 1423 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1603 1424
1604 return IXGBE_SUCCESS; 1425 return IXGBE_SUCCESS;
1426 }
1427
1428 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1429 do { \
1430 u32 n = (_n); \
1431 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1432 bucket_hash ^= lo_hash_dword >> n; \
1433 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1434 bucket_hash ^= hi_hash_dword >> n; \
1435 } while (0);
1436
1437 /**
1438 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1439 * @atr_input: input bitstream to compute the hash on
1440 * @input_mask: mask for the input bitstream
1441 *
1442 * This function serves two main purposes. First it applies the input_mask
1443 * to the atr_input resulting in a cleaned up atr_input data stream.
1444 * Secondly it computes the hash and stores it in the bkt_hash field at
1445 * the end of the input byte stream. This way it will be available for
1446 * future use without needing to recompute the hash.
1447 **/
1448 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1449 union ixgbe_atr_input *input_mask)
1450 {
1451
1452 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1453 u32 bucket_hash = 0;
1454
1455 /* Apply masks to input data */
1456 input->dword_stream[0] &= input_mask->dword_stream[0];
1457 input->dword_stream[1] &= input_mask->dword_stream[1];
1458 input->dword_stream[2] &= input_mask->dword_stream[2];
1459 input->dword_stream[3] &= input_mask->dword_stream[3];
1460 input->dword_stream[4] &= input_mask->dword_stream[4];
1461 input->dword_stream[5] &= input_mask->dword_stream[5];
1462 input->dword_stream[6] &= input_mask->dword_stream[6];
1463 input->dword_stream[7] &= input_mask->dword_stream[7];
1464 input->dword_stream[8] &= input_mask->dword_stream[8];
1465 input->dword_stream[9] &= input_mask->dword_stream[9];
1466 input->dword_stream[10] &= input_mask->dword_stream[10];
1467
1468 /* record the flow_vm_vlan bits as they are a key part to the hash */
1469 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1470
1471 /* generate common hash dword */
1472 hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1473 input->dword_stream[2] ^
1474 input->dword_stream[3] ^
1475 input->dword_stream[4] ^
1476 input->dword_stream[5] ^
1477 input->dword_stream[6] ^
1478 input->dword_stream[7] ^
1479 input->dword_stream[8] ^
1480 input->dword_stream[9] ^
1481 input->dword_stream[10]);
1482
1483 /* low dword is word swapped version of common */
1484 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1485
1486 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1487 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1488
1489 /* Process bits 0 and 16 */
1490 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1491
1492 /*
1493 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1494 * delay this because bit 0 of the stream should not be processed
1495 * so we do not add the vlan until after bit 0 was processed
1496 */
1497 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1498
1499 /* Process remaining 30 bit of the key */
1500 IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1501 IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1502 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1503 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1504 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1505 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1506 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1507 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1508 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1509 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1510 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1511 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1512 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1513 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1514 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1515
1516 /*
1517 * Limit hash to 13 bits since max bucket count is 8K.
1518 * Store result at the end of the input stream.
1519 */
1520 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1605 } 1521 }
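
A rough usage sketch for the new helper (hypothetical values; the formatted field names follow union ixgbe_atr_input in ixgbe_type.h, and memset stands in for whatever zeroing primitive the build environment supplies):

	union ixgbe_atr_input fltr, fltr_mask;

	(void) memset(&fltr, 0, sizeof (fltr));
	(void) memset(&fltr_mask, 0, sizeof (fltr_mask));
	fltr.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	fltr_mask.formatted.flow_type = 0xFF;	/* match the flow type exactly */

	ixgbe_atr_compute_perfect_hash_82599(&fltr, &fltr_mask);
	/* fltr.formatted.bkt_hash now holds the 13-bit bucket hash. */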
1606 1522
1607 /** 1523 /**
1608 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks 1524 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1609 * @input_mask: mask to be bit swapped 1525 * @input_mask: mask to be bit swapped
1611 * The source and destination port masks for flow director are bit swapped 1527 * The source and destination port masks for flow director are bit swapped
1612 * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc. In order to 1528 * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc. In order to
1613 * generate a correctly swapped value we need to bit swap the mask and that 1529 * generate a correctly swapped value we need to bit swap the mask and that
1614 * is what is accomplished by this function. 1530 * is what is accomplished by this function.
1615 **/ 1531 **/
1616 static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) 1532 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1617 { 1533 {
1618 u32 mask = IXGBE_NTOHS(input_masks->dst_port_mask); 1534 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1619 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; 1535 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1620 mask |= IXGBE_NTOHS(input_masks->src_port_mask); 1536 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1621 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); 1537 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1622 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); 1538 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1623 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); 1539 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1624 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); 1540 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1625 } 1541 }
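The four masked-shift stages above reverse the bit order within each 16-bit half of the combined port-mask word, which is exactly the swapped layout FDIRTCPM expects. A standalone sketch of the technique, with an illustrative value:

	#include <stdint.h>

	/* Reverse the bits within each 16-bit half of a 32-bit word, using
	 * the same divide-and-conquer swaps as ixgbe_get_fdirtcpm_82599(). */
	static uint32_t
	bitrev16x2(uint32_t mask)
	{
		mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
		mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
		mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
		return (((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8));
	}

	/* bitrev16x2(0x00010001) == 0x80008000: bit 0 of each half lands in
	 * bit 15 of that half, which is why port-mask bit 15 affects bit 0. */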
1637 1553
1638 #define IXGBE_WRITE_REG_BE32(a, reg, value) \ 1554 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1639 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) 1555 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1640 1556
1641 #define IXGBE_STORE_AS_BE16(_value) \ 1557 #define IXGBE_STORE_AS_BE16(_value) \
1642 (((u16)(_value) >> 8) | ((u16)(_value) << 8)) 1558 IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1643 1559
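The reworked IXGBE_STORE_AS_BE16 folds in the IXGBE_NTOHS that callers previously applied themselves, so a big-endian value can now be passed directly. A hedged sketch of why the old and new forms produce the same register byte pattern (swap16 is a hypothetical stand-in for the shift pair, not a macro from this file):

	#include <stdint.h>

	/* Hypothetical helper equivalent to ((v >> 8) | (v << 8)). */
	static uint16_t
	swap16(uint16_t v)
	{
		return ((uint16_t)((v >> 8) | (v << 8)));
	}

	/*
	 * Old usage:  IXGBE_STORE_AS_BE16(IXGBE_NTOHS(be_val))
	 *   little-endian host: swap16(swap16(be_val)) == be_val
	 *   big-endian host:    swap16(be_val)
	 * New usage:  IXGBE_STORE_AS_BE16(be_val)
	 *   little-endian host: swap16(swap16(be_val)) == be_val
	 *   big-endian host:    swap16(be_val)
	 * Both produce the same value; only the call site gets simpler.
	 */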
1644 1560 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1645 /** 1561 union ixgbe_atr_input *input_mask)
1646 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 1562 {
1647 * @hw: pointer to hardware structure 1563 /* mask IPv6 since it is currently not supported */
1648 * @input: input bitstream 1564 u32 fdirm = IXGBE_FDIRM_DIPv6;
1649 * @input_masks: masks for the input bitstream 1565 u32 fdirtcpm;
1650 * @soft_id: software index for the filters 1566
1651 * @queue: queue index to direct traffic to 1567 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1652 *
1653 * Note that the caller to this function must lock before calling, since the
1654 * hardware writes must be protected from one another.
1655 **/
1656 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1657 union ixgbe_atr_input *input,
1658 struct ixgbe_atr_input_masks *input_masks,
1659 u16 soft_id, u8 queue)
1660 {
1661 u32 fdirhash;
1662 u32 fdircmd;
1663 u32 fdirport, fdirtcpm;
1664 u32 fdirvlan;
1665 /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
1666 u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
1667 IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
1668
1669 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1670
1671 /*
1672 * Check flow_type formatting, and bail out before we touch the hardware
1673 * if there's a configuration issue
1674 */
1675 switch (input->formatted.flow_type) {
1676 case IXGBE_ATR_FLOW_TYPE_IPV4:
1677 /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
1678 fdirm |= IXGBE_FDIRM_L4P;
1679 /* FALLTHRU */
1680 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1681 if (input_masks->dst_port_mask || input_masks->src_port_mask) {
1682 DEBUGOUT(" Error on src/dst port mask\n");
1683 return IXGBE_ERR_CONFIG;
1684 }
1685 break;
1686 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1687 break;
1688 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1689 break;
1690 default:
1691 DEBUGOUT(" Error on flow type input\n");
1692 return IXGBE_ERR_CONFIG;
1693 }
1694 1568
1695 /* 1569 /*
1696 * Program the relevant mask registers. If src/dst_port or src/dst_addr 1570 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1697 * are zero, then assume a full mask for that field. Also assume that 1571 * are zero, then assume a full mask for that field. Also assume that
1698 * a VLAN of 0 is unspecified, so mask that out as well. L4type 1572 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1699 * cannot be masked out in this implementation. 1573 * cannot be masked out in this implementation.
1700 * 1574 *
1701 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1575 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1702 * point in time. 1576 * point in time.
1703 */ 1577 */
1704 1578
1705 /* Program FDIRM */ 1579 /* verify bucket hash is cleared on hash generation */
1706 switch (IXGBE_NTOHS(input_masks->vlan_id_mask) & 0xEFFF) { 1580 if (input_mask->formatted.bkt_hash)
1581 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1582
1583 /* Program FDIRM and verify partial masks */
1584 switch (input_mask->formatted.vm_pool & 0x7F) {
1585 case 0x0:
1586 fdirm |= IXGBE_FDIRM_POOL;
1587 case 0x7F:
1588 break;
1589 default:
1590 DEBUGOUT(" Error on vm pool mask\n");
1591 return IXGBE_ERR_CONFIG;
1592 }
1593
1594 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1595 case 0x0:
1596 fdirm |= IXGBE_FDIRM_L4P;
1597 if (input_mask->formatted.dst_port ||
1598 input_mask->formatted.src_port) {
1599 DEBUGOUT(" Error on src/dst port mask\n");
1600 return IXGBE_ERR_CONFIG;
1601 }
1602 case IXGBE_ATR_L4TYPE_MASK:
1603 break;
1604 default:
1605 DEBUGOUT(" Error on flow type mask\n");
1606 return IXGBE_ERR_CONFIG;
1607 }
1608
1609 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1610 case 0x0000:
1611 /* mask VLAN ID, fall through to mask VLAN priority */
1612 fdirm |= IXGBE_FDIRM_VLANID;
1613 case 0x0FFF:
1614 /* mask VLAN priority */
1615 fdirm |= IXGBE_FDIRM_VLANP;
1616 break;
1617 case 0xE000:
1618 /* mask VLAN ID only, fall through */
1619 fdirm |= IXGBE_FDIRM_VLANID;
1707 case 0xEFFF: 1620 case 0xEFFF:
1708 /* Unmask VLAN ID - bit 0 and fall through to unmask prio */ 1621 /* no VLAN fields masked */
1709 fdirm &= ~IXGBE_FDIRM_VLANID;
1710 /* FALLTHRU */
1711 case 0xE000:
1712 /* Unmask VLAN prio - bit 1 */
1713 fdirm &= ~IXGBE_FDIRM_VLANP;
1714 break;
1715 case 0x0FFF:
1716 /* Unmask VLAN ID - bit 0 */
1717 fdirm &= ~IXGBE_FDIRM_VLANID;
1718 break;
1719 case 0x0000:
1720 /* do nothing, vlans already masked */
1721 break; 1622 break;
1722 default: 1623 default:
1723 DEBUGOUT(" Error on VLAN mask\n"); 1624 DEBUGOUT(" Error on VLAN mask\n");
1724 return IXGBE_ERR_CONFIG; 1625 return IXGBE_ERR_CONFIG;
1725 } 1626 }
1726 1627
1727 if (input_masks->flex_mask & 0xFFFF) { 1628 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1728 if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) { 1629 case 0x0000:
1729 DEBUGOUT(" Error on flexible byte mask\n"); 1630 /* Mask Flex Bytes, fall through */
1730 return IXGBE_ERR_CONFIG; 1631 fdirm |= IXGBE_FDIRM_FLEX;
1731 } 1632 case 0xFFFF:
1732 /* Unmask Flex Bytes - bit 4 */ 1633 break;
1733 fdirm &= ~IXGBE_FDIRM_FLEX; 1634 default:
1635 DEBUGOUT(" Error on flexible byte mask\n");
1636 return IXGBE_ERR_CONFIG;
1734 } 1637 }
1735 1638
1736 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 1639 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1737 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 1640 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1738 1641
1739 /* store the TCP/UDP port masks, bit reversed from port layout */ 1642 /* store the TCP/UDP port masks, bit reversed from port layout */
1740 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks); 1643 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1741 1644
1742 /* write both the same so that UDP and TCP use the same mask */ 1645 /* write both the same so that UDP and TCP use the same mask */
1743 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); 1646 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1744 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); 1647 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1745 1648
1746 /* store source and destination IP masks (big-endian) */ 1649 /* store source and destination IP masks (big-endian) */
1747 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 1650 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1748 ~input_masks->src_ip_mask[0]); 1651 ~input_mask->formatted.src_ip[0]);
1749 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 1652 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1750 ~input_masks->dst_ip_mask[0]); 1653 ~input_mask->formatted.dst_ip[0]);
1751 1654
1752 /* Apply masks to input data */ 1655 return IXGBE_SUCCESS;
1753 input->formatted.vlan_id &= input_masks->vlan_id_mask; 1656 }
1754 input->formatted.flex_bytes &= input_masks->flex_mask; 1657
1755 input->formatted.src_port &= input_masks->src_port_mask; 1658 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1756 input->formatted.dst_port &= input_masks->dst_port_mask; 1659 union ixgbe_atr_input *input,
1757 input->formatted.src_ip[0] &= input_masks->src_ip_mask[0]; 1660 u16 soft_id, u8 queue)
1758 input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0]; 1661 {
1759 1662 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1760 /* record vlan (little-endian) and flex_bytes(big-endian) */ 1663
1761 fdirvlan = 1664 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1762 IXGBE_STORE_AS_BE16(IXGBE_NTOHS(input->formatted.flex_bytes)); 1665
1763 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; 1666 /* currently IPv6 is not supported, must be programmed with 0 */
1764 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); 1667 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1765 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); 1668 input->formatted.src_ip[0]);
1669 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1670 input->formatted.src_ip[1]);
1671 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1672 input->formatted.src_ip[2]);
1673
1674 /* record the source address (big-endian) */
1675 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1676
1677 /* record the first 32 bits of the destination address (big-endian) */
1678 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1766 1679
1767 /* record source and destination port (little-endian) */ 1680 /* record source and destination port (little-endian) */
1768 fdirport = IXGBE_NTOHS(input->formatted.dst_port); 1681 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1769 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; 1682 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1770 fdirport |= IXGBE_NTOHS(input->formatted.src_port); 1683 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1771 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); 1684 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1772 1685
1773 /* record the first 32 bits of the destination address (big-endian) */ 1686 /* record vlan (little-endian) and flex_bytes (big-endian) */
1774 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); 1687 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1775 1688 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1776 /* record the source address (big-endian) */ 1689 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1777 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); 1690 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1691
1692 /* configure FDIRHASH register */
1693 fdirhash = input->formatted.bkt_hash;
1694 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1695 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1696
1697 /*
1698 * flush all previous writes to make certain registers are
1699 * programmed prior to issuing the command
1700 */
1701 IXGBE_WRITE_FLUSH(hw);
1778 1702
1779 /* configure FDIRCMD register */ 1703 /* configure FDIRCMD register */
1780 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1704 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1781 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; 1705 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1706 if (queue == IXGBE_FDIR_DROP_QUEUE)
1707 fdircmd |= IXGBE_FDIRCMD_DROP;
1782 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; 1708 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1783 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1709 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1784 1710 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1785 /* we only want the bucket hash so drop the upper 16 bits */ 1711
1786 fdirhash = ixgbe_atr_compute_hash_82599(input, 1712 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1787 IXGBE_ATR_BUCKET_HASH_KEY); 1713
1714 return IXGBE_SUCCESS;
1715 }
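One hedged usage sketch for the IXGBE_FDIR_DROP_QUEUE path added above: programming a perfect filter that discards matching traffic instead of steering it. The soft_id and the prepared "input" are illustrative; the input is assumed to have been masked and hashed first.

	/* "input" already ran through ixgbe_atr_compute_perfect_hash_82599();
	 * soft_id 1 is arbitrary. */
	err = ixgbe_fdir_write_perfect_filter_82599(hw, &input, 1,
	    IXGBE_FDIR_DROP_QUEUE);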
1716
1717 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1718 union ixgbe_atr_input *input,
1719 u16 soft_id)
1720 {
1721 u32 fdirhash;
1722 u32 fdircmd = 0;
1723 u32 retry_count;
1724 s32 err = IXGBE_SUCCESS;
1725
1726 /* configure FDIRHASH register */
1727 fdirhash = input->formatted.bkt_hash;
1788 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; 1728 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1789
1790 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 1729 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1791 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 1730
1792 1731 /* flush hash to HW */
1793 return IXGBE_SUCCESS; 1732 IXGBE_WRITE_FLUSH(hw);
1733
1734 /* Query if filter is present */
1735 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1736
1737 for (retry_count = 10; retry_count; retry_count--) {
1738 /* allow 10us for query to process */
1739 usec_delay(10);
1740 /* verify query completed successfully */
1741 fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1742 if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1743 break;
1744 }
1745
1746 if (!retry_count)
1747 err = IXGBE_ERR_FDIR_REINIT_FAILED;
1748
1749 /* if filter exists in hardware then remove it */
1750 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1751 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1752 IXGBE_WRITE_FLUSH(hw);
1753 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1754 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1755 }
1756
1757 return err;
1758 }
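A matching hedged removal sketch: the soft_id and input->formatted.bkt_hash must be the same values used when the filter was written, since together they address the filter in hardware.

	/* Remove the illustrative filter installed above; the caller holds
	 * the same lock it used around the add. */
	err = ixgbe_fdir_erase_perfect_filter_82599(hw, &input, 1);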
1759
1760 /**
1761 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1762 * @hw: pointer to hardware structure
1763 * @input: input bitstream
1764 * @input_mask: mask for the input bitstream
1765 * @soft_id: software index for the filters
1766 * @queue: queue index to direct traffic to
1767 *
1768 * Note that the caller of this function must lock before calling, since the
1769 * hardware writes must be protected from one another.
1770 **/
1771 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1772 union ixgbe_atr_input *input,
1773 union ixgbe_atr_input *input_mask,
1774 u16 soft_id, u8 queue)
1775 {
1776 s32 err = IXGBE_ERR_CONFIG;
1777
1778 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1779
1780 /*
1781 * Check flow_type formatting, and bail out before we touch the hardware
1782 * if there's a configuration issue
1783 */
1784 switch (input->formatted.flow_type) {
1785 case IXGBE_ATR_FLOW_TYPE_IPV4:
1786 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1787 if (input->formatted.dst_port || input->formatted.src_port) {
1788 DEBUGOUT(" Error on src/dst port\n");
1789 return IXGBE_ERR_CONFIG;
1790 }
1791 break;
1792 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1793 if (input->formatted.dst_port || input->formatted.src_port) {
1794 DEBUGOUT(" Error on src/dst port\n");
1795 return IXGBE_ERR_CONFIG;
1796 }
1797 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1798 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1799 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1800 IXGBE_ATR_L4TYPE_MASK;
1801 break;
1802 default:
1803 DEBUGOUT(" Error on flow type input\n");
1804 return err;
1805 }
1806
1807 /* program input mask into the HW */
1808 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1809 if (err)
1810 return err;
1811
1812 /* apply mask and compute/store hash */
1813 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1814
1815 /* program filters to filter memory */
1816 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
1817 soft_id, queue);
1794 } 1818 }
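Since the block comment above requires callers to serialize hardware writes themselves, here is a hedged end-to-end sketch of adding a perfect TCP/IPv4 filter. The mutex, field values, and soft_id/queue choices are hypothetical, not part of this change:

	/* fdir_mutex is an assumed kmutex_t serializing FDIRCMD access. */
	union ixgbe_atr_input input, input_mask;
	s32 err;

	(void) memset(&input, 0, sizeof (input));
	(void) memset(&input_mask, 0, sizeof (input_mask));
	input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
	input.formatted.src_port = htons(1024);	/* illustrative */
	input.formatted.dst_port = htons(80);	/* illustrative */
	input_mask.formatted.src_port = 0xFFFF;	/* exact match */
	input_mask.formatted.dst_port = 0xFFFF;	/* exact match */

	mutex_enter(&fdir_mutex);
	err = ixgbe_fdir_add_perfect_filter_82599(hw, &input, &input_mask,
	    1 /* soft_id */, 0 /* queue */);
	mutex_exit(&fdir_mutex);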
1795 1819
1796 /** 1820 /**
1797 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 1821 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1798 * @hw: pointer to hardware structure 1822 * @hw: pointer to hardware structure
1806 u32 core_ctl; 1830 u32 core_ctl;
1807 1831
1808 DEBUGFUNC("ixgbe_read_analog_reg8_82599"); 1832 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
1809 1833
1810 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | 1834 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1811 (reg << 8)); 1835 (reg << 8));
1812 IXGBE_WRITE_FLUSH(hw); 1836 IXGBE_WRITE_FLUSH(hw);
1813 usec_delay(10); 1837 usec_delay(10);
1814 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); 1838 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1815 *val = (u8)core_ctl; 1839 *val = (u8)core_ctl;
1816 1840
1838 1862
1839 return IXGBE_SUCCESS; 1863 return IXGBE_SUCCESS;
1840 } 1864 }
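A hedged usage sketch of the analog-register access protocol above (write IXGBE_CORECTL, flush, delay, read back); the register offset is illustrative, not a documented Omer register:

	u8 val;

	/* 0x24 is an arbitrary illustrative offset. */
	if (ixgbe_read_analog_reg8_82599(hw, 0x24, &val) == IXGBE_SUCCESS)
		DEBUGOUT1("analog reg 0x24 = 0x%x\n", val);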
1841 1865
1842 /** 1866 /**
1843 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx 1867 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1844 * @hw: pointer to hardware structure 1868 * @hw: pointer to hardware structure
1845 * 1869 *
1846 * Starts the hardware using the generic start_hw function 1870 * Starts the hardware using the generic start_hw function
1847 * and the generation-specific start_hw function. 1871 * and the generation-specific start_hw function.
1848 * Then performs revision-specific operations, if any. 1872 * Then performs revision-specific operations, if any.
1849 **/ 1873 **/
1850 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw) 1874 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1851 { 1875 {
1852 s32 ret_val = IXGBE_SUCCESS; 1876 s32 ret_val = IXGBE_SUCCESS;
1853 1877
1854 DEBUGFUNC("ixgbe_start_hw_rev_1__82599"); 1878 DEBUGFUNC("ixgbe_start_hw_82599");
1855 1879
1856 ret_val = ixgbe_start_hw_generic(hw); 1880 ret_val = ixgbe_start_hw_generic(hw);
1857 if (ret_val != IXGBE_SUCCESS) 1881 if (ret_val != IXGBE_SUCCESS)
1858 goto out; 1882 goto out;
1859 1883
1889 if (status != IXGBE_SUCCESS) { 1913 if (status != IXGBE_SUCCESS) {
1890 /* 82599 10GBASE-T requires an external PHY */ 1914 /* 82599 10GBASE-T requires an external PHY */
1891 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) 1915 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1892 goto out; 1916 goto out;
1893 else 1917 else
1894 status = ixgbe_identify_sfp_module_generic(hw); 1918 status = ixgbe_identify_module_generic(hw);
1895 } 1919 }
1896 1920
1897 /* Set PHY type none if no PHY detected */ 1921 /* Set PHY type none if no PHY detected */
1898 if (hw->phy.type == ixgbe_phy_unknown) { 1922 if (hw->phy.type == ixgbe_phy_unknown) {
1899 hw->phy.type = ixgbe_phy_none; 1923 hw->phy.type = ixgbe_phy_none;
1930 1954
1931 hw->phy.ops.identify(hw); 1955 hw->phy.ops.identify(hw);
1932 1956
1933 switch (hw->phy.type) { 1957 switch (hw->phy.type) {
1934 case ixgbe_phy_tn: 1958 case ixgbe_phy_tn:
1935 case ixgbe_phy_aq:
1936 case ixgbe_phy_cu_unknown: 1959 case ixgbe_phy_cu_unknown:
1937 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, 1960 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1938 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); 1961 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1939 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) 1962 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1940 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 1963 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2014 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 2037 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2015 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 2038 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2016 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 2039 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2017 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) 2040 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2018 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; 2041 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2042 else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2043 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2019 break; 2044 break;
2020 default: 2045 default:
2021 break; 2046 break;
2022 } 2047 }
2023 2048
2032 * 2057 *
2033 * Enables the Rx DMA unit for 82599 2058 * Enables the Rx DMA unit for 82599
2034 **/ 2059 **/
2035 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) 2060 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2036 { 2061 {
2037 #define IXGBE_MAX_SECRX_POLL 30
2038 int i;
2039 int secrxreg;
2040 2062
2041 DEBUGFUNC("ixgbe_enable_rx_dma_82599"); 2063 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2042 2064
2043 /* 2065 /*
2044 * Workaround for 82599 silicon errata when enabling the Rx datapath. 2066 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2045 * If traffic is incoming before we enable the Rx unit, it could hang 2067 * If traffic is incoming before we enable the Rx unit, it could hang
2046 * the Rx DMA unit. Therefore, make sure the security engine is 2068 * the Rx DMA unit. Therefore, make sure the security engine is
2047 * completely disabled prior to enabling the Rx unit. 2069 * completely disabled prior to enabling the Rx unit.
2048 */ 2070 */
2049 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2071
2050 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2072 hw->mac.ops.disable_sec_rx_path(hw);
2051 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2052 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
2053 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
2054 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
2055 break;
2056 else
2057 /* Use interrupt-safe sleep just in case */
2058 usec_delay(10);
2059 }
2060
2061 /* For informational purposes only */
2062 if (i >= IXGBE_MAX_SECRX_POLL)
2063 DEBUGOUT("Rx unit being enabled before security "
2064 "path fully disabled. Continuing with init.\n");
2065 2073
2066 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2074 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2067 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2075
2068 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2076 hw->mac.ops.enable_sec_rx_path(hw);
2069 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
2070 IXGBE_WRITE_FLUSH(hw);
2071 2077
2072 return IXGBE_SUCCESS; 2078 return IXGBE_SUCCESS;
2073 } 2079 }
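The open-coded SECRXCTRL/SECRXSTAT sequence deleted above now lives behind the disable_sec_rx_path/enable_sec_rx_path mac ops. As a hedged reference, a sketch of the disable side modeled directly on the removed lines (not necessarily the authoritative generic implementation):

	#define	IXGBE_MAX_SECRX_POLL	30

	/* Quiesce the security engine before touching RXCTRL, per the
	 * 82599 errata workaround described above. */
	static void
	sec_rx_path_disable_sketch(struct ixgbe_hw *hw)
	{
		u32 secrxreg;
		int i;

		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
		secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
		for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
			secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
			if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
				break;
			usec_delay(10);
		}
	}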
2074 2080
2075 /** 2081 /**
2102 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2108 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2103 goto fw_version_out; 2109 goto fw_version_out;
2104 2110
2105 /* get the offset to the Pass Through Patch Configuration block */ 2111 /* get the offset to the Pass Through Patch Configuration block */
2106 hw->eeprom.ops.read(hw, (fw_offset + 2112 hw->eeprom.ops.read(hw, (fw_offset +
2107 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), 2113 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2108 &fw_ptp_cfg_offset); 2114 &fw_ptp_cfg_offset);
2109 2115
2110 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2116 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2111 goto fw_version_out; 2117 goto fw_version_out;
2112 2118
2113 /* get the firmware version */ 2119 /* get the firmware version */
2114 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + 2120 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2115 IXGBE_FW_PATCH_VERSION_4), 2121 IXGBE_FW_PATCH_VERSION_4), &fw_version);
2116 &fw_version);
2117 2122
2118 if (fw_version > 0x5) 2123 if (fw_version > 0x5)
2119 status = IXGBE_SUCCESS; 2124 status = IXGBE_SUCCESS;
2120 2125
2121 fw_version_out: 2126 fw_version_out:
2144 (fw_offset == 0) || (fw_offset == 0xFFFF)) 2149 (fw_offset == 0) || (fw_offset == 0xFFFF))
2145 goto out; 2150 goto out;
2146 2151
2147 /* get the offset to the LESM Parameters block */ 2152 /* get the offset to the LESM Parameters block */
2148 status = hw->eeprom.ops.read(hw, (fw_offset + 2153 status = hw->eeprom.ops.read(hw, (fw_offset +
2149 IXGBE_FW_LESM_PARAMETERS_PTR), 2154 IXGBE_FW_LESM_PARAMETERS_PTR),
2150 &fw_lesm_param_offset); 2155 &fw_lesm_param_offset);
2151 2156
2152 if ((status != IXGBE_SUCCESS) || 2157 if ((status != IXGBE_SUCCESS) ||
2153 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) 2158 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2154 goto out; 2159 goto out;
2155 2160
2156 /* get the lesm state word */ 2161 /* get the lesm state word */
2157 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + 2162 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2158 IXGBE_FW_LESM_STATE_1), 2163 IXGBE_FW_LESM_STATE_1),
2159 &fw_lesm_state); 2164 &fw_lesm_state);
2160 2165
2161 if ((status == IXGBE_SUCCESS) && 2166 if ((status == IXGBE_SUCCESS) &&
2162 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) 2167 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2163 lesm_enabled = TRUE; 2168 lesm_enabled = TRUE;
2164 2169
2165 out: 2170 out:
2166 return lesm_enabled; 2171 return lesm_enabled;
2167 } 2172 }
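A hedged note on how this predicate is typically consumed: link-setup paths elsewhere in the driver (outside this hunk) check it before touching AUTOC, because LESM firmware shares ownership of the link-establishment sequence. An assumed caller pattern, for illustration only:

	/* Assumed pattern: take the MAC CSR software/firmware semaphore
	 * before writing AUTOC when LESM firmware is active. */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw))
		(void) hw->mac.ops.acquire_swfw_sync(hw,
		    IXGBE_GSSR_MAC_CSR_SM);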
2168 2173
2169 2174 /**
2175 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2176 * fastest available method
2177 *
2178 * @hw: pointer to hardware structure
2179 * @offset: offset of word in EEPROM to read
2180 * @words: number of words
2181 * @data: word(s) read from the EEPROM
2182 *
2183 * Retrieves 16 bit word(s) read from EEPROM
2184 **/
2185 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2186 u16 words, u16 *data)
2187 {
2188 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2189 s32 ret_val = IXGBE_ERR_CONFIG;
2190
2191 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2192
2193 /*
2194 * If the EEPROM is detected and can be addressed using 14 bits,
2195 * use EERD; otherwise fall back to bit-bang access
2196 */
2197 if ((eeprom->type == ixgbe_eeprom_spi) &&
2198 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2199 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2200 data);
2201 else
2202 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2203 words,
2204 data);
2205
2206 return ret_val;
2207 }
2208
2209 /**
2210 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2211 * fastest available method
2212 *
2213 * @hw: pointer to hardware structure
2214 * @offset: offset of word in the EEPROM to read
2215 * @data: word read from the EEPROM
2216 *
2217 * Reads a 16 bit word from the EEPROM
2218 **/
2219 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2220 u16 offset, u16 *data)
2221 {
2222 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2223 s32 ret_val = IXGBE_ERR_CONFIG;
2224
2225 DEBUGFUNC("ixgbe_read_eeprom_82599");
2226
2227 /*
2228 * If the EEPROM is detected and can be addressed using 14 bits,
2229 * use EERD; otherwise fall back to bit-bang access
2230 */
2231 if ((eeprom->type == ixgbe_eeprom_spi) &&
2232 (offset <= IXGBE_EERD_MAX_ADDR))
2233 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2234 else
2235 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2236
2237 return ret_val;
2238 }
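Both readers are static and are reached through the EEPROM ops table. A hedged sketch of the presumed wiring and a call through it (the assignment site is in the init path, outside this hunk):

	/* Presumed wiring in the 82599 init code: */
	eeprom->ops.read = ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599;

	/* Callers then read words without caring whether EERD or the
	 * bit-bang path services the request: */
	u16 fw_offset;
	(void) hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);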
2239
2240