Mercurial > illumos > illumos-gate
comparison usr/src/uts/common/io/ixgbe/ixgbe_82599.c @ 13448:f03238cace0b
1390 ixgbe update from FreeBSD via Joyent
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Garrett D'Amore <garrett@nexenta.com>
Reviewed by: Gordon Ross <gwr@nexenta.com>
Approved by: Richard Lowe <richlowe@richlowe.net>
author | Jerry Jelinek <jerry.jelinek@joyent.com> |
---|---|
date | Fri, 09 Sep 2011 10:48:44 -0400 |
parents | 22e6d3edaab5 |
children | 7b5944437191 |
comparison
equal
deleted
inserted
replaced
13447:99622235dae0 | 13448:f03238cace0b |
---|---|
1 /* | 1 /****************************************************************************** |
2 * CDDL HEADER START | 2 |
3 * | 3 Copyright (c) 2001-2010, Intel Corporation |
4 * The contents of this file are subject to the terms of the | 4 All rights reserved. |
5 * Common Development and Distribution License (the "License"). | 5 |
6 * You may not use this file except in compliance with the License. | 6 Redistribution and use in source and binary forms, with or without |
7 * | 7 modification, are permitted provided that the following conditions are met: |
8 * You can obtain a copy of the license at: | 8 |
9 * http://www.opensolaris.org/os/licensing. | 9 1. Redistributions of source code must retain the above copyright notice, |
10 * See the License for the specific language governing permissions | 10 this list of conditions and the following disclaimer. |
11 * and limitations under the License. | 11 |
12 * | 12 2. Redistributions in binary form must reproduce the above copyright |
13 * When using or redistributing this file, you may do so under the | 13 notice, this list of conditions and the following disclaimer in the |
14 * License only. No other modification of this header is permitted. | 14 documentation and/or other materials provided with the distribution. |
15 * | 15 |
16 * If applicable, add the following below this CDDL HEADER, with the | 16 3. Neither the name of the Intel Corporation nor the names of its |
17 * fields enclosed by brackets "[]" replaced with your own identifying | 17 contributors may be used to endorse or promote products derived from |
18 * information: Portions Copyright [yyyy] [name of copyright owner] | 18 this software without specific prior written permission. |
19 * | 19 |
20 * CDDL HEADER END | 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
21 */ | 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
22 | 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
23 /* | 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
24 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved. | 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
25 */ | 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
26 | 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
27 /* | 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
28 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. | 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
29 */ | 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
30 | 30 POSSIBILITY OF SUCH DAMAGE. |
31 /* IntelVersion: 1.217 scm_061610_003709 */ | 31 |
32 ******************************************************************************/ | |
33 /*$FreeBSD$*/ | |
32 | 34 |
33 #include "ixgbe_type.h" | 35 #include "ixgbe_type.h" |
34 #include "ixgbe_api.h" | 36 #include "ixgbe_api.h" |
35 #include "ixgbe_common.h" | 37 #include "ixgbe_common.h" |
36 #include "ixgbe_phy.h" | 38 #include "ixgbe_phy.h" |
37 | 39 |
38 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); | 40 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); |
39 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, | 41 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, |
40 ixgbe_link_speed *speed, bool *autoneg); | 42 ixgbe_link_speed *speed, |
43 bool *autoneg); | |
41 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); | 44 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); |
42 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | 45 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
43 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | 46 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
44 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); | 47 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); |
45 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | 48 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, |
46 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); | 49 ixgbe_link_speed speed, bool autoneg, |
50 bool autoneg_wait_to_complete); | |
47 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, | 51 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, |
48 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); | 52 ixgbe_link_speed speed, bool autoneg, |
53 bool autoneg_wait_to_complete); | |
49 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, | 54 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, |
50 bool autoneg_wait_to_complete); | 55 bool autoneg_wait_to_complete); |
51 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, | 56 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, |
52 ixgbe_link_speed speed, bool autoneg, | 57 ixgbe_link_speed speed, |
53 bool autoneg_wait_to_complete); | 58 bool autoneg, |
59 bool autoneg_wait_to_complete); | |
54 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, | 60 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, |
55 ixgbe_link_speed speed, bool autoneg, | 61 ixgbe_link_speed speed, |
56 bool autoneg_wait_to_complete); | 62 bool autoneg, |
63 bool autoneg_wait_to_complete); | |
57 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); | 64 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); |
58 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); | 65 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); |
59 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); | 66 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); |
60 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); | 67 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); |
61 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); | 68 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); |
62 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw); | 69 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw); |
63 void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw); | |
64 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); | 70 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); |
65 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); | 71 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); |
66 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); | 72 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); |
67 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); | 73 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); |
68 s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps); | |
69 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); | 74 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); |
70 | 75 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); |
71 void | 76 |
72 ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) | 77 |
78 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) | |
73 { | 79 { |
74 struct ixgbe_mac_info *mac = &hw->mac; | 80 struct ixgbe_mac_info *mac = &hw->mac; |
75 | 81 |
76 DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); | 82 DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); |
77 | 83 |
78 if (hw->phy.multispeed_fiber) { | 84 /* enable the laser control functions for SFP+ fiber */ |
79 /* Set up dual speed SFP+ support */ | 85 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { |
80 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; | |
81 mac->ops.disable_tx_laser = | 86 mac->ops.disable_tx_laser = |
82 &ixgbe_disable_tx_laser_multispeed_fiber; | 87 &ixgbe_disable_tx_laser_multispeed_fiber; |
83 mac->ops.enable_tx_laser = | 88 mac->ops.enable_tx_laser = |
84 &ixgbe_enable_tx_laser_multispeed_fiber; | 89 &ixgbe_enable_tx_laser_multispeed_fiber; |
85 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; | 90 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; |
91 | |
86 } else { | 92 } else { |
87 mac->ops.disable_tx_laser = NULL; | 93 mac->ops.disable_tx_laser = NULL; |
88 mac->ops.enable_tx_laser = NULL; | 94 mac->ops.enable_tx_laser = NULL; |
89 mac->ops.flap_tx_laser = NULL; | 95 mac->ops.flap_tx_laser = NULL; |
96 } | |
97 | |
98 if (hw->phy.multispeed_fiber) { | |
99 /* Set up dual speed SFP+ support */ | |
100 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; | |
101 } else { | |
90 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && | 102 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && |
91 (hw->phy.smart_speed == ixgbe_smart_speed_auto || | 103 (hw->phy.smart_speed == ixgbe_smart_speed_auto || |
92 hw->phy.smart_speed == ixgbe_smart_speed_on)) | 104 hw->phy.smart_speed == ixgbe_smart_speed_on) && |
105 !ixgbe_verify_lesm_fw_enabled_82599(hw)) { | |
93 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; | 106 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; |
94 else | 107 } else { |
95 mac->ops.setup_link = &ixgbe_setup_mac_link_82599; | 108 mac->ops.setup_link = &ixgbe_setup_mac_link_82599; |
96 } | 109 } |
97 } | 110 } |
98 | 111 } |
99 /* | 112 |
100 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init | 113 /** |
101 * @hw: pointer to hardware structure | 114 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init |
102 * | 115 * @hw: pointer to hardware structure |
103 * Initialize any function pointers that were not able to be | 116 * |
104 * set during init_shared_code because the PHY/SFP type was | 117 * Initialize any function pointers that were not able to be |
105 * not known. Perform the SFP init if necessary. | 118 * set during init_shared_code because the PHY/SFP type was |
106 * | 119 * not known. Perform the SFP init if necessary. |
107 */ | 120 * |
108 s32 | 121 **/ |
109 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) | 122 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) |
110 { | 123 { |
111 struct ixgbe_mac_info *mac = &hw->mac; | 124 struct ixgbe_mac_info *mac = &hw->mac; |
112 struct ixgbe_phy_info *phy = &hw->phy; | 125 struct ixgbe_phy_info *phy = &hw->phy; |
113 s32 ret_val = IXGBE_SUCCESS; | 126 s32 ret_val = IXGBE_SUCCESS; |
114 | 127 |
126 | 139 |
127 /* If copper media, overwrite with copper function pointers */ | 140 /* If copper media, overwrite with copper function pointers */ |
128 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { | 141 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { |
129 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; | 142 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; |
130 mac->ops.get_link_capabilities = | 143 mac->ops.get_link_capabilities = |
131 &ixgbe_get_copper_link_capabilities_generic; | 144 &ixgbe_get_copper_link_capabilities_generic; |
132 } | 145 } |
133 | 146 |
134 /* Set necessary function pointers based on phy type */ | 147 /* Set necessary function pointers based on phy type */ |
135 switch (hw->phy.type) { | 148 switch (hw->phy.type) { |
136 case ixgbe_phy_tn: | 149 case ixgbe_phy_tn: |
137 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; | 150 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; |
138 phy->ops.check_link = &ixgbe_check_phy_link_tnx; | 151 phy->ops.check_link = &ixgbe_check_phy_link_tnx; |
139 phy->ops.get_firmware_version = | 152 phy->ops.get_firmware_version = |
140 &ixgbe_get_phy_firmware_version_tnx; | 153 &ixgbe_get_phy_firmware_version_tnx; |
141 break; | 154 break; |
142 case ixgbe_phy_aq: | 155 case ixgbe_phy_aq: |
143 phy->ops.get_firmware_version = | 156 phy->ops.get_firmware_version = |
144 &ixgbe_get_phy_firmware_version_generic; | 157 &ixgbe_get_phy_firmware_version_generic; |
145 break; | 158 break; |
146 default: | 159 default: |
147 break; | 160 break; |
148 } | 161 } |
149 | |
150 init_phy_ops_out: | 162 init_phy_ops_out: |
151 return (ret_val); | 163 return ret_val; |
152 } | 164 } |
153 | 165 |
154 s32 | 166 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) |
155 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) | |
156 { | 167 { |
157 s32 ret_val = IXGBE_SUCCESS; | 168 s32 ret_val = IXGBE_SUCCESS; |
169 u32 reg_anlp1 = 0; | |
170 u32 i = 0; | |
158 u16 list_offset, data_offset, data_value; | 171 u16 list_offset, data_offset, data_value; |
159 | 172 |
160 DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); | 173 DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); |
161 | 174 |
162 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { | 175 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { |
163 ixgbe_init_mac_link_ops_82599(hw); | 176 ixgbe_init_mac_link_ops_82599(hw); |
164 | 177 |
165 hw->phy.ops.reset = NULL; | 178 hw->phy.ops.reset = NULL; |
166 | 179 |
167 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, | 180 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, |
168 &data_offset); | 181 &data_offset); |
169 | |
170 if (ret_val != IXGBE_SUCCESS) | 182 if (ret_val != IXGBE_SUCCESS) |
171 goto setup_sfp_out; | 183 goto setup_sfp_out; |
172 | 184 |
173 /* PHY config will finish before releasing the semaphore */ | 185 /* PHY config will finish before releasing the semaphore */ |
174 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); | 186 ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); |
181 while (data_value != 0xffff) { | 193 while (data_value != 0xffff) { |
182 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); | 194 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); |
183 IXGBE_WRITE_FLUSH(hw); | 195 IXGBE_WRITE_FLUSH(hw); |
184 hw->eeprom.ops.read(hw, ++data_offset, &data_value); | 196 hw->eeprom.ops.read(hw, ++data_offset, &data_value); |
185 } | 197 } |
186 /* Now restart DSP by setting Restart_AN */ | |
187 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, | |
188 (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART)); | |
189 | 198 |
190 /* Release the semaphore */ | 199 /* Release the semaphore */ |
191 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); | 200 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); |
192 /* Delay obtaining semaphore again to allow FW access */ | 201 /* Delay obtaining semaphore again to allow FW access */ |
193 msec_delay(hw->eeprom.semaphore_delay); | 202 msec_delay(hw->eeprom.semaphore_delay); |
203 | |
204 /* Now restart DSP by setting Restart_AN and clearing LMS */ | |
205 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, | |
206 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | | |
207 IXGBE_AUTOC_AN_RESTART)); | |
208 | |
209 /* Wait for AN to leave state 0 */ | |
210 for (i = 0; i < 10; i++) { | |
211 msec_delay(4); | |
212 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); | |
213 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) | |
214 break; | |
215 } | |
216 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { | |
217 DEBUGOUT("sfp module setup not complete\n"); | |
218 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; | |
219 goto setup_sfp_out; | |
220 } | |
221 | |
222 /* Restart DSP by setting Restart_AN and return to SFI mode */ | |
223 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, | |
224 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | | |
225 IXGBE_AUTOC_AN_RESTART)); | |
194 } | 226 } |
195 | 227 |
196 setup_sfp_out: | 228 setup_sfp_out: |
197 return (ret_val); | 229 return ret_val; |
198 } | 230 } |
199 | 231 |
200 /* | 232 /** |
201 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type | 233 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type |
202 * @hw: pointer to hardware structure | 234 * @hw: pointer to hardware structure |
203 * | 235 * |
204 * Initialize the function pointers and assign the MAC type for 82599. | 236 * Initialize the function pointers and assign the MAC type for 82599. |
205 * Does not touch the hardware. | 237 * Does not touch the hardware. |
206 */ | 238 **/ |
207 | 239 |
208 s32 | 240 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) |
209 ixgbe_init_ops_82599(struct ixgbe_hw *hw) | |
210 { | 241 { |
211 struct ixgbe_mac_info *mac = &hw->mac; | 242 struct ixgbe_mac_info *mac = &hw->mac; |
212 struct ixgbe_phy_info *phy = &hw->phy; | 243 struct ixgbe_phy_info *phy = &hw->phy; |
213 s32 ret_val; | 244 s32 ret_val; |
214 | 245 |
221 phy->ops.identify = &ixgbe_identify_phy_82599; | 252 phy->ops.identify = &ixgbe_identify_phy_82599; |
222 phy->ops.init = &ixgbe_init_phy_ops_82599; | 253 phy->ops.init = &ixgbe_init_phy_ops_82599; |
223 | 254 |
224 /* MAC */ | 255 /* MAC */ |
225 mac->ops.reset_hw = &ixgbe_reset_hw_82599; | 256 mac->ops.reset_hw = &ixgbe_reset_hw_82599; |
226 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82599; | 257 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2; |
227 mac->ops.get_media_type = &ixgbe_get_media_type_82599; | 258 mac->ops.get_media_type = &ixgbe_get_media_type_82599; |
228 mac->ops.get_supported_physical_layer = | 259 mac->ops.get_supported_physical_layer = |
229 &ixgbe_get_supported_physical_layer_82599; | 260 &ixgbe_get_supported_physical_layer_82599; |
230 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599; | 261 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599; |
231 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599; | 262 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599; |
232 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599; | 263 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599; |
233 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599; | 264 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599; |
234 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82599; | |
235 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic; | 265 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic; |
236 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic; | 266 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic; |
237 mac->ops.get_device_caps = &ixgbe_get_device_caps_82599; | 267 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic; |
238 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic; | 268 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic; |
239 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic; | 269 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic; |
240 | 270 |
241 /* RAR, Multicast, VLAN */ | 271 /* RAR, Multicast, VLAN */ |
242 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic; | 272 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic; |
245 mac->rar_highwater = 1; | 275 mac->rar_highwater = 1; |
246 mac->ops.set_vfta = &ixgbe_set_vfta_generic; | 276 mac->ops.set_vfta = &ixgbe_set_vfta_generic; |
247 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; | 277 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; |
248 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic; | 278 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic; |
249 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599; | 279 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599; |
280 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing; | |
281 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing; | |
250 | 282 |
251 /* Link */ | 283 /* Link */ |
252 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599; | 284 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599; |
253 mac->ops.check_link = &ixgbe_check_mac_link_generic; | 285 mac->ops.check_link = &ixgbe_check_mac_link_generic; |
254 ixgbe_init_mac_link_ops_82599(hw); | 286 ixgbe_init_mac_link_ops_82599(hw); |
255 | 287 |
256 mac->mcft_size = 128; | 288 mac->mcft_size = 128; |
257 mac->vft_size = 128; | 289 mac->vft_size = 128; |
258 mac->num_rar_entries = 128; | 290 mac->num_rar_entries = 128; |
259 mac->max_tx_queues = 128; | 291 mac->rx_pb_size = 512; |
260 mac->max_rx_queues = 128; | 292 mac->max_tx_queues = 128; |
293 mac->max_rx_queues = 128; | |
261 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); | 294 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); |
262 | 295 |
263 return (ret_val); | 296 return ret_val; |
264 } | 297 } |
265 | 298 |
266 /* | 299 /** |
267 * ixgbe_get_link_capabilities_82599 - Determines link capabilities | 300 * ixgbe_get_link_capabilities_82599 - Determines link capabilities |
268 * @hw: pointer to hardware structure | 301 * @hw: pointer to hardware structure |
269 * @speed: pointer to link speed | 302 * @speed: pointer to link speed |
270 * @negotiation: true when autoneg or autotry is enabled | 303 * @negotiation: TRUE when autoneg or autotry is enabled |
271 * | 304 * |
272 * Determines the link capabilities by reading the AUTOC register. | 305 * Determines the link capabilities by reading the AUTOC register. |
273 */ | 306 **/ |
274 s32 | 307 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, |
275 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, | 308 ixgbe_link_speed *speed, |
276 ixgbe_link_speed *speed, bool *negotiation) | 309 bool *negotiation) |
277 { | 310 { |
278 s32 status = IXGBE_SUCCESS; | 311 s32 status = IXGBE_SUCCESS; |
279 u32 autoc = 0; | 312 u32 autoc = 0; |
280 | 313 |
281 DEBUGFUNC("ixgbe_get_link_capabilities_82599"); | 314 DEBUGFUNC("ixgbe_get_link_capabilities_82599"); |
315 | |
282 | 316 |
283 /* Check if 1G SFP module. */ | 317 /* Check if 1G SFP module. */ |
284 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || | 318 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || |
285 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { | 319 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { |
286 *speed = IXGBE_LINK_SPEED_1GB_FULL; | 320 *speed = IXGBE_LINK_SPEED_1GB_FULL; |
287 *negotiation = true; | 321 *negotiation = TRUE; |
288 goto out; | 322 goto out; |
289 } | 323 } |
290 | 324 |
291 /* | 325 /* |
292 * Determine link capabilities based on the stored value of AUTOC, | 326 * Determine link capabilities based on the stored value of AUTOC, |
299 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); | 333 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
300 | 334 |
301 switch (autoc & IXGBE_AUTOC_LMS_MASK) { | 335 switch (autoc & IXGBE_AUTOC_LMS_MASK) { |
302 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: | 336 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: |
303 *speed = IXGBE_LINK_SPEED_1GB_FULL; | 337 *speed = IXGBE_LINK_SPEED_1GB_FULL; |
304 *negotiation = false; | 338 *negotiation = FALSE; |
305 break; | 339 break; |
306 | 340 |
307 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: | 341 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: |
308 *speed = IXGBE_LINK_SPEED_10GB_FULL; | 342 *speed = IXGBE_LINK_SPEED_10GB_FULL; |
309 *negotiation = false; | 343 *negotiation = FALSE; |
310 break; | 344 break; |
311 | 345 |
312 case IXGBE_AUTOC_LMS_1G_AN: | 346 case IXGBE_AUTOC_LMS_1G_AN: |
313 *speed = IXGBE_LINK_SPEED_1GB_FULL; | 347 *speed = IXGBE_LINK_SPEED_1GB_FULL; |
314 *negotiation = true; | 348 *negotiation = TRUE; |
315 break; | 349 break; |
316 | 350 |
317 case IXGBE_AUTOC_LMS_10G_SERIAL: | 351 case IXGBE_AUTOC_LMS_10G_SERIAL: |
318 *speed = IXGBE_LINK_SPEED_10GB_FULL; | 352 *speed = IXGBE_LINK_SPEED_10GB_FULL; |
319 *negotiation = false; | 353 *negotiation = FALSE; |
320 break; | 354 break; |
321 | 355 |
322 case IXGBE_AUTOC_LMS_KX4_KX_KR: | 356 case IXGBE_AUTOC_LMS_KX4_KX_KR: |
323 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: | 357 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: |
324 *speed = IXGBE_LINK_SPEED_UNKNOWN; | 358 *speed = IXGBE_LINK_SPEED_UNKNOWN; |
326 *speed |= IXGBE_LINK_SPEED_10GB_FULL; | 360 *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
327 if (autoc & IXGBE_AUTOC_KX4_SUPP) | 361 if (autoc & IXGBE_AUTOC_KX4_SUPP) |
328 *speed |= IXGBE_LINK_SPEED_10GB_FULL; | 362 *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
329 if (autoc & IXGBE_AUTOC_KX_SUPP) | 363 if (autoc & IXGBE_AUTOC_KX_SUPP) |
330 *speed |= IXGBE_LINK_SPEED_1GB_FULL; | 364 *speed |= IXGBE_LINK_SPEED_1GB_FULL; |
331 *negotiation = true; | 365 *negotiation = TRUE; |
332 break; | 366 break; |
333 | 367 |
334 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: | 368 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: |
335 *speed = IXGBE_LINK_SPEED_100_FULL; | 369 *speed = IXGBE_LINK_SPEED_100_FULL; |
336 if (autoc & IXGBE_AUTOC_KR_SUPP) | 370 if (autoc & IXGBE_AUTOC_KR_SUPP) |
337 *speed |= IXGBE_LINK_SPEED_10GB_FULL; | 371 *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
338 if (autoc & IXGBE_AUTOC_KX4_SUPP) | 372 if (autoc & IXGBE_AUTOC_KX4_SUPP) |
339 *speed |= IXGBE_LINK_SPEED_10GB_FULL; | 373 *speed |= IXGBE_LINK_SPEED_10GB_FULL; |
340 if (autoc & IXGBE_AUTOC_KX_SUPP) | 374 if (autoc & IXGBE_AUTOC_KX_SUPP) |
341 *speed |= IXGBE_LINK_SPEED_1GB_FULL; | 375 *speed |= IXGBE_LINK_SPEED_1GB_FULL; |
342 *negotiation = true; | 376 *negotiation = TRUE; |
343 break; | 377 break; |
344 | 378 |
345 case IXGBE_AUTOC_LMS_SGMII_1G_100M: | 379 case IXGBE_AUTOC_LMS_SGMII_1G_100M: |
346 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; | 380 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; |
347 *negotiation = false; | 381 *negotiation = FALSE; |
348 break; | 382 break; |
349 | 383 |
350 default: | 384 default: |
351 status = IXGBE_ERR_LINK_SETUP; | 385 status = IXGBE_ERR_LINK_SETUP; |
352 goto out; | 386 goto out; |
353 } | 387 } |
354 | 388 |
355 if (hw->phy.multispeed_fiber) { | 389 if (hw->phy.multispeed_fiber) { |
356 *speed |= IXGBE_LINK_SPEED_10GB_FULL | | 390 *speed |= IXGBE_LINK_SPEED_10GB_FULL | |
357 IXGBE_LINK_SPEED_1GB_FULL; | 391 IXGBE_LINK_SPEED_1GB_FULL; |
358 *negotiation = true; | 392 *negotiation = TRUE; |
359 } | 393 } |
360 | 394 |
361 out: | 395 out: |
362 return (status); | 396 return status; |
363 } | 397 } |
364 | 398 |
365 /* | 399 /** |
366 * ixgbe_get_media_type_82599 - Get media type | 400 * ixgbe_get_media_type_82599 - Get media type |
367 * @hw: pointer to hardware structure | 401 * @hw: pointer to hardware structure |
368 * | 402 * |
369 * Returns the media type (fiber, copper, backplane) | 403 * Returns the media type (fiber, copper, backplane) |
370 */ | 404 **/ |
371 enum ixgbe_media_type | 405 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) |
372 ixgbe_get_media_type_82599(struct ixgbe_hw *hw) | |
373 { | 406 { |
374 enum ixgbe_media_type media_type; | 407 enum ixgbe_media_type media_type; |
375 | 408 |
376 DEBUGFUNC("ixgbe_get_media_type_82599"); | 409 DEBUGFUNC("ixgbe_get_media_type_82599"); |
377 | 410 |
378 /* Detect if there is a copper PHY attached. */ | 411 /* Detect if there is a copper PHY attached. */ |
379 if (hw->phy.type == ixgbe_phy_cu_unknown || | 412 switch (hw->phy.type) { |
380 hw->phy.type == ixgbe_phy_tn || | 413 case ixgbe_phy_cu_unknown: |
381 hw->phy.type == ixgbe_phy_aq) { | 414 case ixgbe_phy_tn: |
415 case ixgbe_phy_aq: | |
382 media_type = ixgbe_media_type_copper; | 416 media_type = ixgbe_media_type_copper; |
383 goto out; | 417 goto out; |
418 default: | |
419 break; | |
384 } | 420 } |
385 | 421 |
386 switch (hw->device_id) { | 422 switch (hw->device_id) { |
387 case IXGBE_DEV_ID_82599_KX4: | 423 case IXGBE_DEV_ID_82599_KX4: |
388 case IXGBE_DEV_ID_82599_KX4_MEZZ: | 424 case IXGBE_DEV_ID_82599_KX4_MEZZ: |
389 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: | 425 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: |
390 case IXGBE_DEV_ID_82599_KR: | 426 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: |
391 case IXGBE_DEV_ID_82599_XAUI_LOM: | 427 case IXGBE_DEV_ID_82599_XAUI_LOM: |
392 /* Default device ID is mezzanine card KX/KX4 */ | 428 /* Default device ID is mezzanine card KX/KX4 */ |
393 media_type = ixgbe_media_type_backplane; | 429 media_type = ixgbe_media_type_backplane; |
394 break; | 430 break; |
395 case IXGBE_DEV_ID_82599_SFP: | 431 case IXGBE_DEV_ID_82599_SFP: |
396 case IXGBE_DEV_ID_82599_SFP_EM: | 432 case IXGBE_DEV_ID_82599_SFP_FCOE: |
397 media_type = ixgbe_media_type_fiber; | 433 media_type = ixgbe_media_type_fiber; |
398 break; | 434 break; |
399 case IXGBE_DEV_ID_82599_CX4: | 435 case IXGBE_DEV_ID_82599_CX4: |
400 media_type = ixgbe_media_type_cx4; | 436 media_type = ixgbe_media_type_cx4; |
401 break; | 437 break; |
405 default: | 441 default: |
406 media_type = ixgbe_media_type_unknown; | 442 media_type = ixgbe_media_type_unknown; |
407 break; | 443 break; |
408 } | 444 } |
409 out: | 445 out: |
410 return (media_type); | 446 return media_type; |
411 } | 447 } |
412 | 448 |
413 /* | 449 /** |
414 * ixgbe_start_mac_link_82599 - Setup MAC link settings | 450 * ixgbe_start_mac_link_82599 - Setup MAC link settings |
415 * @hw: pointer to hardware structure | 451 * @hw: pointer to hardware structure |
416 * | 452 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed |
417 * Configures link settings based on values in the ixgbe_hw struct. | 453 * |
418 * Restarts the link. Performs autonegotiation if needed. | 454 * Configures link settings based on values in the ixgbe_hw struct. |
419 */ | 455 * Restarts the link. Performs autonegotiation if needed. |
420 s32 | 456 **/ |
421 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) | 457 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, |
458 bool autoneg_wait_to_complete) | |
422 { | 459 { |
423 u32 autoc_reg; | 460 u32 autoc_reg; |
424 u32 links_reg; | 461 u32 links_reg; |
425 u32 i; | 462 u32 i; |
426 s32 status = IXGBE_SUCCESS; | 463 s32 status = IXGBE_SUCCESS; |
427 | 464 |
428 DEBUGFUNC("ixgbe_start_mac_link_82599"); | 465 DEBUGFUNC("ixgbe_start_mac_link_82599"); |
429 | 466 |
467 | |
430 /* Restart link */ | 468 /* Restart link */ |
431 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | 469 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
432 autoc_reg |= IXGBE_AUTOC_AN_RESTART; | 470 autoc_reg |= IXGBE_AUTOC_AN_RESTART; |
433 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); | 471 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); |
434 | 472 |
435 /* Only poll for autoneg to complete if specified to do so */ | 473 /* Only poll for autoneg to complete if specified to do so */ |
436 if (autoneg_wait_to_complete) { | 474 if (autoneg_wait_to_complete) { |
437 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == | 475 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == |
438 IXGBE_AUTOC_LMS_KX4_KX_KR || | 476 IXGBE_AUTOC_LMS_KX4_KX_KR || |
439 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == | 477 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == |
440 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || | 478 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || |
441 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == | 479 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == |
442 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { | 480 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { |
443 links_reg = 0; /* Just in case Autoneg time = 0 */ | 481 links_reg = 0; /* Just in case Autoneg time = 0 */ |
444 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { | 482 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { |
445 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); | 483 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); |
446 if (links_reg & IXGBE_LINKS_KX_AN_COMP) | 484 if (links_reg & IXGBE_LINKS_KX_AN_COMP) |
447 break; | 485 break; |
455 } | 493 } |
456 | 494 |
457 /* Add delay to filter out noises during initial link setup */ | 495 /* Add delay to filter out noises during initial link setup */ |
458 msec_delay(50); | 496 msec_delay(50); |
459 | 497 |
460 return (status); | 498 return status; |
461 } | 499 } |
462 | 500 |
463 /* | 501 /** |
464 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser | 502 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser |
465 * @hw: pointer to hardware structure | 503 * @hw: pointer to hardware structure |
466 * | 504 * |
467 * The base drivers may require better control over SFP+ module | 505 * The base drivers may require better control over SFP+ module |
468 * PHY states. This includes selectively shutting down the Tx | 506 * PHY states. This includes selectively shutting down the Tx |
469 * laser on the PHY, effectively halting physical link. | 507 * laser on the PHY, effectively halting physical link. |
470 */ | 508 **/ |
471 void | 509 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
472 ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | |
473 { | 510 { |
474 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | 511 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); |
475 | 512 |
476 /* | 513 /* Disable tx laser; allow 100us to go dark per spec */ |
477 * Disable tx laser; allow 100us to go dark per spec | |
478 */ | |
479 esdp_reg |= IXGBE_ESDP_SDP3; | 514 esdp_reg |= IXGBE_ESDP_SDP3; |
480 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | 515 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); |
481 IXGBE_WRITE_FLUSH(hw); | 516 IXGBE_WRITE_FLUSH(hw); |
482 usec_delay(100); | 517 usec_delay(100); |
483 } | 518 } |
484 | 519 |
485 /* | 520 /** |
486 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser | 521 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser |
487 * @hw: pointer to hardware structure | 522 * @hw: pointer to hardware structure |
488 * | 523 * |
489 * The base drivers may require better control over SFP+ module | 524 * The base drivers may require better control over SFP+ module |
490 * PHY states. This includes selectively turning on the Tx | 525 * PHY states. This includes selectively turning on the Tx |
491 * laser on the PHY, effectively starting physical link. | 526 * laser on the PHY, effectively starting physical link. |
492 */ | 527 **/ |
493 void | 528 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
494 ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | |
495 { | 529 { |
496 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | 530 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); |
497 | 531 |
498 /* | 532 /* Enable tx laser; allow 100ms to light up */ |
499 * Enable tx laser; allow 100ms to light up | |
500 */ | |
501 esdp_reg &= ~IXGBE_ESDP_SDP3; | 533 esdp_reg &= ~IXGBE_ESDP_SDP3; |
502 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); | 534 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); |
503 IXGBE_WRITE_FLUSH(hw); | 535 IXGBE_WRITE_FLUSH(hw); |
504 msec_delay(100); | 536 msec_delay(100); |
505 } | 537 } |
506 | 538 |
507 /* | 539 /** |
508 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser | 540 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser |
509 * @hw: pointer to hardware structure | 541 * @hw: pointer to hardware structure |
510 * | 542 * |
511 * When the driver changes the link speeds that it can support, | 543 * When the driver changes the link speeds that it can support, |
512 * it sets autotry_restart to true to indicate that we need to | 544 * it sets autotry_restart to TRUE to indicate that we need to |
513 * initiate a new autotry session with the link partner. To do | 545 * initiate a new autotry session with the link partner. To do |
514 * so, we set the speed then disable and re-enable the tx laser, to | 546 * so, we set the speed then disable and re-enable the tx laser, to |
515 * alert the link partner that it also needs to restart autotry on its | 547 * alert the link partner that it also needs to restart autotry on its |
516 * end. This is consistent with true clause 37 autoneg, which also | 548 * end. This is consistent with TRUE clause 37 autoneg, which also |
517 * involves a loss of signal. | 549 * involves a loss of signal. |
518 */ | 550 **/ |
519 void | 551 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) |
520 ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) | |
521 { | 552 { |
522 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); | 553 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); |
523 | 554 |
524 if (hw->mac.autotry_restart) { | 555 if (hw->mac.autotry_restart) { |
525 ixgbe_disable_tx_laser_multispeed_fiber(hw); | 556 ixgbe_disable_tx_laser_multispeed_fiber(hw); |
526 ixgbe_enable_tx_laser_multispeed_fiber(hw); | 557 ixgbe_enable_tx_laser_multispeed_fiber(hw); |
527 hw->mac.autotry_restart = false; | 558 hw->mac.autotry_restart = FALSE; |
528 } | 559 } |
529 } | 560 } |
530 | 561 |
531 /* | 562 /** |
532 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed | 563 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed |
533 * @hw: pointer to hardware structure | 564 * @hw: pointer to hardware structure |
534 * @speed: new link speed | 565 * @speed: new link speed |
535 * @autoneg: true if autonegotiation enabled | 566 * @autoneg: TRUE if autonegotiation enabled |
536 * @autoneg_wait_to_complete: true when waiting for completion is needed | 567 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed |
537 * | 568 * |
538 * Set the link speed in the AUTOC register and restarts link. | 569 * Set the link speed in the AUTOC register and restarts link. |
539 */ | 570 **/ |
540 s32 | 571 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, |
541 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, | 572 ixgbe_link_speed speed, bool autoneg, |
542 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) | 573 bool autoneg_wait_to_complete) |
543 { | 574 { |
544 s32 status = IXGBE_SUCCESS; | 575 s32 status = IXGBE_SUCCESS; |
545 ixgbe_link_speed link_speed; | 576 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; |
546 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; | 577 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; |
547 u32 speedcnt = 0; | 578 u32 speedcnt = 0; |
548 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); | 579 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); |
549 u32 i = 0; | 580 u32 i = 0; |
550 bool link_up = false; | 581 bool link_up = FALSE; |
551 bool negotiation; | 582 bool negotiation; |
552 | 583 |
553 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); | 584 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); |
554 | 585 |
555 /* Mask off requested but non-supported speeds */ | 586 /* Mask off requested but non-supported speeds */ |
556 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation); | 587 status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation); |
557 if (status != IXGBE_SUCCESS) | 588 if (status != IXGBE_SUCCESS) |
558 return (status); | 589 return status; |
559 | 590 |
560 speed &= link_speed; | 591 speed &= link_speed; |
561 | 592 |
562 /* | 593 /* |
563 * Try each speed one by one, highest priority first. We do this in | 594 * Try each speed one by one, highest priority first. We do this in |
566 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { | 597 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { |
567 speedcnt++; | 598 speedcnt++; |
568 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; | 599 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; |
569 | 600 |
570 /* If we already have link at this speed, just jump out */ | 601 /* If we already have link at this speed, just jump out */ |
571 status = ixgbe_check_link(hw, &link_speed, &link_up, false); | 602 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); |
572 if (status != IXGBE_SUCCESS) | 603 if (status != IXGBE_SUCCESS) |
573 return (status); | 604 return status; |
574 | 605 |
575 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) | 606 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) |
576 goto out; | 607 goto out; |
577 | 608 |
578 /* Set the module link speed */ | 609 /* Set the module link speed */ |
581 IXGBE_WRITE_FLUSH(hw); | 612 IXGBE_WRITE_FLUSH(hw); |
582 | 613 |
583 /* Allow module to change analog characteristics (1G->10G) */ | 614 /* Allow module to change analog characteristics (1G->10G) */ |
584 msec_delay(40); | 615 msec_delay(40); |
585 | 616 |
586 status = ixgbe_setup_mac_link_82599( | 617 status = ixgbe_setup_mac_link_82599(hw, |
587 hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg, | 618 IXGBE_LINK_SPEED_10GB_FULL, |
588 autoneg_wait_to_complete); | 619 autoneg, |
620 autoneg_wait_to_complete); | |
589 if (status != IXGBE_SUCCESS) | 621 if (status != IXGBE_SUCCESS) |
590 return (status); | 622 return status; |
591 | 623 |
592 /* Flap the tx laser if it has not already been done */ | 624 /* Flap the tx laser if it has not already been done */ |
593 ixgbe_flap_tx_laser(hw); | 625 ixgbe_flap_tx_laser(hw); |
594 | 626 |
595 /* | 627 /* |
601 /* Wait for the link partner to also set speed */ | 633 /* Wait for the link partner to also set speed */ |
602 msec_delay(100); | 634 msec_delay(100); |
603 | 635 |
604 /* If we have link, just jump out */ | 636 /* If we have link, just jump out */ |
605 status = ixgbe_check_link(hw, &link_speed, | 637 status = ixgbe_check_link(hw, &link_speed, |
606 &link_up, false); | 638 &link_up, FALSE); |
607 if (status != IXGBE_SUCCESS) | 639 if (status != IXGBE_SUCCESS) |
608 return (status); | 640 return status; |
609 | 641 |
610 if (link_up) | 642 if (link_up) |
611 goto out; | 643 goto out; |
612 } | 644 } |
613 } | 645 } |
616 speedcnt++; | 648 speedcnt++; |
617 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) | 649 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) |
618 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; | 650 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; |
619 | 651 |
620 /* If we already have link at this speed, just jump out */ | 652 /* If we already have link at this speed, just jump out */ |
621 status = ixgbe_check_link(hw, &link_speed, &link_up, false); | 653 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); |
622 if (status != IXGBE_SUCCESS) | 654 if (status != IXGBE_SUCCESS) |
623 return (status); | 655 return status; |
624 | 656 |
625 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) | 657 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) |
626 goto out; | 658 goto out; |
627 | 659 |
628 /* Set the module link speed */ | 660 /* Set the module link speed */ |
632 IXGBE_WRITE_FLUSH(hw); | 664 IXGBE_WRITE_FLUSH(hw); |
633 | 665 |
634 /* Allow module to change analog characteristics (10G->1G) */ | 666 /* Allow module to change analog characteristics (10G->1G) */ |
635 msec_delay(40); | 667 msec_delay(40); |
636 | 668 |
637 status = ixgbe_setup_mac_link_82599( | 669 status = ixgbe_setup_mac_link_82599(hw, |
638 hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg, | 670 IXGBE_LINK_SPEED_1GB_FULL, |
639 autoneg_wait_to_complete); | 671 autoneg, |
672 autoneg_wait_to_complete); | |
640 if (status != IXGBE_SUCCESS) | 673 if (status != IXGBE_SUCCESS) |
641 return (status); | 674 return status; |
642 | 675 |
643 /* Flap the tx laser if it has not already been done */ | 676 /* Flap the tx laser if it has not already been done */ |
644 ixgbe_flap_tx_laser(hw); | 677 ixgbe_flap_tx_laser(hw); |
645 | 678 |
646 /* Wait for the link partner to also set speed */ | 679 /* Wait for the link partner to also set speed */ |
647 msec_delay(100); | 680 msec_delay(100); |
648 | 681 |
649 /* If we have link, just jump out */ | 682 /* If we have link, just jump out */ |
650 status = ixgbe_check_link(hw, &link_speed, &link_up, false); | 683 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); |
651 if (status != IXGBE_SUCCESS) | 684 if (status != IXGBE_SUCCESS) |
652 return (status); | 685 return status; |
653 | 686 |
654 if (link_up) | 687 if (link_up) |
655 goto out; | 688 goto out; |
656 } | 689 } |
657 | 690 |
660 * (if there was more than one). We call ourselves back with just the | 693 * (if there was more than one). We call ourselves back with just the |
661 * single highest speed that the user requested. | 694 * single highest speed that the user requested. |
662 */ | 695 */ |
663 if (speedcnt > 1) | 696 if (speedcnt > 1) |
664 status = ixgbe_setup_mac_link_multispeed_fiber(hw, | 697 status = ixgbe_setup_mac_link_multispeed_fiber(hw, |
665 highest_link_speed, autoneg, autoneg_wait_to_complete); | 698 highest_link_speed, autoneg, autoneg_wait_to_complete); |
666 | 699 |
667 out: | 700 out: |
668 /* Set autoneg_advertised value based on input link speed */ | 701 /* Set autoneg_advertised value based on input link speed */ |
669 hw->phy.autoneg_advertised = 0; | 702 hw->phy.autoneg_advertised = 0; |
670 | 703 |
672 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; | 705 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; |
673 | 706 |
674 if (speed & IXGBE_LINK_SPEED_1GB_FULL) | 707 if (speed & IXGBE_LINK_SPEED_1GB_FULL) |
675 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; | 708 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; |
676 | 709 |
677 return (status); | 710 return status; |
678 } | 711 } |
679 | 712 |
680 /* | 713 /** |
681 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed | 714 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed |
682 * @hw: pointer to hardware structure | 715 * @hw: pointer to hardware structure |
683 * @speed: new link speed | 716 * @speed: new link speed |
684 * @autoneg: true if autonegotiation enabled | 717 * @autoneg: TRUE if autonegotiation enabled |
685 * @autoneg_wait_to_complete: true when waiting for completion is needed | 718 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed |
686 * | 719 * |
687 * Implements the Intel SmartSpeed algorithm. | 720 * Implements the Intel SmartSpeed algorithm. |
688 */ | 721 **/ |
689 s32 | 722 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, |
690 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, | 723 ixgbe_link_speed speed, bool autoneg, |
691 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) | 724 bool autoneg_wait_to_complete) |
692 { | 725 { |
693 s32 status = IXGBE_SUCCESS; | 726 s32 status = IXGBE_SUCCESS; |
694 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; | 727 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; |
695 s32 i, j; | 728 s32 i, j; |
696 bool link_up = false; | 729 bool link_up = FALSE; |
697 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); | 730 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
698 | 731 |
699 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); | 732 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); |
700 | 733 |
701 /* Set autoneg_advertised value based on input link speed */ | 734 /* Set autoneg_advertised value based on input link speed */ |
702 hw->phy.autoneg_advertised = 0; | 735 hw->phy.autoneg_advertised = 0; |
703 | 736 |
704 if (speed & IXGBE_LINK_SPEED_10GB_FULL) | 737 if (speed & IXGBE_LINK_SPEED_10GB_FULL) |
705 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; | 738 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; |
706 | 739 |
716 * highest negotiated rate. This can sometimes happen due to integrity | 749 * highest negotiated rate. This can sometimes happen due to integrity |
717 * issues with the physical media connection. | 750 * issues with the physical media connection. |
718 */ | 751 */ |
719 | 752 |
720 /* First, try to get link with full advertisement */ | 753 /* First, try to get link with full advertisement */ |
721 hw->phy.smart_speed_active = false; | 754 hw->phy.smart_speed_active = FALSE; |
722 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { | 755 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { |
723 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, | 756 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, |
724 autoneg_wait_to_complete); | 757 autoneg_wait_to_complete); |
725 if (status != IXGBE_SUCCESS) | 758 if (status != IXGBE_SUCCESS) |
726 goto out; | 759 goto out; |
727 | 760 |
728 /* | 761 /* |
729 * Wait for the controller to acquire link. Per IEEE 802.3ap, | 762 * Wait for the controller to acquire link. Per IEEE 802.3ap, |
734 for (i = 0; i < 5; i++) { | 767 for (i = 0; i < 5; i++) { |
735 msec_delay(100); | 768 msec_delay(100); |
736 | 769 |
737 /* If we have link, just jump out */ | 770 /* If we have link, just jump out */ |
738 status = ixgbe_check_link(hw, &link_speed, &link_up, | 771 status = ixgbe_check_link(hw, &link_speed, &link_up, |
739 false); | 772 FALSE); |
740 if (status != IXGBE_SUCCESS) | 773 if (status != IXGBE_SUCCESS) |
741 goto out; | 774 goto out; |
742 | 775 |
743 if (link_up) | 776 if (link_up) |
744 goto out; | 777 goto out; |
752 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || | 785 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || |
753 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) | 786 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) |
754 goto out; | 787 goto out; |
755 | 788 |
756 /* Turn SmartSpeed on to disable KR support */ | 789 /* Turn SmartSpeed on to disable KR support */ |
757 hw->phy.smart_speed_active = true; | 790 hw->phy.smart_speed_active = TRUE; |
758 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, | 791 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, |
759 autoneg_wait_to_complete); | 792 autoneg_wait_to_complete); |
760 if (status != IXGBE_SUCCESS) | 793 if (status != IXGBE_SUCCESS) |
761 goto out; | 794 goto out; |
762 | 795 |
763 /* | 796 /* |
764 * Wait for the controller to acquire link. 600ms will allow for | 797 * Wait for the controller to acquire link. 600ms will allow for |
768 */ | 801 */ |
769 for (i = 0; i < 6; i++) { | 802 for (i = 0; i < 6; i++) { |
770 msec_delay(100); | 803 msec_delay(100); |
771 | 804 |
772 /* If we have link, just jump out */ | 805 /* If we have link, just jump out */ |
773 status = ixgbe_check_link(hw, &link_speed, &link_up, false); | 806 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); |
774 if (status != IXGBE_SUCCESS) | 807 if (status != IXGBE_SUCCESS) |
775 goto out; | 808 goto out; |
776 | 809 |
777 if (link_up) | 810 if (link_up) |
778 goto out; | 811 goto out; |
779 } | 812 } |
780 | 813 |
781 /* We didn't get link. Turn SmartSpeed back off. */ | 814 /* We didn't get link. Turn SmartSpeed back off. */ |
782 hw->phy.smart_speed_active = false; | 815 hw->phy.smart_speed_active = FALSE; |
783 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, | 816 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, |
784 autoneg_wait_to_complete); | 817 autoneg_wait_to_complete); |
785 | 818 |
786 out: | 819 out: |
787 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) | 820 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) |
788 DEBUGOUT("Smartspeed has downgraded the link speed " | 821 DEBUGOUT("Smartspeed has downgraded the link speed " |
789 "from the maximum advertised\n"); | 822 "from the maximum advertised\n"); |
790 return (status); | 823 return status; |
791 } | 824 } |
792 | 825 |
793 /* | 826 /** |
794 * ixgbe_setup_mac_link_82599 - Set MAC link speed | 827 * ixgbe_setup_mac_link_82599 - Set MAC link speed |
795 * @hw: pointer to hardware structure | 828 * @hw: pointer to hardware structure |
796 * @speed: new link speed | 829 * @speed: new link speed |
797 * @autoneg: true if autonegotiation enabled | 830 * @autoneg: TRUE if autonegotiation enabled |
798 * @autoneg_wait_to_complete: true when waiting for completion is needed | 831 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed |
799 * | 832 * |
800 * Set the link speed in the AUTOC register and restarts link. | 833 * Set the link speed in the AUTOC register and restarts link. |
801 */ | 834 **/ |
802 s32 | 835 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, |
803 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, | 836 ixgbe_link_speed speed, bool autoneg, |
804 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) | 837 bool autoneg_wait_to_complete) |
805 { | 838 { |
806 s32 status = IXGBE_SUCCESS; | 839 s32 status = IXGBE_SUCCESS; |
807 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); | 840 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
808 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); | 841 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); |
809 u32 start_autoc = autoc; | 842 u32 start_autoc = autoc; |
827 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { | 860 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { |
828 status = IXGBE_ERR_LINK_SETUP; | 861 status = IXGBE_ERR_LINK_SETUP; |
829 goto out; | 862 goto out; |
830 } | 863 } |
831 | 864 |
832 /* | 865 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ |
833 * Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support | |
834 */ | |
835 if (hw->mac.orig_link_settings_stored) | 866 if (hw->mac.orig_link_settings_stored) |
836 orig_autoc = hw->mac.orig_autoc; | 867 orig_autoc = hw->mac.orig_autoc; |
837 else | 868 else |
838 orig_autoc = autoc; | 869 orig_autoc = autoc; |
839 | 870 |
844 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); | 875 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); |
845 if (speed & IXGBE_LINK_SPEED_10GB_FULL) | 876 if (speed & IXGBE_LINK_SPEED_10GB_FULL) |
846 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) | 877 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) |
847 autoc |= IXGBE_AUTOC_KX4_SUPP; | 878 autoc |= IXGBE_AUTOC_KX4_SUPP; |
848 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && | 879 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && |
849 (hw->phy.smart_speed_active == false)) | 880 (hw->phy.smart_speed_active == FALSE)) |
850 autoc |= IXGBE_AUTOC_KR_SUPP; | 881 autoc |= IXGBE_AUTOC_KR_SUPP; |
851 if (speed & IXGBE_LINK_SPEED_1GB_FULL) | 882 if (speed & IXGBE_LINK_SPEED_1GB_FULL) |
852 autoc |= IXGBE_AUTOC_KX_SUPP; | 883 autoc |= IXGBE_AUTOC_KX_SUPP; |
853 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && | 884 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && |
854 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || | 885 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || |
855 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { | 886 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { |
856 /* Switch from 1G SFI to 10G SFI if requested */ | 887 /* Switch from 1G SFI to 10G SFI if requested */ |
857 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && | 888 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && |
858 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { | 889 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { |
859 autoc &= ~IXGBE_AUTOC_LMS_MASK; | 890 autoc &= ~IXGBE_AUTOC_LMS_MASK; |
860 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; | 891 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; |
861 } | 892 } |
862 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && | 893 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && |
863 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { | 894 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { |
864 /* Switch from 10G SFI to 1G SFI if requested */ | 895 /* Switch from 10G SFI to 1G SFI if requested */ |
865 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && | 896 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && |
866 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { | 897 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { |
867 autoc &= ~IXGBE_AUTOC_LMS_MASK; | 898 autoc &= ~IXGBE_AUTOC_LMS_MASK; |
868 if (autoneg) | 899 if (autoneg) |
869 autoc |= IXGBE_AUTOC_LMS_1G_AN; | 900 autoc |= IXGBE_AUTOC_LMS_1G_AN; |
870 else | 901 else |
871 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; | 902 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; |
872 } | 903 } |
873 } | 904 } |
874 | 905 |
875 if (autoc != start_autoc) { | 906 if (autoc != start_autoc) { |
876 /* Restart link */ | 907 /* Restart link */ |
877 autoc |= IXGBE_AUTOC_AN_RESTART; | 908 autoc |= IXGBE_AUTOC_AN_RESTART; |
880 /* Only poll for autoneg to complete if specified to do so */ | 911 /* Only poll for autoneg to complete if specified to do so */ |
881 if (autoneg_wait_to_complete) { | 912 if (autoneg_wait_to_complete) { |
882 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || | 913 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || |
883 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || | 914 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || |
884 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { | 915 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { |
885 links_reg = 0; /* Just in case Autoneg time=0 */ | 916 links_reg = 0; /*Just in case Autoneg time=0*/ |
886 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { | 917 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { |
887 links_reg = | 918 links_reg = |
888 IXGBE_READ_REG(hw, IXGBE_LINKS); | 919 IXGBE_READ_REG(hw, IXGBE_LINKS); |
889 if (links_reg & IXGBE_LINKS_KX_AN_COMP) | 920 if (links_reg & IXGBE_LINKS_KX_AN_COMP) |
890 break; | 921 break; |
891 msec_delay(100); | 922 msec_delay(100); |
892 } | 923 } |
893 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { | 924 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { |
894 status = | 925 status = |
895 IXGBE_ERR_AUTONEG_NOT_COMPLETE; | 926 IXGBE_ERR_AUTONEG_NOT_COMPLETE; |
896 DEBUGOUT("Autoneg did not complete.\n"); | 927 DEBUGOUT("Autoneg did not complete.\n"); |
897 } | 928 } |
898 } | 929 } |
899 } | 930 } |
900 | 931 |
901 /* Add delay to filter out noises during initial link setup */ | 932 /* Add delay to filter out noises during initial link setup */ |
902 msec_delay(50); | 933 msec_delay(50); |
903 } | 934 } |
904 | 935 |
905 out: | 936 out: |
906 return (status); | 937 return status; |
907 } | 938 } |
908 | 939 |
909 /* | 940 /** |
910 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field | 941 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field |
911 * @hw: pointer to hardware structure | 942 * @hw: pointer to hardware structure |
912 * @speed: new link speed | 943 * @speed: new link speed |
913 * @autoneg: true if autonegotiation enabled | 944 * @autoneg: TRUE if autonegotiation enabled |
914 * @autoneg_wait_to_complete: true if waiting is needed to complete | 945 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete |
915 * | 946 * |
916 * Restarts link on PHY and MAC based on settings passed in. | 947 * Restarts link on PHY and MAC based on settings passed in. |
917 */ | 948 **/ |
918 static s32 | 949 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, |
919 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, | 950 ixgbe_link_speed speed, |
920 ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) | 951 bool autoneg, |
952 bool autoneg_wait_to_complete) | |
921 { | 953 { |
922 s32 status; | 954 s32 status; |
923 | 955 |
924 DEBUGFUNC("ixgbe_setup_copper_link_82599"); | 956 DEBUGFUNC("ixgbe_setup_copper_link_82599"); |
925 | 957 |
926 /* Setup the PHY according to input speed */ | 958 /* Setup the PHY according to input speed */ |
927 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, | 959 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, |
928 autoneg_wait_to_complete); | 960 autoneg_wait_to_complete); |
929 /* Set up MAC */ | 961 /* Set up MAC */ |
930 (void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); | 962 (void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); |
931 | 963 |
932 return (status); | 964 return status; |
933 } | 965 } |
934 /* | 966 |
935 * ixgbe_reset_hw_82599 - Perform hardware reset | 967 /** |
936 * @hw: pointer to hardware structure | 968 * ixgbe_reset_hw_82599 - Perform hardware reset |
937 * | 969 * @hw: pointer to hardware structure |
938 * Resets the hardware by resetting the transmit and receive units, masks | 970 * |
939 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) | 971 * Resets the hardware by resetting the transmit and receive units, masks |
940 * reset. | 972 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) |
941 */ | 973 * reset. |
942 s32 | 974 **/ |
943 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) | 975 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) |
944 { | 976 { |
945 s32 status = IXGBE_SUCCESS; | 977 s32 status = IXGBE_SUCCESS; |
946 u32 ctrl; | 978 u32 ctrl; |
947 u32 i; | 979 u32 i; |
948 u32 autoc; | 980 u32 autoc; |
962 goto reset_hw_out; | 994 goto reset_hw_out; |
963 | 995 |
964 /* Setup SFP module if there is one present. */ | 996 /* Setup SFP module if there is one present. */ |
965 if (hw->phy.sfp_setup_needed) { | 997 if (hw->phy.sfp_setup_needed) { |
966 status = hw->mac.ops.setup_sfp(hw); | 998 status = hw->mac.ops.setup_sfp(hw); |
967 hw->phy.sfp_setup_needed = false; | 999 hw->phy.sfp_setup_needed = FALSE; |
968 } | 1000 } |
969 | 1001 |
970 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) | 1002 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) |
971 goto reset_hw_out; | 1003 goto reset_hw_out; |
972 | 1004 |
973 /* Reset PHY */ | 1005 /* Reset PHY */ |
974 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) | 1006 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL) |
975 hw->phy.ops.reset(hw); | 1007 hw->phy.ops.reset(hw); |
976 | 1008 |
977 /* | 1009 /* |
978 * Prevent the PCI-E bus from from hanging by disabling PCI-E master | 1010 * Prevent the PCI-E bus from from hanging by disabling PCI-E master |
979 * access and verify no pending requests before reset | 1011 * access and verify no pending requests before reset |
991 | 1023 |
992 /* Poll for reset bit to self-clear indicating reset is complete */ | 1024 /* Poll for reset bit to self-clear indicating reset is complete */ |
993 for (i = 0; i < 10; i++) { | 1025 for (i = 0; i < 10; i++) { |
994 usec_delay(1); | 1026 usec_delay(1); |
995 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); | 1027 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); |
996 if (!(ctrl & IXGBE_CTRL_RST)) { | 1028 if (!(ctrl & IXGBE_CTRL_RST)) |
997 break; | 1029 break; |
998 } | |
999 } | 1030 } |
1000 if (ctrl & IXGBE_CTRL_RST) { | 1031 if (ctrl & IXGBE_CTRL_RST) { |
1001 status = IXGBE_ERR_RESET_FAILED; | 1032 status = IXGBE_ERR_RESET_FAILED; |
1002 DEBUGOUT("Reset polling failed to complete.\n"); | 1033 DEBUGOUT("Reset polling failed to complete.\n"); |
1003 } | 1034 } |
1022 * stored off yet. Otherwise restore the stored original | 1053 * stored off yet. Otherwise restore the stored original |
1023 * values since the reset operation sets back to defaults. | 1054 * values since the reset operation sets back to defaults. |
1024 */ | 1055 */ |
1025 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); | 1056 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
1026 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); | 1057 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); |
1027 if (hw->mac.orig_link_settings_stored == false) { | 1058 if (hw->mac.orig_link_settings_stored == FALSE) { |
1028 hw->mac.orig_autoc = autoc; | 1059 hw->mac.orig_autoc = autoc; |
1029 hw->mac.orig_autoc2 = autoc2; | 1060 hw->mac.orig_autoc2 = autoc2; |
1030 hw->mac.orig_link_settings_stored = true; | 1061 hw->mac.orig_link_settings_stored = TRUE; |
1031 } else { | 1062 } else { |
1032 if (autoc != hw->mac.orig_autoc) { | 1063 if (autoc != hw->mac.orig_autoc) |
1033 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | | 1064 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | |
1034 IXGBE_AUTOC_AN_RESTART)); | 1065 IXGBE_AUTOC_AN_RESTART)); |
1035 } | |
1036 | 1066 |
1037 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != | 1067 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != |
1038 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { | 1068 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { |
1039 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; | 1069 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; |
1040 autoc2 |= (hw->mac.orig_autoc2 & | 1070 autoc2 |= (hw->mac.orig_autoc2 & |
1041 IXGBE_AUTOC2_UPPER_MASK); | 1071 IXGBE_AUTOC2_UPPER_MASK); |
1042 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); | 1072 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); |
1043 } | 1073 } |
1044 } | 1074 } |
1045 | 1075 |
1046 /* Store the permanent mac address */ | 1076 /* Store the permanent mac address */ |
1058 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); | 1088 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); |
1059 | 1089 |
1060 /* Add the SAN MAC address to the RAR only if it's a valid address */ | 1090 /* Add the SAN MAC address to the RAR only if it's a valid address */ |
1061 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { | 1091 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { |
1062 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, | 1092 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, |
1063 hw->mac.san_addr, 0, IXGBE_RAH_AV); | 1093 hw->mac.san_addr, 0, IXGBE_RAH_AV); |
1064 | 1094 |
1065 /* Reserve the last RAR for the SAN MAC address */ | 1095 /* Reserve the last RAR for the SAN MAC address */ |
1066 hw->mac.num_rar_entries--; | 1096 hw->mac.num_rar_entries--; |
1067 } | 1097 } |
1068 | 1098 |
1069 /* Store the alternative WWNN/WWPN prefix */ | 1099 /* Store the alternative WWNN/WWPN prefix */ |
1070 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, | 1100 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, |
1071 &hw->mac.wwpn_prefix); | 1101 &hw->mac.wwpn_prefix); |
1072 | 1102 |
1073 reset_hw_out: | 1103 reset_hw_out: |
1074 return (status); | 1104 return status; |
1075 } | 1105 } |
1076 | 1106 |
1077 /* | 1107 /** |
1078 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. | 1108 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. |
1079 * @hw: pointer to hardware structure | 1109 * @hw: pointer to hardware structure |
1080 */ | 1110 **/ |
1081 s32 | 1111 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) |
1082 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) | |
1083 { | 1112 { |
1084 int i; | 1113 int i; |
1085 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); | 1114 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); |
1086 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; | 1115 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; |
1087 | 1116 |
1091 * Before starting reinitialization process, | 1120 * Before starting reinitialization process, |
1092 * FDIRCMD.CMD must be zero. | 1121 * FDIRCMD.CMD must be zero. |
1093 */ | 1122 */ |
1094 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { | 1123 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { |
1095 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & | 1124 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & |
1096 IXGBE_FDIRCMD_CMD_MASK)) | 1125 IXGBE_FDIRCMD_CMD_MASK)) |
1097 break; | 1126 break; |
1098 usec_delay(10); | 1127 usec_delay(10); |
1099 } | 1128 } |
1100 if (i >= IXGBE_FDIRCMD_CMD_POLL) { | 1129 if (i >= IXGBE_FDIRCMD_CMD_POLL) { |
1101 DEBUGOUT("Flow Director previous command isn't complete, " | 1130 DEBUGOUT("Flow Director previous command isn't complete, " |
1102 "aborting table re-initialization. \n"); | 1131 "aborting table re-initialization. \n"); |
1103 return (IXGBE_ERR_FDIR_REINIT_FAILED); | 1132 return IXGBE_ERR_FDIR_REINIT_FAILED; |
1104 } | 1133 } |
1105 | 1134 |
1106 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); | 1135 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); |
1107 IXGBE_WRITE_FLUSH(hw); | 1136 IXGBE_WRITE_FLUSH(hw); |
1108 /* | 1137 /* |
1111 * before re-writing the FDIRCTRL control register with the same value. | 1140 * before re-writing the FDIRCTRL control register with the same value. |
1112 * - write 1 to bit 8 of FDIRCMD register & | 1141 * - write 1 to bit 8 of FDIRCMD register & |
1113 * - write 0 to bit 8 of FDIRCMD register | 1142 * - write 0 to bit 8 of FDIRCMD register |
1114 */ | 1143 */ |
1115 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, | 1144 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, |
1116 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | | 1145 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | |
1117 IXGBE_FDIRCMD_CLEARHT)); | 1146 IXGBE_FDIRCMD_CLEARHT)); |
1118 IXGBE_WRITE_FLUSH(hw); | 1147 IXGBE_WRITE_FLUSH(hw); |
1119 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, | 1148 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, |
1120 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & | 1149 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & |
1121 ~IXGBE_FDIRCMD_CLEARHT)); | 1150 ~IXGBE_FDIRCMD_CLEARHT)); |
1122 IXGBE_WRITE_FLUSH(hw); | 1151 IXGBE_WRITE_FLUSH(hw); |
1123 /* | 1152 /* |
1124 * Clear FDIR Hash register to clear any leftover hashes | 1153 * Clear FDIR Hash register to clear any leftover hashes |
1125 * waiting to be programmed. | 1154 * waiting to be programmed. |
1126 */ | 1155 */ |
1131 IXGBE_WRITE_FLUSH(hw); | 1160 IXGBE_WRITE_FLUSH(hw); |
1132 | 1161 |
1133 /* Poll init-done after we write FDIRCTRL register */ | 1162 /* Poll init-done after we write FDIRCTRL register */ |
1134 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { | 1163 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { |
1135 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & | 1164 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & |
1136 IXGBE_FDIRCTRL_INIT_DONE) | 1165 IXGBE_FDIRCTRL_INIT_DONE) |
1137 break; | 1166 break; |
1138 usec_delay(10); | 1167 usec_delay(10); |
1139 } | 1168 } |
1140 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { | 1169 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { |
1141 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); | 1170 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); |
1142 return (IXGBE_ERR_FDIR_REINIT_FAILED); | 1171 return IXGBE_ERR_FDIR_REINIT_FAILED; |
1143 } | 1172 } |
1144 | 1173 |
1145 /* Clear FDIR statistics registers (read to clear) */ | 1174 /* Clear FDIR statistics registers (read to clear) */ |
1146 (void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); | 1175 (void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); |
1147 (void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); | 1176 (void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); |
1148 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); | 1177 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); |
1149 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS); | 1178 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS); |
1150 (void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN); | 1179 (void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN); |
1151 | 1180 |
1152 return (IXGBE_SUCCESS); | 1181 return IXGBE_SUCCESS; |
1153 } | 1182 } |
1154 | 1183 |
1155 /* | 1184 /** |
1156 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters | 1185 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters |
1157 * @hw: pointer to hardware structure | 1186 * @hw: pointer to hardware structure |
1158 * @pballoc: which mode to allocate filters with | 1187 * @pballoc: which mode to allocate filters with |
1159 */ | 1188 **/ |
1160 s32 | 1189 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) |
1161 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) | |
1162 { | 1190 { |
1163 u32 fdirctrl = 0; | 1191 u32 fdirctrl = 0; |
1164 u32 pbsize; | 1192 u32 pbsize; |
1165 int i; | 1193 int i; |
1166 | 1194 |
1171 * must be reduced. The new value is the current size minus | 1199 * must be reduced. The new value is the current size minus |
1172 * flow director memory usage size. | 1200 * flow director memory usage size. |
1173 */ | 1201 */ |
1174 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); | 1202 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); |
1175 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), | 1203 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), |
1176 IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize); | 1204 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); |
1177 | 1205 |
1178 /* | 1206 /* |
1179 * The defaults in the HW for RX PB 1-7 are not zero and so should be | 1207 * The defaults in the HW for RX PB 1-7 are not zero and so should be |
1180 * initialized to zero for non DCB mode otherwise actual total RX PB | 1208 * initialized to zero for non DCB mode otherwise actual total RX PB |
1181 * would be bigger than programmed and filter space would run into | 1209 * would be bigger than programmed and filter space would run into |
1203 /* 32k - 1 signature filters */ | 1231 /* 32k - 1 signature filters */ |
1204 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; | 1232 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; |
1205 break; | 1233 break; |
1206 default: | 1234 default: |
1207 /* bad value */ | 1235 /* bad value */ |
1208 return (IXGBE_ERR_CONFIG); | 1236 return IXGBE_ERR_CONFIG; |
1209 }; | 1237 }; |
1210 | 1238 |
1211 /* Move the flexible bytes to use the ethertype - shift 6 words */ | 1239 /* Move the flexible bytes to use the ethertype - shift 6 words */ |
1212 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); | 1240 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); |
1213 | 1241 |
1242 | |
1214 /* Prime the keys for hashing */ | 1243 /* Prime the keys for hashing */ |
1215 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, | 1244 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); |
1216 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY)); | 1245 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); |
1217 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, | |
1218 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY)); | |
1219 | 1246 |
1220 /* | 1247 /* |
1221 * Poll init-done after we write the register. Estimated times: | 1248 * Poll init-done after we write the register. Estimated times: |
1222 * 10G: PBALLOC = 11b, timing is 60us | 1249 * 10G: PBALLOC = 11b, timing is 60us |
1223 * 1G: PBALLOC = 11b, timing is 600us | 1250 * 1G: PBALLOC = 11b, timing is 600us |
1224 * 100M: PBALLOC = 11b, timing is 6ms | 1251 * 100M: PBALLOC = 11b, timing is 6ms |
1225 * | 1252 * |
1226 * Multiply these timings by 4 if under full Rx load | 1253 * Multiply these timings by 4 if under full Rx load |
1227 * | 1254 * |
1228 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for | 1255 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for |
1229 * 1 msec per poll time. If we're at line rate and drop to 100M, then | 1256 * 1 msec per poll time. If we're at line rate and drop to 100M, then |
1230 * this might not finish in our poll time, but we can live with that | 1257 * this might not finish in our poll time, but we can live with that |
1231 * for now. | 1258 * for now. |
1232 */ | 1259 */ |
1233 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); | 1260 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); |
1234 IXGBE_WRITE_FLUSH(hw); | 1261 IXGBE_WRITE_FLUSH(hw); |
1235 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { | 1262 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { |
1236 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & | 1263 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & |
1237 IXGBE_FDIRCTRL_INIT_DONE) | 1264 IXGBE_FDIRCTRL_INIT_DONE) |
1238 break; | 1265 break; |
1239 | |
1240 msec_delay(1); | 1266 msec_delay(1); |
1241 } | 1267 } |
1242 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { | 1268 if (i >= IXGBE_FDIR_INIT_DONE_POLL) |
1243 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); | 1269 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); |
1244 } | 1270 |
1245 | 1271 return IXGBE_SUCCESS; |
1246 return (IXGBE_SUCCESS); | 1272 } |
1247 } | 1273 |
1248 | 1274 /** |
1249 /* | 1275 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters |
1250 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters | 1276 * @hw: pointer to hardware structure |
1251 * @hw: pointer to hardware structure | 1277 * @pballoc: which mode to allocate filters with |
1252 * @pballoc: which mode to allocate filters with | 1278 **/ |
1253 */ | 1279 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) |
1254 s32 | |
1255 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) | |
1256 { | 1280 { |
1257 u32 fdirctrl = 0; | 1281 u32 fdirctrl = 0; |
1258 u32 pbsize; | 1282 u32 pbsize; |
1259 int i; | 1283 int i; |
1260 | 1284 |
1263 /* | 1287 /* |
1264 * Before enabling Flow Director, the Rx Packet Buffer size | 1288 * Before enabling Flow Director, the Rx Packet Buffer size |
1265 * must be reduced. The new value is the current size minus | 1289 * must be reduced. The new value is the current size minus |
1266 * flow director memory usage size. | 1290 * flow director memory usage size. |
1267 */ | 1291 */ |
1268 | |
1269 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); | 1292 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); |
1270 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), | 1293 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), |
1271 IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize); | 1294 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); |
1272 | 1295 |
1273 /* | 1296 /* |
1274 * The defaults in the HW for RX PB 1-7 are not zero and so should be | 1297 * The defaults in the HW for RX PB 1-7 are not zero and so should be |
1275 * initialized to zero for non DCB mode otherwise actual total RX PB | 1298 * initialized to zero for non DCB mode otherwise actual total RX PB |
1276 * would be bigger than programmed and filter space would run into | 1299 * would be bigger than programmed and filter space would run into |
1298 /* 8k - 1 perfect filters */ | 1321 /* 8k - 1 perfect filters */ |
1299 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; | 1322 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; |
1300 break; | 1323 break; |
1301 default: | 1324 default: |
1302 /* bad value */ | 1325 /* bad value */ |
1303 return (IXGBE_ERR_CONFIG); | 1326 return IXGBE_ERR_CONFIG; |
1304 }; | 1327 }; |
1305 | 1328 |
1306 /* Turn perfect match filtering on */ | 1329 /* Turn perfect match filtering on */ |
1307 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; | 1330 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; |
1308 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; | 1331 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; |
1309 | 1332 |
1310 /* Move the flexible bytes to use the ethertype - shift 6 words */ | 1333 /* Move the flexible bytes to use the ethertype - shift 6 words */ |
1311 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); | 1334 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); |
1312 | 1335 |
1313 /* Prime the keys for hashing */ | 1336 /* Prime the keys for hashing */ |
1314 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, | 1337 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); |
1315 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY)); | 1338 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,IXGBE_ATR_SIGNATURE_HASH_KEY); |
1316 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, | |
1317 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY)); | |
1318 | 1339 |
1319 /* | 1340 /* |
1320 * Poll init-done after we write the register. Estimated times: | 1341 * Poll init-done after we write the register. Estimated times: |
1321 * 10G: PBALLOC = 11b, timing is 60us | 1342 * 10G: PBALLOC = 11b, timing is 60us |
1322 * 1G: PBALLOC = 11b, timing is 600us | 1343 * 1G: PBALLOC = 11b, timing is 600us |
1323 * 100M: PBALLOC = 11b, timing is 6ms | 1344 * 100M: PBALLOC = 11b, timing is 6ms |
1324 * | 1345 * |
1325 * Multiply these timings by 4 if under full Rx load | 1346 * Multiply these timings by 4 if under full Rx load |
1326 * | 1347 * |
1327 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for | 1348 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for |
1328 * 1 msec per poll time. If we're at line rate and drop to 100M, then | 1349 * 1 msec per poll time. If we're at line rate and drop to 100M, then |
1329 * this might not finish in our poll time, but we can live with that | 1350 * this might not finish in our poll time, but we can live with that |
1330 * for now. | 1351 * for now. |
1335 | 1356 |
1336 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); | 1357 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); |
1337 IXGBE_WRITE_FLUSH(hw); | 1358 IXGBE_WRITE_FLUSH(hw); |
1338 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { | 1359 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { |
1339 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & | 1360 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & |
1340 IXGBE_FDIRCTRL_INIT_DONE) | 1361 IXGBE_FDIRCTRL_INIT_DONE) |
1341 break; | 1362 break; |
1342 | |
1343 msec_delay(1); | 1363 msec_delay(1); |
1344 } | 1364 } |
1345 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { | 1365 if (i >= IXGBE_FDIR_INIT_DONE_POLL) |
1346 DEBUGOUT("Flow Director Perfect poll time exceeded!\n"); | 1366 DEBUGOUT("Flow Director Perfect poll time exceeded!\n"); |
1347 } | 1367 |
1348 | 1368 return IXGBE_SUCCESS; |
1349 return (IXGBE_SUCCESS); | 1369 } |
1350 } | 1370 |
1351 | 1371 /** |
1352 /* | 1372 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR |
1353 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR | 1373 * @stream: input bitstream to compute the hash on |
1354 * @stream: input bitstream to compute the hash on | 1374 * @key: 32-bit hash key |
1355 * @key: 32-bit hash key | 1375 **/ |
1356 */ | 1376 u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, |
1357 u16 | 1377 u32 key) |
1358 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key) | |
1359 { | 1378 { |
1360 /* | 1379 /* |
1361 * The algorithm is as follows: | 1380 * The algorithm is as follows: |
1362 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 | 1381 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 |
1363 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] | 1382 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] |
1364 * and A[n] x B[n] is bitwise AND between same length strings | 1383 * and A[n] x B[n] is bitwise AND between same length strings |
1365 * | 1384 * |
1366 * K[n] is 16 bits, defined as: | 1385 * K[n] is 16 bits, defined as: |
1367 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] | 1386 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] |
1368 * for n modulo 32 < 15, K[n] = | 1387 * for n modulo 32 < 15, K[n] = |
1369 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] | 1388 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] |
1370 * | 1389 * |
1371 * S[n] is 16 bits, defined as: | 1390 * S[n] is 16 bits, defined as: |
1372 * for n >= 15, S[n] = S[n:n - 15] | 1391 * for n >= 15, S[n] = S[n:n - 15] |
1373 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] | 1392 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] |
1374 * | 1393 * |
1375 * To simplify for programming, the algorithm is implemented | 1394 * To simplify for programming, the algorithm is implemented |
1376 * in software this way: | 1395 * in software this way: |
1377 * | 1396 * |
1378 * Key[31:0], Stream[335:0] | 1397 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] |
1379 * | 1398 * |
1380 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times | 1399 * for (i = 0; i < 352; i+=32) |
1381 * int_key[350:0] = tmp_key[351:1] | 1400 * hi_hash_dword[31:0] ^= Stream[(i+31):i]; |
1382 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] | |
1383 * | 1401 * |
1384 * hash[15:0] = 0; | 1402 * lo_hash_dword[15:0] ^= Stream[15:0]; |
1385 * for (i = 0; i < 351; i++) { | 1403 * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; |
1386 * if (int_key[i]) | 1404 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; |
1387 * hash ^= int_stream[(i + 15):i]; | 1405 * |
1406 * hi_hash_dword[31:0] ^= Stream[351:320]; | |
1407 * | |
1408 * if(key[0]) | |
1409 * hash[15:0] ^= Stream[15:0]; | |
1410 * | |
1411 * for (i = 0; i < 16; i++) { | |
1412 * if (key[i]) | |
1413 * hash[15:0] ^= lo_hash_dword[(i+15):i]; | |
1414 * if (key[i + 16]) | |
1415 * hash[15:0] ^= hi_hash_dword[(i+15):i]; | |
1388 * } | 1416 * } |
1389 */ | 1417 * |
1390 | 1418 */ |
1391 union { | 1419 __be32 common_hash_dword = 0; |
1392 u64 fill[6]; | 1420 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; |
1393 u32 key[11]; | 1421 u32 hash_result = 0; |
1394 u8 key_stream[44]; | 1422 u8 i; |
1395 } tmp_key; | 1423 |
1396 | 1424 /* record the flow_vm_vlan bits as they are a key part to the hash */ |
1397 u8 *stream = (u8 *)atr_input; | 1425 flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]); |
1398 u8 int_key[44]; /* upper-most bit unused */ | 1426 |
1399 u8 hash_str[46]; /* upper-most 2 bits unused */ | 1427 /* generate common hash dword */ |
1400 u16 hash_result = 0; | 1428 for (i = 10; i; i -= 2) |
1401 int i, j, k, h; | 1429 common_hash_dword ^= atr_input->dword_stream[i] ^ |
1402 | 1430 atr_input->dword_stream[i - 1]; |
1403 DEBUGFUNC("ixgbe_atr_compute_hash_82599"); | 1431 |
1404 | 1432 hi_hash_dword = IXGBE_NTOHL(common_hash_dword); |
1405 /* | 1433 |
1406 * Initialize the fill member to prevent warnings | 1434 /* low dword is word swapped version of common */ |
1407 * on some compilers | 1435 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); |
1408 */ | 1436 |
1409 tmp_key.fill[0] = 0; | 1437 /* apply flow ID/VM pool/VLAN ID bits to hash words */ |
1410 | 1438 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); |
1411 /* First load the temporary key stream */ | 1439 |
1412 for (i = 0; i < 6; i++) { | 1440 /* Process bits 0 and 16 */ |
1413 u64 fillkey = ((u64)key << 32) | key; | 1441 if (key & 0x0001) hash_result ^= lo_hash_dword; |
1414 tmp_key.fill[i] = fillkey; | 1442 if (key & 0x00010000) hash_result ^= hi_hash_dword; |
1415 } | 1443 |
1416 | 1444 /* |
1417 /* | 1445 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to |
1418 * Set the interim key for the hashing. Bit 352 is unused, so we must | 1446 * delay this because bit 0 of the stream should not be processed |
1419 * shift and compensate when building the key. | 1447 * so we do not add the vlan until after bit 0 was processed |
1420 */ | 1448 */ |
1421 int_key[0] = tmp_key.key_stream[0] >> 1; | 1449 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); |
1422 for (i = 1, j = 0; i < 44; i++) { | 1450 |
1423 unsigned int this_key = tmp_key.key_stream[j] << 7; | 1451 |
1424 j++; | 1452 /* process the remaining 30 bits in the key 2 bits at a time */ |
1425 int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1)); | 1453 for (i = 15; i; i-- ) { |
1426 } | 1454 if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; |
1427 | 1455 if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; |
1428 /* | 1456 } |
1429 * Set the interim bit string for the hashing. Bits 368 and 367 are | 1457 |
1430 * unused, so shift and compensate when building the string. | 1458 return hash_result & IXGBE_ATR_HASH_MASK; |
1431 */ | |
1432 hash_str[0] = (stream[40] & 0x7f) >> 1; | |
1433 for (i = 1, j = 40; i < 46; i++) { | |
1434 unsigned int this_str = stream[j] << 7; | |
1435 j++; | |
1436 if (j > 41) | |
1437 j = 0; | |
1438 hash_str[i] = (u8)(this_str | (stream[j] >> 1)); | |
1439 } | |
1440 | |
1441 /* | |
1442 * Now compute the hash. i is the index into hash_str, j is into our | |
1443 * key stream, k is counting the number of bits, and h interates within | |
1444 * each byte. | |
1445 */ | |
1446 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { | |
1447 for (h = 0; h < 8 && k < 351; h++, k++) { | |
1448 if (int_key[j] & (1 << h)) { | |
1449 /* | |
1450 * Key bit is set, XOR in the current 16-bit | |
1451 * string. Example of processing: | |
1452 * h = 0, | |
1453 * tmp = (hash_str[i - 2] & 0 << 16) | | |
1454 * (hash_str[i - 1] & 0xff << 8) | | |
1455 * (hash_str[i] & 0xff >> 0) | |
1456 * So tmp = hash_str[15 + k:k], since the | |
1457 * i + 2 clause rolls off the 16-bit value | |
1458 * h = 7, | |
1459 * tmp = (hash_str[i - 2] & 0x7f << 9) | | |
1460 * (hash_str[i - 1] & 0xff << 1) | | |
1461 * (hash_str[i] & 0x80 >> 7) | |
1462 */ | |
1463 int tmp = (hash_str[i] >> h); | |
1464 tmp |= (hash_str[i - 1] << (8 - h)); | |
1465 tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1)) | |
1466 << (16 - h); | |
1467 hash_result ^= (u16)tmp; | |
1468 } | |
1469 } | |
1470 } | |
1471 | |
1472 return (hash_result); | |
1473 } | 1459 } |
1474 | 1460 |
1475 /* | 1461 /* |
1476 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream | 1462 * These defines allow us to quickly generate all of the necessary instructions |
1477 * @input: input stream to modify | 1463 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION |
1478 * @vlan: the VLAN id to load | 1464 * for values 0 through 15 |
1479 */ | 1465 */ |
1480 s32 | 1466 #define IXGBE_ATR_COMMON_HASH_KEY \ |
1481 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan) | 1467 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) |
1482 { | 1468 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ |
1483 DEBUGFUNC("ixgbe_atr_set_vlan_id_82599"); | 1469 { \ |
1484 | 1470 u32 n = (_n); \ |
1485 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8; | 1471 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ |
1486 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff; | 1472 common_hash ^= lo_hash_dword >> n; \ |
1487 | 1473 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ |
1488 return (IXGBE_SUCCESS); | 1474 bucket_hash ^= lo_hash_dword >> n; \ |
1489 } | 1475 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ |
1490 | 1476 sig_hash ^= lo_hash_dword << (16 - n); \ |
1491 /* | 1477 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ |
1492 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address | 1478 common_hash ^= hi_hash_dword >> n; \ |
1493 * @input: input stream to modify | 1479 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ |
1494 * @src_addr: the IP address to load | 1480 bucket_hash ^= hi_hash_dword >> n; \ |
1495 */ | 1481 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ |
1496 s32 | 1482 sig_hash ^= hi_hash_dword << (16 - n); \ |
1497 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr) | 1483 } |
1498 { | 1484 |
1499 DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599"); | 1485 /** |
1500 | 1486 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash |
1501 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24; | 1487 * @stream: input bitstream to compute the hash on |
1502 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] = | 1488 * |
1503 (src_addr >> 16) & 0xff; | 1489 * This function is almost identical to the function above but contains |
1504 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] = | 1490 * several optimizations such as unwinding all of the loops, letting the |
1505 (src_addr >> 8) & 0xff; | 1491 * compiler work out all of the conditional ifs since the keys are static |
1506 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff; | 1492 * defines, and computing two keys at once since the hashed dword stream |
1507 | 1493 * will be the same for both keys. |
1508 return (IXGBE_SUCCESS); | 1494 **/ |
1509 } | 1495 static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, |
1510 | 1496 union ixgbe_atr_hash_dword common) |
1511 /* | 1497 { |
1512 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address | 1498 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; |
1513 * @input: input stream to modify | 1499 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; |
1514 * @dst_addr: the IP address to load | 1500 |
1515 */ | 1501 /* record the flow_vm_vlan bits as they are a key part to the hash */ |
1516 s32 | 1502 flow_vm_vlan = IXGBE_NTOHL(input.dword); |
1517 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr) | 1503 |
1518 { | 1504 /* generate common hash dword */ |
1519 DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599"); | 1505 hi_hash_dword = IXGBE_NTOHL(common.dword); |
1520 | 1506 |
1521 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24; | 1507 /* low dword is word swapped version of common */ |
1522 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] = | 1508 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); |
1523 (dst_addr >> 16) & 0xff; | 1509 |
1524 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] = | 1510 /* apply flow ID/VM pool/VLAN ID bits to hash words */ |
1525 (dst_addr >> 8) & 0xff; | 1511 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); |
1526 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff; | 1512 |
1527 | 1513 /* Process bits 0 and 16 */ |
1528 return (IXGBE_SUCCESS); | 1514 IXGBE_COMPUTE_SIG_HASH_ITERATION(0); |
1529 } | 1515 |
1530 | 1516 /* |
1531 /* | 1517 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to |
1532 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address | 1518 * delay this because bit 0 of the stream should not be processed |
1533 * @input: input stream to modify | 1519 * so we do not add the vlan until after bit 0 was processed |
1534 * @src_addr_1: the first 4 bytes of the IP address to load | 1520 */ |
1535 * @src_addr_2: the second 4 bytes of the IP address to load | 1521 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); |
1536 * @src_addr_3: the third 4 bytes of the IP address to load | 1522 |
1537 * @src_addr_4: the fourth 4 bytes of the IP address to load | 1523 /* Process remaining 30 bit of the key */ |
1538 */ | 1524 IXGBE_COMPUTE_SIG_HASH_ITERATION(1); |
1539 s32 | 1525 IXGBE_COMPUTE_SIG_HASH_ITERATION(2); |
1540 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input, | 1526 IXGBE_COMPUTE_SIG_HASH_ITERATION(3); |
1541 u32 src_addr_1, u32 src_addr_2, u32 src_addr_3, u32 src_addr_4) | 1527 IXGBE_COMPUTE_SIG_HASH_ITERATION(4); |
1542 { | 1528 IXGBE_COMPUTE_SIG_HASH_ITERATION(5); |
1543 DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599"); | 1529 IXGBE_COMPUTE_SIG_HASH_ITERATION(6); |
1544 | 1530 IXGBE_COMPUTE_SIG_HASH_ITERATION(7); |
1545 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff; | 1531 IXGBE_COMPUTE_SIG_HASH_ITERATION(8); |
1546 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] = | 1532 IXGBE_COMPUTE_SIG_HASH_ITERATION(9); |
1547 (src_addr_4 >> 8) & 0xff; | 1533 IXGBE_COMPUTE_SIG_HASH_ITERATION(10); |
1548 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] = | 1534 IXGBE_COMPUTE_SIG_HASH_ITERATION(11); |
1549 (src_addr_4 >> 16) & 0xff; | 1535 IXGBE_COMPUTE_SIG_HASH_ITERATION(12); |
1550 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24; | 1536 IXGBE_COMPUTE_SIG_HASH_ITERATION(13); |
1551 | 1537 IXGBE_COMPUTE_SIG_HASH_ITERATION(14); |
1552 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff; | 1538 IXGBE_COMPUTE_SIG_HASH_ITERATION(15); |
1553 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] = | 1539 |
1554 (src_addr_3 >> 8) & 0xff; | 1540 /* combine common_hash result with signature and bucket hashes */ |
1555 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] = | 1541 bucket_hash ^= common_hash; |
1556 (src_addr_3 >> 16) & 0xff; | 1542 bucket_hash &= IXGBE_ATR_HASH_MASK; |
1557 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24; | 1543 |
1558 | 1544 sig_hash ^= common_hash << 16; |
1559 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff; | 1545 sig_hash &= IXGBE_ATR_HASH_MASK << 16; |
1560 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] = | 1546 |
1561 (src_addr_2 >> 8) & 0xff; | 1547 /* return completed signature hash */ |
1562 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] = | 1548 return sig_hash ^ bucket_hash; |
1563 (src_addr_2 >> 16) & 0xff; | 1549 } |
1564 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24; | 1550 |
1565 | 1551 /** |
1566 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff; | 1552 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter |
1567 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] = | 1553 * @hw: pointer to hardware structure |
1568 (src_addr_1 >> 8) & 0xff; | 1554 * @stream: input bitstream |
1569 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] = | 1555 * @queue: queue index to direct traffic to |
1570 (src_addr_1 >> 16) & 0xff; | 1556 **/ |
1571 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24; | 1557 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, |
1572 | 1558 union ixgbe_atr_hash_dword input, |
1573 return (IXGBE_SUCCESS); | 1559 union ixgbe_atr_hash_dword common, |
1574 } | 1560 u8 queue) |
1575 | |
1576 /* | |
1577 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address | |
1578 * @input: input stream to modify | |
1579 * @dst_addr_1: the first 4 bytes of the IP address to load | |
1580 * @dst_addr_2: the second 4 bytes of the IP address to load | |
1581 * @dst_addr_3: the third 4 bytes of the IP address to load | |
1582 * @dst_addr_4: the fourth 4 bytes of the IP address to load | |
1583 */ | |
1584 s32 | |
1585 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input, | |
1586 u32 dst_addr_1, u32 dst_addr_2, u32 dst_addr_3, u32 dst_addr_4) | |
1587 { | |
1588 DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599"); | |
1589 | |
1590 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff; | |
1591 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] = | |
1592 (dst_addr_4 >> 8) & 0xff; | |
1593 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] = | |
1594 (dst_addr_4 >> 16) & 0xff; | |
1595 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24; | |
1596 | |
1597 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff; | |
1598 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] = | |
1599 (dst_addr_3 >> 8) & 0xff; | |
1600 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] = | |
1601 (dst_addr_3 >> 16) & 0xff; | |
1602 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24; | |
1603 | |
1604 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff; | |
1605 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] = | |
1606 (dst_addr_2 >> 8) & 0xff; | |
1607 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] = | |
1608 (dst_addr_2 >> 16) & 0xff; | |
1609 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24; | |
1610 | |
1611 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff; | |
1612 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] = | |
1613 (dst_addr_1 >> 8) & 0xff; | |
1614 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] = | |
1615 (dst_addr_1 >> 16) & 0xff; | |
1616 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24; | |
1617 | |
1618 return (IXGBE_SUCCESS); | |
1619 } | |
1620 | |
1621 /* | |
1622 * ixgbe_atr_set_src_port_82599 - Sets the source port | |
1623 * @input: input stream to modify | |
1624 * @src_port: the source port to load | |
1625 */ | |
1626 s32 | |
1627 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port) | |
1628 { | |
1629 DEBUGFUNC("ixgbe_atr_set_src_port_82599"); | |
1630 | |
1631 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8; | |
1632 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff; | |
1633 | |
1634 return (IXGBE_SUCCESS); | |
1635 } | |
1636 | |
1637 /* | |
1638 * ixgbe_atr_set_dst_port_82599 - Sets the destination port | |
1639 * @input: input stream to modify | |
1640 * @dst_port: the destination port to load | |
1641 */ | |
1642 s32 | |
1643 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port) | |
1644 { | |
1645 DEBUGFUNC("ixgbe_atr_set_dst_port_82599"); | |
1646 | |
1647 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8; | |
1648 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff; | |
1649 | |
1650 return (IXGBE_SUCCESS); | |
1651 } | |
1652 | |
1653 /* | |
1654 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes | |
1655 * @input: input stream to modify | |
1656 * @flex_bytes: the flexible bytes to load | |
1657 */ | |
1658 s32 | |
1659 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) | |
1660 { | |
1661 DEBUGFUNC("ixgbe_atr_set_flex_byte_82599"); | |
1662 | |
1663 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8; | |
1664 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff; | |
1665 | |
1666 return (IXGBE_SUCCESS); | |
1667 } | |
1668 | |
1669 /* | |
1670 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool | |
1671 * @input: input stream to modify | |
1672 * @vm_pool: the Virtual Machine pool to load | |
1673 */ | |
1674 s32 | |
1675 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool) | |
1676 { | |
1677 DEBUGFUNC("ixgbe_atr_set_vm_pool_82599"); | |
1678 | |
1679 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool; | |
1680 | |
1681 return (IXGBE_SUCCESS); | |
1682 } | |
1683 | |
1684 /* | |
1685 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type | |
1686 * @input: input stream to modify | |
1687 * @l4type: the layer 4 type value to load | |
1688 */ | |
1689 s32 | |
1690 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type) | |
1691 { | |
1692 DEBUGFUNC("ixgbe_atr_set_l4type_82599"); | |
1693 | |
1694 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type; | |
1695 | |
1696 return (IXGBE_SUCCESS); | |
1697 } | |
1698 | |
1699 /* | |
1700 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream | |
1701 * @input: input stream to search | |
1702 * @vlan: the VLAN id to load | |
1703 */ | |
1704 s32 | |
1705 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan) | |
1706 { | |
1707 DEBUGFUNC("ixgbe_atr_get_vlan_id_82599"); | |
1708 | |
1709 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; | |
1710 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; | |
1711 | |
1712 return (IXGBE_SUCCESS); | |
1713 } | |
1714 | |
1715 /* | |
1716 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address | |
1717 * @input: input stream to search | |
1718 * @src_addr: the IP address to load | |
1719 */ | |
1720 s32 | |
1721 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr) | |
1722 { | |
1723 DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599"); | |
1724 | |
1725 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET]; | |
1726 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8; | |
1727 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16; | |
1728 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24; | |
1729 | |
1730 return (IXGBE_SUCCESS); | |
1731 } | |
1732 | |
1733 /* | |
1734 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address | |
1735 * @input: input stream to search | |
1736 * @dst_addr: the IP address to load | |
1737 */ | |
1738 s32 | |
1739 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr) | |
1740 { | |
1741 DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599"); | |
1742 | |
1743 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; | |
1744 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; | |
1745 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; | |
1746 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; | |
1747 | |
1748 return (IXGBE_SUCCESS); | |
1749 } | |
1750 | |
1751 /* | |
1752 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address | |
1753 * @input: input stream to search | |
1754 * @src_addr_1: the first 4 bytes of the IP address to load | |
1755 * @src_addr_2: the second 4 bytes of the IP address to load | |
1756 * @src_addr_3: the third 4 bytes of the IP address to load | |
1757 * @src_addr_4: the fourth 4 bytes of the IP address to load | |
1758 */ | |
1759 s32 | |
1760 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, | |
1761 u32 *src_addr_1, u32 *src_addr_2, u32 *src_addr_3, u32 *src_addr_4) | |
1762 { | |
1763 DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599"); | |
1764 | |
1765 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; | |
1766 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; | |
1767 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; | |
1768 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; | |
1769 | |
1770 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; | |
1771 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; | |
1772 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; | |
1773 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; | |
1774 | |
1775 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; | |
1776 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; | |
1777 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; | |
1778 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24; | |
1779 | |
1780 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; | |
1781 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; | |
1782 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; | |
1783 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; | |
1784 | |
1785 return (IXGBE_SUCCESS); | |
1786 } | |
1787 | |
1788 /* | |
1789 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address | |
1790 * @input: input stream to search | |
1791 * @dst_addr_1: the first 4 bytes of the IP address to load | |
1792 * @dst_addr_2: the second 4 bytes of the IP address to load | |
1793 * @dst_addr_3: the third 4 bytes of the IP address to load | |
1794 * @dst_addr_4: the fourth 4 bytes of the IP address to load | |
1795 */ | |
1796 s32 | |
1797 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, | |
1798 u32 *dst_addr_1, u32 *dst_addr_2, u32 *dst_addr_3, u32 *dst_addr_4) | |
1799 { | |
1800 DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599"); | |
1801 | |
1802 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12]; | |
1803 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8; | |
1804 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16; | |
1805 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24; | |
1806 | |
1807 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8]; | |
1808 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8; | |
1809 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16; | |
1810 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24; | |
1811 | |
1812 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4]; | |
1813 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8; | |
1814 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16; | |
1815 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24; | |
1816 | |
1817 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET]; | |
1818 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8; | |
1819 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16; | |
1820 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24; | |
1821 | |
1822 return (IXGBE_SUCCESS); | |
1823 } | |
1824 | |
1825 /* | |
1826 * ixgbe_atr_get_src_port_82599 - Gets the source port | |
1827 * @input: input stream to modify | |
1828 * @src_port: the source port to load | |
1829 * | |
1830 * Even though the input is given in big-endian, the FDIRPORT registers | |
1831 * expect the ports to be programmed in little-endian. Hence the need to swap | |
1832 * endianness when retrieving the data. This can be confusing since the | |
1833 * internal hash engine expects it to be big-endian. | |
1834 */ | |
1835 s32 | |
1836 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port) | |
1837 { | |
1838 DEBUGFUNC("ixgbe_atr_get_src_port_82599"); | |
1839 | |
1840 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; | |
1841 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; | |
1842 | |
1843 return (IXGBE_SUCCESS); | |
1844 } | |
1845 | |
1846 /* | |
1847 * ixgbe_atr_get_dst_port_82599 - Gets the destination port | |
1848 * @input: input stream to modify | |
1849 * @dst_port: the destination port to load | |
1850 * | |
1851 * Even though the input is given in big-endian, the FDIRPORT registers | |
1852 * expect the ports to be programmed in little-endian. Hence the need to swap | |
1853 * endianness when retrieving the data. This can be confusing since the | |
1854 * internal hash engine expects it to be big-endian. | |
1855 */ | |
1856 s32 | |
1857 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port) | |
1858 { | |
1859 DEBUGFUNC("ixgbe_atr_get_dst_port_82599"); | |
1860 | |
1861 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; | |
1862 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; | |
1863 | |
1864 return (IXGBE_SUCCESS); | |
1865 } | |
1866 | |
1867 /* | |
1868 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes | |
1869 * @input: input stream to modify | |
1870 * @flex_bytes: the flexible bytes to load | |
1871 */ | |
1872 s32 | |
1873 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte) | |
1874 { | |
1875 DEBUGFUNC("ixgbe_atr_get_flex_byte_82599"); | |
1876 | |
1877 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; | |
1878 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; | |
1879 | |
1880 return (IXGBE_SUCCESS); | |
1881 } | |
1882 | |
1883 /* | |
1884 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool | |
1885 * @input: input stream to modify | |
1886 * @vm_pool: the Virtual Machine pool to load | |
1887 */ | |
1888 s32 | |
1889 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool) | |
1890 { | |
1891 DEBUGFUNC("ixgbe_atr_get_vm_pool_82599"); | |
1892 | |
1893 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET]; | |
1894 | |
1895 return (IXGBE_SUCCESS); | |
1896 } | |
1897 | |
1898 /* | |
1899 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type | |
1900 * @input: input stream to modify | |
1901 * @l4type: the layer 4 type value to load | |
1902 */ | |
1903 s32 | |
1904 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type) | |
1905 { | |
1906 DEBUGFUNC("ixgbe_atr_get_l4type__82599"); | |
1907 | |
1908 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; | |
1909 | |
1910 return (IXGBE_SUCCESS); | |
1911 } | |
1912 | |
1913 /* | |
1914 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter | |
1915 * @hw: pointer to hardware structure | |
1916 * @stream: input bitstream | |
1917 * @queue: queue index to direct traffic to | |
1918 */ | |
1919 s32 | |
1920 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, | |
1921 struct ixgbe_atr_input *input, u8 queue) | |
1922 { | 1561 { |
1923 u64 fdirhashcmd; | 1562 u64 fdirhashcmd; |
1924 u64 fdircmd; | 1563 u32 fdircmd; |
1925 u32 fdirhash; | |
1926 u16 bucket_hash, sig_hash; | |
1927 u8 l4type; | |
1928 | 1564 |
1929 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); | 1565 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); |
1930 | 1566 |
1931 bucket_hash = ixgbe_atr_compute_hash_82599(input, | 1567 /* |
1932 IXGBE_ATR_BUCKET_HASH_KEY); | 1568 * Get the flow_type in order to program FDIRCMD properly |
1933 | 1569 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 |
1934 /* bucket_hash is only 15 bits */ | 1570 */ |
1935 bucket_hash &= IXGBE_ATR_HASH_MASK; | 1571 switch (input.formatted.flow_type) { |
1936 | 1572 case IXGBE_ATR_FLOW_TYPE_TCPV4: |
1937 sig_hash = ixgbe_atr_compute_hash_82599(input, | 1573 case IXGBE_ATR_FLOW_TYPE_UDPV4: |
1938 IXGBE_ATR_SIGNATURE_HASH_KEY); | 1574 case IXGBE_ATR_FLOW_TYPE_SCTPV4: |
1939 | 1575 case IXGBE_ATR_FLOW_TYPE_TCPV6: |
1940 /* Get the l4type in order to program FDIRCMD properly */ | 1576 case IXGBE_ATR_FLOW_TYPE_UDPV6: |
1941 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ | 1577 case IXGBE_ATR_FLOW_TYPE_SCTPV6: |
1942 (void) ixgbe_atr_get_l4type_82599(input, &l4type); | 1578 break; |
1579 default: | |
1580 DEBUGOUT(" Error on flow type input\n"); | |
1581 return IXGBE_ERR_CONFIG; | |
1582 } | |
1583 | |
1584 /* configure FDIRCMD register */ | |
1585 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | | |
1586 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; | |
1587 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; | |
1588 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; | |
1943 | 1589 |
1944 /* | 1590 /* |
1945 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits | 1591 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits |
1946 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. | 1592 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. |
1947 */ | 1593 */ |
1948 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; | 1594 fdirhashcmd = (u64)fdircmd << 32; |
1949 | 1595 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); |
1950 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | | 1596 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); |
1951 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); | 1597 |
1952 | 1598 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); |
1953 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { | 1599 |
1954 case IXGBE_ATR_L4TYPE_TCP: | 1600 return IXGBE_SUCCESS; |
1955 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; | 1601 } |
1956 break; | 1602 |
1957 case IXGBE_ATR_L4TYPE_UDP: | 1603 /** |
1958 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; | 1604 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks |
1959 break; | 1605 * @input_mask: mask to be bit swapped |
1960 case IXGBE_ATR_L4TYPE_SCTP: | 1606 * |
1961 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; | 1607 * The source and destination port masks for flow director are bit swapped |
1608 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to | |
1609 * generate a correctly swapped value we need to bit swap the mask and that | |
1610 * is what is accomplished by this function. | |
1611 **/ | |
1612 static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks) | |
1613 { | |
1614 u32 mask = IXGBE_NTOHS(input_masks->dst_port_mask); | |
1615 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; | |
1616 mask |= IXGBE_NTOHS(input_masks->src_port_mask); | |
1617 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); | |
1618 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); | |
1619 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); | |
1620 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); | |
1621 } | |
1622 | |
1623 /* | |
1624 * These two macros are meant to address the fact that we have registers | |
1625 * that are either all or in part big-endian. As a result on big-endian | |
1626 * systems we will end up byte swapping the value to little-endian before | |
1627 * it is byte swapped again and written to the hardware in the original | |
1628 * big-endian format. | |
1629 */ | |
1630 #define IXGBE_STORE_AS_BE32(_value) \ | |
1631 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ | |
1632 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) | |
1633 | |
1634 #define IXGBE_WRITE_REG_BE32(a, reg, value) \ | |
1635 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) | |
1636 | |
1637 #define IXGBE_STORE_AS_BE16(_value) \ | |
1638 (((u16)(_value) >> 8) | ((u16)(_value) << 8)) | |
1639 | |
1640 | |
1641 /** | |
1642 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter | |
1643 * @hw: pointer to hardware structure | |
1644 * @input: input bitstream | |
1645 * @input_masks: masks for the input bitstream | |
1646 * @soft_id: software index for the filters | |
1647 * @queue: queue index to direct traffic to | |
1648 * | |
1649 * Note that the caller to this function must lock before calling, since the | |
1650 * hardware writes must be protected from one another. | |
1651 **/ | |
1652 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, | |
1653 union ixgbe_atr_input *input, | |
1654 struct ixgbe_atr_input_masks *input_masks, | |
1655 u16 soft_id, u8 queue) | |
1656 { | |
1657 u32 fdirhash; | |
1658 u32 fdircmd; | |
1659 u32 fdirport, fdirtcpm; | |
1660 u32 fdirvlan; | |
1661 /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */ | |
1662 u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX | | |
1663 IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6; | |
1664 | |
1665 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); | |
1666 | |
1667 /* | |
1668 * Check flow_type formatting, and bail out before we touch the hardware | |
1669 * if there's a configuration issue | |
1670 */ | |
1671 switch (input->formatted.flow_type) { | |
1672 case IXGBE_ATR_FLOW_TYPE_IPV4: | |
1673 /* use the L4 protocol mask for raw IPv4/IPv6 traffic */ | |
1674 fdirm |= IXGBE_FDIRM_L4P; | |
1675 /* FALLTHRU */ | |
1676 case IXGBE_ATR_FLOW_TYPE_SCTPV4: | |
1677 if (input_masks->dst_port_mask || input_masks->src_port_mask) { | |
1678 DEBUGOUT(" Error on src/dst port mask\n"); | |
1679 return IXGBE_ERR_CONFIG; | |
1680 } | |
1681 break; | |
1682 case IXGBE_ATR_FLOW_TYPE_TCPV4: | |
1683 break; | |
1684 case IXGBE_ATR_FLOW_TYPE_UDPV4: | |
1962 break; | 1685 break; |
1963 default: | 1686 default: |
1964 DEBUGOUT(" Error on l4type input\n"); | 1687 DEBUGOUT(" Error on flow type input\n"); |
1965 return (IXGBE_ERR_CONFIG); | 1688 return IXGBE_ERR_CONFIG; |
1966 } | 1689 } |
1967 | |
1968 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) | |
1969 fdircmd |= IXGBE_FDIRCMD_IPV6; | |
1970 | |
1971 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); | |
1972 fdirhashcmd = ((fdircmd << 32) | fdirhash); | |
1973 | |
1974 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF); | |
1975 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); | |
1976 | |
1977 return (IXGBE_SUCCESS); | |
1978 } | |
1979 | |
1980 /* | |
1981 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter | |
1982 * @hw: pointer to hardware structure | |
1983 * @input: input bitstream | |
1984 * @input_masks: masks for the input bitstream | |
1985 * @soft_id: software index for the filters | |
1986 * @queue: queue index to direct traffic to | |
1987 * | |
1988 * Note that the caller to this function must lock before calling, since the | |
1989 * hardware writes must be protected from one another. | |
1990 */ | |
1991 s32 | |
1992 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, | |
1993 struct ixgbe_atr_input *input, struct ixgbe_atr_input_masks *input_masks, | |
1994 u16 soft_id, u8 queue) | |
1995 { | |
1996 u32 fdircmd = 0; | |
1997 u32 fdirhash; | |
1998 u32 src_ipv4 = 0, dst_ipv4 = 0; | |
1999 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; | |
2000 u16 src_port, dst_port, vlan_id, flex_bytes; | |
2001 u16 bucket_hash; | |
2002 u8 l4type; | |
2003 u8 fdirm = 0; | |
2004 | |
2005 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); | |
2006 | |
2007 /* Get our input values */ | |
2008 (void) ixgbe_atr_get_l4type_82599(input, &l4type); | |
2009 | |
2010 /* | |
2011 * Check l4type formatting, and bail out before we touch the hardware | |
2012 * if there's a configuration issue | |
2013 */ | |
2014 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { | |
2015 case IXGBE_ATR_L4TYPE_TCP: | |
2016 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; | |
2017 break; | |
2018 case IXGBE_ATR_L4TYPE_UDP: | |
2019 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; | |
2020 break; | |
2021 case IXGBE_ATR_L4TYPE_SCTP: | |
2022 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; | |
2023 break; | |
2024 default: | |
2025 DEBUGOUT(" Error on l4type input\n"); | |
2026 return (IXGBE_ERR_CONFIG); | |
2027 } | |
2028 | |
2029 bucket_hash = ixgbe_atr_compute_hash_82599(input, | |
2030 IXGBE_ATR_BUCKET_HASH_KEY); | |
2031 | |
2032 /* bucket_hash is only 15 bits */ | |
2033 bucket_hash &= IXGBE_ATR_HASH_MASK; | |
2034 | |
2035 (void) ixgbe_atr_get_vlan_id_82599(input, &vlan_id); | |
2036 (void) ixgbe_atr_get_src_port_82599(input, &src_port); | |
2037 (void) ixgbe_atr_get_dst_port_82599(input, &dst_port); | |
2038 (void) ixgbe_atr_get_flex_byte_82599(input, &flex_bytes); | |
2039 | |
2040 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; | |
2041 | |
2042 /* Now figure out if we're IPv4 or IPv6 */ | |
2043 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) { | |
2044 /* IPv6 */ | |
2045 (void) ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, | |
2046 &src_ipv6_2, &src_ipv6_3, &src_ipv6_4); | |
2047 | |
2048 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1); | |
2049 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2); | |
2050 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3); | |
2051 /* The last 4 bytes is the same register as IPv4 */ | |
2052 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4); | |
2053 | |
2054 fdircmd |= IXGBE_FDIRCMD_IPV6; | |
2055 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH; | |
2056 } else { | |
2057 /* IPv4 */ | |
2058 (void) ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4); | |
2059 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4); | |
2060 | |
2061 } | |
2062 | |
2063 (void) ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4); | |
2064 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4); | |
2065 | |
2066 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id | | |
2067 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT))); | |
2068 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port | | |
2069 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); | |
2070 | 1690 |
2071 /* | 1691 /* |
2072 * Program the relevant mask registers. If src/dst_port or src/dst_addr | 1692 * Program the relevant mask registers. If src/dst_port or src/dst_addr |
2073 * are zero, then assume a full mask for that field. Also assume that | 1693 * are zero, then assume a full mask for that field. Also assume that |
2074 * a VLAN of 0 is unspecified, so mask that out as well. L4type | 1694 * a VLAN of 0 is unspecified, so mask that out as well. L4type |
2075 * cannot be masked out in this implementation. | 1695 * cannot be masked out in this implementation. |
2076 * | 1696 * |
2077 * This also assumes IPv4 only. IPv6 masking isn't supported at this | 1697 * This also assumes IPv4 only. IPv6 masking isn't supported at this |
2078 * point in time. | 1698 * point in time. |
2079 */ | 1699 */ |
2080 if (src_ipv4 == 0) | 1700 |
2081 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff); | 1701 /* Program FDIRM */ |
2082 else | 1702 switch (IXGBE_NTOHS(input_masks->vlan_id_mask) & 0xEFFF) { |
2083 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask); | 1703 case 0xEFFF: |
2084 | 1704 /* Unmask VLAN ID - bit 0 and fall through to unmask prio */ |
2085 if (dst_ipv4 == 0) | 1705 fdirm &= ~IXGBE_FDIRM_VLANID; |
2086 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff); | 1706 /* FALLTHRU */ |
2087 else | 1707 case 0xE000: |
2088 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); | 1708 /* Unmask VLAN prio - bit 1 */ |
2089 | 1709 fdirm &= ~IXGBE_FDIRM_VLANP; |
2090 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { | 1710 break; |
2091 case IXGBE_ATR_L4TYPE_TCP: | 1711 case 0x0FFF: |
2092 if (src_port == 0) | 1712 /* Unmask VLAN ID - bit 0 */ |
2093 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff); | 1713 fdirm &= ~IXGBE_FDIRM_VLANID; |
2094 else | 1714 break; |
2095 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, | 1715 case 0x0000: |
2096 input_masks->src_port_mask); | 1716 /* do nothing, vlans already masked */ |
2097 | |
2098 if (dst_port == 0) | |
2099 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, | |
2100 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | | |
2101 0xffff0000)); | |
2102 else | |
2103 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, | |
2104 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | | |
2105 (input_masks->dst_port_mask << 16))); | |
2106 break; | |
2107 case IXGBE_ATR_L4TYPE_UDP: | |
2108 if (src_port == 0) | |
2109 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff); | |
2110 else | |
2111 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, | |
2112 input_masks->src_port_mask); | |
2113 | |
2114 if (dst_port == 0) | |
2115 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, | |
2116 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | | |
2117 0xffff0000)); | |
2118 else | |
2119 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, | |
2120 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | | |
2121 (input_masks->src_port_mask << 16))); | |
2122 break; | 1717 break; |
2123 default: | 1718 default: |
2124 /* this already would have failed above */ | 1719 DEBUGOUT(" Error on VLAN mask\n"); |
2125 break; | 1720 return IXGBE_ERR_CONFIG; |
2126 } | 1721 } |
2127 | 1722 |
2128 /* Program the last mask register, FDIRM */ | 1723 if (input_masks->flex_mask & 0xFFFF) { |
2129 if (input_masks->vlan_id_mask || !vlan_id) | 1724 if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) { |
2130 /* Mask both VLAN and VLANP - bits 0 and 1 */ | 1725 DEBUGOUT(" Error on flexible byte mask\n"); |
2131 fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP); | 1726 return IXGBE_ERR_CONFIG; |
2132 | 1727 } |
2133 if (input_masks->data_mask || !flex_bytes) | 1728 /* Unmask Flex Bytes - bit 4 */ |
2134 /* Flex bytes need masking, so mask the whole thing - bit 4 */ | 1729 fdirm &= ~IXGBE_FDIRM_FLEX; |
2135 fdirm |= IXGBE_FDIRM_FLEX; | 1730 } |
2136 | 1731 |
2137 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ | 1732 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ |
2138 fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6); | |
2139 | |
2140 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); | 1733 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); |
2141 | 1734 |
2142 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; | 1735 /* store the TCP/UDP port masks, bit reversed from port layout */ |
2143 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; | 1736 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks); |
2144 fdircmd |= IXGBE_FDIRCMD_LAST; | 1737 |
2145 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; | 1738 /* write both the same so that UDP and TCP use the same mask */ |
2146 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; | 1739 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); |
1740 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); | |
1741 | |
1742 /* store source and destination IP masks (big-enian) */ | |
1743 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, | |
1744 ~input_masks->src_ip_mask[0]); | |
1745 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, | |
1746 ~input_masks->dst_ip_mask[0]); | |
1747 | |
1748 /* Apply masks to input data */ | |
1749 input->formatted.vlan_id &= input_masks->vlan_id_mask; | |
1750 input->formatted.flex_bytes &= input_masks->flex_mask; | |
1751 input->formatted.src_port &= input_masks->src_port_mask; | |
1752 input->formatted.dst_port &= input_masks->dst_port_mask; | |
1753 input->formatted.src_ip[0] &= input_masks->src_ip_mask[0]; | |
1754 input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0]; | |
1755 | |
1756 /* record vlan (little-endian) and flex_bytes(big-endian) */ | |
1757 fdirvlan = | |
1758 IXGBE_STORE_AS_BE16(IXGBE_NTOHS(input->formatted.flex_bytes)); | |
1759 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; | |
1760 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); | |
1761 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); | |
1762 | |
1763 /* record source and destination port (little-endian)*/ | |
1764 fdirport = IXGBE_NTOHS(input->formatted.dst_port); | |
1765 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; | |
1766 fdirport |= IXGBE_NTOHS(input->formatted.src_port); | |
1767 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); | |
1768 | |
1769 /* record the first 32 bits of the destination address (big-endian) */ | |
1770 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); | |
1771 | |
1772 /* record the source address (big-endian) */ | |
1773 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); | |
1774 | |
1775 /* configure FDIRCMD register */ | |
1776 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | | |
1777 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; | |
1778 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; | |
1779 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; | |
1780 | |
1781 /* we only want the bucket hash so drop the upper 16 bits */ | |
1782 fdirhash = ixgbe_atr_compute_hash_82599(input, | |
1783 IXGBE_ATR_BUCKET_HASH_KEY); | |
1784 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; | |
2147 | 1785 |
2148 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); | 1786 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); |
2149 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); | 1787 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); |
2150 | 1788 |
2151 return (IXGBE_SUCCESS); | 1789 return IXGBE_SUCCESS; |
2152 } | 1790 } |
2153 | 1791 |
2154 /* | 1792 /** |
2155 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register | 1793 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register |
2156 * @hw: pointer to hardware structure | 1794 * @hw: pointer to hardware structure |
2157 * @reg: analog register to read | 1795 * @reg: analog register to read |
2158 * @val: read value | 1796 * @val: read value |
2159 * | 1797 * |
2160 * Performs read operation to Omer analog register specified. | 1798 * Performs read operation to Omer analog register specified. |
2161 */ | 1799 **/ |
2162 s32 | 1800 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) |
2163 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) | |
2164 { | 1801 { |
2165 u32 core_ctl; | 1802 u32 core_ctl; |
2166 | 1803 |
2167 DEBUGFUNC("ixgbe_read_analog_reg8_82599"); | 1804 DEBUGFUNC("ixgbe_read_analog_reg8_82599"); |
2168 | 1805 |
2169 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | | 1806 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | |
2170 (reg << 8)); | 1807 (reg << 8)); |
2171 IXGBE_WRITE_FLUSH(hw); | 1808 IXGBE_WRITE_FLUSH(hw); |
2172 usec_delay(10); | 1809 usec_delay(10); |
2173 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); | 1810 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); |
2174 *val = (u8)core_ctl; | 1811 *val = (u8)core_ctl; |
2175 | 1812 |
2176 return (IXGBE_SUCCESS); | 1813 return IXGBE_SUCCESS; |
2177 } | 1814 } |
2178 | 1815 |
2179 /* | 1816 /** |
2180 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register | 1817 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register |
2181 * @hw: pointer to hardware structure | 1818 * @hw: pointer to hardware structure |
2182 * @reg: atlas register to write | 1819 * @reg: atlas register to write |
2183 * @val: value to write | 1820 * @val: value to write |
2184 * | 1821 * |
2185 * Performs write operation to Omer analog register specified. | 1822 * Performs write operation to Omer analog register specified. |
2186 */ | 1823 **/ |
2187 s32 | 1824 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) |
2188 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) | |
2189 { | 1825 { |
2190 u32 core_ctl; | 1826 u32 core_ctl; |
2191 | 1827 |
2192 DEBUGFUNC("ixgbe_write_analog_reg8_82599"); | 1828 DEBUGFUNC("ixgbe_write_analog_reg8_82599"); |
2193 | 1829 |
2194 core_ctl = (reg << 8) | val; | 1830 core_ctl = (reg << 8) | val; |
2195 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); | 1831 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); |
2196 IXGBE_WRITE_FLUSH(hw); | 1832 IXGBE_WRITE_FLUSH(hw); |
2197 usec_delay(10); | 1833 usec_delay(10); |
2198 | 1834 |
2199 return (IXGBE_SUCCESS); | 1835 return IXGBE_SUCCESS; |
2200 } | 1836 } |
2201 | 1837 |
2202 /* | 1838 /** |
2203 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx | 1839 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx |
2204 * @hw: pointer to hardware structure | 1840 * @hw: pointer to hardware structure |
2205 * | 1841 * |
2206 * Starts the hardware using the generic start_hw function. | 1842 * Starts the hardware using the generic start_hw function |
2207 * Then performs revision-specific operations: | 1843 * and the generation start_hw function. |
2208 * Clears the rate limiter registers. | 1844 * Then performs revision-specific operations, if any. |
2209 */ | 1845 **/ |
2210 s32 | 1846 s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw) |
2211 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw) | 1847 { |
2212 { | |
2213 u32 i; | |
2214 u32 regval; | |
2215 s32 ret_val = IXGBE_SUCCESS; | 1848 s32 ret_val = IXGBE_SUCCESS; |
2216 | 1849 |
2217 DEBUGFUNC("ixgbe_start_hw_rev_1__82599"); | 1850 DEBUGFUNC("ixgbe_start_hw_rev_1__82599"); |
2218 | 1851 |
2219 ret_val = ixgbe_start_hw_generic(hw); | 1852 ret_val = ixgbe_start_hw_generic(hw); |
2220 | 1853 if (ret_val != IXGBE_SUCCESS) |
2221 /* Clear the rate limiters */ | 1854 goto out; |
2222 for (i = 0; i < hw->mac.max_tx_queues; i++) { | 1855 |
2223 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); | 1856 ret_val = ixgbe_start_hw_gen2(hw); |
2224 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); | 1857 if (ret_val != IXGBE_SUCCESS) |
2225 } | 1858 goto out; |
2226 IXGBE_WRITE_FLUSH(hw); | |
2227 | |
2228 /* Disable relaxed ordering */ | |
2229 for (i = 0; i < hw->mac.max_tx_queues; i++) { | |
2230 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); | |
2231 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | |
2232 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); | |
2233 } | |
2234 | |
2235 for (i = 0; i < hw->mac.max_rx_queues; i++) { | |
2236 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); | |
2237 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | | |
2238 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); | |
2239 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); | |
2240 } | |
2241 | 1859 |
2242 /* We need to run link autotry after the driver loads */ | 1860 /* We need to run link autotry after the driver loads */ |
2243 hw->mac.autotry_restart = true; | 1861 hw->mac.autotry_restart = TRUE; |
2244 | 1862 |
2245 if (ret_val == IXGBE_SUCCESS) | 1863 if (ret_val == IXGBE_SUCCESS) |
2246 ret_val = ixgbe_verify_fw_version_82599(hw); | 1864 ret_val = ixgbe_verify_fw_version_82599(hw); |
2247 | 1865 out: |
2248 return (ret_val); | 1866 return ret_val; |
2249 } | 1867 } |
2250 | 1868 |
2251 /* | 1869 /** |
2252 * ixgbe_identify_phy_82599 - Get physical layer module | 1870 * ixgbe_identify_phy_82599 - Get physical layer module |
2253 * @hw: pointer to hardware structure | 1871 * @hw: pointer to hardware structure |
2254 * | 1872 * |
2255 * Determines the physical layer module found on the current adapter. | 1873 * Determines the physical layer module found on the current adapter. |
2256 * If PHY already detected, maintains current PHY type in hw struct, | 1874 * If PHY already detected, maintains current PHY type in hw struct, |
2257 * otherwise executes the PHY detection routine. | 1875 * otherwise executes the PHY detection routine. |
2258 */ | 1876 **/ |
2259 s32 | 1877 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) |
2260 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) | |
2261 { | 1878 { |
2262 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; | 1879 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; |
2263 | 1880 |
2264 DEBUGFUNC("ixgbe_identify_phy_82599"); | 1881 DEBUGFUNC("ixgbe_identify_phy_82599"); |
2265 | 1882 |
2266 /* Detect PHY if not unknown - returns success if already detected. */ | 1883 /* Detect PHY if not unknown - returns success if already detected. */ |
2267 status = ixgbe_identify_phy_generic(hw); | 1884 status = ixgbe_identify_phy_generic(hw); |
2268 if (status != IXGBE_SUCCESS) | 1885 if (status != IXGBE_SUCCESS) { |
2269 status = ixgbe_identify_sfp_module_generic(hw); | 1886 /* 82599 10GBASE-T requires an external PHY */ |
1887 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) | |
1888 goto out; | |
1889 else | |
1890 status = ixgbe_identify_sfp_module_generic(hw); | |
1891 } | |
1892 | |
2270 /* Set PHY type none if no PHY detected */ | 1893 /* Set PHY type none if no PHY detected */ |
2271 if (hw->phy.type == ixgbe_phy_unknown) { | 1894 if (hw->phy.type == ixgbe_phy_unknown) { |
2272 hw->phy.type = ixgbe_phy_none; | 1895 hw->phy.type = ixgbe_phy_none; |
2273 status = IXGBE_SUCCESS; | 1896 status = IXGBE_SUCCESS; |
2274 } | 1897 } |
2275 | 1898 |
2276 /* Return error if SFP module has been detected but is not supported */ | 1899 /* Return error if SFP module has been detected but is not supported */ |
2277 if (hw->phy.type == ixgbe_phy_sfp_unsupported) | 1900 if (hw->phy.type == ixgbe_phy_sfp_unsupported) |
2278 status = IXGBE_ERR_SFP_NOT_SUPPORTED; | 1901 status = IXGBE_ERR_SFP_NOT_SUPPORTED; |
2279 | 1902 |
2280 return (status); | 1903 out: |
2281 } | 1904 return status; |
2282 | 1905 } |
2283 /* | 1906 |
2284 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type | 1907 /** |
2285 * @hw: pointer to hardware structure | 1908 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type |
2286 * | 1909 * @hw: pointer to hardware structure |
2287 * Determines physical layer capabilities of the current configuration. | 1910 * |
2288 */ | 1911 * Determines physical layer capabilities of the current configuration. |
2289 u32 | 1912 **/ |
2290 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) | 1913 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) |
2291 { | 1914 { |
2292 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; | 1915 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; |
2293 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); | 1916 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); |
2294 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); | 1917 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); |
2295 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; | 1918 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; |
2301 | 1924 |
2302 DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); | 1925 DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); |
2303 | 1926 |
2304 hw->phy.ops.identify(hw); | 1927 hw->phy.ops.identify(hw); |
2305 | 1928 |
2306 if (hw->phy.type == ixgbe_phy_tn || | 1929 switch (hw->phy.type) { |
2307 hw->phy.type == ixgbe_phy_aq || | 1930 case ixgbe_phy_tn: |
2308 hw->phy.type == ixgbe_phy_cu_unknown) { | 1931 case ixgbe_phy_aq: |
1932 case ixgbe_phy_cu_unknown: | |
2309 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, | 1933 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, |
2310 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); | 1934 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); |
2311 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) | 1935 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) |
2312 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; | 1936 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; |
2313 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) | 1937 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) |
2314 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; | 1938 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; |
2315 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) | 1939 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) |
2316 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; | 1940 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; |
2317 goto out; | 1941 goto out; |
1942 default: | |
1943 break; | |
2318 } | 1944 } |
2319 | 1945 |
2320 switch (autoc & IXGBE_AUTOC_LMS_MASK) { | 1946 switch (autoc & IXGBE_AUTOC_LMS_MASK) { |
2321 case IXGBE_AUTOC_LMS_1G_AN: | 1947 case IXGBE_AUTOC_LMS_1G_AN: |
2322 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: | 1948 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: |
2323 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { | 1949 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { |
2324 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | | 1950 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | |
2325 IXGBE_PHYSICAL_LAYER_1000BASE_BX; | 1951 IXGBE_PHYSICAL_LAYER_1000BASE_BX; |
2326 goto out; | 1952 goto out; |
2327 } else { | |
2328 /* SFI mode so read SFP module */ | |
2329 goto sfp_check; | |
2330 } | 1953 } |
1954 /* SFI mode so read SFP module */ | |
1955 goto sfp_check; | |
2331 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: | 1956 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: |
2332 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) | 1957 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) |
2333 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; | 1958 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; |
2334 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) | 1959 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) |
2335 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; | 1960 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; |
2355 default: | 1980 default: |
2356 goto out; | 1981 goto out; |
2357 } | 1982 } |
2358 | 1983 |
2359 sfp_check: | 1984 sfp_check: |
2360 /* | 1985 /* SFP check must be done last since DA modules are sometimes used to |
2361 * SFP check must be done last since DA modules are sometimes used to | |
2362 * test KR mode - we need to id KR mode correctly before SFP module. | 1986 * test KR mode - we need to id KR mode correctly before SFP module. |
2363 * Call identify_sfp because the pluggable module may have changed | 1987 * Call identify_sfp because the pluggable module may have changed */ |
2364 */ | |
2365 hw->phy.ops.identify_sfp(hw); | 1988 hw->phy.ops.identify_sfp(hw); |
2366 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) | 1989 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) |
2367 goto out; | 1990 goto out; |
2368 | 1991 |
2369 switch (hw->phy.type) { | 1992 switch (hw->phy.type) { |
2378 case ixgbe_phy_sfp_avago: | 2001 case ixgbe_phy_sfp_avago: |
2379 case ixgbe_phy_sfp_ftl: | 2002 case ixgbe_phy_sfp_ftl: |
2380 case ixgbe_phy_sfp_intel: | 2003 case ixgbe_phy_sfp_intel: |
2381 case ixgbe_phy_sfp_unknown: | 2004 case ixgbe_phy_sfp_unknown: |
2382 hw->phy.ops.read_i2c_eeprom(hw, | 2005 hw->phy.ops.read_i2c_eeprom(hw, |
2383 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); | 2006 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); |
2384 hw->phy.ops.read_i2c_eeprom(hw, | 2007 hw->phy.ops.read_i2c_eeprom(hw, |
2385 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); | 2008 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); |
2386 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) | 2009 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) |
2387 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; | 2010 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; |
2388 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) | 2011 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) |
2389 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; | 2012 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; |
2390 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) | 2013 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) |
2393 default: | 2016 default: |
2394 break; | 2017 break; |
2395 } | 2018 } |
2396 | 2019 |
2397 out: | 2020 out: |
2398 return (physical_layer); | 2021 return physical_layer; |
2399 } | 2022 } |
2400 | 2023 |
2401 /* | 2024 /** |
2402 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 | 2025 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 |
2403 * @hw: pointer to hardware structure | 2026 * @hw: pointer to hardware structure |
2404 * @regval: register value to write to RXCTRL | 2027 * @regval: register value to write to RXCTRL |
2405 * | 2028 * |
2406 * Enables the Rx DMA unit for 82599 | 2029 * Enables the Rx DMA unit for 82599 |
2407 */ | 2030 **/ |
2408 s32 | 2031 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) |
2409 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) | 2032 { |
2410 { | 2033 #define IXGBE_MAX_SECRX_POLL 30 |
2411 #define IXGBE_MAX_SECRX_POLL 30 | |
2412 int i; | 2034 int i; |
2413 int secrxreg; | 2035 int secrxreg; |
2414 | 2036 |
2415 DEBUGFUNC("ixgbe_enable_rx_dma_82599"); | 2037 DEBUGFUNC("ixgbe_enable_rx_dma_82599"); |
2416 | 2038 |
2433 } | 2055 } |
2434 | 2056 |
2435 /* For informational purposes only */ | 2057 /* For informational purposes only */ |
2436 if (i >= IXGBE_MAX_SECRX_POLL) | 2058 if (i >= IXGBE_MAX_SECRX_POLL) |
2437 DEBUGOUT("Rx unit being enabled before security " | 2059 DEBUGOUT("Rx unit being enabled before security " |
2438 "path fully disabled. Continuing with init.\n"); | 2060 "path fully disabled. Continuing with init.\n"); |
2439 | 2061 |
2440 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); | 2062 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); |
2441 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); | 2063 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); |
2442 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; | 2064 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; |
2443 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); | 2065 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); |
2444 IXGBE_WRITE_FLUSH(hw); | 2066 IXGBE_WRITE_FLUSH(hw); |
2445 | 2067 |
2446 return (IXGBE_SUCCESS); | 2068 return IXGBE_SUCCESS; |
2447 } | 2069 } |
2448 | 2070 |
2449 /* | 2071 /** |
2450 * ixgbe_get_device_caps_82599 - Get additional device capabilities | 2072 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 |
2451 * @hw: pointer to hardware structure | 2073 * @hw: pointer to hardware structure |
2452 * @device_caps: the EEPROM word with the extra device capabilities | 2074 * |
2453 * | 2075 * Verifies that installed the firmware version is 0.6 or higher |
2454 * This function will read the EEPROM location for the device capabilities, | 2076 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. |
2455 * and return the word through device_caps. | 2077 * |
2456 */ | 2078 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or |
2457 s32 | 2079 * if the FW version is not supported. |
2458 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps) | 2080 **/ |
2459 { | 2081 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) |
2460 DEBUGFUNC("ixgbe_get_device_caps_82599"); | |
2461 | |
2462 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); | |
2463 | |
2464 return (IXGBE_SUCCESS); | |
2465 } | |
2466 | |
2467 /* | |
2468 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 | |
2469 * @hw: pointer to hardware structure | |
2470 * | |
2471 * Verifies that installed the firmware version is 0.6 or higher | |
2472 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. | |
2473 * | |
2474 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or | |
2475 * if the FW version is not supported. | |
2476 */ | |
2477 static s32 | |
2478 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) | |
2479 { | 2082 { |
2480 s32 status = IXGBE_ERR_EEPROM_VERSION; | 2083 s32 status = IXGBE_ERR_EEPROM_VERSION; |
2481 u16 fw_offset, fw_ptp_cfg_offset; | 2084 u16 fw_offset, fw_ptp_cfg_offset; |
2482 u16 fw_version = 0; | 2085 u16 fw_version = 0; |
2483 | 2086 |
2495 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) | 2098 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) |
2496 goto fw_version_out; | 2099 goto fw_version_out; |
2497 | 2100 |
2498 /* get the offset to the Pass Through Patch Configuration block */ | 2101 /* get the offset to the Pass Through Patch Configuration block */ |
2499 hw->eeprom.ops.read(hw, (fw_offset + | 2102 hw->eeprom.ops.read(hw, (fw_offset + |
2500 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), &fw_ptp_cfg_offset); | 2103 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), |
2104 &fw_ptp_cfg_offset); | |
2501 | 2105 |
2502 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) | 2106 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) |
2503 goto fw_version_out; | 2107 goto fw_version_out; |
2504 | 2108 |
2505 /* get the firmware version */ | 2109 /* get the firmware version */ |
2506 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4), | 2110 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + |
2507 &fw_version); | 2111 IXGBE_FW_PATCH_VERSION_4), |
2112 &fw_version); | |
2508 | 2113 |
2509 if (fw_version > 0x5) | 2114 if (fw_version > 0x5) |
2510 status = IXGBE_SUCCESS; | 2115 status = IXGBE_SUCCESS; |
2511 | 2116 |
2512 fw_version_out: | 2117 fw_version_out: |
2513 return (status); | 2118 return status; |
2514 } | 2119 } |
2515 | 2120 |
2516 /* | 2121 /** |
2517 * ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering | 2122 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. |
2518 * @hw: pointer to hardware structure | 2123 * @hw: pointer to hardware structure |
2519 */ | 2124 * |
2520 void | 2125 * Returns TRUE if the LESM FW module is present and enabled. Otherwise |
2521 ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw) | 2126 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled. |
2522 { | 2127 **/ |
2523 u32 regval; | 2128 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) |
2524 u32 i; | 2129 { |
2525 | 2130 bool lesm_enabled = FALSE; |
2526 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82599"); | 2131 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; |
2527 | 2132 s32 status; |
2528 /* Enable relaxed ordering */ | 2133 |
2529 for (i = 0; i < hw->mac.max_tx_queues; i++) { | 2134 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); |
2530 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); | 2135 |
2531 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN; | 2136 /* get the offset to the Firmware Module block */ |
2532 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); | 2137 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); |
2533 } | 2138 |
2534 | 2139 if ((status != IXGBE_SUCCESS) || |
2535 for (i = 0; i < hw->mac.max_rx_queues; i++) { | 2140 (fw_offset == 0) || (fw_offset == 0xFFFF)) |
2536 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); | 2141 goto out; |
2537 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN | | 2142 |
2538 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); | 2143 /* get the offset to the LESM Parameters block */ |
2539 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); | 2144 status = hw->eeprom.ops.read(hw, (fw_offset + |
2540 } | 2145 IXGBE_FW_LESM_PARAMETERS_PTR), |
2541 } | 2146 &fw_lesm_param_offset); |
2147 | |
2148 if ((status != IXGBE_SUCCESS) || | |
2149 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) | |
2150 goto out; | |
2151 | |
2152 /* get the lesm state word */ | |
2153 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + | |
2154 IXGBE_FW_LESM_STATE_1), | |
2155 &fw_lesm_state); | |
2156 | |
2157 if ((status == IXGBE_SUCCESS) && | |
2158 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) | |
2159 lesm_enabled = TRUE; | |
2160 | |
2161 out: | |
2162 return lesm_enabled; | |
2163 } | |
2164 | |
2165 |