1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2024, Intel Corporation
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include "ice_common.h"
33 #include "ice_sched.h"
34 #include "ice_adminq_cmd.h"
35 #include "ice_flow.h"
36 #include "ice_switch.h"
37
38 #define ICE_PF_RESET_WAIT_COUNT 500
39
40 static const char * const ice_link_mode_str_low[] = {
41 ice_arr_elem_idx(0, "100BASE_TX"),
42 ice_arr_elem_idx(1, "100M_SGMII"),
43 ice_arr_elem_idx(2, "1000BASE_T"),
44 ice_arr_elem_idx(3, "1000BASE_SX"),
45 ice_arr_elem_idx(4, "1000BASE_LX"),
46 ice_arr_elem_idx(5, "1000BASE_KX"),
47 ice_arr_elem_idx(6, "1G_SGMII"),
48 ice_arr_elem_idx(7, "2500BASE_T"),
49 ice_arr_elem_idx(8, "2500BASE_X"),
50 ice_arr_elem_idx(9, "2500BASE_KX"),
51 ice_arr_elem_idx(10, "5GBASE_T"),
52 ice_arr_elem_idx(11, "5GBASE_KR"),
53 ice_arr_elem_idx(12, "10GBASE_T"),
54 ice_arr_elem_idx(13, "10G_SFI_DA"),
55 ice_arr_elem_idx(14, "10GBASE_SR"),
56 ice_arr_elem_idx(15, "10GBASE_LR"),
57 ice_arr_elem_idx(16, "10GBASE_KR_CR1"),
58 ice_arr_elem_idx(17, "10G_SFI_AOC_ACC"),
59 ice_arr_elem_idx(18, "10G_SFI_C2C"),
60 ice_arr_elem_idx(19, "25GBASE_T"),
61 ice_arr_elem_idx(20, "25GBASE_CR"),
62 ice_arr_elem_idx(21, "25GBASE_CR_S"),
63 ice_arr_elem_idx(22, "25GBASE_CR1"),
64 ice_arr_elem_idx(23, "25GBASE_SR"),
65 ice_arr_elem_idx(24, "25GBASE_LR"),
66 ice_arr_elem_idx(25, "25GBASE_KR"),
67 ice_arr_elem_idx(26, "25GBASE_KR_S"),
68 ice_arr_elem_idx(27, "25GBASE_KR1"),
69 ice_arr_elem_idx(28, "25G_AUI_AOC_ACC"),
70 ice_arr_elem_idx(29, "25G_AUI_C2C"),
71 ice_arr_elem_idx(30, "40GBASE_CR4"),
72 ice_arr_elem_idx(31, "40GBASE_SR4"),
73 ice_arr_elem_idx(32, "40GBASE_LR4"),
74 ice_arr_elem_idx(33, "40GBASE_KR4"),
75 ice_arr_elem_idx(34, "40G_XLAUI_AOC_ACC"),
76 ice_arr_elem_idx(35, "40G_XLAUI"),
77 ice_arr_elem_idx(36, "50GBASE_CR2"),
78 ice_arr_elem_idx(37, "50GBASE_SR2"),
79 ice_arr_elem_idx(38, "50GBASE_LR2"),
80 ice_arr_elem_idx(39, "50GBASE_KR2"),
81 ice_arr_elem_idx(40, "50G_LAUI2_AOC_ACC"),
82 ice_arr_elem_idx(41, "50G_LAUI2"),
83 ice_arr_elem_idx(42, "50G_AUI2_AOC_ACC"),
84 ice_arr_elem_idx(43, "50G_AUI2"),
85 ice_arr_elem_idx(44, "50GBASE_CP"),
86 ice_arr_elem_idx(45, "50GBASE_SR"),
87 ice_arr_elem_idx(46, "50GBASE_FR"),
88 ice_arr_elem_idx(47, "50GBASE_LR"),
89 ice_arr_elem_idx(48, "50GBASE_KR_PAM4"),
90 ice_arr_elem_idx(49, "50G_AUI1_AOC_ACC"),
91 ice_arr_elem_idx(50, "50G_AUI1"),
92 ice_arr_elem_idx(51, "100GBASE_CR4"),
93 ice_arr_elem_idx(52, "100GBASE_SR4"),
94 ice_arr_elem_idx(53, "100GBASE_LR4"),
95 ice_arr_elem_idx(54, "100GBASE_KR4"),
96 ice_arr_elem_idx(55, "100G_CAUI4_AOC_ACC"),
97 ice_arr_elem_idx(56, "100G_CAUI4"),
98 ice_arr_elem_idx(57, "100G_AUI4_AOC_ACC"),
99 ice_arr_elem_idx(58, "100G_AUI4"),
100 ice_arr_elem_idx(59, "100GBASE_CR_PAM4"),
101 ice_arr_elem_idx(60, "100GBASE_KR_PAM4"),
102 ice_arr_elem_idx(61, "100GBASE_CP2"),
103 ice_arr_elem_idx(62, "100GBASE_SR2"),
104 ice_arr_elem_idx(63, "100GBASE_DR"),
105 };
106
107 static const char * const ice_link_mode_str_high[] = {
108 ice_arr_elem_idx(0, "100GBASE_KR2_PAM4"),
109 ice_arr_elem_idx(1, "100G_CAUI2_AOC_ACC"),
110 ice_arr_elem_idx(2, "100G_CAUI2"),
111 ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
112 ice_arr_elem_idx(4, "100G_AUI2"),
113 ice_arr_elem_idx(5, "200G_CR4_PAM4"),
114 ice_arr_elem_idx(6, "200G_SR4"),
115 ice_arr_elem_idx(7, "200G_FR4"),
116 ice_arr_elem_idx(8, "200G_LR4"),
117 ice_arr_elem_idx(9, "200G_DR4"),
118 ice_arr_elem_idx(10, "200G_KR4_PAM4"),
119 ice_arr_elem_idx(11, "200G_AUI4_AOC_ACC"),
120 ice_arr_elem_idx(12, "200G_AUI4"),
121 ice_arr_elem_idx(13, "200G_AUI8_AOC_ACC"),
122 ice_arr_elem_idx(14, "200G_AUI8"),
123 ice_arr_elem_idx(15, "400GBASE_FR8"),
124 };
125
126 /**
127 * ice_dump_phy_type - helper function to dump phy_type
128 * @hw: pointer to the HW structure
129 * @low: 64 bit value for phy_type_low
130 * @high: 64 bit value for phy_type_high
131 * @prefix: prefix string to differentiate multiple dumps
132 */
133 static void
134 ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
135 {
136 u32 i;
137
138 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
139 (unsigned long long)low);
140
141 for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
142 if (low & BIT_ULL(i))
143 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
144 prefix, i, ice_link_mode_str_low[i]);
145 }
146
147 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
148 (unsigned long long)high);
149
150 for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
151 if (high & BIT_ULL(i))
152 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
153 prefix, i, ice_link_mode_str_high[i]);
154 }
155 }
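/* Worked example (illustrative value only): a phy_type_low of 0x1008 has
 * bits 3 and 12 set, so ice_dump_phy_type() above would log "1000BASE_SX"
 * and "10GBASE_T" from ice_link_mode_str_low.
 */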
156
157 /**
158 * ice_set_mac_type - Sets MAC type
159 * @hw: pointer to the HW structure
160 *
161 * This function sets the MAC type of the adapter based on the
162 * vendor ID and device ID stored in the HW structure.
163 */
164 int ice_set_mac_type(struct ice_hw *hw)
165 {
166 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
167
168 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
169 return ICE_ERR_DEVICE_NOT_SUPPORTED;
170
171 switch (hw->device_id) {
172 case ICE_DEV_ID_E810C_BACKPLANE:
173 case ICE_DEV_ID_E810C_QSFP:
174 case ICE_DEV_ID_E810C_SFP:
175 case ICE_DEV_ID_E810_XXV_BACKPLANE:
176 case ICE_DEV_ID_E810_XXV_QSFP:
177 case ICE_DEV_ID_E810_XXV_SFP:
178 hw->mac_type = ICE_MAC_E810;
179 break;
180 case ICE_DEV_ID_E822C_10G_BASE_T:
181 case ICE_DEV_ID_E822C_BACKPLANE:
182 case ICE_DEV_ID_E822C_QSFP:
183 case ICE_DEV_ID_E822C_SFP:
184 case ICE_DEV_ID_E822C_SGMII:
185 case ICE_DEV_ID_E822L_10G_BASE_T:
186 case ICE_DEV_ID_E822L_BACKPLANE:
187 case ICE_DEV_ID_E822L_SFP:
188 case ICE_DEV_ID_E822L_SGMII:
189 case ICE_DEV_ID_E823L_10G_BASE_T:
190 case ICE_DEV_ID_E823L_1GBE:
191 case ICE_DEV_ID_E823L_BACKPLANE:
192 case ICE_DEV_ID_E823L_QSFP:
193 case ICE_DEV_ID_E823L_SFP:
194 case ICE_DEV_ID_E823C_10G_BASE_T:
195 case ICE_DEV_ID_E823C_BACKPLANE:
196 case ICE_DEV_ID_E823C_QSFP:
197 case ICE_DEV_ID_E823C_SFP:
198 case ICE_DEV_ID_E823C_SGMII:
199 hw->mac_type = ICE_MAC_GENERIC;
200 break;
201 case ICE_DEV_ID_E825C_BACKPLANE:
202 case ICE_DEV_ID_E825C_QSFP:
203 case ICE_DEV_ID_E825C_SFP:
204 case ICE_DEV_ID_E825C_SGMII:
205 hw->mac_type = ICE_MAC_GENERIC_3K_E825;
206 break;
207 case ICE_DEV_ID_E830_BACKPLANE:
208 case ICE_DEV_ID_E830_QSFP56:
209 case ICE_DEV_ID_E830_SFP:
210 case ICE_DEV_ID_E830C_BACKPLANE:
211 case ICE_DEV_ID_E830_L_BACKPLANE:
212 case ICE_DEV_ID_E830C_QSFP:
213 case ICE_DEV_ID_E830_L_QSFP:
214 case ICE_DEV_ID_E830C_SFP:
215 case ICE_DEV_ID_E830_L_SFP:
216 case ICE_DEV_ID_E835CC_BACKPLANE:
217 case ICE_DEV_ID_E835CC_QSFP56:
218 case ICE_DEV_ID_E835CC_SFP:
219 case ICE_DEV_ID_E835C_BACKPLANE:
220 case ICE_DEV_ID_E835C_QSFP:
221 case ICE_DEV_ID_E835C_SFP:
222 case ICE_DEV_ID_E835_L_BACKPLANE:
223 case ICE_DEV_ID_E835_L_QSFP:
224 case ICE_DEV_ID_E835_L_SFP:
225 hw->mac_type = ICE_MAC_E830;
226 break;
227 default:
228 hw->mac_type = ICE_MAC_UNKNOWN;
229 break;
230 }
231
232 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
233 return 0;
234 }
235
236 /**
237 * ice_is_generic_mac
238 * @hw: pointer to the hardware structure
239 *
240 * returns true if mac_type is generic (ICE_MAC_GENERIC, ICE_MAC_GENERIC_3K
 * or ICE_MAC_GENERIC_3K_E825), false if not
241 */
242 bool ice_is_generic_mac(struct ice_hw *hw)
243 {
244 return (hw->mac_type == ICE_MAC_GENERIC ||
245 hw->mac_type == ICE_MAC_GENERIC_3K ||
246 hw->mac_type == ICE_MAC_GENERIC_3K_E825);
247 }
248
249 /**
250 * ice_is_e810
251 * @hw: pointer to the hardware structure
252 *
253 * returns true if the device is E810 based, false if not.
254 */
255 bool ice_is_e810(struct ice_hw *hw)
256 {
257 return hw->mac_type == ICE_MAC_E810;
258 }
259
260 /**
261 * ice_is_e810t
262 * @hw: pointer to the hardware structure
263 *
264 * returns true if the device is E810T based, false if not.
265 */
266 bool ice_is_e810t(struct ice_hw *hw)
267 {
268 switch (hw->device_id) {
269 case ICE_DEV_ID_E810C_SFP:
270 switch (hw->subsystem_device_id) {
271 case ICE_SUBDEV_ID_E810T:
272 case ICE_SUBDEV_ID_E810T2:
273 case ICE_SUBDEV_ID_E810T3:
274 case ICE_SUBDEV_ID_E810T4:
275 case ICE_SUBDEV_ID_E810T6:
276 case ICE_SUBDEV_ID_E810T7:
277 return true;
278 }
279 break;
280 case ICE_DEV_ID_E810C_QSFP:
281 switch (hw->subsystem_device_id) {
282 case ICE_SUBDEV_ID_E810T2:
283 case ICE_SUBDEV_ID_E810T3:
284 case ICE_SUBDEV_ID_E810T5:
285 return true;
286 }
287 break;
288 default:
289 break;
290 }
291
292 return false;
293 }
294
295 /**
296 * ice_is_e830
297 * @hw: pointer to the hardware structure
298 *
299 * returns true if the device is E830 based, false if not.
300 */
301 bool ice_is_e830(struct ice_hw *hw)
302 {
303 return hw->mac_type == ICE_MAC_E830;
304 }
305
306 /**
307 * ice_is_e823
308 * @hw: pointer to the hardware structure
309 *
310 * returns true if the device is E823-L or E823-C based, false if not.
311 */
312 bool ice_is_e823(struct ice_hw *hw)
313 {
314 switch (hw->device_id) {
315 case ICE_DEV_ID_E823L_BACKPLANE:
316 case ICE_DEV_ID_E823L_SFP:
317 case ICE_DEV_ID_E823L_10G_BASE_T:
318 case ICE_DEV_ID_E823L_1GBE:
319 case ICE_DEV_ID_E823L_QSFP:
320 case ICE_DEV_ID_E823C_BACKPLANE:
321 case ICE_DEV_ID_E823C_QSFP:
322 case ICE_DEV_ID_E823C_SFP:
323 case ICE_DEV_ID_E823C_10G_BASE_T:
324 case ICE_DEV_ID_E823C_SGMII:
325 return true;
326 default:
327 return false;
328 }
329 }
330
331 /**
332 * ice_is_e825c
333 * @hw: pointer to the hardware structure
334 *
335 * returns true if the device is E825-C based, false if not.
336 */
337 bool ice_is_e825c(struct ice_hw *hw)
338 {
339 switch (hw->device_id) {
340 case ICE_DEV_ID_E825C_BACKPLANE:
341 case ICE_DEV_ID_E825C_QSFP:
342 case ICE_DEV_ID_E825C_SFP:
343 case ICE_DEV_ID_E825C_SGMII:
344 return true;
345 default:
346 return false;
347 }
348 }
349
350 /**
351 * ice_clear_pf_cfg - Clear PF configuration
352 * @hw: pointer to the hardware structure
353 *
354 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
355 * configuration, flow director filters, etc.).
356 */
357 int ice_clear_pf_cfg(struct ice_hw *hw)
358 {
359 struct ice_aq_desc desc;
360
361 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
362
363 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
364 }
365
366 /**
367 * ice_aq_manage_mac_read - manage MAC address read command
368 * @hw: pointer to the HW struct
369 * @buf: a virtual buffer to hold the manage MAC read response
370 * @buf_size: Size of the virtual buffer
371 * @cd: pointer to command details structure or NULL
372 *
373 * This function is used to return the per-PF station MAC address (0x0107).
374 * NOTE: Upon successful completion of this command, MAC address information
375 * is returned in the user-specified buffer. Please interpret the
376 * user-specified buffer as a "manage_mac_read" response.
377 * Responses such as the various MAC addresses are stored in the HW struct
378 * (port.mac). ice_discover_dev_caps is expected to be called before this
379 * function is called.
380 */
381 int
382 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
383 struct ice_sq_cd *cd)
384 {
385 struct ice_aqc_manage_mac_read_resp *resp;
386 struct ice_aqc_manage_mac_read *cmd;
387 struct ice_aq_desc desc;
388 int status;
389 u16 flags;
390 u8 i;
391
392 cmd = &desc.params.mac_read;
393
394 if (buf_size < sizeof(*resp))
395 return ICE_ERR_BUF_TOO_SHORT;
396
397 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
398
399 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
400 if (status)
401 return status;
402
403 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
404 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
405
406 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
407 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
408 return ICE_ERR_CFG;
409 }
410
411 /* A single port can report up to two (LAN and WoL) addresses */
412 for (i = 0; i < cmd->num_addr; i++)
413 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
414 ice_memcpy(hw->port_info->mac.lan_addr,
415 resp[i].mac_addr, ETH_ALEN,
416 ICE_NONDMA_TO_NONDMA);
417 ice_memcpy(hw->port_info->mac.perm_addr,
418 resp[i].mac_addr,
419 ETH_ALEN, ICE_NONDMA_TO_NONDMA);
420 break;
421 }
422 return 0;
423 }
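/* Usage sketch (illustrative, mirroring the call made in ice_init_hw()
 * below): a caller allocates room for two response entries, since a port
 * can report both a LAN and a WoL address, and passes the raw buffer here.
 *
 *	buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	buf = ice_calloc(hw, 2, sizeof(struct ice_aqc_manage_mac_read_resp));
 *	if (buf)
 *		status = ice_aq_manage_mac_read(hw, buf, buf_len, NULL);
 */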
424
425 /**
426 * ice_phy_maps_to_media
427 * @phy_type_low: PHY type low bits
428 * @phy_type_high: PHY type high bits
429 * @media_mask_low: media type PHY type low bitmask
430 * @media_mask_high: media type PHY type high bitmask
431 *
432 * Return true if the PHY type [low|high] bits contain only PHY types that
433 * belong to the media type [low|high] bitmask.
434 */
435 static bool
436 ice_phy_maps_to_media(u64 phy_type_low, u64 phy_type_high,
437 u64 media_mask_low, u64 media_mask_high)
438 {
439 /* check if a PHY type exists for the media type */
440 if (!(phy_type_low & media_mask_low ||
441 phy_type_high & media_mask_high))
442 return false;
443
444 /* check that PHY types are only of media type */
445 if (!(phy_type_low & ~media_mask_low) &&
446 !(phy_type_high & ~media_mask_high))
447 return true;
448
449 return false;
450 }
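/* Example of the check above (illustrative): with media_mask_low set to the
 * BASE-T mask, a phy_type_low containing one BASE-T bit plus one backplane
 * bit fails the "only of media type" test and returns false, whereas a
 * phy_type_low made up solely of BASE-T bits returns true.
 */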
451
452 /**
453 * ice_set_media_type - Sets media type
454 * @pi: port information structure
455 *
456 * Set ice_port_info PHY media type based on PHY type. This should be called
457 * from Get PHY caps with media.
458 */
459 static void ice_set_media_type(struct ice_port_info *pi)
460 {
461 enum ice_media_type *media_type;
462 u64 phy_type_high, phy_type_low;
463
464 phy_type_high = pi->phy.phy_type_high;
465 phy_type_low = pi->phy.phy_type_low;
466 media_type = &pi->phy.media_type;
467
468 /* if no media, then media type is NONE */
469 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
470 *media_type = ICE_MEDIA_NONE;
471 /* else if PHY types are only BASE-T, then media type is BASET */
472 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
473 ICE_MEDIA_BASET_PHY_TYPE_LOW_M, 0))
474 *media_type = ICE_MEDIA_BASET;
475 /* else if any PHY type is BACKPLANE, then media type is BACKPLANE */
476 else if (phy_type_low & ICE_MEDIA_BP_PHY_TYPE_LOW_M ||
477 phy_type_high & ICE_MEDIA_BP_PHY_TYPE_HIGH_M)
478 *media_type = ICE_MEDIA_BACKPLANE;
479 /* else if PHY types are only optical, or optical and C2M, then media
480 * type is FIBER
481 */
482 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
483 ICE_MEDIA_OPT_PHY_TYPE_LOW_M,
484 ICE_MEDIA_OPT_PHY_TYPE_HIGH_M) ||
485 ((phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M ||
486 phy_type_high & ICE_MEDIA_OPT_PHY_TYPE_HIGH_M) &&
487 (phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M ||
488 phy_type_high & ICE_MEDIA_C2M_PHY_TYPE_HIGH_M)))
489 *media_type = ICE_MEDIA_FIBER;
490 /* else if PHY types are only DA, or DA and C2C, then media type DA */
491 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
492 ICE_MEDIA_DAC_PHY_TYPE_LOW_M,
493 ICE_MEDIA_DAC_PHY_TYPE_HIGH_M) ||
494 ((phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M ||
495 phy_type_high & ICE_MEDIA_DAC_PHY_TYPE_HIGH_M) &&
496 (phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
497 phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
498 *media_type = ICE_MEDIA_DA;
499 /* else if PHY types are only C2M or only C2C, then media is AUI */
500 else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
501 ICE_MEDIA_C2M_PHY_TYPE_LOW_M,
502 ICE_MEDIA_C2M_PHY_TYPE_HIGH_M) ||
503 ice_phy_maps_to_media(phy_type_low, phy_type_high,
504 ICE_MEDIA_C2C_PHY_TYPE_LOW_M,
505 ICE_MEDIA_C2C_PHY_TYPE_HIGH_M))
506 *media_type = ICE_MEDIA_AUI;
507
508 else
509 *media_type = ICE_MEDIA_UNKNOWN;
510 }
511
512 /**
513 * ice_aq_get_phy_caps - returns PHY capabilities
514 * @pi: port information structure
515 * @qual_mods: report qualified modules
516 * @report_mode: report mode capabilities
517 * @pcaps: structure for PHY capabilities to be filled
518 * @cd: pointer to command details structure or NULL
519 *
520 * Returns the various PHY capabilities supported on the Port (0x0600)
521 */
522 int
523 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
524 struct ice_aqc_get_phy_caps_data *pcaps,
525 struct ice_sq_cd *cd)
526 {
527 struct ice_aqc_get_phy_caps *cmd;
528 u16 pcaps_size = sizeof(*pcaps);
529 struct ice_aq_desc desc;
530 const char *prefix;
531 struct ice_hw *hw;
532 int status;
533
534 cmd = &desc.params.get_phy;
535
536 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
537 return ICE_ERR_PARAM;
538 hw = pi->hw;
539
540 if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
541 !ice_fw_supports_report_dflt_cfg(hw))
542 return ICE_ERR_PARAM;
543
544 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
545
546 if (qual_mods)
547 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
548
549 cmd->param0 |= CPU_TO_LE16(report_mode);
550
551 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
552
553 ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
554
555 switch (report_mode) {
556 case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
557 prefix = "phy_caps_media";
558 break;
559 case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
560 prefix = "phy_caps_no_media";
561 break;
562 case ICE_AQC_REPORT_ACTIVE_CFG:
563 prefix = "phy_caps_active";
564 break;
565 case ICE_AQC_REPORT_DFLT_CFG:
566 prefix = "phy_caps_default";
567 break;
568 default:
569 prefix = "phy_caps_invalid";
570 }
571
572 ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
573 LE64_TO_CPU(pcaps->phy_type_high), prefix);
574
575 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
576 prefix, report_mode);
577 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
578 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
579 pcaps->low_power_ctrl_an);
580 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
581 pcaps->eee_cap);
582 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
583 pcaps->eeer_value);
584 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
585 pcaps->link_fec_options);
586 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
587 prefix, pcaps->module_compliance_enforcement);
588 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
589 prefix, pcaps->extended_compliance_code);
590 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
591 pcaps->module_type[0]);
592 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
593 pcaps->module_type[1]);
594 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
595 pcaps->module_type[2]);
596
597 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
598 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
599 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
600 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
601 sizeof(pi->phy.link_info.module_type),
602 ICE_NONDMA_TO_NONDMA);
603 ice_set_media_type(pi);
604 ice_debug(hw, ICE_DBG_LINK, "%s: media_type = 0x%x\n", prefix,
605 pi->phy.media_type);
606 }
607
608 return status;
609 }
610
611 /**
612 * ice_aq_get_phy_equalization - read a serdes equalizer value from firmware
613 * using an admin queue command
614 * @hw: pointer to the HW struct
615 * @data_in: represents the serdes equalization parameter requested
616 * @op_code: represents the serdes number and flag to represent tx or rx
617 * @serdes_num: represents the serdes number
618 * @output: pointer to the caller-supplied buffer to return serdes equalizer
619 *
620 * Returns 0 on success,
621 * non-zero status on error
622 */
623 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
624 u8 serdes_num, int *output)
625 {
626 struct ice_aqc_dnl_call_command *cmd;
627 struct ice_aqc_dnl_call buf;
628 struct ice_aq_desc desc;
629 int err = 0;
630
631 if (!hw || !output)
632 return (ICE_ERR_PARAM);
633
634 memset(&buf, 0, sizeof(buf));
635 buf.sto.txrx_equa_reqs.data_in = CPU_TO_LE16(data_in);
636 buf.sto.txrx_equa_reqs.op_code_serdes_sel =
637 CPU_TO_LE16(op_code | (serdes_num & 0xF));
638
639 cmd = &desc.params.dnl_call;
640 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
641 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_RD |
642 ICE_AQ_FLAG_SI);
643 desc.datalen = CPU_TO_LE16(sizeof(struct ice_aqc_dnl_call));
644 cmd->activity_id = CPU_TO_LE16(ICE_AQC_ACT_ID_DNL);
645 cmd->ctx = 0;
646
647 err = ice_aq_send_cmd(hw, &desc, &buf,
648 sizeof(struct ice_aqc_dnl_call), NULL);
649 if (!err)
650 *output = buf.sto.txrx_equa_resp.val;
651
652 return err;
653 }
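/* Usage sketch (illustrative only): read one Rx equalization value for
 * serdes 0. The ICE_AQC_OP_CODE_RX_EQU and ICE_AQC_RX_EQU_PRE1 names below
 * are assumed placeholders for the real opcode/parameter constants defined
 * in ice_adminq_cmd.h.
 *
 *	int val, err;
 *
 *	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
 *					  ICE_AQC_OP_CODE_RX_EQU, 0, &val);
 *	if (!err)
 *		ice_debug(hw, ICE_DBG_PHY, "serdes 0 rx pre1 = %d\n", val);
 */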
654
655 #define ice_get_link_status_data_ver(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
656 ICE_GET_LINK_STATUS_DATA_V2 : ICE_GET_LINK_STATUS_DATA_V1)
657
658 /**
659 * ice_get_link_status_datalen
660 * @hw: pointer to the HW struct
661 *
662 * Returns the Get Link Status data length
663 */
664 static u16 ice_get_link_status_datalen(struct ice_hw *hw)
665 {
666 return (ice_get_link_status_data_ver(hw) ==
667 ICE_GET_LINK_STATUS_DATA_V1) ? ICE_GET_LINK_STATUS_DATALEN_V1 :
668 ICE_GET_LINK_STATUS_DATALEN_V2;
669 }
670
671 /**
672 * ice_aq_get_link_info
673 * @pi: port information structure
674 * @ena_lse: enable/disable LinkStatusEvent reporting
675 * @link: pointer to link status structure - optional
676 * @cd: pointer to command details structure or NULL
677 *
678 * Get Link Status (0x607). Returns the link status of the adapter.
679 */
680 int
681 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
682 struct ice_link_status *link, struct ice_sq_cd *cd)
683 {
684 struct ice_aqc_get_link_status_data link_data = { 0 };
685 struct ice_aqc_get_link_status *resp;
686 struct ice_link_status *li_old, *li;
687 struct ice_fc_info *hw_fc_info;
688 bool tx_pause, rx_pause;
689 struct ice_aq_desc desc;
690 struct ice_hw *hw;
691 u16 cmd_flags;
692 int status;
693
694 if (!pi)
695 return ICE_ERR_PARAM;
696 hw = pi->hw;
697
698 li_old = &pi->phy.link_info_old;
699 li = &pi->phy.link_info;
700 hw_fc_info = &pi->fc;
701
702 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
703 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
704 resp = &desc.params.get_link_status;
705 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
706 resp->lport_num = pi->lport;
707
708 status = ice_aq_send_cmd(hw, &desc, &link_data,
709 ice_get_link_status_datalen(hw), cd);
710 if (status)
711 return status;
712
713 /* save off old link status information */
714 *li_old = *li;
715
716 /* update current link status information */
717 li->link_speed = LE16_TO_CPU(link_data.link_speed);
718 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
719 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
720 li->link_info = link_data.link_info;
721 li->link_cfg_err = link_data.link_cfg_err;
722 li->an_info = link_data.an_info;
723 li->ext_info = link_data.ext_info;
724 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
725 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
726 li->topo_media_conflict = link_data.topo_media_conflict;
727 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
728 ICE_AQ_CFG_PACING_TYPE_M);
729
730 /* update fc info */
731 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
732 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
733 if (tx_pause && rx_pause)
734 hw_fc_info->current_mode = ICE_FC_FULL;
735 else if (tx_pause)
736 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
737 else if (rx_pause)
738 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
739 else
740 hw_fc_info->current_mode = ICE_FC_NONE;
741
742 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
743
744 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
745 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
746 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
747 (unsigned long long)li->phy_type_low);
748 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
749 (unsigned long long)li->phy_type_high);
750 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
751 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
752 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
753 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
754 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
755 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
756 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
757 li->max_frame_size);
758 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
759
760 /* save link status information */
761 if (link)
762 *link = *li;
763
764 /* flag cleared so calling functions don't call AQ again */
765 pi->phy.get_link_info = false;
766
767 return 0;
768 }
769
770 /**
771 * ice_fill_tx_timer_and_fc_thresh
772 * @hw: pointer to the HW struct
773 * @cmd: pointer to MAC cfg structure
774 *
775 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
776 * descriptor
777 */
778 static void
779 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
780 struct ice_aqc_set_mac_cfg *cmd)
781 {
782 u16 fc_thres_val, tx_timer_val;
783 u32 val;
784
785 /* We read back the transmit timer and fc threshold value of
786 * LFC. Thus, we will use index =
787 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
788 *
789 * Also, because we are operating on transmit timer and fc
790 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
791 */
792 #define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
793
794 if ((hw)->mac_type == ICE_MAC_E830) {
795 /* Retrieve the transmit timer */
796 val = rd32(hw, E830_PRTMAC_CL01_PAUSE_QUANTA);
797 tx_timer_val = val & E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M;
798 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
799
800 /* Retrieve the fc threshold */
801 val = rd32(hw, E830_PRTMAC_CL01_QUANTA_THRESH);
802 fc_thres_val = val & E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M;
803 } else {
804 /* Retrieve the transmit timer */
805 val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(E800_IDX_OF_LFC));
806 tx_timer_val = val &
807 E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
808 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
809
810 /* Retrieve the fc threshold */
811 val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(E800_IDX_OF_LFC));
812 fc_thres_val = val & E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
813 }
814
815 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
816 }
817
818 /**
819 * ice_aq_set_mac_cfg
820 * @hw: pointer to the HW struct
821 * @max_frame_size: Maximum Frame Size to be supported
822 * @auto_drop: Tell HW to drop packets if TC queue is blocked
823 * @cd: pointer to command details structure or NULL
824 *
825 * Set MAC configuration (0x0603)
826 */
827 int
828 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
829 struct ice_sq_cd *cd)
830 {
831 struct ice_aqc_set_mac_cfg *cmd;
832 struct ice_aq_desc desc;
833
834 cmd = &desc.params.set_mac_cfg;
835
836 if (max_frame_size == 0)
837 return ICE_ERR_PARAM;
838
839 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
840
841 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
842
843 if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
844 cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
845 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
846
847 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
848 }
849
850 /**
851 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
852 * @hw: pointer to the HW struct
853 */
854 int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
855 {
856 struct ice_switch_info *sw;
857 int status;
858
859 hw->switch_info = (struct ice_switch_info *)
860 ice_malloc(hw, sizeof(*hw->switch_info));
861
862 sw = hw->switch_info;
863
864 if (!sw)
865 return ICE_ERR_NO_MEMORY;
866
867 INIT_LIST_HEAD(&sw->vsi_list_map_head);
868 sw->prof_res_bm_init = 0;
869
870 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
871 if (status) {
872 ice_free(hw, hw->switch_info);
873 return status;
874 }
875 return 0;
876 }
877
878 /**
879 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
880 * @hw: pointer to the HW struct
881 * @sw: pointer to switch info struct for which function clears filters
882 */
883 static void
884 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
885 {
886 struct ice_vsi_list_map_info *v_pos_map;
887 struct ice_vsi_list_map_info *v_tmp_map;
888 struct ice_sw_recipe *recps;
889 u8 i;
890
891 if (!sw)
892 return;
893
894 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
895 ice_vsi_list_map_info, list_entry) {
896 LIST_DEL(&v_pos_map->list_entry);
897 ice_free(hw, v_pos_map);
898 }
899 recps = sw->recp_list;
900 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
901 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
902
903 recps[i].root_rid = i;
904 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
905 &recps[i].rg_list, ice_recp_grp_entry,
906 l_entry) {
907 LIST_DEL(&rg_entry->l_entry);
908 ice_free(hw, rg_entry);
909 }
910
911 if (recps[i].adv_rule) {
912 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
913 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
914
915 ice_destroy_lock(&recps[i].filt_rule_lock);
916 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
917 &recps[i].filt_rules,
918 ice_adv_fltr_mgmt_list_entry,
919 list_entry) {
920 LIST_DEL(&lst_itr->list_entry);
921 ice_free(hw, lst_itr->lkups);
922 ice_free(hw, lst_itr);
923 }
924 } else {
925 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
926
927 ice_destroy_lock(&recps[i].filt_rule_lock);
928 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
929 &recps[i].filt_rules,
930 ice_fltr_mgmt_list_entry,
931 list_entry) {
932 LIST_DEL(&lst_itr->list_entry);
933 ice_free(hw, lst_itr);
934 }
935 }
936 if (recps[i].root_buf)
937 ice_free(hw, recps[i].root_buf);
938 }
939 ice_rm_sw_replay_rule_info(hw, sw);
940 ice_free(hw, sw->recp_list);
941 ice_free(hw, sw);
942 }
943
944 /**
945 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
946 * @hw: pointer to the HW struct
947 */
948 void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
949 {
950 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
951 }
952
953 /**
954 * ice_get_itr_intrl_gran
955 * @hw: pointer to the HW struct
956 *
957 * Determines the ITR/INTRL granularities based on the maximum aggregate
958 * bandwidth according to the device's configuration during power-on.
959 */
960 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
961 {
962 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
963 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
964 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
965
966 switch (max_agg_bw) {
967 case ICE_MAX_AGG_BW_200G:
968 case ICE_MAX_AGG_BW_100G:
969 case ICE_MAX_AGG_BW_50G:
970 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
971 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
972 break;
973 case ICE_MAX_AGG_BW_25G:
974 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
975 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
976 break;
977 }
978 }
979
980 /**
981 * ice_print_rollback_msg - print FW rollback message
982 * @hw: pointer to the hardware structure
983 */
984 void ice_print_rollback_msg(struct ice_hw *hw)
985 {
986 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
987 struct ice_orom_info *orom;
988 struct ice_nvm_info *nvm;
989
990 orom = &hw->flash.orom;
991 nvm = &hw->flash.nvm;
992
993 (void)SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
994 nvm->major, nvm->minor, nvm->eetrack, orom->major,
995 orom->build, orom->patch);
996 ice_warn(hw,
997 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
998 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
999 }
1000
1001 /**
1002 * ice_set_umac_shared
1003 * @hw: pointer to the hw struct
1004 *
1005 * Set boolean flag to allow unicast MAC sharing
1006 */
1007 void ice_set_umac_shared(struct ice_hw *hw)
1008 {
1009 hw->umac_shared = true;
1010 }
1011
1012 /**
1013 * ice_init_hw - main hardware initialization routine
1014 * @hw: pointer to the hardware structure
1015 */
1016 int ice_init_hw(struct ice_hw *hw)
1017 {
1018 struct ice_aqc_get_phy_caps_data *pcaps;
1019 u16 mac_buf_len;
1020 void *mac_buf;
1021 int status;
1022
1023 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1024
1025 /* Set MAC type based on DeviceID */
1026 status = ice_set_mac_type(hw);
1027 if (status)
1028 return status;
1029
1030 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
1031 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
1032 PF_FUNC_RID_FUNCTION_NUMBER_S;
1033
1034 status = ice_reset(hw, ICE_RESET_PFR);
1035 if (status)
1036 return status;
1037 ice_get_itr_intrl_gran(hw);
1038
1039 hw->fw_vsi_num = ICE_DFLT_VSI_INVAL;
1040
1041 status = ice_create_all_ctrlq(hw);
1042 if (status)
1043 goto err_unroll_cqinit;
1044
1045 ice_fwlog_set_support_ena(hw);
1046 status = ice_fwlog_set(hw, &hw->fwlog_cfg);
1047 if (status) {
1048 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
1049 status);
1050 } else {
1051 if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
1052 status = ice_fwlog_register(hw);
1053 if (status)
1054 ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
1055 status);
1056 } else {
1057 status = ice_fwlog_unregister(hw);
1058 if (status)
1059 ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
1060 status);
1061 }
1062 }
1063
1064 status = ice_init_nvm(hw);
1065 if (status)
1066 goto err_unroll_cqinit;
1067
1068 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
1069 ice_print_rollback_msg(hw);
1070
1071 if (!hw->skip_clear_pf) {
1072 status = ice_clear_pf_cfg(hw);
1073 if (status)
1074 goto err_unroll_cqinit;
1075 }
1076
1077 ice_clear_pxe_mode(hw);
1078
1079 status = ice_get_caps(hw);
1080 if (status)
1081 goto err_unroll_cqinit;
1082
1083 if (!hw->port_info)
1084 hw->port_info = (struct ice_port_info *)
1085 ice_malloc(hw, sizeof(*hw->port_info));
1086 if (!hw->port_info) {
1087 status = ICE_ERR_NO_MEMORY;
1088 goto err_unroll_cqinit;
1089 }
1090
1091 hw->port_info->loopback_mode = ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NORMAL;
1092
1093 /* set the back pointer to HW */
1094 hw->port_info->hw = hw;
1095
1096 /* Initialize port_info struct with switch configuration data */
1097 status = ice_get_initial_sw_cfg(hw);
1098 if (status)
1099 goto err_unroll_alloc;
1100
1101 hw->evb_veb = true;
1102 /* Query the allocated resources for Tx scheduler */
1103 status = ice_sched_query_res_alloc(hw);
1104 if (status) {
1105 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
1106 goto err_unroll_alloc;
1107 }
1108 ice_sched_get_psm_clk_freq(hw);
1109
1110 /* Initialize port_info struct with scheduler data */
1111 status = ice_sched_init_port(hw->port_info);
1112 if (status)
1113 goto err_unroll_sched;
1114 pcaps = (struct ice_aqc_get_phy_caps_data *)
1115 ice_malloc(hw, sizeof(*pcaps));
1116 if (!pcaps) {
1117 status = ICE_ERR_NO_MEMORY;
1118 goto err_unroll_sched;
1119 }
1120
1121 /* Initialize port_info struct with PHY capabilities */
1122 status = ice_aq_get_phy_caps(hw->port_info, false,
1123 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
1124 ice_free(hw, pcaps);
1125 if (status)
1126 ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
1127 status);
1128
1129 /* Initialize port_info struct with link information */
1130 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1131 if (status)
1132 goto err_unroll_sched;
1133 /* need a valid SW entry point to build a Tx tree */
1134 if (!hw->sw_entry_point_layer) {
1135 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1136 status = ICE_ERR_CFG;
1137 goto err_unroll_sched;
1138 }
1139 INIT_LIST_HEAD(&hw->agg_list);
1140 /* Initialize max burst size */
1141 if (!hw->max_burst_size)
1142 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1143 status = ice_init_fltr_mgmt_struct(hw);
1144 if (status)
1145 goto err_unroll_sched;
1146
1147 /* Get MAC information */
1148
1149 /* A single port can report up to two (LAN and WoL) addresses */
1150 mac_buf = ice_calloc(hw, 2,
1151 sizeof(struct ice_aqc_manage_mac_read_resp));
1152 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
1153
1154 if (!mac_buf) {
1155 status = ICE_ERR_NO_MEMORY;
1156 goto err_unroll_fltr_mgmt_struct;
1157 }
1158
1159 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1160 ice_free(hw, mac_buf);
1161
1162 if (status)
1163 goto err_unroll_fltr_mgmt_struct;
1164
1165 /* enable jumbo frame support at MAC level */
1166 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
1167 NULL);
1168 if (status)
1169 goto err_unroll_fltr_mgmt_struct;
1170
1171 status = ice_init_hw_tbls(hw);
1172 if (status)
1173 goto err_unroll_fltr_mgmt_struct;
1174 ice_init_lock(&hw->tnl_lock);
1175
1176 return 0;
1177
1178 err_unroll_fltr_mgmt_struct:
1179 ice_cleanup_fltr_mgmt_struct(hw);
1180 err_unroll_sched:
1181 ice_sched_cleanup_all(hw);
1182 err_unroll_alloc:
1183 ice_free(hw, hw->port_info);
1184 hw->port_info = NULL;
1185 err_unroll_cqinit:
1186 ice_destroy_all_ctrlq(hw);
1187 return status;
1188 }
1189
1190 /**
1191 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1192 * @hw: pointer to the hardware structure
1193 *
1194 * This should be called only during nominal operation, not as a result of
1195 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
1196 * applicable initializations if it fails for any reason.
1197 */
1198 void ice_deinit_hw(struct ice_hw *hw)
1199 {
1200 ice_cleanup_fltr_mgmt_struct(hw);
1201
1202 ice_sched_cleanup_all(hw);
1203 ice_sched_clear_agg(hw);
1204 ice_free_seg(hw);
1205 ice_free_hw_tbls(hw);
1206 ice_destroy_lock(&hw->tnl_lock);
1207
1208 if (hw->port_info) {
1209 ice_free(hw, hw->port_info);
1210 hw->port_info = NULL;
1211 }
1212
1213 ice_destroy_all_ctrlq(hw);
1214
1215 /* Clear VSI contexts if not already cleared */
1216 ice_clear_all_vsi_ctx(hw);
1217 }
1218
1219 /**
1220 * ice_check_reset - Check to see if a global reset is complete
1221 * @hw: pointer to the hardware structure
1222 */
1223 int ice_check_reset(struct ice_hw *hw)
1224 {
1225 u32 cnt, reg = 0, grst_timeout, uld_mask, reset_wait_cnt;
1226
1227 /* Poll for Device Active state in case a recent CORER, GLOBR,
1228 * or EMPR has occurred. The grst delay value is in 100ms units.
1229 * Add 1sec for outstanding AQ commands that can take a long time.
1230 */
1231 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1232 GLGEN_RSTCTL_GRSTDEL_S) + 10;
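/* For example, a GRSTDEL value of 30 (3 seconds in 100 ms units) yields
 * 30 + 10 = 40 polls of 100 ms below, i.e. a 4 second timeout overall.
 */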
1233
1234 for (cnt = 0; cnt < grst_timeout; cnt++) {
1235 ice_msec_delay(100, true);
1236 reg = rd32(hw, GLGEN_RSTAT);
1237 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
1238 break;
1239 }
1240
1241 if (cnt == grst_timeout) {
1242 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1243 return ICE_ERR_RESET_FAILED;
1244 }
1245
1246 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
1247 GLNVM_ULD_PCIER_DONE_1_M |\
1248 GLNVM_ULD_CORER_DONE_M |\
1249 GLNVM_ULD_GLOBR_DONE_M |\
1250 GLNVM_ULD_POR_DONE_M |\
1251 GLNVM_ULD_POR_DONE_1_M |\
1252 GLNVM_ULD_PCIER_DONE_2_M)
1253
1254 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
1255 GLNVM_ULD_PE_DONE_M : 0);
1256
1257 reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
1258
1259 /* Device is Active; check Global Reset processes are done */
1260 for (cnt = 0; cnt < reset_wait_cnt; cnt++) {
1261 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1262 if (reg == uld_mask) {
1263 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1264 break;
1265 }
1266 ice_msec_delay(10, true);
1267 }
1268
1269 if (cnt == reset_wait_cnt) {
1270 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1271 reg);
1272 return ICE_ERR_RESET_FAILED;
1273 }
1274
1275 return 0;
1276 }
1277
1278 /**
1279 * ice_pf_reset - Reset the PF
1280 * @hw: pointer to the hardware structure
1281 *
1282 * If a global reset has been triggered, this function checks
1283 * for its completion and then issues the PF reset
1284 */
1285 static int ice_pf_reset(struct ice_hw *hw)
1286 {
1287 u32 cnt, reg, reset_wait_cnt, cfg_lock_timeout;
1288
1289 /* If at function entry a global reset was already in progress, i.e.
1290 * state is not 'device active' or any of the reset done bits are not
1291 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
1292 * global reset is done.
1293 */
1294 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1295 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1296 /* poll on global reset currently in progress until done */
1297 if (ice_check_reset(hw))
1298 return ICE_ERR_RESET_FAILED;
1299
1300 return 0;
1301 }
1302
1303 /* Reset the PF */
1304 reg = rd32(hw, PFGEN_CTRL);
1305
1306 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1307
1308 /* Wait for the PFR to complete. The wait time is the global config lock
1309 * timeout plus the PFR timeout which will account for a possible reset
1310 * that is occurring during a download package operation.
1311 */
1312 reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
1313 cfg_lock_timeout = ICE_GLOBAL_CFG_LOCK_TIMEOUT;
1314
1315 for (cnt = 0; cnt < cfg_lock_timeout + reset_wait_cnt; cnt++) {
1316 reg = rd32(hw, PFGEN_CTRL);
1317 if (!(reg & PFGEN_CTRL_PFSWR_M))
1318 break;
1319
1320 ice_msec_delay(1, true);
1321 }
1322
1323 if (cnt == cfg_lock_timeout + reset_wait_cnt) {
1324 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1325 return ICE_ERR_RESET_FAILED;
1326 }
1327
1328 return 0;
1329 }
1330
1331 /**
1332 * ice_reset - Perform different types of reset
1333 * @hw: pointer to the hardware structure
1334 * @req: reset request
1335 *
1336 * This function triggers a reset as specified by the req parameter.
1337 *
1338 * Note:
1339 * If anything other than a PF reset is triggered, PXE mode is restored.
1340 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1341 * interface has been restored in the rebuild flow.
1342 */
1343 int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1344 {
1345 u32 val = 0;
1346
1347 switch (req) {
1348 case ICE_RESET_PFR:
1349 return ice_pf_reset(hw);
1350 case ICE_RESET_CORER:
1351 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1352 val = GLGEN_RTRIG_CORER_M;
1353 break;
1354 case ICE_RESET_GLOBR:
1355 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1356 val = GLGEN_RTRIG_GLOBR_M;
1357 break;
1358 default:
1359 return ICE_ERR_PARAM;
1360 }
1361
1362 val |= rd32(hw, GLGEN_RTRIG);
1363 wr32(hw, GLGEN_RTRIG, val);
1364 ice_flush(hw);
1365
1366 /* wait for the FW to be ready */
1367 return ice_check_reset(hw);
1368 }
1369
1370 /**
1371 * ice_copy_rxq_ctx_to_hw
1372 * @hw: pointer to the hardware structure
1373 * @ice_rxq_ctx: pointer to the rxq context
1374 * @rxq_index: the index of the Rx queue
1375 *
1376 * Copies rxq context from dense structure to HW register space
1377 */
1378 static int
1379 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1380 {
1381 u8 i;
1382
1383 if (!ice_rxq_ctx)
1384 return ICE_ERR_BAD_PTR;
1385
1386 if (rxq_index > QRX_CTRL_MAX_INDEX)
1387 return ICE_ERR_PARAM;
1388
1389 /* Copy each dword separately to HW */
1390 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1391 wr32(hw, QRX_CONTEXT(i, rxq_index),
1392 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1393
1394 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1395 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1396 }
1397
1398 return 0;
1399 }
1400
1401 /**
1402 * ice_copy_rxq_ctx_from_hw - Copy rxq context register from HW
1403 * @hw: pointer to the hardware structure
1404 * @ice_rxq_ctx: pointer to the rxq context
1405 * @rxq_index: the index of the Rx queue
1406 *
1407 * Copies rxq context from HW register space to dense structure
1408 */
1409 static int
1410 ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1411 {
1412 u8 i;
1413
1414 if (!ice_rxq_ctx)
1415 return ICE_ERR_BAD_PTR;
1416
1417 if (rxq_index > QRX_CTRL_MAX_INDEX)
1418 return ICE_ERR_PARAM;
1419
1420 /* Copy each dword separately from HW */
1421 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1422 u32 *ctx = (u32 *)(ice_rxq_ctx + (i * sizeof(u32)));
1423
1424 *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
1425
1426 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
1427 }
1428
1429 return 0;
1430 }
1431
1432 /* LAN Rx Queue Context */
1433 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1434 /* Field Width LSB */
1435 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1436 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1437 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1438 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1439 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1440 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1441 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1442 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1443 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1444 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1445 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1446 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1447 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1448 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1449 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1450 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1451 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1452 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1453 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1454 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1455 { 0 }
1456 };
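/* Each ICE_CTX_STORE entry above describes how one ice_rlan_ctx field is
 * packed into the dense context image written by ice_copy_rxq_ctx_to_hw():
 * the field occupies <width> bits starting at absolute bit <LSB>. For
 * example, qlen (width 13, LSB 89) occupies bits 89-101, straddling bytes
 * 11 and 12 of the image; ice_set_ctx()/ice_get_ctx() handle this packing
 * and unpacking between the sparse struct and the dense buffer.
 */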
1457
1458 /**
1459 * ice_write_rxq_ctx
1460 * @hw: pointer to the hardware structure
1461 * @rlan_ctx: pointer to the rxq context
1462 * @rxq_index: the index of the Rx queue
1463 *
1464 * Converts rxq context from sparse to dense structure and then writes
1465 * it to HW register space and enables the hardware to prefetch descriptors
1466 * instead of only fetching them on demand
1467 */
1468 int
1469 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1470 u32 rxq_index)
1471 {
1472 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1473
1474 if (!rlan_ctx)
1475 return ICE_ERR_BAD_PTR;
1476
1477 rlan_ctx->prefena = 1;
1478
1479 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1480 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1481 }
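/* Usage sketch (illustrative values only): fill the sparse context and let
 * ice_write_rxq_ctx() pack and program it. Field names come from the
 * ice_rlan_ctx_info table above; the shift amounts and sizes shown are
 * example assumptions, not taken from this file.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;      (ring address in 128-byte units)
 *	rlan_ctx.qlen = 1024;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;    (data buffer size in 128-byte units)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */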
1482
1483 /**
1484 * ice_read_rxq_ctx - Read rxq context from HW
1485 * @hw: pointer to the hardware structure
1486 * @rlan_ctx: pointer to the rxq context
1487 * @rxq_index: the index of the Rx queue
1488 *
1489 * Read rxq context from HW register space and then converts it from dense
1490 * structure to sparse
1491 */
1492 int
1493 ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1494 u32 rxq_index)
1495 {
1496 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1497 int status;
1498
1499 if (!rlan_ctx)
1500 return ICE_ERR_BAD_PTR;
1501
1502 status = ice_copy_rxq_ctx_from_hw(hw, ctx_buf, rxq_index);
1503 if (status)
1504 return status;
1505
1506 return ice_get_ctx(ctx_buf, (u8 *)rlan_ctx, ice_rlan_ctx_info);
1507 }
1508
1509 /**
1510 * ice_clear_rxq_ctx
1511 * @hw: pointer to the hardware structure
1512 * @rxq_index: the index of the Rx queue to clear
1513 *
1514 * Clears rxq context in HW register space
1515 */
1516 int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1517 {
1518 u8 i;
1519
1520 if (rxq_index > QRX_CTRL_MAX_INDEX)
1521 return ICE_ERR_PARAM;
1522
1523 /* Clear each dword register separately */
1524 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1525 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1526
1527 return 0;
1528 }
1529
1530 /* LAN Tx Queue Context used to set the Tx config by ice_aqc_opc_add_txqs,
1531 * bits 0-175 are valid
1532 */
1533 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1534 /* Field Width LSB */
1535 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1536 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1537 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1538 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1539 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1540 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1541 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1542 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1543 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1544 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1545 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1546 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1547 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1548 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1549 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1550 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1551 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1552 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1553 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1554 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1555 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1556 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1557 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1558 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1559 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1560 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1561 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1562 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1563 { 0 }
1564 };
1565
1566 /**
1567 * ice_copy_tx_cmpltnq_ctx_to_hw
1568 * @hw: pointer to the hardware structure
1569 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1570 * @tx_cmpltnq_index: the index of the completion queue
1571 *
1572 * Copies Tx completion queue context from dense structure to HW register space
1573 */
1574 static int
1575 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1576 u32 tx_cmpltnq_index)
1577 {
1578 u8 i;
1579
1580 if (!ice_tx_cmpltnq_ctx)
1581 return ICE_ERR_BAD_PTR;
1582
1583 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1584 return ICE_ERR_PARAM;
1585
1586 /* Copy each dword separately to HW */
1587 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1588 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1589 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1590
1591 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1592 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1593 }
1594
1595 return 0;
1596 }
1597
1598 /* LAN Tx Completion Queue Context */
1599 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1600 /* Field Width LSB */
1601 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1602 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1603 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1604 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1605 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1606 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1607 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1608 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1609 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1610 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1611 { 0 }
1612 };
1613
1614 /**
1615 * ice_write_tx_cmpltnq_ctx
1616 * @hw: pointer to the hardware structure
1617 * @tx_cmpltnq_ctx: pointer to the completion queue context
1618 * @tx_cmpltnq_index: the index of the completion queue
1619 *
1620 * Converts completion queue context from sparse to dense structure and then
1621 * writes it to HW register space
1622 */
1623 int
1624 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1625 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1626 u32 tx_cmpltnq_index)
1627 {
1628 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1629
1630 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1631 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1632 }
1633
1634 /**
1635 * ice_clear_tx_cmpltnq_ctx
1636 * @hw: pointer to the hardware structure
1637 * @tx_cmpltnq_index: the index of the completion queue to clear
1638 *
1639 * Clears Tx completion queue context in HW register space
1640 */
1641 int
1642 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1643 {
1644 u8 i;
1645
1646 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1647 return ICE_ERR_PARAM;
1648
1649 /* Clear each dword register separately */
1650 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1651 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1652
1653 return 0;
1654 }
1655
1656 /**
1657 * ice_copy_tx_drbell_q_ctx_to_hw
1658 * @hw: pointer to the hardware structure
1659 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1660 * @tx_drbell_q_index: the index of the doorbell queue
1661 *
1662 * Copies doorbell queue context from dense structure to HW register space
1663 */
1664 static int
1665 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1666 u32 tx_drbell_q_index)
1667 {
1668 u8 i;
1669
1670 if (!ice_tx_drbell_q_ctx)
1671 return ICE_ERR_BAD_PTR;
1672
1673 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1674 return ICE_ERR_PARAM;
1675
1676 /* Copy each dword separately to HW */
1677 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1678 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1679 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1680
1681 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1682 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1683 }
1684
1685 return 0;
1686 }
1687
1688 /* LAN Tx Doorbell Queue Context info */
1689 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1690 /* Field Width LSB */
1691 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1692 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1693 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1694 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1695 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1696 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1697 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1698 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1699 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1700 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1701 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1702 { 0 }
1703 };
1704
1705 /**
1706 * ice_write_tx_drbell_q_ctx
1707 * @hw: pointer to the hardware structure
1708 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1709 * @tx_drbell_q_index: the index of the doorbell queue
1710 *
1711 * Converts doorbell queue context from sparse to dense structure and then
1712 * writes it to HW register space
1713 */
1714 int
1715 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1716 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1717 u32 tx_drbell_q_index)
1718 {
1719 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1720
1721 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1722 ice_tx_drbell_q_ctx_info);
1723 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1724 }
1725
1726 /**
1727 * ice_clear_tx_drbell_q_ctx
1728 * @hw: pointer to the hardware structure
1729 * @tx_drbell_q_index: the index of the doorbell queue to clear
1730 *
1731 * Clears doorbell queue context in HW register space
1732 */
1733 int
1734 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1735 {
1736 u8 i;
1737
1738 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1739 return ICE_ERR_PARAM;
1740
1741 /* Clear each dword register separately */
1742 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1743 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1744
1745 return 0;
1746 }
1747
1748 /* Sideband Queue command wrappers */
1749
1750 /**
1751 * ice_get_sbq - returns the right control queue to use for sideband
1752 * @hw: pointer to the hardware structure
1753 */
1754 static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
1755 {
1756 if (!ice_is_generic_mac(hw))
1757 return &hw->adminq;
1758 return &hw->sbq;
1759 }
1760
1761 /**
1762 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1763 * @hw: pointer to the HW struct
1764 * @desc: descriptor describing the command
1765 * @buf: buffer to use for indirect commands (NULL for direct commands)
1766 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1767 * @cd: pointer to command details structure
1768 */
1769 static int
1770 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1771 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1772 {
1773 return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
1774 buf, buf_size, cd);
1775 }
1776
1777 /**
1778 * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
1779 * but do not lock sq_lock
1780 * @hw: pointer to the HW struct
1781 * @desc: descriptor describing the command
1782 * @buf: buffer to use for indirect commands (NULL for direct commands)
1783 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1784 * @cd: pointer to command details structure
1785 */
1786 static int
1787 ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1788 void *buf, u16 buf_size, struct ice_sq_cd *cd)
1789 {
1790 return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
1791 (struct ice_aq_desc *)desc, buf,
1792 buf_size, cd);
1793 }
1794
1795 /**
1796 * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
1797 * @hw: pointer to the HW struct
1798 * @in: message info to be filled in descriptor
1799 * @flag: flag to fill desc structure
1800 * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has
1801 * already been locked at a higher level
1802 */
1803 int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in,
1804 u16 flag, bool lock)
1805 {
1806 struct ice_sbq_cmd_desc desc = {0};
1807 struct ice_sbq_msg_req msg = {0};
1808 u16 msg_len;
1809 int status;
1810
1811 msg_len = sizeof(msg);
1812
1813 msg.dest_dev = in->dest_dev;
1814 msg.opcode = in->opcode;
1815 msg.flags = ICE_SBQ_MSG_FLAGS;
1816 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
1817 msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
1818 msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
1819
1820 if (in->opcode)
1821 msg.data = CPU_TO_LE32(in->data);
1822 else
1823 /* data read comes back in completion, so shorten the struct by
1824 * sizeof(msg.data)
1825 */
1826 msg_len -= sizeof(msg.data);
1827
1828 desc.flags = CPU_TO_LE16(flag);
1829 desc.opcode = CPU_TO_LE16(ice_sbq_opc_neigh_dev_req);
1830 desc.param0.cmd_len = CPU_TO_LE16(msg_len);
1831 if (lock)
1832 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1833 else
1834 status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
1835 NULL);
1836 if (!status && !in->opcode)
1837 in->data = LE32_TO_CPU
1838 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1839 return status;
1840 }
1841
1842 /**
1843 * ice_sbq_rw_reg - Fill Sideband Queue command
1844 * @hw: pointer to the HW struct
1845 * @in: message info to be filled in descriptor
1846 * @flag: flag to fill desc structure
1847 */
1848 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag)
1849 {
1850 return ice_sbq_rw_reg_lp(hw, in, flag, true);
1851 }
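
/* Illustrative usage sketch (not part of the driver): a sideband register
 * read. As ice_sbq_rw_reg_lp() above shows, an opcode of zero is treated as
 * a read and the result is returned in in->data; a non-zero opcode sends
 * in->data as a write. target_dev_id, reg and desc_flags are placeholders;
 * real callers use the destination device IDs and descriptor flags defined
 * for their target, which are not part of this file.
 *
 *	struct ice_sbq_msg_input in = { 0 };
 *	u32 reg_val;
 *
 *	in.dest_dev = target_dev_id;		// assumed destination device ID
 *	in.opcode = 0;				// 0 == read
 *	in.msg_addr_low = reg & 0xFFFF;
 *	in.msg_addr_high = reg >> 16;
 *	if (!ice_sbq_rw_reg(hw, &in, desc_flags))
 *		reg_val = in.data;		// read data from the completion
 */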
1852
1853 /**
1854 * ice_sbq_lock - Lock the sideband queue's sq_lock
1855 * @hw: pointer to the HW struct
1856 */
1857 void ice_sbq_lock(struct ice_hw *hw)
1858 {
1859 ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
1860 }
1861
1862 /**
1863 * ice_sbq_unlock - Unlock the sideband queue's sq_lock
1864 * @hw: pointer to the HW struct
1865 */
1866 void ice_sbq_unlock(struct ice_hw *hw)
1867 {
1868 ice_release_lock(&ice_get_sbq(hw)->sq_lock);
1869 }
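
/* Illustrative usage sketch (not part of the driver): batching several
 * sideband accesses under one lock acquisition. The caller takes sq_lock
 * once via ice_sbq_lock(), issues the messages with lock = false, and
 * releases the lock afterwards. Message setup is elided here; see the
 * ice_sbq_rw_reg() sketch above. desc_flags is a placeholder.
 *
 *	ice_sbq_lock(hw);
 *	status = ice_sbq_rw_reg_lp(hw, &first_msg, desc_flags, false);
 *	if (!status)
 *		status = ice_sbq_rw_reg_lp(hw, &second_msg, desc_flags, false);
 *	ice_sbq_unlock(hw);
 */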
1870
1871 /* FW Admin Queue command wrappers */
1872
1873 /**
1874 * ice_should_retry_sq_send_cmd
1875 * @opcode: AQ opcode
1876 *
1877 * Decide if we should retry the send command routine for the ATQ, depending
1878 * on the opcode.
1879 */
1880 static bool ice_should_retry_sq_send_cmd(u16 opcode)
1881 {
1882 switch (opcode) {
1883 case ice_aqc_opc_dnl_get_status:
1884 case ice_aqc_opc_dnl_run:
1885 case ice_aqc_opc_dnl_call:
1886 case ice_aqc_opc_dnl_read_sto:
1887 case ice_aqc_opc_dnl_write_sto:
1888 case ice_aqc_opc_dnl_set_breakpoints:
1889 case ice_aqc_opc_dnl_read_log:
1890 case ice_aqc_opc_get_link_topo:
1891 case ice_aqc_opc_done_alt_write:
1892 case ice_aqc_opc_lldp_stop:
1893 case ice_aqc_opc_lldp_start:
1894 case ice_aqc_opc_lldp_filter_ctrl:
1895 return true;
1896 }
1897
1898 return false;
1899 }
1900
1901 /**
1902 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1903 * @hw: pointer to the HW struct
1904 * @cq: pointer to the specific Control queue
1905 * @desc: prefilled descriptor describing the command
1906 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1907 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1908 * @cd: pointer to command details structure
1909 *
1910 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
1911 * Queue if the EBUSY AQ error is returned.
1912 */
1913 static int
1914 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1915 struct ice_aq_desc *desc, void *buf, u16 buf_size,
1916 struct ice_sq_cd *cd)
1917 {
1918 struct ice_aq_desc desc_cpy;
1919 bool is_cmd_for_retry;
1920 u8 *buf_cpy = NULL;
1921 u8 idx = 0;
1922 u16 opcode;
1923 int status;
1924
1925 opcode = LE16_TO_CPU(desc->opcode);
1926 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
1927 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);
1928
1929 if (is_cmd_for_retry) {
1930 if (buf) {
1931 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1932 if (!buf_cpy)
1933 return ICE_ERR_NO_MEMORY;
1934 }
1935
1936 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
1937 ICE_NONDMA_TO_NONDMA);
1938 }
1939
1940 do {
1941 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1942
1943 if (!is_cmd_for_retry || !status ||
1944 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1945 break;
1946
1947 if (buf_cpy)
1948 ice_memcpy(buf, buf_cpy, buf_size,
1949 ICE_NONDMA_TO_NONDMA);
1950
1951 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
1952 ICE_NONDMA_TO_NONDMA);
1953
1954 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);
1955
1956 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
1957
1958 if (buf_cpy)
1959 ice_free(hw, buf_cpy);
1960
1961 return status;
1962 }
1963
1964 /**
1965 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1966 * @hw: pointer to the HW struct
1967 * @desc: descriptor describing the command
1968 * @buf: buffer to use for indirect commands (NULL for direct commands)
1969 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1970 * @cd: pointer to command details structure
1971 *
1972 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1973 */
1974 int
1975 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1976 u16 buf_size, struct ice_sq_cd *cd)
1977 {
1978 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1979 }
1980
1981 /**
1982 * ice_aq_get_fw_ver
1983 * @hw: pointer to the HW struct
1984 * @cd: pointer to command details structure or NULL
1985 *
1986 * Get the firmware version (0x0001) from the admin queue commands
1987 */
1988 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1989 {
1990 struct ice_aqc_get_ver *resp;
1991 struct ice_aq_desc desc;
1992 int status;
1993
1994 resp = &desc.params.get_ver;
1995
1996 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1997
1998 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1999
2000 if (!status) {
2001 hw->fw_branch = resp->fw_branch;
2002 hw->fw_maj_ver = resp->fw_major;
2003 hw->fw_min_ver = resp->fw_minor;
2004 hw->fw_patch = resp->fw_patch;
2005 hw->fw_build = LE32_TO_CPU(resp->fw_build);
2006 hw->api_branch = resp->api_branch;
2007 hw->api_maj_ver = resp->api_major;
2008 hw->api_min_ver = resp->api_minor;
2009 hw->api_patch = resp->api_patch;
2010 }
2011
2012 return status;
2013 }
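
/* Illustrative usage sketch (not part of the driver): after a successful
 * ice_aq_get_fw_ver() call the parsed firmware and API versions are cached
 * in the hw struct and can be read back directly:
 *
 *	if (!ice_aq_get_fw_ver(hw, NULL))
 *		ice_debug(hw, ICE_DBG_INIT, "FW %u.%u.%u, API %u.%u\n",
 *			  hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
 *			  hw->api_maj_ver, hw->api_min_ver);
 */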
2014
2015 /**
2016 * ice_aq_send_driver_ver
2017 * @hw: pointer to the HW struct
2018 * @dv: driver's major, minor version
2019 * @cd: pointer to command details structure or NULL
2020 *
2021 * Send the driver version (0x0002) to the firmware
2022 */
2023 int
2024 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
2025 struct ice_sq_cd *cd)
2026 {
2027 struct ice_aqc_driver_ver *cmd;
2028 struct ice_aq_desc desc;
2029 u16 len;
2030
2031 cmd = &desc.params.driver_ver;
2032
2033 if (!dv)
2034 return ICE_ERR_PARAM;
2035
2036 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
2037
2038 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2039 cmd->major_ver = dv->major_ver;
2040 cmd->minor_ver = dv->minor_ver;
2041 cmd->build_ver = dv->build_ver;
2042 cmd->subbuild_ver = dv->subbuild_ver;
2043
2044 len = 0;
2045 while (len < sizeof(dv->driver_string) &&
2046 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
2047 len++;
2048
2049 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
2050 }
2051
2052 /**
2053 * ice_aq_q_shutdown
2054 * @hw: pointer to the HW struct
2055 * @unloading: is the driver unloading itself
2056 *
2057 * Tell the Firmware that we're shutting down the AdminQ and whether
2058 * or not the driver is unloading as well (0x0003).
2059 */
2060 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
2061 {
2062 struct ice_aqc_q_shutdown *cmd;
2063 struct ice_aq_desc desc;
2064
2065 cmd = &desc.params.q_shutdown;
2066
2067 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
2068
2069 if (unloading)
2070 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
2071
2072 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2073 }
2074
2075 /**
2076 * ice_aq_req_res
2077 * @hw: pointer to the HW struct
2078 * @res: resource ID
2079 * @access: access type
2080 * @sdp_number: resource number
2081 * @timeout: the maximum time in ms that the driver may hold the resource
2082 * @cd: pointer to command details structure or NULL
2083 *
2084 * Requests common resource using the admin queue commands (0x0008).
2085 * When attempting to acquire the Global Config Lock, the driver can
2086 * learn of three states:
2087 * 1) 0 - acquired lock, and can perform download package
2088 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
2089 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
2090 * successfully downloaded the package; the driver does
2091 * not have to download the package and can continue
2092 * loading
2093 *
2094 * Note that if the caller is in an acquire lock, perform action, release lock
2095 * phase of operation, it is possible that the FW may detect a timeout and issue
2096 * a CORER. In this case, the driver will receive a CORER interrupt and will
2097 * have to determine its cause. The calling thread that is handling this flow
2098 * will likely get an error propagated back to it indicating the Download
2099 * Package, Update Package or the Release Resource AQ commands timed out.
2100 */
2101 static int
2102 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2103 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
2104 struct ice_sq_cd *cd)
2105 {
2106 struct ice_aqc_req_res *cmd_resp;
2107 struct ice_aq_desc desc;
2108 int status;
2109
2110 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2111
2112 cmd_resp = &desc.params.res_owner;
2113
2114 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
2115
2116 cmd_resp->res_id = CPU_TO_LE16(res);
2117 cmd_resp->access_type = CPU_TO_LE16(access);
2118 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
2119 cmd_resp->timeout = CPU_TO_LE32(*timeout);
2120 *timeout = 0;
2121
2122 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2123
2124 /* The completion specifies the maximum time in ms that the driver
2125 * may hold the resource in the Timeout field.
2126 */
2127
2128 /* Global config lock response utilizes an additional status field.
2129 *
2130 * If the Global config lock resource is held by some other driver, the
2131 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
2132 * and the timeout field indicates the maximum time the current owner
2133 * of the resource has to free it.
2134 */
2135 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
2136 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
2137 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2138 return 0;
2139 } else if (LE16_TO_CPU(cmd_resp->status) ==
2140 ICE_AQ_RES_GLBL_IN_PROG) {
2141 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2142 return ICE_ERR_AQ_ERROR;
2143 } else if (LE16_TO_CPU(cmd_resp->status) ==
2144 ICE_AQ_RES_GLBL_DONE) {
2145 return ICE_ERR_AQ_NO_WORK;
2146 }
2147
2148 /* invalid FW response, force a timeout immediately */
2149 *timeout = 0;
2150 return ICE_ERR_AQ_ERROR;
2151 }
2152
2153 /* If the resource is held by some other driver, the command completes
2154 * with a busy return value and the timeout field indicates the maximum
2155 * time the current owner of the resource has to free it.
2156 */
2157 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
2158 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2159
2160 return status;
2161 }
2162
2163 /**
2164 * ice_aq_release_res
2165 * @hw: pointer to the HW struct
2166 * @res: resource ID
2167 * @sdp_number: resource number
2168 * @cd: pointer to command details structure or NULL
2169 *
2170 * release common resource using the admin queue commands (0x0009)
2171 */
2172 static int
2173 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
2174 struct ice_sq_cd *cd)
2175 {
2176 struct ice_aqc_req_res *cmd;
2177 struct ice_aq_desc desc;
2178
2179 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2180
2181 cmd = &desc.params.res_owner;
2182
2183 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
2184
2185 cmd->res_id = CPU_TO_LE16(res);
2186 cmd->res_number = CPU_TO_LE32(sdp_number);
2187
2188 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2189 }
2190
2191 /**
2192 * ice_acquire_res
2193 * @hw: pointer to the HW structure
2194 * @res: resource ID
2195 * @access: access type (read or write)
2196 * @timeout: timeout in milliseconds
2197 *
2198 * This function will attempt to acquire the ownership of a resource.
2199 */
2200 int
2201 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2202 enum ice_aq_res_access_type access, u32 timeout)
2203 {
2204 #define ICE_RES_POLLING_DELAY_MS 10
2205 u32 delay = ICE_RES_POLLING_DELAY_MS;
2206 u32 time_left = timeout;
2207 int status;
2208
2209 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2210
2211 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2212
2213 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
2214 * previously acquired the resource and performed any necessary updates;
2215 * in this case the caller does not obtain the resource and has no
2216 * further work to do.
2217 */
2218 if (status == ICE_ERR_AQ_NO_WORK)
2219 goto ice_acquire_res_exit;
2220
2221 if (status)
2222 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2223
2224 /* If necessary, poll until the current lock owner times out */
2225 timeout = time_left;
2226 while (status && timeout && time_left) {
2227 ice_msec_delay(delay, true);
2228 timeout = (timeout > delay) ? timeout - delay : 0;
2229 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2230
2231 if (status == ICE_ERR_AQ_NO_WORK)
2232 /* lock free, but no work to do */
2233 break;
2234
2235 if (!status)
2236 /* lock acquired */
2237 break;
2238 }
2239 if (status && status != ICE_ERR_AQ_NO_WORK)
2240 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2241
2242 ice_acquire_res_exit:
2243 if (status == ICE_ERR_AQ_NO_WORK) {
2244 if (access == ICE_RES_WRITE)
2245 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2246 else
2247 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2248 }
2249 return status;
2250 }
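
/* Illustrative usage sketch (not part of the driver): the expected
 * acquire/use/release pattern around a shared resource. For the Global
 * Config Lock, ICE_ERR_AQ_NO_WORK is not a failure - it means another
 * driver already downloaded the package, so the caller simply skips its
 * own download. The timeout value below is a placeholder.
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
 *				 ICE_RES_WRITE, 3000);
 *	if (!status) {
 *		// lock owned: perform the package download here
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	} else if (status != ICE_ERR_AQ_NO_WORK) {
 *		// real failure: the lock could not be acquired in time
 *	}
 */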
2251
2252 /**
2253 * ice_release_res
2254 * @hw: pointer to the HW structure
2255 * @res: resource ID
2256 *
2257 * This function will release a resource using the proper Admin Command.
2258 */
2259 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2260 {
2261 u32 total_delay = 0;
2262 int status;
2263
2264 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2265
2266 status = ice_aq_release_res(hw, res, 0, NULL);
2267
2268 /* there are some rare cases when trying to release the resource
2269 * results in an admin queue timeout, so handle them correctly
2270 */
2271 while ((status == ICE_ERR_AQ_TIMEOUT) &&
2272 (total_delay < hw->adminq.sq_cmd_timeout)) {
2273 ice_msec_delay(1, true);
2274 status = ice_aq_release_res(hw, res, 0, NULL);
2275 total_delay++;
2276 }
2277 }
2278
2279 /**
2280 * ice_aq_alloc_free_res - command to allocate/free resources
2281 * @hw: pointer to the HW struct
2282 * @num_entries: number of resource entries in buffer
2283 * @buf: Indirect buffer to hold data parameters and response
2284 * @buf_size: size of buffer for indirect commands
2285 * @opc: pass in the command opcode
2286 * @cd: pointer to command details structure or NULL
2287 *
2288 * Helper function to allocate/free resources using the admin queue commands
2289 */
2290 int
2291 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2292 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
2293 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2294 {
2295 struct ice_aqc_alloc_free_res_cmd *cmd;
2296 struct ice_aq_desc desc;
2297
2298 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2299
2300 cmd = &desc.params.sw_res_ctrl;
2301
2302 if (!buf)
2303 return ICE_ERR_PARAM;
2304
2305 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
2306 return ICE_ERR_PARAM;
2307
2308 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2309
2310 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2311
2312 cmd->num_entries = CPU_TO_LE16(num_entries);
2313
2314 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2315 }
2316
2317 /**
2318 * ice_alloc_hw_res - allocate resource
2319 * @hw: pointer to the HW struct
2320 * @type: type of resource
2321 * @num: number of resources to allocate
2322 * @btm: allocate from bottom
2323 * @res: pointer to array that will receive the resources
2324 */
2325 int
2326 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2327 {
2328 struct ice_aqc_alloc_free_res_elem *buf;
2329 u16 buf_len;
2330 int status;
2331
2332 buf_len = ice_struct_size(buf, elem, num);
2333 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2334 if (!buf)
2335 return ICE_ERR_NO_MEMORY;
2336
2337 /* Prepare buffer to allocate resource. */
2338 buf->num_elems = CPU_TO_LE16(num);
2339 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2340 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
2341 if (btm)
2342 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2343
2344 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2345 ice_aqc_opc_alloc_res, NULL);
2346 if (status)
2347 goto ice_alloc_res_exit;
2348
2349 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2350 ICE_NONDMA_TO_NONDMA);
2351
2352 ice_alloc_res_exit:
2353 ice_free(hw, buf);
2354 return status;
2355 }
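
/* Illustrative usage sketch (not part of the driver): allocating a small
 * batch of HW resources of one type and freeing them again. res_type is a
 * placeholder; real callers pass one of the resource type values defined
 * alongside the admin queue commands.
 *
 *	u16 res_ids[4];
 *
 *	if (!ice_alloc_hw_res(hw, res_type, 4, false, res_ids)) {
 *		// res_ids[0..3] now hold the allocated resource IDs
 *		ice_free_hw_res(hw, res_type, 4, res_ids);
 *	}
 */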
2356
2357 /**
2358 * ice_free_hw_res - free allocated HW resource
2359 * @hw: pointer to the HW struct
2360 * @type: type of resource to free
2361 * @num: number of resources
2362 * @res: pointer to array that contains the resources to free
2363 */
2364 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2365 {
2366 struct ice_aqc_alloc_free_res_elem *buf;
2367 u16 buf_len;
2368 int status;
2369
2370 buf_len = ice_struct_size(buf, elem, num);
2371 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2372 if (!buf)
2373 return ICE_ERR_NO_MEMORY;
2374
2375 /* Prepare buffer to free resource. */
2376 buf->num_elems = CPU_TO_LE16(num);
2377 buf->res_type = CPU_TO_LE16(type);
2378 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2379 ICE_NONDMA_TO_NONDMA);
2380
2381 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2382 ice_aqc_opc_free_res, NULL);
2383 if (status)
2384 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2385
2386 ice_free(hw, buf);
2387 return status;
2388 }
2389
2390 /**
2391 * ice_get_num_per_func - determine number of resources per PF
2392 * @hw: pointer to the HW structure
2393 * @max: value to be evenly split between each PF
2394 *
2395 * Determine the number of valid functions by going through the bitmap returned
2396 * from parsing capabilities and use this to calculate the number of resources
2397 * per PF based on the max value passed in.
2398 */
2399 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2400 {
2401 u8 funcs;
2402
2403 #define ICE_CAPS_VALID_FUNCS_M 0xFF
2404 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2405 ICE_CAPS_VALID_FUNCS_M);
2406
2407 if (!funcs)
2408 return 0;
2409
2410 return max / funcs;
2411 }
2412
2413 /**
2414 * ice_print_led_caps - print LED capabilities
2415 * @hw: pointer to the ice_hw instance
2416 * @caps: pointer to common caps instance
2417 * @prefix: string to prefix when printing
2418 * @dbg: set to indicate debug print
2419 */
2420 static void
2421 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2422 char const *prefix, bool dbg)
2423 {
2424 u8 i;
2425
2426 if (dbg)
2427 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %u\n", prefix,
2428 caps->led_pin_num);
2429 else
2430 ice_info(hw, "%s: led_pin_num = %u\n", prefix,
2431 caps->led_pin_num);
2432
2433 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
2434 if (!caps->led[i])
2435 continue;
2436
2437 if (dbg)
2438 ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = %u\n",
2439 prefix, i, caps->led[i]);
2440 else
2441 ice_info(hw, "%s: led[%u] = %u\n", prefix, i,
2442 caps->led[i]);
2443 }
2444 }
2445
2446 /**
2447 * ice_print_sdp_caps - print SDP capabilities
2448 * @hw: pointer to the ice_hw instance
2449 * @caps: pointer to common caps instance
2450 * @prefix: string to prefix when printing
2451 * @dbg: set to indicate debug print
2452 */
2453 static void
2454 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2455 char const *prefix, bool dbg)
2456 {
2457 u8 i;
2458
2459 if (dbg)
2460 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %u\n", prefix,
2461 caps->sdp_pin_num);
2462 else
2463 ice_info(hw, "%s: sdp_pin_num = %u\n", prefix,
2464 caps->sdp_pin_num);
2465
2466 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
2467 if (!caps->sdp[i])
2468 continue;
2469
2470 if (dbg)
2471 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = %u\n",
2472 prefix, i, caps->sdp[i]);
2473 else
2474 ice_info(hw, "%s: sdp[%u] = %u\n", prefix,
2475 i, caps->sdp[i]);
2476 }
2477 }
2478
2479 /**
2480 * ice_parse_common_caps - parse common device/function capabilities
2481 * @hw: pointer to the HW struct
2482 * @caps: pointer to common capabilities structure
2483 * @elem: the capability element to parse
2484 * @prefix: message prefix for tracing capabilities
2485 *
2486 * Given a capability element, extract relevant details into the common
2487 * capability structure.
2488 *
2489 * Returns: true if the capability matches one of the common capability ids,
2490 * false otherwise.
2491 */
2492 static bool
2493 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2494 struct ice_aqc_list_caps_elem *elem, const char *prefix)
2495 {
2496 u32 logical_id = LE32_TO_CPU(elem->logical_id);
2497 u32 phys_id = LE32_TO_CPU(elem->phys_id);
2498 u32 number = LE32_TO_CPU(elem->number);
2499 u16 cap = LE16_TO_CPU(elem->cap);
2500 bool found = true;
2501
2502 switch (cap) {
2503 case ICE_AQC_CAPS_SWITCHING_MODE:
2504 caps->switching_mode = number;
2505 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %u\n", prefix,
2506 caps->switching_mode);
2507 break;
2508 case ICE_AQC_CAPS_MANAGEABILITY_MODE:
2509 caps->mgmt_mode = number;
2510 caps->mgmt_protocols_mctp = logical_id;
2511 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %u\n", prefix,
2512 caps->mgmt_mode);
2513 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %u\n", prefix,
2514 caps->mgmt_protocols_mctp);
2515 break;
2516 case ICE_AQC_CAPS_OS2BMC:
2517 caps->os2bmc = number;
2518 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %u\n", prefix, caps->os2bmc);
2519 break;
2520 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2521 caps->valid_functions = number;
2522 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = 0x%x\n", prefix,
2523 caps->valid_functions);
2524 break;
2525 case ICE_AQC_CAPS_SRIOV:
2526 caps->sr_iov_1_1 = (number == 1);
2527 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %u\n", prefix,
2528 caps->sr_iov_1_1);
2529 break;
2530 case ICE_AQC_CAPS_VMDQ:
2531 caps->vmdq = (number == 1);
2532 ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %u\n", prefix, caps->vmdq);
2533 break;
2534 case ICE_AQC_CAPS_802_1QBG:
2535 caps->evb_802_1_qbg = (number == 1);
2536 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %u\n", prefix, number);
2537 break;
2538 case ICE_AQC_CAPS_802_1BR:
2539 caps->evb_802_1_qbh = (number == 1);
2540 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %u\n", prefix, number);
2541 break;
2542 case ICE_AQC_CAPS_DCB:
2543 caps->dcb = (number == 1);
2544 caps->active_tc_bitmap = logical_id;
2545 caps->maxtc = phys_id;
2546 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %u\n", prefix, caps->dcb);
2547 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = 0x%x\n", prefix,
2548 caps->active_tc_bitmap);
2549 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %u\n", prefix, caps->maxtc);
2550 break;
2551 case ICE_AQC_CAPS_ISCSI:
2552 caps->iscsi = (number == 1);
2553 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %u\n", prefix, caps->iscsi);
2554 break;
2555 case ICE_AQC_CAPS_RSS:
2556 caps->rss_table_size = number;
2557 caps->rss_table_entry_width = logical_id;
2558 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %u\n", prefix,
2559 caps->rss_table_size);
2560 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %u\n", prefix,
2561 caps->rss_table_entry_width);
2562 break;
2563 case ICE_AQC_CAPS_RXQS:
2564 caps->num_rxq = number;
2565 caps->rxq_first_id = phys_id;
2566 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %u\n", prefix,
2567 caps->num_rxq);
2568 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %u\n", prefix,
2569 caps->rxq_first_id);
2570 break;
2571 case ICE_AQC_CAPS_TXQS:
2572 caps->num_txq = number;
2573 caps->txq_first_id = phys_id;
2574 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %u\n", prefix,
2575 caps->num_txq);
2576 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %u\n", prefix,
2577 caps->txq_first_id);
2578 break;
2579 case ICE_AQC_CAPS_MSIX:
2580 caps->num_msix_vectors = number;
2581 caps->msix_vector_first_id = phys_id;
2582 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %u\n", prefix,
2583 caps->num_msix_vectors);
2584 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %u\n", prefix,
2585 caps->msix_vector_first_id);
2586 break;
2587 case ICE_AQC_CAPS_NVM_MGMT:
2588 caps->sec_rev_disabled =
2589 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
2590 true : false;
2591 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2592 caps->sec_rev_disabled);
2593 caps->update_disabled =
2594 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
2595 true : false;
2596 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2597 caps->update_disabled);
2598 caps->nvm_unified_update =
2599 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2600 true : false;
2601 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2602 caps->nvm_unified_update);
2603 caps->netlist_auth =
2604 (number & ICE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
2605 true : false;
2606 ice_debug(hw, ICE_DBG_INIT, "%s: netlist_auth = %d\n", prefix,
2607 caps->netlist_auth);
2608 break;
2609 case ICE_AQC_CAPS_CEM:
2610 caps->mgmt_cem = (number == 1);
2611 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %u\n", prefix,
2612 caps->mgmt_cem);
2613 break;
2614 case ICE_AQC_CAPS_IWARP:
2615 caps->iwarp = (number == 1);
2616 ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %u\n", prefix, caps->iwarp);
2617 break;
2618 case ICE_AQC_CAPS_ROCEV2_LAG:
2619 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
2620 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
2621 prefix, caps->roce_lag);
2622 break;
2623 case ICE_AQC_CAPS_LED:
2624 if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2625 caps->led[phys_id] = true;
2626 caps->led_pin_num++;
2627 ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = 1\n", prefix, phys_id);
2628 }
2629 break;
2630 case ICE_AQC_CAPS_SDP:
2631 if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2632 caps->sdp[phys_id] = true;
2633 caps->sdp_pin_num++;
2634 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = 1\n", prefix, phys_id);
2635 }
2636 break;
2637 case ICE_AQC_CAPS_WR_CSR_PROT:
2638 caps->wr_csr_prot = number;
2639 caps->wr_csr_prot |= (u64)logical_id << 32;
2640 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2641 (unsigned long long)caps->wr_csr_prot);
2642 break;
2643 case ICE_AQC_CAPS_WOL_PROXY:
2644 caps->num_wol_proxy_fltr = number;
2645 caps->wol_proxy_vsi_seid = logical_id;
2646 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2647 caps->acpi_prog_mthd = !!(phys_id &
2648 ICE_ACPI_PROG_MTHD_M);
2649 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2650 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %u\n", prefix,
2651 caps->num_wol_proxy_fltr);
2652 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %u\n", prefix,
2653 caps->wol_proxy_vsi_seid);
2654 ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %u\n",
2655 prefix, caps->apm_wol_support);
2656 break;
2657 case ICE_AQC_CAPS_MAX_MTU:
2658 caps->max_mtu = number;
2659 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %u\n",
2660 prefix, caps->max_mtu);
2661 break;
2662 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
2663 caps->pcie_reset_avoidance = (number > 0);
2664 ice_debug(hw, ICE_DBG_INIT,
2665 "%s: pcie_reset_avoidance = %d\n", prefix,
2666 caps->pcie_reset_avoidance);
2667 break;
2668 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
2669 caps->reset_restrict_support = (number == 1);
2670 ice_debug(hw, ICE_DBG_INIT,
2671 "%s: reset_restrict_support = %d\n", prefix,
2672 caps->reset_restrict_support);
2673 break;
2674 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
2675 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
2676 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
2677 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
2678 {
2679 u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
2680
2681 caps->ext_topo_dev_img_ver_high[index] = number;
2682 caps->ext_topo_dev_img_ver_low[index] = logical_id;
2683 caps->ext_topo_dev_img_part_num[index] =
2684 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
2685 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
2686 caps->ext_topo_dev_img_load_en[index] =
2687 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
2688 caps->ext_topo_dev_img_prog_en[index] =
2689 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
2690 caps->ext_topo_dev_img_ver_schema[index] =
2691 (phys_id & ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA) != 0;
2692 ice_debug(hw, ICE_DBG_INIT,
2693 "%s: ext_topo_dev_img_ver_high[%d] = %u\n",
2694 prefix, index,
2695 caps->ext_topo_dev_img_ver_high[index]);
2696 ice_debug(hw, ICE_DBG_INIT,
2697 "%s: ext_topo_dev_img_ver_low[%d] = %u\n",
2698 prefix, index,
2699 caps->ext_topo_dev_img_ver_low[index]);
2700 ice_debug(hw, ICE_DBG_INIT,
2701 "%s: ext_topo_dev_img_part_num[%d] = %u\n",
2702 prefix, index,
2703 caps->ext_topo_dev_img_part_num[index]);
2704 ice_debug(hw, ICE_DBG_INIT,
2705 "%s: ext_topo_dev_img_load_en[%d] = %d\n",
2706 prefix, index,
2707 caps->ext_topo_dev_img_load_en[index]);
2708 ice_debug(hw, ICE_DBG_INIT,
2709 "%s: ext_topo_dev_img_prog_en[%d] = %d\n",
2710 prefix, index,
2711 caps->ext_topo_dev_img_prog_en[index]);
2712 ice_debug(hw, ICE_DBG_INIT,
2713 "%s: ext_topo_dev_img_ver_schema[%d] = %d\n",
2714 prefix, index,
2715 caps->ext_topo_dev_img_ver_schema[index]);
2716 break;
2717 }
2718 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
2719 caps->tx_sched_topo_comp_mode_en = (number == 1);
2720 break;
2721 case ICE_AQC_CAPS_DYN_FLATTENING:
2722 caps->dyn_flattening_en = (number == 1);
2723 ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
2724 prefix, caps->dyn_flattening_en);
2725 break;
2726 case ICE_AQC_CAPS_OROM_RECOVERY_UPDATE:
2727 caps->orom_recovery_update = (number == 1);
2728 ice_debug(hw, ICE_DBG_INIT, "%s: orom_recovery_update = %d\n",
2729 prefix, caps->orom_recovery_update);
2730 break;
2731 case ICE_AQC_CAPS_NEXT_CLUSTER_ID:
2732 caps->next_cluster_id_support = (number == 1);
2733 ice_debug(hw, ICE_DBG_INIT, "%s: next_cluster_id_support = %d\n",
2734 prefix, caps->next_cluster_id_support);
2735 break;
2736 default:
2737 /* Not one of the recognized common capabilities */
2738 found = false;
2739 }
2740
2741 return found;
2742 }
2743
2744 /**
2745 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2746 * @hw: pointer to the HW structure
2747 * @caps: pointer to capabilities structure to fix
2748 *
2749 * Re-calculate the capabilities that are dependent on the number of physical
2750 * ports; i.e. some features are not supported or function differently on
2751 * devices with more than 4 ports.
2752 */
2753 static void
2754 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2755 {
2756 /* This assumes device capabilities are always scanned before function
2757 * capabilities during the initialization flow.
2758 */
2759 if (hw->dev_caps.num_funcs > 4) {
2760 /* Max 4 TCs per port */
2761 caps->maxtc = 4;
2762 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %u (based on #ports)\n",
2763 caps->maxtc);
2764 if (caps->iwarp) {
2765 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2766 caps->iwarp = 0;
2767 }
2768
2769 /* print message only when processing device capabilities
2770 * during initialization.
2771 */
2772 if (caps == &hw->dev_caps.common_cap)
2773 ice_info(hw, "RDMA functionality is not available with the current device configuration.\n");
2774 }
2775 }
2776
2777 /**
2778 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2779 * @hw: pointer to the HW struct
2780 * @func_p: pointer to function capabilities structure
2781 * @cap: pointer to the capability element to parse
2782 *
2783 * Extract function capabilities for ICE_AQC_CAPS_VF.
2784 */
2785 static void
2786 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2787 struct ice_aqc_list_caps_elem *cap)
2788 {
2789 u32 number = LE32_TO_CPU(cap->number);
2790 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2791
2792 func_p->num_allocd_vfs = number;
2793 func_p->vf_base_id = logical_id;
2794 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %u\n",
2795 func_p->num_allocd_vfs);
2796 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %u\n",
2797 func_p->vf_base_id);
2798 }
2799
2800 /**
2801 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2802 * @hw: pointer to the HW struct
2803 * @func_p: pointer to function capabilities structure
2804 * @cap: pointer to the capability element to parse
2805 *
2806 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2807 */
2808 static void
2809 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2810 struct ice_aqc_list_caps_elem *cap)
2811 {
2812 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2813 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %u\n",
2814 LE32_TO_CPU(cap->number));
2815 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %u\n",
2816 func_p->guar_num_vsi);
2817 }
2818
2819 /**
2820 * ice_parse_func_caps - Parse function capabilities
2821 * @hw: pointer to the HW struct
2822 * @func_p: pointer to function capabilities structure
2823 * @buf: buffer containing the function capability records
2824 * @cap_count: the number of capabilities
2825 *
2826 * Helper function to parse function (0x000A) capabilities list. For
2827 * capabilities shared between device and function, this relies on
2828 * ice_parse_common_caps.
2829 *
2830 * Loop through the list of provided capabilities and extract the relevant
2831 * data into the function capabilities structure.
2832 */
2833 static void
2834 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2835 void *buf, u32 cap_count)
2836 {
2837 struct ice_aqc_list_caps_elem *cap_resp;
2838 u32 i;
2839
2840 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2841
2842 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2843
2844 for (i = 0; i < cap_count; i++) {
2845 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2846 bool found;
2847
2848 found = ice_parse_common_caps(hw, &func_p->common_cap,
2849 &cap_resp[i], "func caps");
2850
2851 switch (cap) {
2852 case ICE_AQC_CAPS_VF:
2853 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2854 break;
2855 case ICE_AQC_CAPS_VSI:
2856 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2857 break;
2858 default:
2859 /* Don't list common capabilities as unknown */
2860 if (!found)
2861 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2862 i, cap);
2863 break;
2864 }
2865 }
2866
2867 ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2868 ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2869
2870 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2871 }
2872
2873 /**
2874 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2875 * @hw: pointer to the HW struct
2876 * @dev_p: pointer to device capabilities structure
2877 * @cap: capability element to parse
2878 *
2879 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2880 */
2881 static void
2882 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2883 struct ice_aqc_list_caps_elem *cap)
2884 {
2885 u32 number = LE32_TO_CPU(cap->number);
2886
2887 dev_p->num_funcs = ice_hweight32(number);
2888 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %u\n",
2889 dev_p->num_funcs);
2890
2891 }
2892
2893 /**
2894 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2895 * @hw: pointer to the HW struct
2896 * @dev_p: pointer to device capabilities structure
2897 * @cap: capability element to parse
2898 *
2899 * Parse ICE_AQC_CAPS_VF for device capabilities.
2900 */
2901 static void
2902 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2903 struct ice_aqc_list_caps_elem *cap)
2904 {
2905 u32 number = LE32_TO_CPU(cap->number);
2906
2907 dev_p->num_vfs_exposed = number;
2908 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %u\n",
2909 dev_p->num_vfs_exposed);
2910 }
2911
2912 /**
2913 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2914 * @hw: pointer to the HW struct
2915 * @dev_p: pointer to device capabilities structure
2916 * @cap: capability element to parse
2917 *
2918 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2919 */
2920 static void
2921 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2922 struct ice_aqc_list_caps_elem *cap)
2923 {
2924 u32 number = LE32_TO_CPU(cap->number);
2925
2926 dev_p->num_vsi_allocd_to_host = number;
2927 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %u\n",
2928 dev_p->num_vsi_allocd_to_host);
2929 }
2930
2931 /**
2932 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
2933 * @hw: pointer to the HW struct
2934 * @dev_p: pointer to device capabilities structure
2935 * @cap: capability element to parse
2936 *
2937 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
2938 */
2939 static void
2940 ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2941 struct ice_aqc_list_caps_elem *cap)
2942 {
2943 dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
2944 dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
2945
2946 ice_info(hw, "PF is configured in %s mode with IP instance ID %u\n",
2947 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
2948 "primary" : "secondary", dev_p->nac_topo.id);
2949
2950 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
2951 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
2952 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
2953 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
2954 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %u\n",
2955 dev_p->nac_topo.id);
2956 }
2957
2958 /**
2959 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
2960 * @hw: pointer to the HW struct
2961 * @dev_p: pointer to device capabilities structure
2962 * @cap: capability element to parse
2963 *
2964 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading
2965 * enabled sensors.
2966 */
2967 static void
2968 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2969 struct ice_aqc_list_caps_elem *cap)
2970 {
2971 dev_p->supported_sensors = LE32_TO_CPU(cap->number);
2972
2973 ice_debug(hw, ICE_DBG_INIT,
2974 "dev caps: supported sensors (bitmap) = 0x%x\n",
2975 dev_p->supported_sensors);
2976 }
2977
2978 /**
2979 * ice_parse_dev_caps - Parse device capabilities
2980 * @hw: pointer to the HW struct
2981 * @dev_p: pointer to device capabilities structure
2982 * @buf: buffer containing the device capability records
2983 * @cap_count: the number of capabilities
2984 *
2985 * Helper function to parse device (0x000B) capabilities list. For
2986 * capabilities shared between device and function, this relies on
2987 * ice_parse_common_caps.
2988 *
2989 * Loop through the list of provided capabilities and extract the relevant
2990 * data into the device capabilities structure.
2991 */
2992 static void
2993 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2994 void *buf, u32 cap_count)
2995 {
2996 struct ice_aqc_list_caps_elem *cap_resp;
2997 u32 i;
2998
2999 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
3000
3001 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
3002
3003 for (i = 0; i < cap_count; i++) {
3004 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
3005 bool found;
3006
3007 found = ice_parse_common_caps(hw, &dev_p->common_cap,
3008 &cap_resp[i], "dev caps");
3009
3010 switch (cap) {
3011 case ICE_AQC_CAPS_VALID_FUNCTIONS:
3012 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
3013 break;
3014 case ICE_AQC_CAPS_VF:
3015 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
3016 break;
3017 case ICE_AQC_CAPS_VSI:
3018 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
3019 break;
3020 case ICE_AQC_CAPS_NAC_TOPOLOGY:
3021 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
3022 break;
3023 case ICE_AQC_CAPS_SENSOR_READING:
3024 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
3025 break;
3026 default:
3027 /* Don't list common capabilities as unknown */
3028 if (!found)
3029 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%u]: 0x%x\n",
3030 i, cap);
3031 break;
3032 }
3033 }
3034
3035 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
3036 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
3037
3038 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
3039 }
3040
3041 /**
3042 * ice_aq_get_netlist_node
3043 * @hw: pointer to the hw struct
3044 * @cmd: get_link_topo AQ structure
3045 * @node_part_number: output node part number if node found
3046 * @node_handle: output node handle parameter if node found
3047 */
3048 int
3049 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
3050 u8 *node_part_number, u16 *node_handle)
3051 {
3052 struct ice_aq_desc desc;
3053
3054 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
3055 desc.params.get_link_topo = *cmd;
3056
3057 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
3058 return ICE_ERR_NOT_SUPPORTED;
3059
3060 if (node_handle)
3061 *node_handle =
3062 LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
3063 if (node_part_number)
3064 *node_part_number = desc.params.get_link_topo.node_part_num;
3065
3066 return 0;
3067 }
3068
3069 #define MAX_NETLIST_SIZE 10
3070 /**
3071 * ice_find_netlist_node
3072 * @hw: pointer to the hw struct
3073 * @node_type_ctx: type of netlist node to look for
3074 * @node_part_number: node part number to look for
3075 * @node_handle: output parameter if node found - optional
3076 *
3077 * Scan the netlist for a node handle of the given node type and part number.
3078 *
3079 * If node_handle is non-NULL it will be modified on function exit. It is only
3080 * valid if the function returns zero, and should be ignored on any non-zero
3081 * return value.
3082 *
3083 * Returns: 0 if the node is found, ICE_ERR_DOES_NOT_EXIST if no handle was
3084 * found, and an error code on failure to access the AQ.
3085 */
3086 int
3087 ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
3088 u16 *node_handle)
3089 {
3090 u8 idx;
3091
3092 for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
3093 struct ice_aqc_get_link_topo cmd;
3094 u8 rec_node_part_number;
3095 int status;
3096
3097 memset(&cmd, 0, sizeof(cmd));
3098
3099 cmd.addr.topo_params.node_type_ctx =
3100 (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
3101 cmd.addr.topo_params.index = idx;
3102
3103 status = ice_aq_get_netlist_node(hw, &cmd,
3104 &rec_node_part_number,
3105 node_handle);
3106 if (status)
3107 return status;
3108
3109 if (rec_node_part_number == node_part_number)
3110 return 0;
3111 }
3112
3113 return ICE_ERR_DOES_NOT_EXIST;
3114 }
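
/* Illustrative usage sketch (not part of the driver): scanning the netlist
 * for a node of a given type and part number. node_type and part_number are
 * placeholders for the node type/part number definitions used by real
 * callers, which are not part of this file.
 *
 *	u16 handle;
 *	int status;
 *
 *	status = ice_find_netlist_node(hw, node_type, part_number, &handle);
 *	if (!status) {
 *		// handle is valid and identifies the matching node
 *	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
 *		// no matching node in the first MAX_NETLIST_SIZE entries
 *	}
 */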
3115
3116 /**
3117 * ice_aq_list_caps - query function/device capabilities
3118 * @hw: pointer to the HW struct
3119 * @buf: a buffer to hold the capabilities
3120 * @buf_size: size of the buffer
3121 * @cap_count: if not NULL, set to the number of capabilities reported
3122 * @opc: capabilities type to discover, device or function
3123 * @cd: pointer to command details structure or NULL
3124 *
3125 * Get the function (0x000A) or device (0x000B) capabilities description from
3126 * firmware and store it in the buffer.
3127 *
3128 * If the cap_count pointer is not NULL, then it is set to the number of
3129 * capabilities firmware will report. Note that if the buffer size is too
3130 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
3131 * cap_count will still be updated in this case. It is recommended that the
3132 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
3133 * firmware could return) to avoid this.
3134 */
3135 static int
3136 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
3137 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
3138 {
3139 struct ice_aqc_list_caps *cmd;
3140 struct ice_aq_desc desc;
3141 int status;
3142
3143 cmd = &desc.params.get_cap;
3144
3145 if (opc != ice_aqc_opc_list_func_caps &&
3146 opc != ice_aqc_opc_list_dev_caps)
3147 return ICE_ERR_PARAM;
3148
3149 ice_fill_dflt_direct_cmd_desc(&desc, opc);
3150 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3151
3152 if (cap_count)
3153 *cap_count = LE32_TO_CPU(cmd->count);
3154
3155 return status;
3156 }
3157
3158 /**
3159 * ice_discover_dev_caps - Read and extract device capabilities
3160 * @hw: pointer to the hardware structure
3161 * @dev_caps: pointer to device capabilities structure
3162 *
3163 * Read the device capabilities and extract them into the dev_caps structure
3164 * for later use.
3165 */
3166 static int
3167 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
3168 {
3169 u32 cap_count = 0;
3170 void *cbuf;
3171 int status;
3172
3173 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
3174 if (!cbuf)
3175 return ICE_ERR_NO_MEMORY;
3176
3177 /* Although the driver doesn't know the number of capabilities the
3178 * device will return, we can simply send a 4KB buffer, the maximum
3179 * possible size that firmware can return.
3180 */
3181 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
3182
3183 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
3184 ice_aqc_opc_list_dev_caps, NULL);
3185 if (!status)
3186 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
3187 ice_free(hw, cbuf);
3188
3189 return status;
3190 }
3191
3192 /**
3193 * ice_discover_func_caps - Read and extract function capabilities
3194 * @hw: pointer to the hardware structure
3195 * @func_caps: pointer to function capabilities structure
3196 *
3197 * Read the function capabilities and extract them into the func_caps structure
3198 * for later use.
3199 */
3200 static int
3201 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
3202 {
3203 u32 cap_count = 0;
3204 void *cbuf;
3205 int status;
3206
3207 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
3208 if (!cbuf)
3209 return ICE_ERR_NO_MEMORY;
3210
3211 /* Although the driver doesn't know the number of capabilities the
3212 * device will return, we can simply send a 4KB buffer, the maximum
3213 * possible size that firmware can return.
3214 */
3215 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
3216
3217 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
3218 ice_aqc_opc_list_func_caps, NULL);
3219 if (!status)
3220 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
3221 ice_free(hw, cbuf);
3222
3223 return status;
3224 }
3225
3226 /**
3227 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
3228 * @hw: pointer to the hardware structure
3229 */
3230 void ice_set_safe_mode_caps(struct ice_hw *hw)
3231 {
3232 struct ice_hw_func_caps *func_caps = &hw->func_caps;
3233 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
3234 struct ice_hw_common_caps cached_caps;
3235 u32 num_funcs;
3236
3237 /* cache some func_caps values that should be restored after memset */
3238 cached_caps = func_caps->common_cap;
3239
3240 /* unset func capabilities */
3241 memset(func_caps, 0, sizeof(*func_caps));
3242
3243 #define ICE_RESTORE_FUNC_CAP(name) \
3244 func_caps->common_cap.name = cached_caps.name
3245
3246 /* restore cached values */
3247 ICE_RESTORE_FUNC_CAP(valid_functions);
3248 ICE_RESTORE_FUNC_CAP(txq_first_id);
3249 ICE_RESTORE_FUNC_CAP(rxq_first_id);
3250 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
3251 ICE_RESTORE_FUNC_CAP(max_mtu);
3252 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
3253
3254 /* one Tx and one Rx queue in safe mode */
3255 func_caps->common_cap.num_rxq = 1;
3256 func_caps->common_cap.num_txq = 1;
3257
3258 /* two MSIX vectors, one for traffic and one for misc causes */
3259 func_caps->common_cap.num_msix_vectors = 2;
3260 func_caps->guar_num_vsi = 1;
3261
3262 /* cache some dev_caps values that should be restored after memset */
3263 cached_caps = dev_caps->common_cap;
3264 num_funcs = dev_caps->num_funcs;
3265
3266 /* unset dev capabilities */
3267 memset(dev_caps, 0, sizeof(*dev_caps));
3268
3269 #define ICE_RESTORE_DEV_CAP(name) \
3270 dev_caps->common_cap.name = cached_caps.name
3271
3272 /* restore cached values */
3273 ICE_RESTORE_DEV_CAP(valid_functions);
3274 ICE_RESTORE_DEV_CAP(txq_first_id);
3275 ICE_RESTORE_DEV_CAP(rxq_first_id);
3276 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
3277 ICE_RESTORE_DEV_CAP(max_mtu);
3278 ICE_RESTORE_DEV_CAP(nvm_unified_update);
3279 dev_caps->num_funcs = num_funcs;
3280
3281 /* one Tx and one Rx queue per function in safe mode */
3282 dev_caps->common_cap.num_rxq = num_funcs;
3283 dev_caps->common_cap.num_txq = num_funcs;
3284
3285 /* two MSIX vectors per function */
3286 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
3287 }
3288
3289 /**
3290 * ice_get_caps - get info about the HW
3291 * @hw: pointer to the hardware structure
3292 */
3293 int ice_get_caps(struct ice_hw *hw)
3294 {
3295 int status;
3296
3297 status = ice_discover_dev_caps(hw, &hw->dev_caps);
3298 if (status)
3299 return status;
3300
3301 return ice_discover_func_caps(hw, &hw->func_caps);
3302 }
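
/* Usage sketch (illustrative only, not part of the driver flow): a typical
 * init path discovers capabilities once and, when the device must run with
 * reduced functionality (e.g. the DDP package could not be loaded), falls
 * back to the safe-mode capabilities. "safe_mode" is a hypothetical
 * caller-side condition.
 *
 *	status = ice_get_caps(hw);
 *	if (status)
 *		return status;
 *	if (safe_mode)
 *		ice_set_safe_mode_caps(hw);
 *	num_txq = hw->func_caps.common_cap.num_txq;
 */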
3303
3304 /**
3305 * ice_aq_manage_mac_write - manage MAC address write command
3306 * @hw: pointer to the HW struct
3307 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
3308 * @flags: flags to control write behavior
3309 * @cd: pointer to command details structure or NULL
3310 *
3311 * This function is used to write a MAC address to the NVM (0x0108).
3312 */
3313 int
3314 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
3315 struct ice_sq_cd *cd)
3316 {
3317 struct ice_aqc_manage_mac_write *cmd;
3318 struct ice_aq_desc desc;
3319
3320 cmd = &desc.params.mac_write;
3321 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
3322
3323 cmd->flags = flags;
3324 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
3325
3326 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3327 }
3328
3329 /**
3330 * ice_aq_clear_pxe_mode
3331 * @hw: pointer to the HW struct
3332 *
3333 * Tell the firmware that the driver is taking over from PXE (0x0110).
3334 */
3335 static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
3336 {
3337 struct ice_aq_desc desc;
3338
3339 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
3340 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
3341
3342 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3343 }
3344
3345 /**
3346 * ice_clear_pxe_mode - clear pxe operations mode
3347 * @hw: pointer to the HW struct
3348 *
3349 * Make sure all PXE mode settings are cleared, including things
3350 * like descriptor fetch/write-back mode.
3351 */
3352 void ice_clear_pxe_mode(struct ice_hw *hw)
3353 {
3354 if (ice_check_sq_alive(hw, &hw->adminq))
3355 ice_aq_clear_pxe_mode(hw);
3356 }
3357
3358 /**
3359 * ice_aq_set_port_params - set physical port parameters
3360 * @pi: pointer to the port info struct
3361 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
3362 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
3363 * @pad_short_pac: if set, transmitted packets smaller than 60 bytes are padded
3364 * @double_vlan: if set, double VLAN is enabled
3365 * @cd: pointer to command details structure or NULL
3366 *
3367 * Set Physical port parameters (0x0203)
3368 */
3369 int
3370 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
3371 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
3372 struct ice_sq_cd *cd)
3373 {
3374 struct ice_aqc_set_port_params *cmd;
3375 struct ice_hw *hw = pi->hw;
3376 struct ice_aq_desc desc;
3377 u16 cmd_flags = 0;
3378
3379 cmd = &desc.params.set_port_params;
3380
3381 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
3382 cmd->lb_mode = pi->loopback_mode |
3383 ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_VALID;
3384 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
3385 if (save_bad_pac)
3386 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
3387 if (pad_short_pac)
3388 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
3389 if (double_vlan)
3390 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
3391 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
3392
3393 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3394 }
3395
3396 /**
3397 * ice_is_100m_speed_supported
3398 * @hw: pointer to the HW struct
3399 *
3400 * returns true if 100M speeds are supported by the device,
3401 * false otherwise.
3402 */
3403 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3404 {
3405 switch (hw->device_id) {
3406 case ICE_DEV_ID_E822C_SGMII:
3407 case ICE_DEV_ID_E822L_SGMII:
3408 case ICE_DEV_ID_E823L_1GBE:
3409 case ICE_DEV_ID_E823C_SGMII:
3410 return true;
3411 default:
3412 return false;
3413 }
3414 }
3415
3416 /**
3417 * ice_get_link_speed_based_on_phy_type - returns link speed
3418 * @phy_type_low: lower part of phy_type
3419 * @phy_type_high: higher part of phy_type
3420 *
3421 * This helper function will convert an entry in PHY type structure
3422 * [phy_type_low, phy_type_high] to its corresponding link speed.
3423 * Note: In the [phy_type_low, phy_type_high] structure, exactly one bit
3424 * should be set, as this function converts a single PHY type to its
3425 * speed.
3426 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is returned.
3427 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN is also returned.
3428 */
3429 static u16
3430 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
3431 {
3432 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3433 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3434
3435 switch (phy_type_low) {
3436 case ICE_PHY_TYPE_LOW_100BASE_TX:
3437 case ICE_PHY_TYPE_LOW_100M_SGMII:
3438 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
3439 break;
3440 case ICE_PHY_TYPE_LOW_1000BASE_T:
3441 case ICE_PHY_TYPE_LOW_1000BASE_SX:
3442 case ICE_PHY_TYPE_LOW_1000BASE_LX:
3443 case ICE_PHY_TYPE_LOW_1000BASE_KX:
3444 case ICE_PHY_TYPE_LOW_1G_SGMII:
3445 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
3446 break;
3447 case ICE_PHY_TYPE_LOW_2500BASE_T:
3448 case ICE_PHY_TYPE_LOW_2500BASE_X:
3449 case ICE_PHY_TYPE_LOW_2500BASE_KX:
3450 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
3451 break;
3452 case ICE_PHY_TYPE_LOW_5GBASE_T:
3453 case ICE_PHY_TYPE_LOW_5GBASE_KR:
3454 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
3455 break;
3456 case ICE_PHY_TYPE_LOW_10GBASE_T:
3457 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
3458 case ICE_PHY_TYPE_LOW_10GBASE_SR:
3459 case ICE_PHY_TYPE_LOW_10GBASE_LR:
3460 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
3461 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
3462 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
3463 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
3464 break;
3465 case ICE_PHY_TYPE_LOW_25GBASE_T:
3466 case ICE_PHY_TYPE_LOW_25GBASE_CR:
3467 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
3468 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
3469 case ICE_PHY_TYPE_LOW_25GBASE_SR:
3470 case ICE_PHY_TYPE_LOW_25GBASE_LR:
3471 case ICE_PHY_TYPE_LOW_25GBASE_KR:
3472 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
3473 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
3474 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
3475 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
3476 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
3477 break;
3478 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
3479 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
3480 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
3481 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
3482 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
3483 case ICE_PHY_TYPE_LOW_40G_XLAUI:
3484 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
3485 break;
3486 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
3487 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
3488 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
3489 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
3490 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
3491 case ICE_PHY_TYPE_LOW_50G_LAUI2:
3492 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
3493 case ICE_PHY_TYPE_LOW_50G_AUI2:
3494 case ICE_PHY_TYPE_LOW_50GBASE_CP:
3495 case ICE_PHY_TYPE_LOW_50GBASE_SR:
3496 case ICE_PHY_TYPE_LOW_50GBASE_FR:
3497 case ICE_PHY_TYPE_LOW_50GBASE_LR:
3498 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3499 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3500 case ICE_PHY_TYPE_LOW_50G_AUI1:
3501 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3502 break;
3503 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3504 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3505 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3506 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3507 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3508 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3509 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3510 case ICE_PHY_TYPE_LOW_100G_AUI4:
3511 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3512 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3513 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3514 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3515 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3516 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3517 break;
3518 default:
3519 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3520 break;
3521 }
3522
3523 switch (phy_type_high) {
3524 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3525 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3526 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3527 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3528 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3529 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3530 break;
3531 case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
3532 case ICE_PHY_TYPE_HIGH_200G_SR4:
3533 case ICE_PHY_TYPE_HIGH_200G_FR4:
3534 case ICE_PHY_TYPE_HIGH_200G_LR4:
3535 case ICE_PHY_TYPE_HIGH_200G_DR4:
3536 case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
3537 case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
3538 case ICE_PHY_TYPE_HIGH_200G_AUI4:
3539 case ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC:
3540 case ICE_PHY_TYPE_HIGH_200G_AUI8:
3541 speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
3542 break;
3543 default:
3544 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3545 break;
3546 }
3547
3548 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3549 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3550 return ICE_AQ_LINK_SPEED_UNKNOWN;
3551 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3552 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3553 return ICE_AQ_LINK_SPEED_UNKNOWN;
3554 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3555 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3556 return speed_phy_type_low;
3557 else
3558 return speed_phy_type_high;
3559 }
3560
3561 /**
3562 * ice_update_phy_type
3563 * @phy_type_low: pointer to the lower part of phy_type
3564 * @phy_type_high: pointer to the higher part of phy_type
3565 * @link_speeds_bitmap: targeted link speeds bitmap
3566 *
3567 * Note: For the link_speeds_bitmap layout, see
3568 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
3569 * link_speeds_bitmap that includes multiple speeds.
3570 *
3571 * Each entry in the [phy_type_low, phy_type_high] structure represents
3572 * a certain link speed. This helper function turns on the bits in the
3573 * [phy_type_low, phy_type_high] structure that correspond to the speeds
3574 * set in the link_speeds_bitmap input parameter.
3575 */
3576 void
3577 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
3578 u16 link_speeds_bitmap)
3579 {
3580 u64 pt_high;
3581 u64 pt_low;
3582 int index;
3583 u16 speed;
3584
3585 /* We first check with low part of phy_type */
3586 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
3587 pt_low = BIT_ULL(index);
3588 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
3589
3590 if (link_speeds_bitmap & speed)
3591 *phy_type_low |= BIT_ULL(index);
3592 }
3593
3594 /* We then check with high part of phy_type */
3595 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
3596 pt_high = BIT_ULL(index);
3597 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
3598
3599 if (link_speeds_bitmap & speed)
3600 *phy_type_high |= BIT_ULL(index);
3601 }
3602 }
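
/* Example (illustrative sketch): building the PHY type masks for a single
 * requested speed. The locals below are hypothetical; ICE_AQ_LINK_SPEED_25GB
 * is one of the speed bits used by ice_get_link_speed_based_on_phy_type().
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high, ICE_AQ_LINK_SPEED_25GB);
 *
 * After the call, phy_low has every 25G PHY type bit set (25GBASE_T through
 * 25G_AUI_C2C) and phy_high is left untouched, since no 25G types live in
 * the high word.
 */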
3603
3604 /**
3605 * ice_aq_set_phy_cfg
3606 * @hw: pointer to the HW struct
3607 * @pi: port info structure of the interested logical port
3608 * @cfg: structure with PHY configuration data to be set
3609 * @cd: pointer to command details structure or NULL
3610 *
3611 * Set the various PHY configuration parameters supported on the Port.
3612 * One or more of the Set PHY config parameters may be ignored in an MFP
3613 * mode as the PF may not have the privilege to set some of the PHY Config
3614 * parameters. This status will be indicated by the command response (0x0601).
3615 */
3616 int
3617 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3618 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
3619 {
3620 struct ice_aq_desc desc;
3621 int status;
3622
3623 if (!cfg)
3624 return ICE_ERR_PARAM;
3625
3626 /* Ensure that only valid bits of cfg->caps can be turned on. */
3627 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3628 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3629 cfg->caps);
3630
3631 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3632 }
3633
3634 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
3635 desc.params.set_phy.lport_num = pi->lport;
3636 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3637
3638 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3639 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
3640 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3641 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
3642 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3643 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
3644 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3645 cfg->low_power_ctrl_an);
3646 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3647 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3648 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3649 cfg->link_fec_opt);
3650
3651 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3652
3653 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3654 status = 0;
3655
3656 if (!status)
3657 pi->phy.curr_user_phy_cfg = *cfg;
3658
3659 return status;
3660 }
3661
3662 /**
3663 * ice_update_link_info - update status of the HW network link
3664 * @pi: port info structure of the interested logical port
3665 */
3666 int ice_update_link_info(struct ice_port_info *pi)
3667 {
3668 struct ice_link_status *li;
3669 int status;
3670
3671 if (!pi)
3672 return ICE_ERR_PARAM;
3673
3674 li = &pi->phy.link_info;
3675
3676 status = ice_aq_get_link_info(pi, true, NULL, NULL);
3677 if (status)
3678 return status;
3679
3680 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3681 struct ice_aqc_get_phy_caps_data *pcaps;
3682 struct ice_hw *hw;
3683
3684 hw = pi->hw;
3685 pcaps = (struct ice_aqc_get_phy_caps_data *)
3686 ice_malloc(hw, sizeof(*pcaps));
3687 if (!pcaps)
3688 return ICE_ERR_NO_MEMORY;
3689
3690 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3691 pcaps, NULL);
3692
3693 if (!status)
3694 ice_memcpy(li->module_type, &pcaps->module_type,
3695 sizeof(li->module_type),
3696 ICE_NONDMA_TO_NONDMA);
3697
3698 ice_free(hw, pcaps);
3699 }
3700
3701 return status;
3702 }
3703
3704 /**
3705 * ice_cache_phy_user_req
3706 * @pi: port information structure
3707 * @cache_data: PHY logging data
3708 * @cache_mode: PHY logging mode
3709 *
3710 * Log the user request on (FC, FEC, SPEED) for later use.
3711 */
3712 static void
3713 ice_cache_phy_user_req(struct ice_port_info *pi,
3714 struct ice_phy_cache_mode_data cache_data,
3715 enum ice_phy_cache_mode cache_mode)
3716 {
3717 if (!pi)
3718 return;
3719
3720 switch (cache_mode) {
3721 case ICE_FC_MODE:
3722 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3723 break;
3724 case ICE_SPEED_MODE:
3725 pi->phy.curr_user_speed_req =
3726 cache_data.data.curr_user_speed_req;
3727 break;
3728 case ICE_FEC_MODE:
3729 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3730 break;
3731 default:
3732 break;
3733 }
3734 }
3735
3736 /**
3737 * ice_caps_to_fc_mode
3738 * @caps: PHY capabilities
3739 *
3740 * Convert PHY FC capabilities to ice FC mode
3741 */
3742 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
3743 {
3744 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
3745 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3746 return ICE_FC_FULL;
3747
3748 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
3749 return ICE_FC_TX_PAUSE;
3750
3751 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
3752 return ICE_FC_RX_PAUSE;
3753
3754 return ICE_FC_NONE;
3755 }
3756
3757 /**
3758 * ice_caps_to_fec_mode
3759 * @caps: PHY capabilities
3760 * @fec_options: Link FEC options
3761 *
3762 * Convert PHY FEC capabilities to ice FEC mode
3763 */
3764 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
3765 {
3766 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) {
3767 if (fec_options & ICE_AQC_PHY_FEC_DIS)
3768 return ICE_FEC_DIS_AUTO;
3769 else
3770 return ICE_FEC_AUTO;
3771 }
3772
3773 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3774 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3775 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
3776 ICE_AQC_PHY_FEC_25G_KR_REQ))
3777 return ICE_FEC_BASER;
3778
3779 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3780 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
3781 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
3782 return ICE_FEC_RS;
3783
3784 return ICE_FEC_NONE;
3785 }
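
/* Example (illustrative sketch): translating reported PHY abilities into the
 * driver's FC and FEC modes after a Get PHY Capabilities call. "pcaps" is a
 * hypothetical, already filled struct ice_aqc_get_phy_caps_data.
 *
 *	enum ice_fc_mode fc = ice_caps_to_fc_mode(pcaps->caps);
 *	enum ice_fec_mode fec = ice_caps_to_fec_mode(pcaps->caps,
 *						     pcaps->link_fec_options);
 */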
3786
3787 /**
3788 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3789 * @pi: port information structure
3790 * @cfg: PHY configuration data to set FC mode
3791 * @req_mode: FC mode to configure
3792 */
3793 static int
3794 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3795 enum ice_fc_mode req_mode)
3796 {
3797 struct ice_phy_cache_mode_data cache_data;
3798 u8 pause_mask = 0x0;
3799
3800 if (!pi || !cfg)
3801 return ICE_ERR_BAD_PTR;
3802 switch (req_mode) {
3803 case ICE_FC_AUTO:
3804 {
3805 struct ice_aqc_get_phy_caps_data *pcaps;
3806 int status;
3807
3808 pcaps = (struct ice_aqc_get_phy_caps_data *)
3809 ice_malloc(pi->hw, sizeof(*pcaps));
3810 if (!pcaps)
3811 return ICE_ERR_NO_MEMORY;
3812 /* Query the value of FC that both the NIC and attached media
3813 * can do.
3814 */
3815 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
3816 pcaps, NULL);
3817 if (status) {
3818 ice_free(pi->hw, pcaps);
3819 return status;
3820 }
3821
3822 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3823 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3824
3825 ice_free(pi->hw, pcaps);
3826 break;
3827 }
3828 case ICE_FC_FULL:
3829 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3830 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3831 break;
3832 case ICE_FC_RX_PAUSE:
3833 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3834 break;
3835 case ICE_FC_TX_PAUSE:
3836 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3837 break;
3838 default:
3839 break;
3840 }
3841
3842 /* clear the old pause settings */
3843 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3844 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
3845
3846 /* set the new capabilities */
3847 cfg->caps |= pause_mask;
3848
3849 /* Cache user FC request */
3850 cache_data.data.curr_user_fc_req = req_mode;
3851 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
3852
3853 return 0;
3854 }
3855
3856 /**
3857 * ice_set_fc
3858 * @pi: port information structure
3859 * @aq_failures: pointer to status code, specific to ice_set_fc routine
3860 * @ena_auto_link_update: enable automatic link update
3861 *
3862 * Set the requested flow control mode.
3863 */
3864 int
3865 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3866 {
3867 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3868 struct ice_aqc_get_phy_caps_data *pcaps;
3869 struct ice_hw *hw;
3870 int status;
3871
3872 if (!pi || !aq_failures)
3873 return ICE_ERR_BAD_PTR;
3874
3875 *aq_failures = 0;
3876 hw = pi->hw;
3877
3878 pcaps = (struct ice_aqc_get_phy_caps_data *)
3879 ice_malloc(hw, sizeof(*pcaps));
3880 if (!pcaps)
3881 return ICE_ERR_NO_MEMORY;
3882
3883 /* Get the current PHY config */
3884 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
3885 pcaps, NULL);
3886
3887 if (status) {
3888 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3889 goto out;
3890 }
3891
3892 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3893
3894 /* Configure the set PHY data */
3895 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3896 if (status) {
3897 if (status != ICE_ERR_BAD_PTR)
3898 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3899
3900 goto out;
3901 }
3902
3903 /* If the capabilities have changed, then set the new config */
3904 if (cfg.caps != pcaps->caps) {
3905 int retry_count, retry_max = 10;
3906
3907 /* Auto restart link so settings take effect */
3908 if (ena_auto_link_update)
3909 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3910
3911 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3912 if (status) {
3913 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3914 goto out;
3915 }
3916
3917 /* Update the link info
3918 * It sometimes takes a really long time for link to
3919 * come back from the atomic reset. Thus, we wait a
3920 * little bit.
3921 */
3922 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3923 status = ice_update_link_info(pi);
3924
3925 if (!status)
3926 break;
3927
3928 ice_msec_delay(100, true);
3929 }
3930
3931 if (status)
3932 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3933 }
3934
3935 out:
3936 ice_free(hw, pcaps);
3937 return status;
3938 }
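
/* Usage sketch (illustrative only): requesting full flow control with an
 * automatic link restart. The caller owns pi->fc.req_mode; aq_failures
 * reports which AQ step failed, if any.
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set fc failed, err %d aq %d\n",
 *			  status, aq_failures);
 */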
3939
3940 /**
3941 * ice_phy_caps_equals_cfg
3942 * @phy_caps: PHY capabilities
3943 * @phy_cfg: PHY configuration
3944 *
3945 * Helper function to determine if PHY capabilities matches PHY
3946 * configuration
3947 */
3948 bool
3949 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3950 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3951 {
3952 u8 caps_mask, cfg_mask;
3953
3954 if (!phy_caps || !phy_cfg)
3955 return false;
3956
3957 /* These bits are not common between capabilities and configuration.
3958 * Do not use them to determine equality.
3959 */
3960 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3961 ICE_AQC_PHY_EN_MOD_QUAL);
3962 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3963
3964 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3965 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3966 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3967 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3968 phy_caps->eee_cap != phy_cfg->eee_cap ||
3969 phy_caps->eeer_value != phy_cfg->eeer_value ||
3970 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3971 return false;
3972
3973 return true;
3974 }
3975
3976 /**
3977 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3978 * @pi: port information structure
3979 * @caps: PHY ability structure to copy data from
3980 * @cfg: PHY configuration structure to copy data to
3981 *
3982 * Helper function to copy AQC PHY get ability data to PHY set configuration
3983 * data structure
3984 */
3985 void
3986 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3987 struct ice_aqc_get_phy_caps_data *caps,
3988 struct ice_aqc_set_phy_cfg_data *cfg)
3989 {
3990 if (!pi || !caps || !cfg)
3991 return;
3992
3993 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3994 cfg->phy_type_low = caps->phy_type_low;
3995 cfg->phy_type_high = caps->phy_type_high;
3996 cfg->caps = caps->caps;
3997 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3998 cfg->eee_cap = caps->eee_cap;
3999 cfg->eeer_value = caps->eeer_value;
4000 cfg->link_fec_opt = caps->link_fec_options;
4001 cfg->module_compliance_enforcement =
4002 caps->module_compliance_enforcement;
4003 }
4004
4005 /**
4006 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
4007 * @pi: port information structure
4008 * @cfg: PHY configuration data to set FEC mode
4009 * @fec: FEC mode to configure
4010 */
4011 int
4012 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
4013 enum ice_fec_mode fec)
4014 {
4015 struct ice_aqc_get_phy_caps_data *pcaps;
4016 struct ice_hw *hw;
4017 int status = 0;
4018
4019 if (!pi || !cfg)
4020 return ICE_ERR_BAD_PTR;
4021
4022 hw = pi->hw;
4023
4024 pcaps = (struct ice_aqc_get_phy_caps_data *)
4025 ice_malloc(hw, sizeof(*pcaps));
4026 if (!pcaps)
4027 return ICE_ERR_NO_MEMORY;
4028
4029 status = ice_aq_get_phy_caps(pi, false,
4030 (ice_fw_supports_report_dflt_cfg(hw) ?
4031 ICE_AQC_REPORT_DFLT_CFG :
4032 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
4033
4034 if (status)
4035 goto out;
4036
4037 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
4038 cfg->link_fec_opt = pcaps->link_fec_options;
4039
4040 switch (fec) {
4041 case ICE_FEC_BASER:
4042 /* Clear RS bits, and AND BASE-R ability
4043 * bits and OR request bits.
4044 */
4045 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
4046 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
4047 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
4048 ICE_AQC_PHY_FEC_25G_KR_REQ;
4049 break;
4050 case ICE_FEC_RS:
4051 /* Clear BASE-R bits, and AND RS ability
4052 * bits and OR request bits.
4053 */
4054 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
4055 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
4056 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
4057 break;
4058 case ICE_FEC_NONE:
4059 /* Clear all FEC option bits. */
4060 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
4061 break;
4062 case ICE_FEC_DIS_AUTO:
4063 /* Set No FEC and auto FEC */
4064 if (!ice_fw_supports_fec_dis_auto(hw)) {
4065 status = ICE_ERR_NOT_SUPPORTED;
4066 goto out;
4067 }
4068 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
4069 /* fall-through */
4070 case ICE_FEC_AUTO:
4071 /* AND auto FEC bit, and all caps bits. */
4072 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
4073 cfg->link_fec_opt |= pcaps->link_fec_options;
4074 break;
4075 default:
4076 status = ICE_ERR_PARAM;
4077 break;
4078 }
4079
4080 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
4081 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
4082 struct ice_link_default_override_tlv tlv;
4083
4084 if (ice_get_link_default_override(&tlv, pi))
4085 goto out;
4086
4087 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
4088 (tlv.options & ICE_LINK_OVERRIDE_EN))
4089 cfg->link_fec_opt = tlv.fec_options;
4090 }
4091
4092 out:
4093 ice_free(hw, pcaps);
4094
4095 return status;
4096 }
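
/* FEC configuration sketch (illustrative only): the usual flow is to read
 * the active abilities, convert them into a configuration, adjust the FEC
 * request, and apply the result. "pcaps" is a hypothetical caller-allocated
 * struct ice_aqc_get_phy_caps_data buffer.
 *
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     pcaps, NULL);
 *	if (!status) {
 *		ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *		status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
 *	}
 *	if (!status)
 *		status = ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */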
4097
4098 /**
4099 * ice_get_link_status - get status of the HW network link
4100 * @pi: port information structure
4101 * @link_up: pointer to bool (true/false = linkup/linkdown)
4102 *
4103 * The variable link_up is true if the link is up and false if it is down.
4104 * The value of link_up is invalid if the returned status is non-zero. As a
4105 * result of this call, link status reporting becomes enabled.
4106 */
4107 int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
4108 {
4109 struct ice_phy_info *phy_info;
4110 int status = 0;
4111
4112 if (!pi || !link_up)
4113 return ICE_ERR_PARAM;
4114
4115 phy_info = &pi->phy;
4116
4117 if (phy_info->get_link_info) {
4118 status = ice_update_link_info(pi);
4119
4120 if (status)
4121 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
4122 status);
4123 }
4124
4125 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
4126
4127 return status;
4128 }
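
/* Usage sketch (illustrative only): polling link state. The helper only
 * refreshes from firmware when pi->phy.get_link_info is set; otherwise the
 * cached pi->phy.link_info is reported. "speed" is a hypothetical caller
 * local holding the reported link speed bit.
 *
 *	bool link_up = false;
 *
 *	status = ice_get_link_status(pi, &link_up);
 *	if (!status && link_up)
 *		speed = pi->phy.link_info.link_speed;
 */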
4129
4130 /**
4131 * ice_aq_set_link_restart_an
4132 * @pi: pointer to the port information structure
4133 * @ena_link: if true: enable link, if false: disable link
4134 * @cd: pointer to command details structure or NULL
4135 *
4136 * Sets up the link and restarts the Auto-Negotiation over the link.
4137 */
4138 int
4139 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
4140 struct ice_sq_cd *cd)
4141 {
4142 int status = ICE_ERR_AQ_ERROR;
4143 struct ice_aqc_restart_an *cmd;
4144 struct ice_aq_desc desc;
4145
4146 cmd = &desc.params.restart_an;
4147
4148 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
4149
4150 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
4151 cmd->lport_num = pi->lport;
4152 if (ena_link)
4153 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
4154 else
4155 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
4156
4157 status = ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
4158 if (status)
4159 return status;
4160
4161 if (ena_link)
4162 pi->phy.curr_user_phy_cfg.caps |= ICE_AQC_PHY_EN_LINK;
4163 else
4164 pi->phy.curr_user_phy_cfg.caps &= ~ICE_AQC_PHY_EN_LINK;
4165
4166 return 0;
4167 }
4168
4169 /**
4170 * ice_aq_set_event_mask
4171 * @hw: pointer to the HW struct
4172 * @port_num: port number of the physical function
4173 * @mask: event mask to be set
4174 * @cd: pointer to command details structure or NULL
4175 *
4176 * Set event mask (0x0613)
4177 */
4178 int
4179 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
4180 struct ice_sq_cd *cd)
4181 {
4182 struct ice_aqc_set_event_mask *cmd;
4183 struct ice_aq_desc desc;
4184
4185 cmd = &desc.params.set_event_mask;
4186
4187 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
4188
4189 cmd->lport_num = port_num;
4190
4191 cmd->event_mask = CPU_TO_LE16(mask);
4192 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4193 }
4194
4195 /**
4196 * ice_aq_set_mac_loopback
4197 * @hw: pointer to the HW struct
4198 * @ena_lpbk: Enable or Disable loopback
4199 * @cd: pointer to command details structure or NULL
4200 *
4201 * Enable/disable loopback on a given port
4202 */
4203 int
4204 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
4205 {
4206 struct ice_aqc_set_mac_lb *cmd;
4207 struct ice_aq_desc desc;
4208
4209 cmd = &desc.params.set_mac_lb;
4210
4211 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
4212 if (ena_lpbk)
4213 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
4214
4215 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4216 }
4217
4218 /**
4219 * ice_aq_set_port_id_led
4220 * @pi: pointer to the port information
4221 * @is_orig_mode: is this LED set to original mode (by the net-list)
4222 * @cd: pointer to command details structure or NULL
4223 *
4224 * Set LED value for the given port (0x06e9)
4225 */
4226 int
4227 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
4228 struct ice_sq_cd *cd)
4229 {
4230 struct ice_aqc_set_port_id_led *cmd;
4231 struct ice_hw *hw = pi->hw;
4232 struct ice_aq_desc desc;
4233
4234 cmd = &desc.params.set_port_id_led;
4235
4236 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
4237
4238 if (is_orig_mode)
4239 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
4240 else
4241 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
4242
4243 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4244 }
4245
4246 /**
4247 * ice_aq_sff_eeprom
4248 * @hw: pointer to the HW struct
4249 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
4250 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
4251 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
4252 * @page: QSFP page
4253 * @set_page: set or ignore the page
4254 * @data: pointer to data buffer to be read/written to the I2C device.
4255 * @length: 1-16 for read, 1 for write.
4256 * @write: 0 for read, 1 for write.
4257 * @cd: pointer to command details structure or NULL
4258 *
4259 * Read/Write SFF EEPROM (0x06EE)
4260 */
4261 int
4262 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
4263 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
4264 bool write, struct ice_sq_cd *cd)
4265 {
4266 struct ice_aqc_sff_eeprom *cmd;
4267 struct ice_aq_desc desc;
4268 int status;
4269
4270 if (!data || (mem_addr & 0xff00))
4271 return ICE_ERR_PARAM;
4272
4273 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
4274 cmd = &desc.params.read_write_sff_param;
4275 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
4276 cmd->lport_num = (u8)(lport & 0xff);
4277 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
4278 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
4279 ICE_AQC_SFF_I2CBUS_7BIT_M) |
4280 ((set_page <<
4281 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
4282 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
4283 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
4284 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
4285 if (write)
4286 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
4287
4288 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
4289 return status;
4290 }
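
/* Example (illustrative sketch): reading the first 16 bytes of page 0 from a
 * module EEPROM at the conventional 0xA0 I2C address on logical port 0.
 * "buf" is a hypothetical 16-byte caller buffer.
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x0, 0, 0, buf, 16,
 *				   false, NULL);
 */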
4291
4292 /**
4293 * ice_aq_prog_topo_dev_nvm
4294 * @hw: pointer to the hardware structure
4295 * @topo_params: pointer to structure storing topology parameters for a device
4296 * @cd: pointer to command details structure or NULL
4297 *
4298 * Program Topology Device NVM (0x06F2)
4299 *
4300 */
4301 int
4302 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
4303 struct ice_aqc_link_topo_params *topo_params,
4304 struct ice_sq_cd *cd)
4305 {
4306 struct ice_aqc_prog_topo_dev_nvm *cmd;
4307 struct ice_aq_desc desc;
4308
4309 cmd = &desc.params.prog_topo_dev_nvm;
4310
4311 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
4312
4313 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4314 ICE_NONDMA_TO_NONDMA);
4315
4316 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4317 }
4318
4319 /**
4320 * ice_aq_read_topo_dev_nvm
4321 * @hw: pointer to the hardware structure
4322 * @topo_params: pointer to structure storing topology parameters for a device
4323 * @start_address: byte offset in the topology device NVM
4324 * @data: pointer to data buffer
4325 * @data_size: number of bytes to be read from the topology device NVM
4326 * @cd: pointer to command details structure or NULL
4327 * Read Topology Device NVM (0x06F3)
4328 *
4329 */
4330 int
4331 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
4332 struct ice_aqc_link_topo_params *topo_params,
4333 u32 start_address, u8 *data, u8 data_size,
4334 struct ice_sq_cd *cd)
4335 {
4336 struct ice_aqc_read_topo_dev_nvm *cmd;
4337 struct ice_aq_desc desc;
4338 int status;
4339
4340 if (!data || data_size == 0 ||
4341 data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
4342 return ICE_ERR_PARAM;
4343
4344 cmd = &desc.params.read_topo_dev_nvm;
4345
4346 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
4347
4348 desc.datalen = CPU_TO_LE16(data_size);
4349 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4350 ICE_NONDMA_TO_NONDMA);
4351 cmd->start_address = CPU_TO_LE32(start_address);
4352
4353 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4354 if (status)
4355 return status;
4356
4357 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
4358
4359 return 0;
4360 }
4361
4362 static u16 ice_lut_type_to_size(u16 lut_type)
4363 {
4364 switch (lut_type) {
4365 case ICE_LUT_VSI:
4366 return ICE_LUT_VSI_SIZE;
4367 case ICE_LUT_GLOBAL:
4368 return ICE_LUT_GLOBAL_SIZE;
4369 case ICE_LUT_PF:
4370 return ICE_LUT_PF_SIZE;
4371 case ICE_LUT_PF_SMALL:
4372 return ICE_LUT_PF_SMALL_SIZE;
4373 default:
4374 return 0;
4375 }
4376 }
4377
4378 static u16 ice_lut_size_to_flag(u16 lut_size)
4379 {
4380 u16 f = 0;
4381
4382 switch (lut_size) {
4383 case ICE_LUT_GLOBAL_SIZE:
4384 f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG;
4385 break;
4386 case ICE_LUT_PF_SIZE:
4387 f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG;
4388 break;
4389 default:
4390 break;
4391 }
4392 return f << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S;
4393 }
4394
4395 int ice_lut_size_to_type(int lut_size)
4396 {
4397 switch (lut_size) {
4398 case ICE_LUT_VSI_SIZE:
4399 return ICE_LUT_VSI;
4400 case ICE_LUT_GLOBAL_SIZE:
4401 return ICE_LUT_GLOBAL;
4402 case ICE_LUT_PF_SIZE:
4403 return ICE_LUT_PF;
4404 case ICE_LUT_PF_SMALL_SIZE:
4405 return ICE_LUT_PF_SMALL;
4406 default:
4407 return -1;
4408 }
4409 }
4410
4411 /**
4412 * __ice_aq_get_set_rss_lut
4413 * @hw: pointer to the hardware structure
4414 * @params: RSS LUT parameters
4415 * @set: set true to set the table, false to get the table
4416 *
4417 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
4418 */
4419 static int
4420 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
4421 {
4422 u16 flags, vsi_id, lut_type, lut_size, glob_lut_idx = 0, vsi_handle;
4423 struct ice_aqc_get_set_rss_lut *cmd_resp;
4424 struct ice_aq_desc desc;
4425 int status;
4426 u8 *lut;
4427
4428 if (!params)
4429 return ICE_ERR_PARAM;
4430
4431 vsi_handle = params->vsi_handle;
4432 lut = params->lut;
4433 lut_size = ice_lut_type_to_size(params->lut_type);
4434 lut_type = params->lut_type & ICE_LUT_TYPE_MASK;
4435 cmd_resp = &desc.params.get_set_rss_lut;
4436 if (lut_type == ICE_LUT_GLOBAL)
4437 glob_lut_idx = params->global_lut_id;
4438
4439 if (!lut || !lut_size || !ice_is_vsi_valid(hw, vsi_handle))
4440 return ICE_ERR_PARAM;
4441
4442 if (lut_size > params->lut_size)
4443 return ICE_ERR_INVAL_SIZE;
4444
4445 if (set && lut_size != params->lut_size)
4446 return ICE_ERR_PARAM;
4447
4448 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4449
4450 if (set) {
4451 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
4452 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4453 } else {
4454 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
4455 }
4456
4457 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4458 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
4459 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
4460 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
4461
4462 flags = ice_lut_size_to_flag(lut_size) |
4463 ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
4464 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M) |
4465 ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
4466 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
4467
4468 cmd_resp->flags = CPU_TO_LE16(flags);
4469 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4470 params->lut_size = LE16_TO_CPU(desc.datalen);
4471 return status;
4472 }
4473
4474 /**
4475 * ice_aq_get_rss_lut
4476 * @hw: pointer to the hardware structure
4477 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
4478 *
4479 * get the RSS lookup table, PF or VSI type
4480 */
4481 int
4482 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4483 {
4484 return __ice_aq_get_set_rss_lut(hw, get_params, false);
4485 }
4486
4487 /**
4488 * ice_aq_set_rss_lut
4489 * @hw: pointer to the hardware structure
4490 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4491 *
4492 * set the RSS lookup table, PF or VSI type
4493 */
4494 int
4495 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4496 {
4497 return __ice_aq_get_set_rss_lut(hw, set_params, true);
4498 }
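
/* Usage sketch (illustrative only): programming a PF-scope RSS LUT. The
 * parameter fields follow the accesses made in __ice_aq_get_set_rss_lut()
 * above; "lut" is a hypothetical buffer of ICE_LUT_PF_SIZE bytes already
 * filled with queue indexes.
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *
 *	params.vsi_handle = vsi_handle;
 *	params.lut_type = ICE_LUT_PF;
 *	params.lut = lut;
 *	params.lut_size = ICE_LUT_PF_SIZE;
 *	status = ice_aq_set_rss_lut(hw, &params);
 */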
4499
4500 /**
4501 * __ice_aq_get_set_rss_key
4502 * @hw: pointer to the HW struct
4503 * @vsi_id: VSI FW index
4504 * @key: pointer to key info struct
4505 * @set: set true to set the key, false to get the key
4506 *
4507 * get (0x0B04) or set (0x0B02) the RSS key per VSI
4508 */
4509 static int __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4510 struct ice_aqc_get_set_rss_keys *key,
4511 bool set)
4512 {
4513 struct ice_aqc_get_set_rss_key *cmd_resp;
4514 u16 key_size = sizeof(*key);
4515 struct ice_aq_desc desc;
4516
4517 cmd_resp = &desc.params.get_set_rss_key;
4518
4519 if (set) {
4520 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4521 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4522 } else {
4523 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4524 }
4525
4526 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4527 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4528 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4529 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4530
4531 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4532 }
4533
4534 /**
4535 * ice_aq_get_rss_key
4536 * @hw: pointer to the HW struct
4537 * @vsi_handle: software VSI handle
4538 * @key: pointer to key info struct
4539 *
4540 * get the RSS key per VSI
4541 */
4542 int
4543 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4544 struct ice_aqc_get_set_rss_keys *key)
4545 {
4546 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4547 return ICE_ERR_PARAM;
4548
4549 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4550 key, false);
4551 }
4552
4553 /**
4554 * ice_aq_set_rss_key
4555 * @hw: pointer to the HW struct
4556 * @vsi_handle: software VSI handle
4557 * @keys: pointer to key info struct
4558 *
4559 * set the RSS key per VSI
4560 */
4561 int
4562 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4563 struct ice_aqc_get_set_rss_keys *keys)
4564 {
4565 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4566 return ICE_ERR_PARAM;
4567
4568 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4569 keys, true);
4570 }
4571
4572 /**
4573 * ice_aq_add_lan_txq
4574 * @hw: pointer to the hardware structure
4575 * @num_qgrps: Number of added queue groups
4576 * @qg_list: list of queue groups to be added
4577 * @buf_size: size of buffer for indirect command
4578 * @cd: pointer to command details structure or NULL
4579 *
4580 * Add Tx LAN queue (0x0C30)
4581 *
4582 * NOTE:
4583 * Prior to calling add Tx LAN queue:
4584 * Initialize the following as part of the Tx queue context:
4585 * Completion queue ID if the queue uses Completion queue, Quanta profile,
4586 * Cache profile and Packet shaper profile.
4587 *
4588 * After add Tx LAN queue AQ command is completed:
4589 * Interrupts should be associated with specific queues,
4590 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
4591 * flow.
4592 */
4593 int
4594 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4595 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
4596 struct ice_sq_cd *cd)
4597 {
4598 struct ice_aqc_add_tx_qgrp *list;
4599 struct ice_aqc_add_txqs *cmd;
4600 struct ice_aq_desc desc;
4601 u16 i, sum_size = 0;
4602
4603 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4604
4605 cmd = &desc.params.add_txqs;
4606
4607 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
4608
4609 if (!qg_list)
4610 return ICE_ERR_PARAM;
4611
4612 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4613 return ICE_ERR_PARAM;
4614
4615 for (i = 0, list = qg_list; i < num_qgrps; i++) {
4616 sum_size += ice_struct_size(list, txqs, list->num_txqs);
4617 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4618 list->num_txqs);
4619 }
4620
4621 if (buf_size != sum_size)
4622 return ICE_ERR_PARAM;
4623
4624 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4625
4626 cmd->num_qgrps = num_qgrps;
4627
4628 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4629 }
4630
4631 /**
4632 * ice_aq_dis_lan_txq
4633 * @hw: pointer to the hardware structure
4634 * @num_qgrps: number of groups in the list
4635 * @qg_list: the list of groups to disable
4636 * @buf_size: the total size of the qg_list buffer in bytes
4637 * @rst_src: if called due to reset, specifies the reset source
4638 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4639 * @cd: pointer to command details structure or NULL
4640 *
4641 * Disable LAN Tx queue (0x0C31)
4642 */
4643 static int
4644 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4645 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
4646 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4647 struct ice_sq_cd *cd)
4648 {
4649 struct ice_aqc_dis_txq_item *item;
4650 struct ice_aqc_dis_txqs *cmd;
4651 struct ice_aq_desc desc;
4652 int status;
4653 u16 i, sz = 0;
4654
4655 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4656 cmd = &desc.params.dis_txqs;
4657 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
4658
4659 /* qg_list can be NULL only in VM/VF reset flow */
4660 if (!qg_list && !rst_src)
4661 return ICE_ERR_PARAM;
4662
4663 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
4664 return ICE_ERR_PARAM;
4665
4666 cmd->num_entries = num_qgrps;
4667
4668 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4669 ICE_AQC_Q_DIS_TIMEOUT_M);
4670
4671 switch (rst_src) {
4672 case ICE_VM_RESET:
4673 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4674 cmd->vmvf_and_timeout |=
4675 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
4676 break;
4677 case ICE_VF_RESET:
4678 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4679 /* In this case, FW expects vmvf_num to be absolute VF ID */
4680 cmd->vmvf_and_timeout |=
4681 CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
4682 ICE_AQC_Q_DIS_VMVF_NUM_M);
4683 break;
4684 case ICE_NO_RESET:
4685 default:
4686 break;
4687 }
4688
4689 /* flush pipe on time out */
4690 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4691 /* If no queue group info, we are in a reset flow. Issue the AQ */
4692 if (!qg_list)
4693 goto do_aq;
4694
4695 /* set RD bit to indicate that command buffer is provided by the driver
4696 * and it needs to be read by the firmware
4697 */
4698 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4699
4700 for (i = 0, item = qg_list; i < num_qgrps; i++) {
4701 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4702
4703 /* If the num of queues is even, add 2 bytes of padding */
4704 if ((item->num_qs % 2) == 0)
4705 item_size += 2;
4706
4707 sz += item_size;
4708
4709 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
4710 }
4711
4712 if (buf_size != sz)
4713 return ICE_ERR_PARAM;
4714
4715 do_aq:
4716 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4717 if (status) {
4718 if (!qg_list)
4719 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4720 vmvf_num, hw->adminq.sq_last_status);
4721 else
4722 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4723 LE16_TO_CPU(qg_list[0].q_id[0]),
4724 hw->adminq.sq_last_status);
4725 }
4726 return status;
4727 }
4728
4729 /**
4730 * ice_aq_move_recfg_lan_txq
4731 * @hw: pointer to the hardware structure
4732 * @num_qs: number of queues to move/reconfigure
4733 * @is_move: true if this operation involves node movement
4734 * @is_tc_change: true if this operation involves a TC change
4735 * @subseq_call: true if this operation is a subsequent call
4736 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4737 * @timeout: timeout in units of 100 usec (valid values 0-50)
4738 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
4739 * @buf: struct containing src/dest TEID and per-queue info
4740 * @buf_size: size of buffer for indirect command
4741 * @txqs_moved: out param, number of queues successfully moved
4742 * @cd: pointer to command details structure or NULL
4743 *
4744 * Move / Reconfigure Tx LAN queues (0x0C32)
4745 */
4746 int
4747 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4748 bool is_tc_change, bool subseq_call, bool flush_pipe,
4749 u8 timeout, u32 *blocked_cgds,
4750 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
4751 u8 *txqs_moved, struct ice_sq_cd *cd)
4752 {
4753 struct ice_aqc_move_txqs *cmd;
4754 struct ice_aq_desc desc;
4755 int status;
4756
4757 cmd = &desc.params.move_txqs;
4758 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
4759
4760 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
4761 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4762 return ICE_ERR_PARAM;
4763
4764 if (is_tc_change && !flush_pipe && !blocked_cgds)
4765 return ICE_ERR_PARAM;
4766
4767 if (!is_move && !is_tc_change)
4768 return ICE_ERR_PARAM;
4769
4770 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4771
4772 if (is_move)
4773 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4774
4775 if (is_tc_change)
4776 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4777
4778 if (subseq_call)
4779 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4780
4781 if (flush_pipe)
4782 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4783
4784 cmd->num_qs = num_qs;
4785 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4786 ICE_AQC_Q_CMD_TIMEOUT_M);
4787
4788 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4789
4790 if (!status && txqs_moved)
4791 *txqs_moved = cmd->num_qs;
4792
4793 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4794 is_tc_change && !flush_pipe)
4795 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4796
4797 return status;
4798 }
4799
4800 /**
4801 * ice_aq_add_rdma_qsets
4802 * @hw: pointer to the hardware structure
4803 * @num_qset_grps: Number of RDMA Qset groups
4804 * @qset_list: list of qset groups to be added
4805 * @buf_size: size of buffer for indirect command
4806 * @cd: pointer to command details structure or NULL
4807 *
4808 * Add Tx RDMA Qsets (0x0C33)
4809 */
4810 int
4811 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4812 struct ice_aqc_add_rdma_qset_data *qset_list,
4813 u16 buf_size, struct ice_sq_cd *cd)
4814 {
4815 struct ice_aqc_add_rdma_qset_data *list;
4816 struct ice_aqc_add_rdma_qset *cmd;
4817 struct ice_aq_desc desc;
4818 u16 i, sum_size = 0;
4819
4820 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4821
4822 cmd = &desc.params.add_rdma_qset;
4823
4824 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
4825
4826 if (!qset_list)
4827 return ICE_ERR_PARAM;
4828
4829 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
4830 return ICE_ERR_PARAM;
4831
4832 for (i = 0, list = qset_list; i < num_qset_grps; i++) {
4833 u16 num_qsets = LE16_TO_CPU(list->num_qsets);
4834
4835 sum_size += ice_struct_size(list, rdma_qsets, num_qsets);
4836 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4837 num_qsets);
4838 }
4839
4840 if (buf_size != sum_size)
4841 return ICE_ERR_PARAM;
4842
4843 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4844
4845 cmd->num_qset_grps = num_qset_grps;
4846
4847 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4848 }
4849
4850 /* End of FW Admin Queue command wrappers */
4851
4852 /**
4853 * ice_write_byte - write a byte to a packed context structure
4854 * @src_ctx: the context structure to read from
4855 * @dest_ctx: the context to be written to
4856 * @ce_info: a description of the struct to be filled
4857 */
4858 static void
4859 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4860 {
4861 u8 src_byte, dest_byte, mask;
4862 u8 *from, *dest;
4863 u16 shift_width;
4864
4865 /* copy from the next struct field */
4866 from = src_ctx + ce_info->offset;
4867
4868 /* prepare the bits and mask */
4869 shift_width = ce_info->lsb % 8;
4870 mask = (u8)(BIT(ce_info->width) - 1);
4871
4872 src_byte = *from;
4873 src_byte &= mask;
4874
4875 /* shift to correct alignment */
4876 mask <<= shift_width;
4877 src_byte <<= shift_width;
4878
4879 /* get the current bits from the target bit string */
4880 dest = dest_ctx + (ce_info->lsb / 8);
4881
4882 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
4883
4884 dest_byte &= ~mask; /* get the bits not changing */
4885 dest_byte |= src_byte; /* add in the new bits */
4886
4887 /* put it all back */
4888 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
4889 }
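
/* Worked example (illustrative): for a hypothetical ce_info entry with
 * width = 3 and lsb = 5, the mask starts as 0x07. After shifting both the
 * mask and src_byte left by shift_width = 5, the three field bits land in
 * bits 7:5 of the destination byte, and the read-modify-write in
 * ice_write_byte() preserves bits 4:0 already present in dest_ctx.
 */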
4890
4891 /**
4892 * ice_write_word - write a word to a packed context structure
4893 * @src_ctx: the context structure to read from
4894 * @dest_ctx: the context to be written to
4895 * @ce_info: a description of the struct to be filled
4896 */
4897 static void
4898 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4899 {
4900 u16 src_word, mask;
4901 __le16 dest_word;
4902 u8 *from, *dest;
4903 u16 shift_width;
4904
4905 /* copy from the next struct field */
4906 from = src_ctx + ce_info->offset;
4907
4908 /* prepare the bits and mask */
4909 shift_width = ce_info->lsb % 8;
4910 mask = BIT(ce_info->width) - 1;
4911
4912 /* don't swizzle the bits until after the mask because the mask bits
4913 * will be in a different bit position on big endian machines
4914 */
4915 src_word = *(u16 *)from;
4916 src_word &= mask;
4917
4918 /* shift to correct alignment */
4919 mask <<= shift_width;
4920 src_word <<= shift_width;
4921
4922 /* get the current bits from the target bit string */
4923 dest = dest_ctx + (ce_info->lsb / 8);
4924
4925 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
4926
4927 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
4928 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
4929
4930 /* put it all back */
4931 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
4932 }
4933
4934 /**
4935 * ice_write_dword - write a dword to a packed context structure
4936 * @src_ctx: the context structure to read from
4937 * @dest_ctx: the context to be written to
4938 * @ce_info: a description of the struct to be filled
4939 */
4940 static void
4941 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4942 {
4943 u32 src_dword, mask;
4944 __le32 dest_dword;
4945 u8 *from, *dest;
4946 u16 shift_width;
4947
4948 /* copy from the next struct field */
4949 from = src_ctx + ce_info->offset;
4950
4951 /* prepare the bits and mask */
4952 shift_width = ce_info->lsb % 8;
4953
4954 /* if the field width is exactly 32 on an x86 machine, then the shift
4955 * operation will not work because the SHL instruction's count is masked
4956 * to 5 bits, so the shift will do nothing
4957 */
4958 if (ce_info->width < 32)
4959 mask = BIT(ce_info->width) - 1;
4960 else
4961 mask = (u32)~0;
4962
4963 /* don't swizzle the bits until after the mask because the mask bits
4964 * will be in a different bit position on big endian machines
4965 */
4966 src_dword = *(u32 *)from;
4967 src_dword &= mask;
4968
4969 /* shift to correct alignment */
4970 mask <<= shift_width;
4971 src_dword <<= shift_width;
4972
4973 /* get the current bits from the target bit string */
4974 dest = dest_ctx + (ce_info->lsb / 8);
4975
4976 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
4977
4978 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
4979 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
4980
4981 /* put it all back */
4982 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
4983 }
4984
4985 /**
4986 * ice_write_qword - write a qword to a packed context structure
4987 * @src_ctx: the context structure to read from
4988 * @dest_ctx: the context to be written to
4989 * @ce_info: a description of the struct to be filled
4990 */
4991 static void
4992 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
4993 {
4994 u64 src_qword, mask;
4995 __le64 dest_qword;
4996 u8 *from, *dest;
4997 u16 shift_width;
4998
4999 /* copy from the next struct field */
5000 from = src_ctx + ce_info->offset;
5001
5002 /* prepare the bits and mask */
5003 shift_width = ce_info->lsb % 8;
5004
5005 /* if the field width is exactly 64 on an x86 machine, then the shift
5006 * operation will not work because the SHL instruction's count is masked
5007 * to 6 bits so the shift will do nothing
5008 */
5009 if (ce_info->width < 64)
5010 mask = BIT_ULL(ce_info->width) - 1;
5011 else
5012 mask = (u64)~0;
5013
5014 /* don't swizzle the bits until after the mask because the mask bits
5015 * will be in a different bit position on big endian machines
5016 */
5017 src_qword = *(u64 *)from;
5018 src_qword &= mask;
5019
5020 /* shift to correct alignment */
5021 mask <<= shift_width;
5022 src_qword <<= shift_width;
5023
5024 /* get the current bits from the target bit string */
5025 dest = dest_ctx + (ce_info->lsb / 8);
5026
5027 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
5028
5029 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
5030 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
5031
5032 /* put it all back */
5033 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
5034 }
5035
5036 /**
5037 * ice_set_ctx - set context bits in packed structure
5038 * @hw: pointer to the hardware structure
5039 * @src_ctx: pointer to a generic non-packed context structure
5040 * @dest_ctx: pointer to memory for the packed structure
5041 * @ce_info: a description of the structure to be transformed
5042 */
5043 int
5044 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
5045 const struct ice_ctx_ele *ce_info)
5046 {
5047 int f;
5048
5049 for (f = 0; ce_info[f].width; f++) {
5050 /* We have to deal with each element of the FW response
5051 * using the correct size so that we are correct regardless
5052 * of the endianness of the machine.
5053 */
5054 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
5055 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
5056 f, ce_info[f].width, ce_info[f].size_of);
5057 continue;
5058 }
5059 switch (ce_info[f].size_of) {
5060 case sizeof(u8):
5061 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
5062 break;
5063 case sizeof(u16):
5064 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
5065 break;
5066 case sizeof(u32):
5067 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
5068 break;
5069 case sizeof(u64):
5070 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
5071 break;
5072 default:
5073 return ICE_ERR_INVAL_SIZE;
5074 }
5075 }
5076
5077 return 0;
5078 }
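
/* Illustrative sketch (not part of the driver): how a ce_info descriptor
 * table drives ice_set_ctx(). The struct, field layout, and initializer
 * below are hypothetical and shown for illustration only; the real
 * descriptor tables (e.g. for the Tx LAN queue context) are defined
 * elsewhere using the members consumed above (offset, size_of, width, lsb).
 *
 *	struct my_ctx {
 *		u16 head;	// hypothetical 13-bit field
 *		u8 ena;		// hypothetical 1-bit flag
 *	};
 *
 *	static const struct ice_ctx_ele my_ctx_info[] = {
 *		{ .offset = offsetof(struct my_ctx, head),
 *		  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
 *		{ .offset = offsetof(struct my_ctx, ena),
 *		  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
 *		{ 0 },	// width == 0 terminates the loop in ice_set_ctx()
 *	};
 *
 *	struct my_ctx unpacked = { .head = 5, .ena = 1 };
 *	u8 packed[8] = { 0 };
 *
 *	// packs .head into bits 0..12 and .ena into bit 13 of 'packed'
 *	ice_set_ctx(hw, (u8 *)&unpacked, packed, my_ctx_info);
 */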
5079
5080 /**
5081 * ice_aq_get_internal_data
5082 * @hw: pointer to the hardware structure
5083 * @cluster_id: specific cluster to dump
5084 * @table_id: table ID within cluster
5085 * @start: index of line in the block to read
5086 * @buf: dump buffer
5087 * @buf_size: dump buffer size
5088 * @ret_buf_size: return buffer size (returned by FW)
5089 * @ret_next_cluster: next cluster to read (returned by FW)
5090 * @ret_next_table: next block to read (returned by FW)
5091 * @ret_next_index: next index to read (returned by FW)
5092 * @cd: pointer to command details structure
5093 *
5094 * Get internal FW/HW data (0xFF08) for debug purposes.
5095 */
5096 int
5097 ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
5098 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
5099 u16 *ret_next_cluster, u16 *ret_next_table,
5100 u32 *ret_next_index, struct ice_sq_cd *cd)
5101 {
5102 struct ice_aqc_debug_dump_internals *cmd;
5103 struct ice_aq_desc desc;
5104 int status;
5105
5106 cmd = &desc.params.debug_dump;
5107
5108 if (buf_size == 0 || !buf)
5109 return ICE_ERR_PARAM;
5110
5111 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
5112
5113 cmd->cluster_id = CPU_TO_LE16(cluster_id);
5114 cmd->table_id = CPU_TO_LE16(table_id);
5115 cmd->idx = CPU_TO_LE32(start);
5116
5117 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5118
5119 if (!status) {
5120 if (ret_buf_size)
5121 *ret_buf_size = LE16_TO_CPU(desc.datalen);
5122 if (ret_next_cluster)
5123 *ret_next_cluster = LE16_TO_CPU(cmd->cluster_id);
5124 if (ret_next_table)
5125 *ret_next_table = LE16_TO_CPU(cmd->table_id);
5126 if (ret_next_index)
5127 *ret_next_index = LE32_TO_CPU(cmd->idx);
5128 }
5129
5130 return status;
5131 }
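
/* Illustrative sketch (not part of the driver): walking a FW debug dump with
 * ice_aq_get_internal_data(). The (cluster, table, index) tuple returned by
 * FW is fed back in to fetch the next block. The buffer size and the
 * end-of-dump test below are assumptions for illustration only.
 *
 *	u16 cluster = 0, table = 0, buf_len;
 *	u32 idx = 0;
 *	u8 buf[4096];
 *	int err;
 *
 *	do {
 *		u16 next_cluster, next_table;
 *		u32 next_idx;
 *
 *		err = ice_aq_get_internal_data(hw, cluster, table, idx, buf,
 *					       sizeof(buf), &buf_len,
 *					       &next_cluster, &next_table,
 *					       &next_idx, NULL);
 *		if (err)
 *			break;
 *		// consume buf_len bytes of 'buf' here
 *		cluster = next_cluster;
 *		table = next_table;
 *		idx = next_idx;
 *	} while (cluster != 0xFFFF);	// hypothetical end-of-dump marker
 */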
5132
5133 /**
5134 * ice_read_byte - read context byte into struct
5135 * @src_ctx: the context structure to read from
5136 * @dest_ctx: the context to be written to
5137 * @ce_info: a description of the struct to be filled
5138 */
5139 static void
5140 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5141 {
5142 u8 dest_byte, mask;
5143 u8 *src, *target;
5144 u16 shift_width;
5145
5146 /* prepare the bits and mask */
5147 shift_width = ce_info->lsb % 8;
5148 mask = (u8)(BIT(ce_info->width) - 1);
5149
5150 /* shift to correct alignment */
5151 mask <<= shift_width;
5152
5153 /* get the current bits from the src bit string */
5154 src = src_ctx + (ce_info->lsb / 8);
5155
5156 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
5157
5158 dest_byte &= mask;
5159
5160 dest_byte >>= shift_width;
5161
5162 /* get the address from the struct field */
5163 target = dest_ctx + ce_info->offset;
5164
5165 /* put it back in the struct */
5166 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
5167 }
5168
5169 /**
5170 * ice_read_word - read context word into struct
5171 * @src_ctx: the context structure to read from
5172 * @dest_ctx: the context to be written to
5173 * @ce_info: a description of the struct to be filled
5174 */
5175 static void
5176 ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5177 {
5178 u16 dest_word, mask;
5179 u8 *src, *target;
5180 __le16 src_word;
5181 u16 shift_width;
5182
5183 /* prepare the bits and mask */
5184 shift_width = ce_info->lsb % 8;
5185 mask = BIT(ce_info->width) - 1;
5186
5187 /* shift to correct alignment */
5188 mask <<= shift_width;
5189
5190 /* get the current bits from the src bit string */
5191 src = src_ctx + (ce_info->lsb / 8);
5192
5193 ice_memcpy(&src_word, src, sizeof(src_word), ICE_NONDMA_TO_NONDMA);
5194
5195 /* the data in the memory is stored as little endian so mask it
5196 * correctly
5197 */
5198 src_word &= CPU_TO_LE16(mask);
5199
5200 /* get the data back into host order before shifting */
5201 dest_word = LE16_TO_CPU(src_word);
5202
5203 dest_word >>= shift_width;
5204
5205 /* get the address from the struct field */
5206 target = dest_ctx + ce_info->offset;
5207
5208 /* put it back in the struct */
5209 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
5210 }
5211
5212 /**
5213 * ice_read_dword - read context dword into struct
5214 * @src_ctx: the context structure to read from
5215 * @dest_ctx: the context to be written to
5216 * @ce_info: a description of the struct to be filled
5217 */
5218 static void
5219 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5220 {
5221 u32 dest_dword, mask;
5222 __le32 src_dword;
5223 u8 *src, *target;
5224 u16 shift_width;
5225
5226 /* prepare the bits and mask */
5227 shift_width = ce_info->lsb % 8;
5228
5229 /* if the field width is exactly 32 on an x86 machine, then the shift
5230 * operation will not work because the SHL instruction's count is masked
5231 * to 5 bits so the shift will do nothing
5232 */
5233 if (ce_info->width < 32)
5234 mask = BIT(ce_info->width) - 1;
5235 else
5236 mask = (u32)~0;
5237
5238 /* shift to correct alignment */
5239 mask <<= shift_width;
5240
5241 /* get the current bits from the src bit string */
5242 src = src_ctx + (ce_info->lsb / 8);
5243
5244 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_NONDMA_TO_NONDMA);
5245
5246 /* the data in the memory is stored as little endian so mask it
5247 * correctly
5248 */
5249 src_dword &= CPU_TO_LE32(mask);
5250
5251 /* get the data back into host order before shifting */
5252 dest_dword = LE32_TO_CPU(src_dword);
5253
5254 dest_dword >>= shift_width;
5255
5256 /* get the address from the struct field */
5257 target = dest_ctx + ce_info->offset;
5258
5259 /* put it back in the struct */
5260 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
5261 }
5262
5263 /**
5264 * ice_read_qword - read context qword into struct
5265 * @src_ctx: the context structure to read from
5266 * @dest_ctx: the context to be written to
5267 * @ce_info: a description of the struct to be filled
5268 */
5269 static void
5270 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5271 {
5272 u64 dest_qword, mask;
5273 __le64 src_qword;
5274 u8 *src, *target;
5275 u16 shift_width;
5276
5277 /* prepare the bits and mask */
5278 shift_width = ce_info->lsb % 8;
5279
5280 /* if the field width is exactly 64 on an x86 machine, then the shift
5281 * operation will not work because the SHL instruction's count is masked
5282 * to 6 bits so the shift will do nothing
5283 */
5284 if (ce_info->width < 64)
5285 mask = BIT_ULL(ce_info->width) - 1;
5286 else
5287 mask = (u64)~0;
5288
5289 /* shift to correct alignment */
5290 mask <<= shift_width;
5291
5292 /* get the current bits from the src bit string */
5293 src = src_ctx + (ce_info->lsb / 8);
5294
5295 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_NONDMA_TO_NONDMA);
5296
5297 /* the data in the memory is stored as little endian so mask it
5298 * correctly
5299 */
5300 src_qword &= CPU_TO_LE64(mask);
5301
5302 /* get the data back into host order before shifting */
5303 dest_qword = LE64_TO_CPU(src_qword);
5304
5305 dest_qword >>= shift_width;
5306
5307 /* get the address from the struct field */
5308 target = dest_ctx + ce_info->offset;
5309
5310 /* put it back in the struct */
5311 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
5312 }
5313
5314 /**
5315 * ice_get_ctx - extract context bits from a packed structure
5316 * @src_ctx: pointer to a generic packed context structure
5317 * @dest_ctx: pointer to a generic non-packed context structure
5318 * @ce_info: a description of the structure to be read from
5319 */
5320 int
5321 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
5322 {
5323 int f;
5324
5325 for (f = 0; ce_info[f].width; f++) {
5326 switch (ce_info[f].size_of) {
5327 case 1:
5328 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
5329 break;
5330 case 2:
5331 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
5332 break;
5333 case 4:
5334 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
5335 break;
5336 case 8:
5337 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
5338 break;
5339 default:
5340 /* nothing to do, just keep going */
5341 break;
5342 }
5343 }
5344
5345 return 0;
5346 }
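
/* Illustrative sketch (not part of the driver): ice_get_ctx() is the inverse
 * of ice_set_ctx(); given the same descriptor table it unpacks a HW-format
 * buffer back into the non-packed struct. The names reuse the hypothetical
 * example shown after ice_set_ctx() above.
 *
 *	struct my_ctx unpacked = { 0 };
 *
 *	// extracts bits 0..12 into .head and bit 13 into .ena
 *	ice_get_ctx(packed, (u8 *)&unpacked, my_ctx_info);
 */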
5347
5348 /**
5349 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
5350 * @hw: pointer to the HW struct
5351 * @vsi_handle: software VSI handle
5352 * @tc: TC number
5353 * @q_handle: software queue handle
5354 */
5355 struct ice_q_ctx *
5356 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
5357 {
5358 struct ice_vsi_ctx *vsi;
5359 struct ice_q_ctx *q_ctx;
5360
5361 vsi = ice_get_vsi_ctx(hw, vsi_handle);
5362 if (!vsi)
5363 return NULL;
5364 if (q_handle >= vsi->num_lan_q_entries[tc])
5365 return NULL;
5366 if (!vsi->lan_q_ctx[tc])
5367 return NULL;
5368 q_ctx = vsi->lan_q_ctx[tc];
5369 return &q_ctx[q_handle];
5370 }
5371
5372 /**
5373 * ice_ena_vsi_txq
5374 * @pi: port information structure
5375 * @vsi_handle: software VSI handle
5376 * @tc: TC number
5377 * @q_handle: software queue handle
5378 * @num_qgrps: Number of added queue groups
5379 * @buf: list of queue groups to be added
5380 * @buf_size: size of buffer for indirect command
5381 * @cd: pointer to command details structure or NULL
5382 *
5383 * This function adds one LAN queue
5384 */
5385 int
5386 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
5387 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
5388 struct ice_sq_cd *cd)
5389 {
5390 struct ice_aqc_txsched_elem_data node = { 0 };
5391 struct ice_sched_node *parent;
5392 struct ice_q_ctx *q_ctx;
5393 struct ice_hw *hw;
5394 int status;
5395
5396 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5397 return ICE_ERR_CFG;
5398
5399 if (num_qgrps > 1 || buf->num_txqs > 1)
5400 return ICE_ERR_MAX_LIMIT;
5401
5402 hw = pi->hw;
5403
5404 if (!ice_is_vsi_valid(hw, vsi_handle))
5405 return ICE_ERR_PARAM;
5406
5407 ice_acquire_lock(&pi->sched_lock);
5408
5409 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
5410 if (!q_ctx) {
5411 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
5412 q_handle);
5413 status = ICE_ERR_PARAM;
5414 goto ena_txq_exit;
5415 }
5416
5417 /* find a parent node */
5418 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5419 ICE_SCHED_NODE_OWNER_LAN);
5420 if (!parent) {
5421 status = ICE_ERR_PARAM;
5422 goto ena_txq_exit;
5423 }
5424
5425 buf->parent_teid = parent->info.node_teid;
5426 node.parent_teid = parent->info.node_teid;
5427 /* Mark the values in the "generic" section as valid. The default
5428 * value in the "generic" section is zero. This means that:
5429 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
5430 * - 0 priority among siblings, indicated by Bit 1-3.
5431 * - WFQ, indicated by Bit 4.
5432 * - 0 Adjustment value is used in PSM credit update flow, indicated by
5433 * Bit 5-6.
5434 * - Bit 7 is reserved.
5435 * Without setting the generic section as valid in valid_sections, the
5436 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
5437 */
5438 buf->txqs[0].info.valid_sections =
5439 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5440 ICE_AQC_ELEM_VALID_EIR;
5441 buf->txqs[0].info.generic = 0;
5442 buf->txqs[0].info.cir_bw.bw_profile_idx =
5443 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5444 buf->txqs[0].info.cir_bw.bw_alloc =
5445 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5446 buf->txqs[0].info.eir_bw.bw_profile_idx =
5447 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5448 buf->txqs[0].info.eir_bw.bw_alloc =
5449 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5450
5451 /* add the LAN queue */
5452 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5453 if (status) {
5454 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5455 LE16_TO_CPU(buf->txqs[0].txq_id),
5456 hw->adminq.sq_last_status);
5457 goto ena_txq_exit;
5458 }
5459
5460 node.node_teid = buf->txqs[0].q_teid;
5461 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5462 q_ctx->q_handle = q_handle;
5463 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
5464
5465 /* add a leaf node into scheduler tree queue layer */
5466 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
5467 if (!status)
5468 status = ice_sched_replay_q_bw(pi, q_ctx);
5469
5470 ena_txq_exit:
5471 ice_release_lock(&pi->sched_lock);
5472 return status;
5473 }
5474
5475 /**
5476 * ice_dis_vsi_txq
5477 * @pi: port information structure
5478 * @vsi_handle: software VSI handle
5479 * @tc: TC number
5480 * @num_queues: number of queues
5481 * @q_handles: pointer to software queue handle array
5482 * @q_ids: pointer to the q_id array
5483 * @q_teids: pointer to queue node teids
5484 * @rst_src: if called due to reset, specifies the reset source
5485 * @vmvf_num: the relative VM or VF number that is undergoing the reset
5486 * @cd: pointer to command details structure or NULL
5487 *
5488 * This function removes queues and their corresponding nodes in SW DB
5489 */
5490 int
5491 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5492 u16 *q_handles, u16 *q_ids, u32 *q_teids,
5493 enum ice_disq_rst_src rst_src, u16 vmvf_num,
5494 struct ice_sq_cd *cd)
5495 {
5496 struct ice_aqc_dis_txq_item *qg_list;
5497 struct ice_q_ctx *q_ctx;
5498 int status = ICE_ERR_DOES_NOT_EXIST;
5499 struct ice_hw *hw;
5500 u16 i, buf_size;
5501
5502 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5503 return ICE_ERR_CFG;
5504
5505 hw = pi->hw;
5506
5507 if (!num_queues) {
5508 /* if the queue is already disabled but the disable queue command
5509 * still has to be sent to complete the VF reset, call
5510 * ice_aq_dis_lan_txq without any queue information
5511 */
5512 if (rst_src)
5513 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5514 vmvf_num, NULL);
5515 return ICE_ERR_CFG;
5516 }
5517
5518 buf_size = ice_struct_size(qg_list, q_id, 1);
5519 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5520 if (!qg_list)
5521 return ICE_ERR_NO_MEMORY;
5522
5523 ice_acquire_lock(&pi->sched_lock);
5524
5525 for (i = 0; i < num_queues; i++) {
5526 struct ice_sched_node *node;
5527
5528 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5529 if (!node)
5530 continue;
5531 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5532 if (!q_ctx) {
5533 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
5534 q_handles[i]);
5535 continue;
5536 }
5537 if (q_ctx->q_handle != q_handles[i]) {
5538 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
5539 q_ctx->q_handle, q_handles[i]);
5540 continue;
5541 }
5542 qg_list->parent_teid = node->info.parent_teid;
5543 qg_list->num_qs = 1;
5544 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5545 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5546 vmvf_num, cd);
5547
5548 if (status)
5549 break;
5550 ice_free_sched_node(pi, node);
5551 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5552 }
5553 ice_release_lock(&pi->sched_lock);
5554 ice_free(hw, qg_list);
5555 return status;
5556 }
5557
5558 /**
5559 * ice_cfg_vsi_qs - configure the new/existing VSI queues
5560 * @pi: port information structure
5561 * @vsi_handle: software VSI handle
5562 * @tc_bitmap: TC bitmap
5563 * @maxqs: max queues array per TC
5564 * @owner: LAN or RDMA
5565 *
5566 * This function adds/updates the VSI queues per TC.
5567 */
5568 static int
5569 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5570 u16 *maxqs, u8 owner)
5571 {
5572 int status = 0;
5573 u8 i;
5574
5575 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5576 return ICE_ERR_CFG;
5577
5578 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5579 return ICE_ERR_PARAM;
5580
5581 ice_acquire_lock(&pi->sched_lock);
5582
5583 ice_for_each_traffic_class(i) {
5584 /* configuration is possible only if TC node is present */
5585 if (!ice_sched_get_tc_node(pi, i))
5586 continue;
5587
5588 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
5589 ice_is_tc_ena(tc_bitmap, i));
5590 if (status)
5591 break;
5592 }
5593
5594 ice_release_lock(&pi->sched_lock);
5595 return status;
5596 }
5597
5598 /**
5599 * ice_cfg_vsi_lan - configure VSI LAN queues
5600 * @pi: port information structure
5601 * @vsi_handle: software VSI handle
5602 * @tc_bitmap: TC bitmap
5603 * @max_lanqs: max LAN queues array per TC
5604 *
5605 * This function adds/updates the VSI LAN queues per TC.
5606 */
5607 int
5608 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5609 u16 *max_lanqs)
5610 {
5611 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
5612 ICE_SCHED_NODE_OWNER_LAN);
5613 }
5614
5615 /**
5616 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
5617 * @pi: port information structure
5618 * @vsi_handle: software VSI handle
5619 * @tc_bitmap: TC bitmap
5620 * @max_rdmaqs: max RDMA queues array per TC
5621 *
5622 * This function adds/updates the VSI RDMA queues per TC.
5623 */
5624 int
5625 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5626 u16 *max_rdmaqs)
5627 {
5628 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5629 ICE_SCHED_NODE_OWNER_RDMA);
5630 }
5631
5632 /**
5633 * ice_ena_vsi_rdma_qset
5634 * @pi: port information structure
5635 * @vsi_handle: software VSI handle
5636 * @tc: TC number
5637 * @rdma_qset: pointer to RDMA qset
5638 * @num_qsets: number of RDMA qsets
5639 * @qset_teid: pointer to qset node teids
5640 *
5641 * This function adds RDMA qset
5642 */
5643 int
5644 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5645 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5646 {
5647 struct ice_aqc_txsched_elem_data node = { 0 };
5648 struct ice_aqc_add_rdma_qset_data *buf;
5649 struct ice_sched_node *parent;
5650 struct ice_hw *hw;
5651 u16 i, buf_size;
5652 int status;
5653
5654 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5655 return ICE_ERR_CFG;
5656 hw = pi->hw;
5657
5658 if (!ice_is_vsi_valid(hw, vsi_handle))
5659 return ICE_ERR_PARAM;
5660
5661 buf_size = ice_struct_size(buf, rdma_qsets, num_qsets);
5662 buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
5663 if (!buf)
5664 return ICE_ERR_NO_MEMORY;
5665 ice_acquire_lock(&pi->sched_lock);
5666
5667 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5668 ICE_SCHED_NODE_OWNER_RDMA);
5669 if (!parent) {
5670 status = ICE_ERR_PARAM;
5671 goto rdma_error_exit;
5672 }
5673 buf->parent_teid = parent->info.node_teid;
5674 node.parent_teid = parent->info.node_teid;
5675
5676 buf->num_qsets = CPU_TO_LE16(num_qsets);
5677 for (i = 0; i < num_qsets; i++) {
5678 buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
5679 buf->rdma_qsets[i].info.valid_sections =
5680 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5681 ICE_AQC_ELEM_VALID_EIR;
5682 buf->rdma_qsets[i].info.generic = 0;
5683 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5684 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5685 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5686 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5687 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5688 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5689 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5690 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5691 }
5692 status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5693 if (status) {
5694 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5695 goto rdma_error_exit;
5696 }
5697 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5698 for (i = 0; i < num_qsets; i++) {
5699 node.node_teid = buf->rdma_qsets[i].qset_teid;
5700 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5701 &node, NULL);
5702 if (status)
5703 break;
5704 qset_teid[i] = LE32_TO_CPU(node.node_teid);
5705 }
5706 rdma_error_exit:
5707 ice_release_lock(&pi->sched_lock);
5708 ice_free(hw, buf);
5709 return status;
5710 }
5711
5712 /**
5713 * ice_dis_vsi_rdma_qset - free RDMA resources
5714 * @pi: port_info struct
5715 * @count: number of RDMA qsets to free
5716 * @qset_teid: TEID of qset node
5717 * @q_id: list of queue IDs being disabled
5718 */
5719 int
5720 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5721 u16 *q_id)
5722 {
5723 struct ice_aqc_dis_txq_item *qg_list;
5724 struct ice_hw *hw;
5725 int status = 0;
5726 u16 qg_size;
5727 int i;
5728
5729 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5730 return ICE_ERR_CFG;
5731
5732 hw = pi->hw;
5733
5734 qg_size = ice_struct_size(qg_list, q_id, 1);
5735 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
5736 if (!qg_list)
5737 return ICE_ERR_NO_MEMORY;
5738
5739 ice_acquire_lock(&pi->sched_lock);
5740
5741 for (i = 0; i < count; i++) {
5742 struct ice_sched_node *node;
5743
5744 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5745 if (!node)
5746 continue;
5747
5748 qg_list->parent_teid = node->info.parent_teid;
5749 qg_list->num_qs = 1;
5750 qg_list->q_id[0] =
5751 CPU_TO_LE16(q_id[i] |
5752 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
5753
5754 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5755 ICE_NO_RESET, 0, NULL);
5756 if (status)
5757 break;
5758
5759 ice_free_sched_node(pi, node);
5760 }
5761
5762 ice_release_lock(&pi->sched_lock);
5763 ice_free(hw, qg_list);
5764 return status;
5765 }
5766
5767 /**
5768 * ice_aq_get_sensor_reading
5769 * @hw: pointer to the HW struct
5770 * @sensor: sensor type
5771 * @format: requested response format
5772 * @data: pointer to data to be read from the sensor
5773 * @cd: pointer to command details structure or NULL
5774 *
5775 * Get sensor reading (0x0632)
5776 */
5777 int
5778 ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
5779 struct ice_aqc_get_sensor_reading_resp *data,
5780 struct ice_sq_cd *cd)
5781 {
5782 struct ice_aqc_get_sensor_reading *cmd;
5783 struct ice_aq_desc desc;
5784 int status;
5785
5786 if (!data)
5787 return ICE_ERR_PARAM;
5788
5789 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
5790 cmd = &desc.params.get_sensor_reading;
5791 cmd->sensor = sensor;
5792 cmd->format = format;
5793
5794 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5795
5796 if (!status)
5797 ice_memcpy(data, &desc.params.get_sensor_reading_resp,
5798 sizeof(*data), ICE_NONDMA_TO_NONDMA);
5799
5800 return status;
5801 }
5802
5803 /**
5804 * ice_is_main_vsi - checks whether the VSI is main VSI
5805 * @hw: pointer to the HW struct
5806 * @vsi_handle: VSI handle
5807 *
5808 * Checks whether the VSI is the main VSI (the first PF VSI created on
5809 * given PF).
5810 */
5811 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5812 {
5813 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5814 }
5815
5816 /**
5817 * ice_replay_pre_init - replay pre initialization
5818 * @hw: pointer to the HW struct
5819 * @sw: pointer to switch info struct for which function initializes filters
5820 *
5821 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
5822 */
5823 int
5824 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5825 {
5826 int status;
5827 u8 i;
5828
5829 /* Delete old entries from replay filter list head if there is any */
5830 ice_rm_sw_replay_rule_info(hw, sw);
5831 /* At the start of replay, move entries into the replay_rules list; this
5832 * allows rule entries to be added back to the filt_rules list, which is
5833 * the operational list.
5834 */
5835 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
5836 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5837 &sw->recp_list[i].filt_replay_rules);
5838 ice_sched_replay_agg_vsi_preinit(hw);
5839
5840 status = ice_sched_replay_root_node_bw(hw->port_info);
5841 if (status)
5842 return status;
5843
5844 return ice_sched_replay_tc_node_bw(hw->port_info);
5845 }
5846
5847 /**
5848 * ice_replay_vsi - replay VSI configuration
5849 * @hw: pointer to the HW struct
5850 * @vsi_handle: driver VSI handle
5851 *
5852 * Restore all VSI configuration after reset. It is required to call this
5853 * function with main VSI first.
5854 */
5855 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5856 {
5857 struct ice_switch_info *sw = hw->switch_info;
5858 struct ice_port_info *pi = hw->port_info;
5859 int status;
5860
5861 if (!ice_is_vsi_valid(hw, vsi_handle))
5862 return ICE_ERR_PARAM;
5863
5864 /* Replay pre-initialization if there is any */
5865 if (ice_is_main_vsi(hw, vsi_handle)) {
5866 status = ice_replay_pre_init(hw, sw);
5867 if (status)
5868 return status;
5869 }
5870 /* Replay per VSI all RSS configurations */
5871 status = ice_replay_rss_cfg(hw, vsi_handle);
5872 if (status)
5873 return status;
5874 /* Replay per VSI all filters */
5875 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5876 if (!status)
5877 status = ice_replay_vsi_agg(hw, vsi_handle);
5878 return status;
5879 }
5880
5881 /**
5882 * ice_replay_post - post replay configuration cleanup
5883 * @hw: pointer to the HW struct
5884 *
5885 * Post replay cleanup.
5886 */
5887 void ice_replay_post(struct ice_hw *hw)
5888 {
5889 /* Delete old entries from replay filter list head */
5890 ice_rm_all_sw_replay_rule_info(hw);
5891 ice_sched_replay_agg(hw);
5892 }
5893
5894 /**
5895 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5896 * @hw: ptr to the hardware info
5897 * @reg: offset of 64 bit HW register to read from
5898 * @prev_stat_loaded: bool to specify if previous stats are loaded
5899 * @prev_stat: ptr to previous loaded stat value
5900 * @cur_stat: ptr to current stat value
5901 */
5902 void
5903 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5904 u64 *prev_stat, u64 *cur_stat)
5905 {
5906 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5907
5908 /* device stats are not reset at PFR, so they likely will not be zeroed
5909 * when the driver starts. Thus, save the value from the first read
5910 * without adding to the statistic value so that we report stats which
5911 * count up from zero.
5912 */
5913 if (!prev_stat_loaded) {
5914 *prev_stat = new_data;
5915 return;
5916 }
5917
5918 /* Calculate the difference between the new and old values, and then
5919 * add it to the software stat value.
5920 */
5921 if (new_data >= *prev_stat)
5922 *cur_stat += new_data - *prev_stat;
5923 else
5924 /* to manage the potential roll-over */
5925 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5926
5927 /* Update the previously stored value to prepare for next read */
5928 *prev_stat = new_data;
5929 }
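
/* Worked example (illustration only) of the roll-over branch above: with
 * *prev_stat == 0xFFFFFFFFF0 and a new 40-bit reading of 0x10, new_data is
 * smaller than *prev_stat, so the stat advances by
 * (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20: the 0x10 counts up to the
 * 40-bit wrap plus the 0x10 counts after it.
 */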
5930
5931 /**
5932 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5933 * @hw: ptr to the hardware info
5934 * @reg: offset of HW register to read from
5935 * @prev_stat_loaded: bool to specify if previous stats are loaded
5936 * @prev_stat: ptr to previous loaded stat value
5937 * @cur_stat: ptr to current stat value
5938 */
5939 void
5940 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5941 u64 *prev_stat, u64 *cur_stat)
5942 {
5943 u32 new_data;
5944
5945 new_data = rd32(hw, reg);
5946
5947 /* device stats are not reset at PFR, so they likely will not be zeroed
5948 * when the driver starts. Thus, save the value from the first read
5949 * without adding to the statistic value so that we report stats which
5950 * count up from zero.
5951 */
5952 if (!prev_stat_loaded) {
5953 *prev_stat = new_data;
5954 return;
5955 }
5956
5957 /* Calculate the difference between the new and old values, and then
5958 * add it to the software stat value.
5959 */
5960 if (new_data >= *prev_stat)
5961 *cur_stat += new_data - *prev_stat;
5962 else
5963 /* to manage the potential roll-over */
5964 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5965
5966 /* Update the previously stored value to prepare for next read */
5967 *prev_stat = new_data;
5968 }
5969
5970 /**
5971 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5972 * @hw: ptr to the hardware info
5973 * @vsi_handle: VSI handle
5974 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
5975 * @cur_stats: ptr to current stats structure
5976 *
5977 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
5978 * thus cannot be read using the normal ice_stat_update32 function.
5979 *
5980 * Read the GLV_REPC register associated with the given VSI, and update the
5981 * rx_no_desc and rx_error values in the ice_eth_stats structure.
5982 *
5983 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
5984 * cleared each time it's read.
5985 *
5986 * Note that the GLV_RDPC register also counts the causes that would trigger
5987 * GLV_REPC. However, it does not give the finer grained detail about why the
5988 * packets are being dropped. The GLV_REPC values can be used to distinguish
5989 * whether Rx packets are dropped due to errors or due to no available
5990 * descriptors.
5991 */
5992 void
5993 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5994 struct ice_eth_stats *cur_stats)
5995 {
5996 u16 vsi_num, no_desc, error_cnt;
5997 u32 repc;
5998
5999 if (!ice_is_vsi_valid(hw, vsi_handle))
6000 return;
6001
6002 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
6003
6004 /* If we haven't loaded stats yet, just clear the current value */
6005 if (!prev_stat_loaded) {
6006 wr32(hw, GLV_REPC(vsi_num), 0);
6007 return;
6008 }
6009
6010 repc = rd32(hw, GLV_REPC(vsi_num));
6011 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
6012 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
6013
6014 /* Clear the count by writing to the stats register */
6015 wr32(hw, GLV_REPC(vsi_num), 0);
6016
6017 cur_stats->rx_no_desc += no_desc;
6018 cur_stats->rx_errors += error_cnt;
6019 }
6020
6021 /**
6022 * ice_aq_alternate_write
6023 * @hw: pointer to the hardware structure
6024 * @reg_addr0: address of first dword to be written
6025 * @reg_val0: value to be written under 'reg_addr0'
6026 * @reg_addr1: address of second dword to be written
6027 * @reg_val1: value to be written under 'reg_addr1'
6028 *
6029 * Write one or two dwords to alternate structure. Fields are indicated
6030 * by 'reg_addr0' and 'reg_addr1' register numbers.
6031 */
6032 int
6033 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
6034 u32 reg_addr1, u32 reg_val1)
6035 {
6036 struct ice_aqc_read_write_alt_direct *cmd;
6037 struct ice_aq_desc desc;
6038 int status;
6039
6040 cmd = &desc.params.read_write_alt_direct;
6041
6042 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
6043 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
6044 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
6045 cmd->dword0_value = CPU_TO_LE32(reg_val0);
6046 cmd->dword1_value = CPU_TO_LE32(reg_val1);
6047
6048 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6049
6050 return status;
6051 }
6052
6053 /**
6054 * ice_aq_alternate_read
6055 * @hw: pointer to the hardware structure
6056 * @reg_addr0: address of first dword to be read
6057 * @reg_val0: pointer for data read from 'reg_addr0'
6058 * @reg_addr1: address of second dword to be read
6059 * @reg_val1: pointer for data read from 'reg_addr1'
6060 *
6061 * Read one or two dwords from alternate structure. Fields are indicated
6062 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
6063 * is not passed then only register at 'reg_addr0' is read.
6064 */
6065 int
6066 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
6067 u32 reg_addr1, u32 *reg_val1)
6068 {
6069 struct ice_aqc_read_write_alt_direct *cmd;
6070 struct ice_aq_desc desc;
6071 int status;
6072
6073 cmd = &desc.params.read_write_alt_direct;
6074
6075 if (!reg_val0)
6076 return ICE_ERR_PARAM;
6077
6078 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
6079 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
6080 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
6081
6082 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6083
6084 if (!status) {
6085 *reg_val0 = LE32_TO_CPU(cmd->dword0_value);
6086
6087 if (reg_val1)
6088 *reg_val1 = LE32_TO_CPU(cmd->dword1_value);
6089 }
6090
6091 return status;
6092 }
6093
6094 /**
6095 * ice_aq_alternate_write_done
6096 * @hw: pointer to the HW structure.
6097 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
6098 * @reset_needed: indicates the SW should trigger GLOBAL reset
6099 *
6100 * Indicates to the FW that alternate structures have been changed.
6101 */
6102 int
6103 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
6104 {
6105 struct ice_aqc_done_alt_write *cmd;
6106 struct ice_aq_desc desc;
6107 int status;
6108
6109 cmd = &desc.params.done_alt_write;
6110
6111 if (!reset_needed)
6112 return ICE_ERR_PARAM;
6113
6114 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
6115 cmd->flags = bios_mode;
6116
6117 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6118 if (!status)
6119 *reset_needed = (LE16_TO_CPU(cmd->flags) &
6120 ICE_AQC_RESP_RESET_NEEDED) != 0;
6121
6122 return status;
6123 }
6124
6125 /**
6126 * ice_aq_alternate_clear
6127 * @hw: pointer to the HW structure.
6128 *
6129 * Clear the alternate structures of the port from which the function
6130 * is called.
6131 */
6132 int ice_aq_alternate_clear(struct ice_hw *hw)
6133 {
6134 struct ice_aq_desc desc;
6135 int status;
6136
6137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
6138
6139 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6140
6141 return status;
6142 }
6143
6144 /**
6145 * ice_sched_query_elem - query element information from HW
6146 * @hw: pointer to the HW struct
6147 * @node_teid: node TEID to be queried
6148 * @buf: buffer to element information
6149 *
6150 * This function queries HW element information
6151 */
6152 int
6153 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
6154 struct ice_aqc_txsched_elem_data *buf)
6155 {
6156 u16 buf_size, num_elem_ret = 0;
6157 int status;
6158
6159 buf_size = sizeof(*buf);
6160 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
6161 buf->node_teid = CPU_TO_LE32(node_teid);
6162 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
6163 NULL);
6164 if (status || num_elem_ret != 1)
6165 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
6166 return status;
6167 }
6168
6169 /**
6170 * ice_get_fw_mode - returns FW mode
6171 * @hw: pointer to the HW struct
6172 */
6173 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
6174 {
6175 #define ICE_FW_MODE_DBG_M BIT(0)
6176 #define ICE_FW_MODE_REC_M BIT(1)
6177 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
6178 u32 fw_mode;
6179
6180 /* check the current FW mode */
6181 fw_mode = rd32(hw, GL_MNG_FWSM) & E800_GL_MNG_FWSM_FW_MODES_M;
6182 if (fw_mode & ICE_FW_MODE_DBG_M)
6183 return ICE_FW_MODE_DBG;
6184 else if (fw_mode & ICE_FW_MODE_REC_M)
6185 return ICE_FW_MODE_REC;
6186 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
6187 return ICE_FW_MODE_ROLLBACK;
6188 else
6189 return ICE_FW_MODE_NORMAL;
6190 }
6191
6192 /**
6193 * ice_get_cur_lldp_persist_status
6194 * @hw: pointer to the HW struct
6195 * @lldp_status: return value of LLDP persistent status
6196 *
6197 * Get the current status of LLDP persistent
6198 */
6199 int
6200 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
6201 {
6202 struct ice_port_info *pi = hw->port_info;
6203 __le32 raw_data;
6204 u32 data, mask;
6205 int ret;
6206
6207 if (!lldp_status)
6208 return ICE_ERR_BAD_PTR;
6209
6210 ret = ice_acquire_nvm(hw, ICE_RES_READ);
6211 if (ret)
6212 return ret;
6213
6214 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
6215 ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
6216 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
6217 false, true, NULL);
6218 if (!ret) {
6219 data = LE32_TO_CPU(raw_data);
6220 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
6221 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6222 data = data & mask;
6223 *lldp_status = data >>
6224 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6225 }
6226
6227 ice_release_nvm(hw);
6228
6229 return ret;
6230 }
6231
6232 /**
6233 * ice_get_dflt_lldp_persist_status
6234 * @hw: pointer to the HW struct
6235 * @lldp_status: return value of LLDP persistent status
6236 *
6237 * Get the default status of LLDP persistent
6238 */
6239 int
6240 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
6241 {
6242 struct ice_port_info *pi = hw->port_info;
6243 u32 data, mask, loc_data, loc_data_tmp;
6244 __le16 loc_raw_data;
6245 __le32 raw_data;
6246 int ret;
6247
6248 if (!lldp_status)
6249 return ICE_ERR_BAD_PTR;
6250
6251 ret = ice_acquire_nvm(hw, ICE_RES_READ);
6252 if (ret)
6253 return ret;
6254
6255 /* Read the offset of EMP_SR_PTR */
6256 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
6257 ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
6258 ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
6259 &loc_raw_data, false, true, NULL);
6260 if (ret)
6261 goto exit;
6262
6263 loc_data = LE16_TO_CPU(loc_raw_data);
6264 if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
6265 loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
6266 loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
6267 } else {
6268 loc_data *= ICE_AQC_NVM_WORD_UNIT;
6269 }
6270
6271 /* Read the offset of LLDP configuration pointer */
6272 loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
6273 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
6274 ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
6275 false, true, NULL);
6276 if (ret)
6277 goto exit;
6278
6279 loc_data_tmp = LE16_TO_CPU(loc_raw_data);
6280 loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
6281 loc_data += loc_data_tmp;
6282
6283 /* We need to skip LLDP configuration section length (2 bytes) */
6284 loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
6285
6286 /* Read the LLDP Default Configure */
6287 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
6288 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
6289 true, NULL);
6290 if (!ret) {
6291 data = LE32_TO_CPU(raw_data);
6292 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
6293 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6294 data = data & mask;
6295 *lldp_status = data >>
6296 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6297 }
6298
6299 exit:
6300 ice_release_nvm(hw);
6301
6302 return ret;
6303 }
6304
6305 /**
6306 * ice_aq_read_i2c
6307 * @hw: pointer to the hw struct
6308 * @topo_addr: topology address for a device to communicate with
6309 * @bus_addr: 7-bit I2C bus address
6310 * @addr: I2C memory address (I2C offset) with up to 16 bits
6311 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
6312 * bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
6313 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
6314 * @cd: pointer to command details structure or NULL
6315 *
6316 * Read I2C (0x06E2)
6317 */
6318 int
6319 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6320 u16 bus_addr, __le16 addr, u8 params, u8 *data,
6321 struct ice_sq_cd *cd)
6322 {
6323 struct ice_aq_desc desc = { 0 };
6324 struct ice_aqc_i2c *cmd;
6325 u8 data_size;
6326 int status;
6327
6328 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
6329 cmd = &desc.params.read_write_i2c;
6330
6331 if (!data)
6332 return ICE_ERR_PARAM;
6333
6334 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
6335
6336 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6337 cmd->topo_addr = topo_addr;
6338 cmd->i2c_params = params;
6339 cmd->i2c_addr = addr;
6340
6341 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6342 if (!status) {
6343 struct ice_aqc_read_i2c_resp *resp;
6344 u8 i;
6345
6346 resp = &desc.params.read_i2c_resp;
6347 for (i = 0; i < data_size; i++) {
6348 *data = resp->i2c_data[i];
6349 data++;
6350 }
6351 }
6352
6353 return status;
6354 }
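
/* Illustrative sketch (not part of the driver): building the 'params' byte
 * for a 2-byte read per the layout documented above (bit 7 repeated start,
 * bits 6:5 data offset size, bit 4 address type, bits 3:0 data size).
 * topo_addr, bus_addr and addr are assumed to come from the caller; only
 * ICE_AQC_I2C_DATA_SIZE_S/_M are macros already used by the code above.
 *
 *	u8 buf[2];
 *	u8 params = (2 << ICE_AQC_I2C_DATA_SIZE_S) & ICE_AQC_I2C_DATA_SIZE_M;
 *	int err;
 *
 *	err = ice_aq_read_i2c(hw, topo_addr, bus_addr, addr, params, buf,
 *			      NULL);
 */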
6355
6356 /**
6357 * ice_aq_write_i2c
6358 * @hw: pointer to the hw struct
6359 * @topo_addr: topology address for a device to communicate with
6360 * @bus_addr: 7-bit I2C bus address
6361 * @addr: I2C memory address (I2C offset) with up to 16 bits
6362 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
6363 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
6364 * @cd: pointer to command details structure or NULL
6365 *
6366 * Write I2C (0x06E3)
6367 */
6368 int
6369 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6370 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
6371 struct ice_sq_cd *cd)
6372 {
6373 struct ice_aq_desc desc = { 0 };
6374 struct ice_aqc_i2c *cmd;
6375 u8 i, data_size;
6376
6377 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
6378 cmd = &desc.params.read_write_i2c;
6379
6380 data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;
6381
6382 /* data_size limited to 4 */
6383 if (data_size > 4)
6384 return ICE_ERR_PARAM;
6385
6386 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6387 cmd->topo_addr = topo_addr;
6388 cmd->i2c_params = params;
6389 cmd->i2c_addr = addr;
6390
6391 for (i = 0; i < data_size; i++) {
6392 cmd->i2c_data[i] = *data;
6393 data++;
6394 }
6395
6396 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6397 }
6398
6399 /**
6400 * ice_aq_set_gpio
6401 * @hw: pointer to the hw struct
6402 * @gpio_ctrl_handle: GPIO controller node handle
6403 * @pin_idx: IO Number of the GPIO that needs to be set
6404 * @value: SW-provided IO value to set in the LSB
6405 * @cd: pointer to command details structure or NULL
6406 *
6407 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
6408 */
6409 int
6410 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
6411 struct ice_sq_cd *cd)
6412 {
6413 struct ice_aqc_gpio *cmd;
6414 struct ice_aq_desc desc;
6415
6416 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
6417 cmd = &desc.params.read_write_gpio;
6418 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6419 cmd->gpio_num = pin_idx;
6420 cmd->gpio_val = value ? 1 : 0;
6421
6422 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6423 }
6424
6425 /**
6426 * ice_aq_get_gpio
6427 * @hw: pointer to the hw struct
6428 * @gpio_ctrl_handle: GPIO controller node handle
6429 * @pin_idx: IO number of the GPIO whose value is to be read
6430 * @value: IO value read
6431 * @cd: pointer to command details structure or NULL
6432 *
6433 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
6434 * the topology
6435 */
6436 int
6437 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
6438 bool *value, struct ice_sq_cd *cd)
6439 {
6440 struct ice_aqc_gpio *cmd;
6441 struct ice_aq_desc desc;
6442 int status;
6443
6444 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
6445 cmd = &desc.params.read_write_gpio;
6446 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6447 cmd->gpio_num = pin_idx;
6448
6449 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6450 if (status)
6451 return status;
6452
6453 *value = !!cmd->gpio_val;
6454 return 0;
6455 }
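
/* Illustrative sketch (not part of the driver): driving a pin and reading it
 * back with the two wrappers above. The controller handle and pin index are
 * hypothetical values for illustration only.
 *
 *	bool val;
 *	int err;
 *
 *	err = ice_aq_set_gpio(hw, gpio_ctrl_handle, 2, true, NULL);
 *	if (!err)
 *		err = ice_aq_get_gpio(hw, gpio_ctrl_handle, 2, &val, NULL);
 */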
6456
6457 /**
6458 * ice_is_fw_api_min_ver
6459 * @hw: pointer to the hardware structure
6460 * @maj: major version
6461 * @min: minor version
6462 * @patch: patch version
6463 *
6464 * Checks if the firmware API is at least the given version
6465 */
6466 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
6467 {
6468 if (hw->api_maj_ver == maj) {
6469 if (hw->api_min_ver > min)
6470 return true;
6471 if (hw->api_min_ver == min && hw->api_patch >= patch)
6472 return true;
6473 } else if (hw->api_maj_ver > maj) {
6474 return true;
6475 }
6476
6477 return false;
6478 }
6479
6480 /**
6481 * ice_is_fw_min_ver
6482 * @hw: pointer to the hardware structure
6483 * @branch: branch version
6484 * @maj: major version
6485 * @min: minor version
6486 * @patch: patch version
6487 *
6488 * Checks if the firmware is at least the given version
6489 */
6490 static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
6491 u8 patch)
6492 {
6493 if (hw->fw_branch == branch) {
6494 if (hw->fw_maj_ver > maj)
6495 return true;
6496 if (hw->fw_maj_ver == maj) {
6497 if (hw->fw_min_ver > min)
6498 return true;
6499 if (hw->fw_min_ver == min && hw->fw_patch >= patch)
6500 return true;
6501 }
6502 }
6503
6504 return false;
6505 }
6506
6507 /**
6508 * ice_fw_supports_link_override
6509 * @hw: pointer to the hardware structure
6510 *
6511 * Checks if the firmware supports link override
6512 */
6513 bool ice_fw_supports_link_override(struct ice_hw *hw)
6514 {
6515 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
6516 ICE_FW_API_LINK_OVERRIDE_MIN,
6517 ICE_FW_API_LINK_OVERRIDE_PATCH);
6518 }
6519
6520 /**
6521 * ice_get_link_default_override
6522 * @ldo: pointer to the link default override struct
6523 * @pi: pointer to the port info struct
6524 *
6525 * Gets the link default override for a port
6526 */
6527 int
6528 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
6529 struct ice_port_info *pi)
6530 {
6531 u16 i, tlv, tlv_len, tlv_start, buf, offset;
6532 struct ice_hw *hw = pi->hw;
6533 int status;
6534
6535 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
6536 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
6537 if (status) {
6538 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
6539 return status;
6540 }
6541
6542 /* Each port has its own config; calculate for our port */
6543 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
6544 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
6545
6546 /* link options first */
6547 status = ice_read_sr_word(hw, tlv_start, &buf);
6548 if (status) {
6549 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6550 return status;
6551 }
6552 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
6553 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
6554 ICE_LINK_OVERRIDE_PHY_CFG_S;
6555
6556 /* link PHY config */
6557 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
6558 status = ice_read_sr_word(hw, offset, &buf);
6559 if (status) {
6560 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
6561 return status;
6562 }
6563 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
6564
6565 /* PHY types low */
6566 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
6567 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6568 status = ice_read_sr_word(hw, (offset + i), &buf);
6569 if (status) {
6570 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6571 return status;
6572 }
6573 /* shift 16 bits at a time to fill 64 bits */
6574 ldo->phy_type_low |= ((u64)buf << (i * 16));
6575 }
6576
6577 /* PHY types high */
6578 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
6579 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
6580 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
6581 status = ice_read_sr_word(hw, (offset + i), &buf);
6582 if (status) {
6583 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6584 return status;
6585 }
6586 /* shift 16 bits at a time to fill 64 bits */
6587 ldo->phy_type_high |= ((u64)buf << (i * 16));
6588 }
6589
6590 return status;
6591 }
6592
6593 /**
6594 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
6595 * @caps: get PHY capability data
6596 */
6597 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
6598 {
6599 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
6600 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
6601 ICE_AQC_PHY_AN_EN_CLAUSE73 |
6602 ICE_AQC_PHY_AN_EN_CLAUSE37))
6603 return true;
6604
6605 return false;
6606 }
6607
6608 /**
6609 * ice_is_fw_health_report_supported
6610 * @hw: pointer to the hardware structure
6611 *
6612 * Return true if firmware supports health status reports,
6613 * false otherwise
6614 */
6615 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
6616 {
6617 if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
6618 return true;
6619
6620 if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
6621 if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
6622 return true;
6623 if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
6624 hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
6625 return true;
6626 }
6627
6628 return false;
6629 }
6630
6631 /**
6632 * ice_aq_set_health_status_config - Configure FW health events
6633 * @hw: pointer to the HW struct
6634 * @event_source: type of diagnostic events to enable
6635 * @cd: pointer to command details structure or NULL
6636 *
6637 * Configure the health status event types that the firmware will send to this
6638 * PF. The supported event types are: PF-specific, all PFs, and global
6639 */
6640 int
6641 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
6642 struct ice_sq_cd *cd)
6643 {
6644 struct ice_aqc_set_health_status_config *cmd;
6645 struct ice_aq_desc desc;
6646
6647 cmd = &desc.params.set_health_status_config;
6648
6649 ice_fill_dflt_direct_cmd_desc(&desc,
6650 ice_aqc_opc_set_health_status_config);
6651
6652 cmd->event_source = event_source;
6653
6654 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6655 }
6656
6657 /**
6658 * ice_aq_get_port_options
6659 * @hw: pointer to the hw struct
6660 * @options: buffer for the resultant port options
6661 * @option_count: input - size of the buffer in port options structures,
6662 * output - number of returned port options
6663 * @lport: logical port to call the command with (optional)
6664 * @lport_valid: when false, FW uses the port owned by the PF instead of
6665 *               @lport; must be true when the PF owns more than one port
6666 * @active_option_idx: index of active port option in returned buffer
6667 * @active_option_valid: active option in returned buffer is valid
6668 * @pending_option_idx: index of pending port option in returned buffer
6669 * @pending_option_valid: pending option in returned buffer is valid
6670 *
6671 * Calls Get Port Options AQC (0x06ea) and verifies result.
6672 */
6673 int
6674 ice_aq_get_port_options(struct ice_hw *hw,
6675 struct ice_aqc_get_port_options_elem *options,
6676 u8 *option_count, u8 lport, bool lport_valid,
6677 u8 *active_option_idx, bool *active_option_valid,
6678 u8 *pending_option_idx, bool *pending_option_valid)
6679 {
6680 struct ice_aqc_get_port_options *cmd;
6681 struct ice_aq_desc desc;
6682 int status;
6683 u8 i;
6684
6685 /* options buffer shall be able to hold max returned options */
6686 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
6687 return ICE_ERR_PARAM;
6688
6689 cmd = &desc.params.get_port_options;
6690 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
6691
6692 cmd->lport_num = lport;
6693 cmd->lport_num_valid = lport_valid;
6694
6695 status = ice_aq_send_cmd(hw, &desc, options,
6696 *option_count * sizeof(*options), NULL);
6697 if (status)
6698 return status;
6699
6700 /* verify direct FW response & set output parameters */
6701 *option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
6702 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
6703 *active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
6704 if (*active_option_valid) {
6705 *active_option_idx = cmd->port_options &
6706 ICE_AQC_PORT_OPT_ACTIVE_M;
6707 if (*active_option_idx > (*option_count - 1))
6708 return ICE_ERR_OUT_OF_RANGE;
6709 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
6710 *active_option_idx);
6711 }
6712
6713 *pending_option_valid = cmd->pending_port_option_status &
6714 ICE_AQC_PENDING_PORT_OPT_VALID;
6715 if (*pending_option_valid) {
6716 *pending_option_idx = cmd->pending_port_option_status &
6717 ICE_AQC_PENDING_PORT_OPT_IDX_M;
6718 if (*pending_option_idx > (*option_count - 1))
6719 return ICE_ERR_OUT_OF_RANGE;
6720 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
6721 *pending_option_idx);
6722 }
6723
6724 /* mask output options fields */
6725 for (i = 0; i < *option_count; i++) {
6726 options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
6727 options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
6728 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
6729 options[i].pmd, options[i].max_lane_speed);
6730 }
6731
6732 return 0;
6733 }
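
/* Illustrative usage sketch (not part of the driver): the caller has to size
 * the buffer for the maximum option count, since anything smaller than
 * ICE_AQC_PORT_OPT_COUNT_M is rejected above:
 *
 *	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_COUNT_M];
 *	u8 count = ICE_AQC_PORT_OPT_COUNT_M;
 *	u8 active_idx, pending_idx;
 *	bool active_valid, pending_valid;
 *
 *	err = ice_aq_get_port_options(hw, options, &count, 0, false,
 *				      &active_idx, &active_valid,
 *				      &pending_idx, &pending_valid);
 */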
6734
6735 /**
6736 * ice_aq_set_port_option
6737 * @hw: pointer to the hw struct
6738 * @lport: logical port to call the command with
6739 * @lport_valid: when false, FW uses the port owned by the PF instead of
6740 *               @lport; must be true when the PF owns more than one port
6741 * @new_option: new port option to be written
6742 *
6743 * Calls Set Port Options AQC (0x06eb).
6744 */
6745 int
6746 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
6747 u8 new_option)
6748 {
6749 struct ice_aqc_set_port_option *cmd;
6750 struct ice_aq_desc desc;
6751
6752 if (new_option >= ICE_AQC_PORT_OPT_COUNT_M)
6753 return ICE_ERR_PARAM;
6754
6755 cmd = &desc.params.set_port_option;
6756 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
6757
6758 cmd->lport_num = lport;
6759
6760 cmd->lport_num_valid = lport_valid;
6761 cmd->selected_port_option = new_option;
6762
6763 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6764 }
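
/* Usage note (hedged): a caller would typically pick one of the option
 * indexes returned by ice_aq_get_port_options() and write it back here; the
 * selection then shows up through the pending option fields of Get Port
 * Options until the device applies it, e.g.
 *
 *	err = ice_aq_set_port_option(hw, 0, false, selected_idx);
 *
 * where selected_idx is an index below the returned option count.
 */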
6765
6766 /**
6767 * ice_aq_set_lldp_mib - Set the LLDP MIB
6768 * @hw: pointer to the HW struct
6769 * @mib_type: Local, Remote or both Local and Remote MIBs
6770 * @buf: pointer to the caller-supplied buffer to store the MIB block
6771 * @buf_size: size of the buffer (in bytes)
6772 * @cd: pointer to command details structure or NULL
6773 *
6774 * Set the LLDP MIB. (0x0A08)
6775 */
6776 int
6777 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
6778 struct ice_sq_cd *cd)
6779 {
6780 struct ice_aqc_lldp_set_local_mib *cmd;
6781 struct ice_aq_desc desc;
6782
6783 cmd = &desc.params.lldp_set_mib;
6784
6785 if (buf_size == 0 || !buf)
6786 return ICE_ERR_PARAM;
6787
6788 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
6789
6790 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
6791 desc.datalen = CPU_TO_LE16(buf_size);
6792
6793 cmd->type = mib_type;
6794 cmd->length = CPU_TO_LE16(buf_size);
6795
6796 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
6797 }
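
/* Note: ICE_AQ_FLAG_RD marks the descriptor as carrying a buffer that the
 * firmware reads (host to device), which is why it is set here but not in the
 * query-style commands above; the MIB length is carried both in the
 * descriptor datalen and in cmd->length.
 */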
6798
6799 /**
6800 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
6801 * @hw: pointer to HW struct
6802 */
6803 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
6804 {
6805 if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
6806 return false;
6807
6808 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
6809 ICE_FW_API_LLDP_FLTR_MIN,
6810 ICE_FW_API_LLDP_FLTR_PATCH);
6811 }
6812
6813 /**
6814 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
6815 * @hw: pointer to HW struct
6816 * @vsi_num: absolute HW index for VSI
6817 * @add: true to add the LLDP Rx filter, false to remove it
6818 */
6819 int
6820 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
6821 {
6822 struct ice_aqc_lldp_filter_ctrl *cmd;
6823 struct ice_aq_desc desc;
6824
6825 cmd = &desc.params.lldp_filter_ctrl;
6826
6827 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
6828
6829 if (add)
6830 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
6831 else
6832 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
6833
6834 cmd->vsi_num = CPU_TO_LE16(vsi_num);
6835
6836 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6837 }
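
/* Illustrative usage sketch (not part of the driver): a caller would normally
 * gate this on ice_fw_supports_lldp_fltr_ctrl() and pass the absolute HW VSI
 * index that should receive LLDP frames:
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		err = ice_lldp_fltr_add_remove(hw, vsi_num, true);
 */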
6838
6839 /**
6840 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
6841 * @hw: pointer to HW struct
6842 */
6843 int ice_lldp_execute_pending_mib(struct ice_hw *hw)
6844 {
6845 struct ice_aq_desc desc;
6846
6847 ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);
6848
6849 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6850 }
6851
6852 /**
6853 * ice_fw_supports_report_dflt_cfg
6854 * @hw: pointer to the hardware structure
6855 *
6856 * Checks if the firmware supports reporting the default configuration
6857 */
6858 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
6859 {
6860 return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
6861 ICE_FW_API_REPORT_DFLT_CFG_MIN,
6862 ICE_FW_API_REPORT_DFLT_CFG_PATCH);
6863 }
6864
6865 /* Each index into the following array corresponds to one of the AQ link
6866  * speed bits, from ICE_AQ_LINK_SPEED_10MB (BIT(0)) upward, excluding
6867  * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15). The link_speed reported by
6868  * firmware is a 16 bit bitmask and the array is indexed by [fls(speed) - 1];
6869  * indexes beyond the end of the array are treated as ICE_LINK_SPEED_UNKNOWN
6870  * by ice_get_link_speed().
6871  */
6872 static const u32 ice_aq_to_link_speed[] = {
6873 ICE_LINK_SPEED_10MBPS, /* BIT(0) */
6874 ICE_LINK_SPEED_100MBPS,
6875 ICE_LINK_SPEED_1000MBPS,
6876 ICE_LINK_SPEED_2500MBPS,
6877 ICE_LINK_SPEED_5000MBPS,
6878 ICE_LINK_SPEED_10000MBPS,
6879 ICE_LINK_SPEED_20000MBPS,
6880 ICE_LINK_SPEED_25000MBPS,
6881 ICE_LINK_SPEED_40000MBPS,
6882 ICE_LINK_SPEED_50000MBPS,
6883 ICE_LINK_SPEED_100000MBPS, /* BIT(10) */
6884 ICE_LINK_SPEED_200000MBPS,
6885 };
6886
6887 /**
6888 * ice_get_link_speed - get integer speed from table
6889 * @index: array index from fls(aq speed) - 1
6890 *
6891 * Returns: u32 value containing integer speed
6892 */
6893 u32 ice_get_link_speed(u16 index)
6894 {
6895 if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
6896 return ICE_LINK_SPEED_UNKNOWN;
6897
6898 return ice_aq_to_link_speed[index];
6899 }
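
/* Illustrative example: the AQ reports link speed as a single set bit, e.g.
 * ICE_AQ_LINK_SPEED_100GB == BIT(10), so fls(speed) - 1 == 10 and the lookup
 * returns ICE_LINK_SPEED_100000MBPS. ICE_AQ_LINK_SPEED_UNKNOWN == BIT(15)
 * yields index 15, which is past the end of the array and therefore maps to
 * ICE_LINK_SPEED_UNKNOWN.
 */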
6900
6901 /**
6902 * ice_fw_supports_fec_dis_auto
6903 * @hw: pointer to the hardware structure
6904 *
6905 * Checks if the firmware supports FEC disable in Auto FEC mode
6906 */
6907 bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
6908 {
6909 if (ice_is_e830(hw))
6910 return true;
6911 return ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E810,
6912 ICE_FW_FEC_DIS_AUTO_MAJ,
6913 ICE_FW_FEC_DIS_AUTO_MIN,
6914 ICE_FW_FEC_DIS_AUTO_PATCH) ||
6915 ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E82X,
6916 ICE_FW_FEC_DIS_AUTO_MAJ_E82X,
6917 ICE_FW_FEC_DIS_AUTO_MIN_E82X,
6918 ICE_FW_FEC_DIS_AUTO_PATCH_E82X);
6919 }
6920
6921 /**
6922 * ice_is_fw_auto_drop_supported
6923 * @hw: pointer to the hardware structure
6924 *
6925 * Checks if the firmware supports the auto drop feature
6926 */
6927 bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
6928 {
6929 if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
6930 hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
6931 return true;
6932 return false;
6933 }
6934
6935