/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char *	ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode	ixl_get_fw_mode(struct ixl_pf *);

/* Sysctls */
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

/* Functions for setting and checking driver state. Note that these functions
 * take bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32
 * operations require bitmasks, which can easily lead to programming errors,
 * so we provide wrapper functions to avoid them.
 */

/**
 * ixl_set_state - Set the specified state
 * @s: the state bitmap
 * @bit: the state to set
 *
 * Atomically update the state bitmap with the specified bit set.
 */
inline void
ixl_set_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_set_32 expects a bitmask */
	atomic_set_32(s, BIT(bit));
}

/**
 * ixl_clear_state - Clear the specified state
 * @s: the state bitmap
 * @bit: the state to clear
 *
 * Atomically update the state bitmap with the specified bit cleared.
 */
inline void
ixl_clear_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_clear_32 expects a bitmask */
	atomic_clear_32(s, BIT(bit));
}

/**
 * ixl_test_state - Test the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Return true if the state is set, false otherwise. Use this only if the flow
 * does not need to update the state. If you must update the state as well,
 * prefer ixl_testandset_state.
 */
inline bool
ixl_test_state(volatile u32 *s, enum ixl_state bit)
{
	return !!(*s & BIT(bit));
}

/**
 * ixl_testandset_state - Test and set the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, setting the specified bit. Returns the
 * previous value of the bit.
 */
inline u32
ixl_testandset_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_testandset_32 expects a bit position, as opposed to the
	 * bitmask expected by the other atomic functions */
	return atomic_testandset_32(s, bit);
}
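
/*
 * Example (illustrative only, not part of the driver): guarding a
 * one-shot code path with the wrappers above, so callers never build
 * bitmasks by hand. IXL_STATE_EXAMPLE is a hypothetical enum ixl_state
 * value standing in for a real one such as IXL_STATE_RECOVERY_MODE:
 *
 *	if (ixl_testandset_state(&pf->state, IXL_STATE_EXAMPLE) == 0) {
 *		// first caller to set the bit does the work
 *		ixl_clear_state(&pf->state, IXL_STATE_EXAMPLE);
 *	}
 */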

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify the state of the FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}

/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}

/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}

/**
 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
 * @pf: PF structure
 *
 * Shutdown Host Memory Cache if configured.
 */
void
ixl_shutdown_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* HMC not configured, no need to shutdown */
	if (hw->hmc.hmc_obj == NULL)
		return;

	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(pf->dev,
		    "Shutdown LAN HMC failed with code %s\n",
		    i40e_stat_str(hw, status));
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Keep link active by default */
	ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
		    __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

/*
** Creates a new filter with the given MAC address and VLAN ID
*/
static struct ixl_mac_filter *
ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	/* create a new empty filter */
	f = malloc(sizeof(struct ixl_mac_filter),
	    M_IXL, M_NOWAIT | M_ZERO);
	if (f) {
		LIST_INSERT_HEAD(headp, f, ftle);
		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
		f->vlan = vlan;
	}

	return (f);
}

/**
 * ixl_free_filters - Free all filters in given list
 * @headp: pointer to list head
 *
 * Frees memory used by each entry in the list.
 * Does not remove filters from HW.
 */
void
ixl_free_filters(struct ixl_ftl_head *headp)
{
	struct ixl_mac_filter *f, *nf;

	f = LIST_FIRST(headp);
	while (f != NULL) {
		nf = LIST_NEXT(f, ftle);
		free(f, M_IXL);
		f = nf;
	}

	LIST_INIT(headp);
}
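
/*
 * Illustrative lifecycle of a temporary filter list (a sketch, not
 * driver code): build a local list head, populate it with
 * ixl_new_filter(), and hand the whole list to ixl_free_filters() on
 * error paths so no entries leak:
 *
 *	struct ixl_ftl_head to_add;
 *
 *	LIST_INIT(&to_add);
 *	if (ixl_new_filter(&to_add, macaddr, IXL_VLAN_ANY) == NULL)
 *		ixl_free_filters(&to_add);
 */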

static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_add_maddr_arg *ama = arg;
	struct ixl_vsi *vsi = ama->vsi;
	const u8 *macaddr = (u8*)LLADDR(sdl);
	struct ixl_mac_filter *f;

	/* Does one already exist? */
	f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
	if (f != NULL)
		return (0);

	f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
	if (f == NULL) {
		device_printf(vsi->dev, "WARNING: no filter available!!\n");
		return (0);
	}
	f->flags |= IXL_FILTER_MC;

	return (1);
}
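
/*
 * Note on the callback contract: if_foreach_llmaddr() invokes the
 * callback once per link-level multicast address and accumulates the
 * callback return values, so returning 1 above counts a newly created
 * filter while returning 0 skips duplicates and allocation failures.
 */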

/*********************************************************************
 *	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/

/**
 * ixl_add_multi - Add multicast filters to the hardware
 * @vsi: The VSI structure
 *
 * In case the number of multicast filters in the IFP exceeds 127 entries,
 * multicast promiscuous mode will be enabled and the filters will be removed
 * from the hardware.
 */
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	if_t ifp = vsi->ifp;
	int mcnt = 0;
	struct ixl_add_maddr_arg cb_arg;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	/*
	 * There is no need to check if the number of multicast addresses
	 * exceeds the MAX_MULTICAST_ADDR threshold and set promiscuous mode
	 * here, as all callers already handle this case.
	 */

	cb_arg.vsi = vsi;
	LIST_INIT(&cb_arg.to_add);

	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
	if (mcnt > 0)
		ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

static u_int
ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_mac_filter *f = arg;

	if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
		return (1);
	else
		return (0);
}

/**
 * ixl_dis_multi_promisc - Disable multicast promiscuous mode
 * @vsi: The VSI structure
 * @vsi_mcnt: Number of multicast filters in the VSI
 *
 * Disable multicast promiscuous mode based on the number of entries in the
 * IFP and the VSI, then re-add the multicast filters.
 */
static void
ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt)
{
	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;
	int ifp_mcnt = 0;
	enum i40e_status_code status;

	/*
	 * Check if multicast promiscuous mode was actually enabled.
	 * If promiscuous mode was not enabled, don't attempt to disable it.
	 * Also, don't disable if IFF_PROMISC or IFF_ALLMULTI is set.
	 */
	if (!(vsi->flags & IXL_FLAGS_MC_PROMISC) ||
	    (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)))
		return;

	ifp_mcnt = if_llmaddr_count(ifp);
	/*
	 * If the lists are equal in size or the IFP list is empty, the
	 * filter list has not changed; in that case avoid disabling
	 * multicast promiscuous mode, as it was not previously enabled.
	 * Multicast promiscuous mode is enabled only when vsi_mcnt == 0
	 * and ifp_mcnt > 0.
	 */
	if (ifp_mcnt == vsi_mcnt || ifp_mcnt == 0 ||
	    ifp_mcnt >= MAX_MULTICAST_ADDR)
		return;

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
	    FALSE, NULL);
	if (status != I40E_SUCCESS) {
		if_printf(ifp, "Failed to disable multicast promiscuous "
		    "mode, status: %s\n", i40e_stat_str(hw, status));

		return;
	}

	/* Clear the flag since promiscuous mode is now disabled */
	vsi->flags &= ~IXL_FLAGS_MC_PROMISC;
	if_printf(ifp, "Disabled multicast promiscuous mode\n");

	ixl_add_multi(vsi);
}

/**
 * ixl_del_multi - Delete multicast filters from the hardware
 * @vsi: The VSI structure
 * @all: Bool to determine if all the multicast filters should be removed
 *
 * In case the number of multicast filters in the IFP drops to 127 entries,
 * multicast promiscuous mode will be disabled and the filters will be
 * reapplied to the hardware.
 */
void
ixl_del_multi(struct ixl_vsi *vsi, bool all)
{
	int to_del_cnt = 0, vsi_mcnt = 0;
	if_t ifp = vsi->ifp;
	struct ixl_mac_filter *f, *fn;
	struct ixl_ftl_head to_del;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	LIST_INIT(&to_del);
	/* Search for removed multicast addresses */
	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
		if ((f->flags & IXL_FILTER_MC) == 0)
			continue;

		/* Count all the multicast filters in the VSI for comparison */
		vsi_mcnt++;

		if (!all && if_foreach_llmaddr(ifp, ixl_match_maddr, f) != 0)
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	if (to_del_cnt > 0) {
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
		return;
	}

	ixl_dis_multi_promisc(vsi, vsi_mcnt);

	IOCTL_DEBUGOUT("ixl_del_multi: end");
}

void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	if_t ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    if_name(ifp),
	    ixl_link_speed_string(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	    hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
	    ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
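
/*
 * Sketch of the ITR encoding used above (given the 2 usec register
 * granularity noted in the comment): to request an interrupt
 * throttling interval of roughly N microseconds, write N / 2 to the
 * register. The 124 usec setting above could equivalently be written:
 *
 *	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 124 / 2);
 */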

void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8 aq_buf[I40E_AQ_LARGE_BUF];
	int ret;
	u16 next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}

void
ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	/* Copy of netstat RX errors counter for validation purposes */
	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
	    CTLFLAG_RD, &vsi->ierrors,
	    "RX packet errors");

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}
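
/*
 * A handler like the one above is typically wired up during sysctl
 * tree setup; an illustrative sketch (the actual registration lives
 * elsewhere in the driver, and the node name here is assumed):
 *
 *	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_itr",
 *	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
 *	    pf, 0, ixl_sysctl_pf_tx_itr, "I",
 *	    "Immediately set TX ITR value for all queues");
 */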

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}

void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *)&rss_seed);
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}
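
/*
 * Size note for the bcopy() above: assuming IXL_RSS_KEY_SIZE_REG is 13,
 * the seed occupies 13 32-bit registers, i.e. 13 * sizeof(u32) == 52
 * bytes, matching an AQ key layout of a 40-byte standard RSS key plus
 * a 12-byte extended hash key.
 */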

/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64 set_hena = 0, hena;
	u32 rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}
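
/*
 * Example of the HENA encoding above: enabling hashing for IPv4/TCP
 * sets the I40E_FILTER_PCTYPE_NONF_IPV4_TCP bit in the 64-bit mask,
 * which is then split across the two 32-bit HENA registers:
 *
 *	hena |= (u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
 *	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 */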

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}

/*
 * In some firmware versions there is a default MAC/VLAN filter
 * configured which interferes with the filters managed by the driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
	    I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	LIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_ftl_head tmp;
	int cnt;

	/*
	 * The ixl_add_hw_filters function adds filters configured
	 * in HW to a list in VSI. Move all filters to a temporary
	 * list to avoid corrupting it by concatenating to itself.
	 */
	LIST_INIT(&tmp);
	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
	cnt = vsi->num_hw_filters;
	vsi->num_hw_filters = 0;

	ixl_add_hw_filters(vsi, &tmp, cnt);

	/*
	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
	 * will be NULL. Furthermore, the ftl of such a vsi already contains
	 * the IXL_VLAN_ANY filter so we can skip that as well.
	 */
	if (hw == NULL)
		return;

	/* Filter could be removed if MAC address was changed */
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * VLAN HW filtering is enabled, make sure that filters
	 * for all registered VLAN tags are configured
	 */
	ixl_add_vlan_filters(vsi, hw->mac.addr);
}

/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then adds that new filter to the HW if it doesn't already
 * exist in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_pf *pf;
	device_t dev;
	struct ixl_ftl_head to_add;
	int to_add_cnt;

	pf = vsi->back;
	dev = pf->dev;
	to_add_cnt = 1;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist? */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f != NULL)
		return;

	LIST_INIT(&to_add);
	f = ixl_new_filter(&to_add, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/*
	** Is this the first vlan being registered? If so, we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			struct ixl_ftl_head to_del;

			/* Prepare the new filter first to avoid removing
			 * the VLAN_ANY filter if allocation fails */
			f = ixl_new_filter(&to_add, macaddr, 0);
			if (f == NULL) {
				device_printf(dev, "WARNING: no filter available!!\n");
				free(LIST_FIRST(&to_add), M_IXL);
				return;
			}
			to_add_cnt++;

			LIST_REMOVE(tmp, ftle);
			LIST_INIT(&to_del);
			LIST_INSERT_HEAD(&to_del, tmp, ftle);
			ixl_del_hw_filters(vsi, &to_del, 1);
		}
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}
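
/*
 * Typical usage (as seen in ixl_init_filters() and
 * ixl_reconfigure_filters() above): pass IXL_VLAN_ANY when the filter
 * should match the MAC address regardless of VLAN tag:
 *
 *	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
 */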

/**
 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
 * @vsi: pointer to VSI
 * @macaddr: MAC address
 *
 * Adds a MAC/VLAN filter for each VLAN configured on the interface
 * if there are enough HW filters. Otherwise adds a single filter
 * for all tagged and untagged frames to allow all configured VLANs
 * to receive traffic.
 */
void
ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;
	struct ixl_mac_filter *f;
	int to_add_cnt = 0;
	int i, vlan = 0;

	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
		return;
	}
	LIST_INIT(&to_add);

	/* Add filter for untagged frames if it does not exist yet */
	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (f == NULL) {
		f = ixl_new_filter(&to_add, macaddr, 0);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			return;
		}
		to_add_cnt++;
	}

	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
		if (vlan == -1)
			break;

		/* Does one already exist? */
		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
		if (f != NULL)
			continue;

		f = ixl_new_filter(&to_add, macaddr, vlan);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			ixl_free_filters(&to_add);
			return;
		}
		to_add_cnt++;
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}
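
/*
 * The VLAN scan above relies on bit_ffs_at() from bitstring(3): it
 * stores in 'vlan' the index of the first bit set at or after position
 * i, or -1 once no more VLAN IDs are registered, so each configured
 * VLAN is visited exactly once.
 */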

void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head ftl_head;
	int to_del_cnt = 1;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f == NULL)
		return;

	LIST_REMOVE(f, ftle);
	LIST_INIT(&ftl_head);
	LIST_INSERT_HEAD(&ftl_head, f, ftle);
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	/* If this is not the last vlan just remove the filter */
	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
		return;
	}

	/* It's the last vlan, we need to switch back to a non-vlan filter */
	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (tmp != NULL) {
		LIST_REMOVE(tmp, ftle);
		LIST_INSERT_AFTER(f, tmp, ftle);
		to_del_cnt++;
	}
	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);

	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}

/**
 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
 * @vsi: VSI which filters need to be removed
 * @macaddr: MAC address
 *
 * Remove all MAC/VLAN filters with a given MAC address. For multicast
 * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY),
 * so skip them to speed up processing. Those filters should be removed
 * using the ixl_del_filter function.
 */
void
ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head to_del;
	int to_del_cnt = 0;

	LIST_INIT(&to_del);

	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
		if ((f->flags & IXL_FILTER_MC) != 0 ||
		    !ixl_ether_is_equal(f->macaddr, macaddr))
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
	if (to_del_cnt > 0)
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
}

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	LIST_FOREACH(f, headp, ftle) {
		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
		    (f->vlan == vlan)) {
			return (f);
		}
	}

	return (NULL);
}

/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter *f, *fn;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	LIST_FOREACH(f, to_add, ftle) {
		b = &a[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
		if (f->vlan == IXL_VLAN_ANY) {
			b->vlan_tag = 0;
			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
		} else {
			b->vlan_tag = f->vlan;
			b->flags = 0;
		}
		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
		/* Some FW versions do not set match method
		 * when adding filters fails. Initialize it with
		 * the expected error value to allow detecting
		 * which filters were not added */
		b->match_method = I40E_AQC_MM_ERR_NO_RES;
		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		if (++j == cnt)
			break;
	}
	if (j != cnt) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_add);
		goto out_free;
	}

	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
	if (status == I40E_SUCCESS) {
		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
		vsi->num_hw_filters += j;
		goto out_free;
	}

	device_printf(dev,
	    "i40e_aq_add_macvlan status %s, error %s\n",
	    i40e_stat_str(hw, status),
	    i40e_aq_str(hw, hw->aq.asq_last_status));
	j = 0;

	/* Verify which filters were actually configured in HW
	 * and add them to the list */
	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
		LIST_REMOVE(f, ftle);
		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
			ixl_dbg_filter(pf,
			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
			    __func__,
			    MAC_FORMAT_ARGS(f->macaddr),
			    f->vlan);
			free(f, M_IXL);
		} else {
			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
			vsi->num_hw_filters++;
		}
		j++;
	}

out_free:
	free(a, M_IXL);
}

/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	struct ixl_mac_filter *f, *f_temp;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
		e = &d[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (f->vlan == IXL_VLAN_ANY) {
			e->vlan_tag = 0;
			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		} else {
			e->vlan_tag = f->vlan;
		}

		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		/* delete entry from the list */
		LIST_REMOVE(f, ftle);
		free(f, M_IXL);
		if (++j == cnt)
			break;
	}
	if (j != cnt || !LIST_EMPTY(to_del)) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: wrong size of list of filters; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_del);
		goto out_free;
	}
	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		for (int i = 0; i < j; i++) {
			if (d[i].error_code == 0)
				continue;
			device_printf(dev,
			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
			    d[i].vlan_tag);
		}
	}

	vsi->num_hw_filters -= j;

out_free:
	free(d, M_IXL);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
}

int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}
1703
1704 int
1705 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1706 {
1707 struct i40e_hw *hw = &pf->hw;
1708 int error = 0;
1709 u32 reg;
1710 u16 pf_qidx;
1711
1712 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1713
1714 ixl_dbg(pf, IXL_DBG_EN_DIS,
1715 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1716 pf_qidx, vsi_qidx);
1717
1718 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1719 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1720 I40E_QRX_ENA_QENA_STAT_MASK;
1721 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1722 /* Verify the enable took */
1723 for (int j = 0; j < 10; j++) {
1724 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1725 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1726 break;
1727 i40e_usec_delay(10);
1728 }
1729 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1730 device_printf(pf->dev, "RX queue %d still disabled!\n",
1731 pf_qidx);
1732 error = ETIMEDOUT;
1733 }
1734
1735 return (error);
1736 }
1737
1738 int
1739 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1740 {
1741 int error = 0;
1742
1743 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1744 /* Called function already prints error message */
1745 if (error)
1746 return (error);
1747 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1748 return (error);
1749 }
1750
1751 /*
1752 * Returns an error if the ring fails to report disabled within the timeout.
1753 */
1754 int
1755 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1756 {
1757 struct i40e_hw *hw = &pf->hw;
1758 int error = 0;
1759 u32 reg;
1760 u16 pf_qidx;
1761
1762 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1763
1764 ixl_dbg(pf, IXL_DBG_EN_DIS,
1765 "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1766 pf_qidx, vsi_qidx);
1767
1768 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1769 i40e_usec_delay(500);
1770
1771 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1772 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1773 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1774 /* Verify the disable took */
1775 for (int j = 0; j < 10; j++) {
1776 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1777 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1778 break;
1779 i40e_msec_delay(10);
1780 }
1781 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1782 device_printf(pf->dev, "TX queue %d still enabled!\n",
1783 pf_qidx);
1784 error = ETIMEDOUT;
1785 }
1786
1787 return (error);
1788 }
1789
1790 /*
1791 * Returns an error if the ring fails to report disabled within the timeout.
1792 */
1793 int
1794 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1795 {
1796 struct i40e_hw *hw = &pf->hw;
1797 int error = 0;
1798 u32 reg;
1799 u16 pf_qidx;
1800
1801 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1802
1803 ixl_dbg(pf, IXL_DBG_EN_DIS,
1804 "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1805 pf_qidx, vsi_qidx);
1806
1807 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1808 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1809 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1810 /* Verify the disable took */
1811 for (int j = 0; j < 10; j++) {
1812 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1813 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1814 break;
1815 i40e_msec_delay(10);
1816 }
1817 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1818 device_printf(pf->dev, "RX queue %d still enabled!\n",
1819 pf_qidx);
1820 error = ETIMEDOUT;
1821 }
1822
1823 return (error);
1824 }
1825
1826 int
1827 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1828 {
1829 int error = 0;
1830
1831 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1832 /* Called function already prints error message */
1833 if (error)
1834 return (error);
1835 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1836 return (error);
1837 }
1838
1839 static void
1840 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1841 {
1842 struct i40e_hw *hw = &pf->hw;
1843 device_t dev = pf->dev;
1844 struct ixl_vf *vf;
1845 bool mdd_detected = false;
1846 bool pf_mdd_detected = false;
1847 bool vf_mdd_detected = false;
1848 u16 vf_num, queue;
1849 u8 pf_num, event;
1850 u8 pf_mdet_num, vp_mdet_num;
1851 u32 reg;
1852
1853 /* find what triggered the MDD event */
1854 reg = rd32(hw, I40E_GL_MDET_TX);
1855 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1856 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1857 I40E_GL_MDET_TX_PF_NUM_SHIFT;
1858 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1859 I40E_GL_MDET_TX_VF_NUM_SHIFT;
1860 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1861 I40E_GL_MDET_TX_EVENT_SHIFT;
1862 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1863 I40E_GL_MDET_TX_QUEUE_SHIFT;
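		/* Writing all 1s clears the latched event so the next MDD event can be captured */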
1864 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1865 mdd_detected = true;
1866 }
1867
1868 if (!mdd_detected)
1869 return;
1870
1871 reg = rd32(hw, I40E_PF_MDET_TX);
1872 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1873 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1874 pf_mdet_num = hw->pf_id;
1875 pf_mdd_detected = true;
1876 }
1877
1878 /* Check if MDD was caused by a VF */
1879 for (int i = 0; i < pf->num_vfs; i++) {
1880 vf = &(pf->vfs[i]);
1881 reg = rd32(hw, I40E_VP_MDET_TX(i));
1882 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1883 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1884 vp_mdet_num = i;
1885 vf->num_mdd_events++;
1886 vf_mdd_detected = true;
1887 }
1888 }
1889
1890 /* Print out an error message */
1891 if (vf_mdd_detected && pf_mdd_detected)
1892 device_printf(dev,
1893 "Malicious Driver Detection event %d"
1894 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1895 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1896 else if (vf_mdd_detected && !pf_mdd_detected)
1897 device_printf(dev,
1898 "Malicious Driver Detection event %d"
1899 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1900 event, queue, pf_num, vf_num, vp_mdet_num);
1901 else if (!vf_mdd_detected && pf_mdd_detected)
1902 device_printf(dev,
1903 "Malicious Driver Detection event %d"
1904 " on TX queue %d, pf number %d (PF-%d)\n",
1905 event, queue, pf_num, pf_mdet_num);
1906 /* Theoretically shouldn't happen */
1907 else
1908 device_printf(dev,
1909 "TX Malicious Driver Detection event (unknown)\n");
1910 }
1911
1912 static void
1913 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1914 {
1915 struct i40e_hw *hw = &pf->hw;
1916 device_t dev = pf->dev;
1917 struct ixl_vf *vf;
1918 bool mdd_detected = false;
1919 bool pf_mdd_detected = false;
1920 bool vf_mdd_detected = false;
1921 u16 queue;
1922 u8 pf_num, event;
1923 u8 pf_mdet_num, vp_mdet_num;
1924 u32 reg;
1925
1926 /*
1927 * GL_MDET_RX doesn't contain VF number information, unlike
1928 * GL_MDET_TX.
1929 */
1930 reg = rd32(hw, I40E_GL_MDET_RX);
1931 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1932 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1933 I40E_GL_MDET_RX_FUNCTION_SHIFT;
1934 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1935 I40E_GL_MDET_RX_EVENT_SHIFT;
1936 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1937 I40E_GL_MDET_RX_QUEUE_SHIFT;
1938 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1939 mdd_detected = true;
1940 }
1941
1942 if (!mdd_detected)
1943 return;
1944
1945 reg = rd32(hw, I40E_PF_MDET_RX);
1946 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1947 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1948 pf_mdet_num = hw->pf_id;
1949 pf_mdd_detected = true;
1950 }
1951
1952 /* Check if MDD was caused by a VF */
1953 for (int i = 0; i < pf->num_vfs; i++) {
1954 vf = &(pf->vfs[i]);
1955 reg = rd32(hw, I40E_VP_MDET_RX(i));
1956 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1957 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1958 vp_mdet_num = i;
1959 vf->num_mdd_events++;
1960 vf_mdd_detected = true;
1961 }
1962 }
1963
1964 /* Print out an error message */
1965 if (vf_mdd_detected && pf_mdd_detected)
1966 device_printf(dev,
1967 "Malicious Driver Detection event %d"
1968 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1969 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1970 else if (vf_mdd_detected && !pf_mdd_detected)
1971 device_printf(dev,
1972 "Malicious Driver Detection event %d"
1973 " on RX queue %d, pf number %d, (VF-%d)\n",
1974 event, queue, pf_num, vp_mdet_num);
1975 else if (!vf_mdd_detected && pf_mdd_detected)
1976 device_printf(dev,
1977 "Malicious Driver Detection event %d"
1978 " on RX queue %d, pf number %d (PF-%d)\n",
1979 event, queue, pf_num, pf_mdet_num);
1980 /* Theoretically shouldn't happen */
1981 else
1982 device_printf(dev,
1983 "RX Malicious Driver Detection event (unknown)\n");
1984 }
1985
1986 /**
1987 * ixl_handle_mdd_event
1988 *
1989 * Called from the interrupt handler to identify possibly malicious VFs
1990 * (it also detects events from the PF)
1991 **/
1992 void
1993 ixl_handle_mdd_event(struct ixl_pf *pf)
1994 {
1995 struct i40e_hw *hw = &pf->hw;
1996 u32 reg;
1997
1998 /*
1999 * Handle both TX/RX because it's possible they could
2000 * both trigger in the same interrupt.
2001 */
2002 ixl_handle_tx_mdd_event(pf);
2003 ixl_handle_rx_mdd_event(pf);
2004
2005 ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING);
2006
2007 /* re-enable mdd interrupt cause */
2008 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2009 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2010 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2011 ixl_flush(hw);
2012 }
2013
2014 void
2015 ixl_enable_intr0(struct i40e_hw *hw)
2016 {
2017 u32 reg;
2018
2019 /* Use IXL_ITR_NONE so ITR isn't updated here */
2020 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2021 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2022 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2023 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2024 }
2025
2026 void
2027 ixl_disable_intr0(struct i40e_hw *hw)
2028 {
2029 u32 reg;
2030
2031 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2032 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2033 ixl_flush(hw);
2034 }
2035
2036 void
2037 ixl_enable_queue(struct i40e_hw *hw, int id)
2038 {
2039 u32 reg;
2040
2041 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2042 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2043 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2044 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2045 }
2046
2047 void
2048 ixl_disable_queue(struct i40e_hw *hw, int id)
2049 {
2050 u32 reg;
2051
2052 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2053 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2054 }
2055
2056 void
2057 ixl_handle_empr_reset(struct ixl_pf *pf)
2058 {
2059 struct ixl_vsi *vsi = &pf->vsi;
2060 bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);
2061
2062 ixl_prepare_for_reset(pf, is_up);
2063 /*
2064 * i40e_pf_reset checks the type of reset and acts
2065 * accordingly: if an EMP or Core reset was already
2066 * performed, a PF reset is unnecessary and can
2067 * sometimes fail.
2068 */
2069 ixl_pf_reset(pf);
2070
2071 if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
2072 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
2073 ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
2074 device_printf(pf->dev,
2075 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
2076 pf->link_up = FALSE;
2077 ixl_update_link_status(pf);
2078 }
2079
2080 ixl_rebuild_hw_structs_after_reset(pf, is_up);
2081
2082 ixl_clear_state(&pf->state, IXL_STATE_RESETTING);
2083 }
2084
2085 void
2086 ixl_update_stats_counters(struct ixl_pf *pf)
2087 {
2088 struct i40e_hw *hw = &pf->hw;
2089 struct ixl_vsi *vsi = &pf->vsi;
2090 struct ixl_vf *vf;
2091 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2092
2093 struct i40e_hw_port_stats *nsd = &pf->stats;
2094 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2095
2096 /* Update hw stats */
2097 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2098 pf->stat_offsets_loaded,
2099 &osd->crc_errors, &nsd->crc_errors);
2100 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2101 pf->stat_offsets_loaded,
2102 &osd->illegal_bytes, &nsd->illegal_bytes);
2103 ixl_stat_update48(hw, I40E_GLPRT_GORCL(hw->port),
2104 pf->stat_offsets_loaded,
2105 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2106 ixl_stat_update48(hw, I40E_GLPRT_GOTCL(hw->port),
2107 pf->stat_offsets_loaded,
2108 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2109 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2110 pf->stat_offsets_loaded,
2111 &osd->eth.rx_discards,
2112 &nsd->eth.rx_discards);
2113 ixl_stat_update48(hw, I40E_GLPRT_UPRCL(hw->port),
2114 pf->stat_offsets_loaded,
2115 &osd->eth.rx_unicast,
2116 &nsd->eth.rx_unicast);
2117 ixl_stat_update48(hw, I40E_GLPRT_UPTCL(hw->port),
2118 pf->stat_offsets_loaded,
2119 &osd->eth.tx_unicast,
2120 &nsd->eth.tx_unicast);
2121 ixl_stat_update48(hw, I40E_GLPRT_MPRCL(hw->port),
2122 pf->stat_offsets_loaded,
2123 &osd->eth.rx_multicast,
2124 &nsd->eth.rx_multicast);
2125 ixl_stat_update48(hw, I40E_GLPRT_MPTCL(hw->port),
2126 pf->stat_offsets_loaded,
2127 &osd->eth.tx_multicast,
2128 &nsd->eth.tx_multicast);
2129 ixl_stat_update48(hw, I40E_GLPRT_BPRCL(hw->port),
2130 pf->stat_offsets_loaded,
2131 &osd->eth.rx_broadcast,
2132 &nsd->eth.rx_broadcast);
2133 ixl_stat_update48(hw, I40E_GLPRT_BPTCL(hw->port),
2134 pf->stat_offsets_loaded,
2135 &osd->eth.tx_broadcast,
2136 &nsd->eth.tx_broadcast);
2137
2138 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2139 pf->stat_offsets_loaded,
2140 &osd->tx_dropped_link_down,
2141 &nsd->tx_dropped_link_down);
2142 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2143 pf->stat_offsets_loaded,
2144 &osd->mac_local_faults,
2145 &nsd->mac_local_faults);
2146 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2147 pf->stat_offsets_loaded,
2148 &osd->mac_remote_faults,
2149 &nsd->mac_remote_faults);
2150 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2151 pf->stat_offsets_loaded,
2152 &osd->rx_length_errors,
2153 &nsd->rx_length_errors);
2154
2155 /* Flow control (LFC) stats */
2156 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2157 pf->stat_offsets_loaded,
2158 &osd->link_xon_rx, &nsd->link_xon_rx);
2159 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2160 pf->stat_offsets_loaded,
2161 &osd->link_xon_tx, &nsd->link_xon_tx);
2162 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2163 pf->stat_offsets_loaded,
2164 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2165 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2166 pf->stat_offsets_loaded,
2167 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2168
2169 /*
2170 * For watchdog management we need to know if we have been paused
2171 * during the last interval, so capture that here.
2172 */
2173 if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2174 vsi->shared->isc_pause_frames = 1;
2175
2176 /* Packet size stats rx */
2177 ixl_stat_update48(hw, I40E_GLPRT_PRC64L(hw->port),
2178 pf->stat_offsets_loaded,
2179 &osd->rx_size_64, &nsd->rx_size_64);
2180 ixl_stat_update48(hw, I40E_GLPRT_PRC127L(hw->port),
2181 pf->stat_offsets_loaded,
2182 &osd->rx_size_127, &nsd->rx_size_127);
2183 ixl_stat_update48(hw, I40E_GLPRT_PRC255L(hw->port),
2184 pf->stat_offsets_loaded,
2185 &osd->rx_size_255, &nsd->rx_size_255);
2186 ixl_stat_update48(hw, I40E_GLPRT_PRC511L(hw->port),
2187 pf->stat_offsets_loaded,
2188 &osd->rx_size_511, &nsd->rx_size_511);
2189 ixl_stat_update48(hw, I40E_GLPRT_PRC1023L(hw->port),
2190 pf->stat_offsets_loaded,
2191 &osd->rx_size_1023, &nsd->rx_size_1023);
2192 ixl_stat_update48(hw, I40E_GLPRT_PRC1522L(hw->port),
2193 pf->stat_offsets_loaded,
2194 &osd->rx_size_1522, &nsd->rx_size_1522);
2195 ixl_stat_update48(hw, I40E_GLPRT_PRC9522L(hw->port),
2196 pf->stat_offsets_loaded,
2197 &osd->rx_size_big, &nsd->rx_size_big);
2198
2199 /* Packet size stats tx */
2200 ixl_stat_update48(hw, I40E_GLPRT_PTC64L(hw->port),
2201 pf->stat_offsets_loaded,
2202 &osd->tx_size_64, &nsd->tx_size_64);
2203 ixl_stat_update48(hw, I40E_GLPRT_PTC127L(hw->port),
2204 pf->stat_offsets_loaded,
2205 &osd->tx_size_127, &nsd->tx_size_127);
2206 ixl_stat_update48(hw, I40E_GLPRT_PTC255L(hw->port),
2207 pf->stat_offsets_loaded,
2208 &osd->tx_size_255, &nsd->tx_size_255);
2209 ixl_stat_update48(hw, I40E_GLPRT_PTC511L(hw->port),
2210 pf->stat_offsets_loaded,
2211 &osd->tx_size_511, &nsd->tx_size_511);
2212 ixl_stat_update48(hw, I40E_GLPRT_PTC1023L(hw->port),
2213 pf->stat_offsets_loaded,
2214 &osd->tx_size_1023, &nsd->tx_size_1023);
2215 ixl_stat_update48(hw, I40E_GLPRT_PTC1522L(hw->port),
2216 pf->stat_offsets_loaded,
2217 &osd->tx_size_1522, &nsd->tx_size_1522);
2218 ixl_stat_update48(hw, I40E_GLPRT_PTC9522L(hw->port),
2219 pf->stat_offsets_loaded,
2220 &osd->tx_size_big, &nsd->tx_size_big);
2221
2222 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2223 pf->stat_offsets_loaded,
2224 &osd->rx_undersize, &nsd->rx_undersize);
2225 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2226 pf->stat_offsets_loaded,
2227 &osd->rx_fragments, &nsd->rx_fragments);
2228
2229 u64 rx_roc;
2230 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2231 pf->stat_offsets_loaded,
2232 &osd->rx_oversize, &rx_roc);
2233
2234 /*
2235 * Read the RXERR1 register to get the count of packets larger than
2236 * RX MAX and include it in the total rx_oversize count.
2237 *
2238 * BIT(7) must also be added to the hw->pf_id value when indexing the
2239 * I40E_GL_RXERR1 register, as indexes 0..127 are for VFs when
2240 * SR-IOV is enabled. Indexes 128..143 are for PFs.
2241 */
2242 u64 rx_err1;
2243 ixl_stat_update64(hw,
2244 I40E_GL_RXERR1L(hw->pf_id + BIT(7)),
2245 pf->stat_offsets_loaded,
2246 &osd->rx_err1,
2247 &rx_err1);
2248
2249 nsd->rx_oversize = rx_roc + rx_err1;
2250
2251 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2252 pf->stat_offsets_loaded,
2253 &osd->rx_jabber, &nsd->rx_jabber);
2254 /* EEE */
2255 i40e_get_phy_lpi_status(hw, nsd);
2256
2257 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2258 &osd->tx_lpi_count, &nsd->tx_lpi_count,
2259 &osd->rx_lpi_count, &nsd->rx_lpi_count);
2260
2261 pf->stat_offsets_loaded = true;
2262 /* End hw stats */
2263
2264 /* Update vsi stats */
2265 ixl_update_vsi_stats(vsi);
2266
2267 for (int i = 0; i < pf->num_vfs; i++) {
2268 vf = &pf->vfs[i];
2269 if (vf->vf_flags & VF_FLAG_ENABLED)
2270 ixl_update_eth_stats(&pf->vfs[i].vsi);
2271 }
2272 }
2273
2274 /**
2275 * Update VSI-specific ethernet statistics counters.
2276 **/
2277 void
2278 ixl_update_eth_stats(struct ixl_vsi *vsi)
2279 {
2280 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2281 struct i40e_hw *hw = &pf->hw;
2282 struct i40e_eth_stats *es;
2283 struct i40e_eth_stats *oes;
2284 u16 stat_idx = vsi->info.stat_counter_idx;
2285
2286 es = &vsi->eth_stats;
2287 oes = &vsi->eth_stats_offsets;
2288
2289 /* Gather up the stats that the hw collects */
2290 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2291 vsi->stat_offsets_loaded,
2292 &oes->tx_errors, &es->tx_errors);
2293 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2294 vsi->stat_offsets_loaded,
2295 &oes->rx_discards, &es->rx_discards);
2296
2297 ixl_stat_update48(hw, I40E_GLV_GORCL(stat_idx),
2298 vsi->stat_offsets_loaded,
2299 &oes->rx_bytes, &es->rx_bytes);
2300 ixl_stat_update48(hw, I40E_GLV_UPRCL(stat_idx),
2301 vsi->stat_offsets_loaded,
2302 &oes->rx_unicast, &es->rx_unicast);
2303 ixl_stat_update48(hw, I40E_GLV_MPRCL(stat_idx),
2304 vsi->stat_offsets_loaded,
2305 &oes->rx_multicast, &es->rx_multicast);
2306 ixl_stat_update48(hw, I40E_GLV_BPRCL(stat_idx),
2307 vsi->stat_offsets_loaded,
2308 &oes->rx_broadcast, &es->rx_broadcast);
2309
2310 ixl_stat_update48(hw, I40E_GLV_GOTCL(stat_idx),
2311 vsi->stat_offsets_loaded,
2312 &oes->tx_bytes, &es->tx_bytes);
2313 ixl_stat_update48(hw, I40E_GLV_UPTCL(stat_idx),
2314 vsi->stat_offsets_loaded,
2315 &oes->tx_unicast, &es->tx_unicast);
2316 ixl_stat_update48(hw, I40E_GLV_MPTCL(stat_idx),
2317 vsi->stat_offsets_loaded,
2318 &oes->tx_multicast, &es->tx_multicast);
2319 ixl_stat_update48(hw, I40E_GLV_BPTCL(stat_idx),
2320 vsi->stat_offsets_loaded,
2321 &oes->tx_broadcast, &es->tx_broadcast);
2322 vsi->stat_offsets_loaded = true;
2323 }
2324
2325 void
2326 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2327 {
2328 struct ixl_pf *pf;
2329 struct i40e_eth_stats *es;
2330 u64 tx_discards, csum_errs;
2331
2332 struct i40e_hw_port_stats *nsd;
2333
2334 pf = vsi->back;
2335 es = &vsi->eth_stats;
2336 nsd = &pf->stats;
2337
2338 ixl_update_eth_stats(vsi);
2339
2340 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2341
2342 csum_errs = 0;
2343 for (int i = 0; i < vsi->num_rx_queues; i++)
2344 csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2345 nsd->checksum_error = csum_errs;
2346
2347 /* Update ifnet stats */
2348 IXL_SET_IPACKETS(vsi, es->rx_unicast +
2349 es->rx_multicast +
2350 es->rx_broadcast);
2351 IXL_SET_OPACKETS(vsi, es->tx_unicast +
2352 es->tx_multicast +
2353 es->tx_broadcast);
2354 IXL_SET_IBYTES(vsi, es->rx_bytes);
2355 IXL_SET_OBYTES(vsi, es->tx_bytes);
2356 IXL_SET_IMCASTS(vsi, es->rx_multicast);
2357 IXL_SET_OMCASTS(vsi, es->tx_multicast);
2358
2359 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2360 nsd->checksum_error + nsd->rx_length_errors +
2361 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2362 nsd->rx_jabber);
2363 IXL_SET_OERRORS(vsi, es->tx_errors);
2364 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2365 IXL_SET_OQDROPS(vsi, tx_discards);
2366 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2367 IXL_SET_COLLISIONS(vsi, 0);
2368 }
2369
2370 /**
2371 * Reset all of the stats for the given pf
2372 **/
2373 void
2374 ixl_pf_reset_stats(struct ixl_pf *pf)
2375 {
2376 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2377 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2378 pf->stat_offsets_loaded = false;
2379 }
2380
2381 /**
2382 * Resets all stats of the given vsi
2383 **/
2384 void
2385 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2386 {
2387 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2388 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2389 vsi->stat_offsets_loaded = false;
2390 }
2391
2392 /**
2393 * Helper function for reading and updating 48/64 bit stats from the hw
2394 *
2395 * Since the device stats are not reset at PFReset, they likely will not
2396 * be zeroed when the driver starts. We'll save the first values read
2397 * and use them as offsets to be subtracted from the raw values in order
2398 * to report stats that count from zero.
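 * Example (48-bit mask): an offset of 0xFFFFFFFFFFF0 and a post-wrap
 * read of 0x10 yield (0x10 + 0xFFFFFFFFFFFF) - 0xFFFFFFFFFFF0 + 1 = 0x20 counts.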
2399 **/
2400 static void
2401 _ixl_stat_update_helper(struct i40e_hw *hw, u32 reg,
2402 bool offset_loaded, u64 mask, u64 *offset, u64 *stat)
2403 {
2404 u64 new_data = rd64(hw, reg);
2405
2406 if (!offset_loaded)
2407 *offset = new_data;
2408 if (new_data >= *offset)
2409 *stat = new_data - *offset;
2410 else
2411 *stat = (new_data + mask) - *offset + 1;
2412 *stat &= mask;
2413 }
2414
2415 /**
2416 * Read and update a 48 bit stat from the hw
2417 **/
2418 void
2419 ixl_stat_update48(struct i40e_hw *hw, u32 reg,
2420 bool offset_loaded, u64 *offset, u64 *stat)
2421 {
2422 _ixl_stat_update_helper(hw,
2423 reg,
2424 offset_loaded,
2425 0xFFFFFFFFFFFFULL,
2426 offset,
2427 stat);
2428 }
2429
2430 /**
2431 * ixl_stat_update64 - read and update a 64 bit stat from the chip.
2432 **/
2433 void
2434 ixl_stat_update64(struct i40e_hw *hw, u32 reg,
2435 bool offset_loaded, u64 *offset, u64 *stat)
2436 {
2437 _ixl_stat_update_helper(hw,
2438 reg,
2439 offset_loaded,
2440 0xFFFFFFFFFFFFFFFFULL,
2441 offset,
2442 stat);
2443 }
2444
2445 /**
2446 * Read and update a 32 bit stat from the hw
2447 **/
2448 void
2449 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2450 bool offset_loaded, u64 *offset, u64 *stat)
2451 {
2452 u32 new_data;
2453
2454 new_data = rd32(hw, reg);
2455 if (!offset_loaded)
2456 *offset = new_data;
2457 if (new_data >= *offset)
2458 *stat = (u32)(new_data - *offset);
2459 else
2460 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2461 }
2462
2463 /**
2464 * Add the subset of device sysctls that are safe to use in recovery mode
2465 */
2466 void
2467 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2468 {
2469 device_t dev = pf->dev;
2470
2471 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2472 struct sysctl_oid_list *ctx_list =
2473 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2474
2475 struct sysctl_oid *debug_node;
2476 struct sysctl_oid_list *debug_list;
2477
2478 SYSCTL_ADD_PROC(ctx, ctx_list,
2479 OID_AUTO, "fw_version",
2480 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2481 ixl_sysctl_show_fw, "A", "Firmware version");
2482
2483 /* Add sysctls meant to print debug information, but don't list them
2484 * in "sysctl -a" output. */
2485 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2486 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2487 "Debug Sysctls");
2488 debug_list = SYSCTL_CHILDREN(debug_node);
2489
2490 SYSCTL_ADD_UINT(ctx, debug_list,
2491 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2492 &pf->hw.debug_mask, 0, "Shared code debug message level");
2493
2494 SYSCTL_ADD_UINT(ctx, debug_list,
2495 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2496 &pf->dbg_mask, 0, "Non-shared code debug message level");
2497
2498 SYSCTL_ADD_PROC(ctx, debug_list,
2499 OID_AUTO, "dump_debug_data",
2500 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2501 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2502
2503 SYSCTL_ADD_PROC(ctx, debug_list,
2504 OID_AUTO, "do_pf_reset",
2505 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2506 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2507
2508 SYSCTL_ADD_PROC(ctx, debug_list,
2509 OID_AUTO, "do_core_reset",
2510 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2511 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2512
2513 SYSCTL_ADD_PROC(ctx, debug_list,
2514 OID_AUTO, "do_global_reset",
2515 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2516 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2517
2518 SYSCTL_ADD_PROC(ctx, debug_list,
2519 OID_AUTO, "queue_interrupt_table",
2520 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2521 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2522
2523 SYSCTL_ADD_PROC(ctx, debug_list,
2524 OID_AUTO, "queue_int_ctln",
2525 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2526 pf, 0, ixl_sysctl_debug_queue_int_ctln, "A",
2527 "View MSI-X control registers for RX queues");
2528 }
2529
2530 void
2531 ixl_add_device_sysctls(struct ixl_pf *pf)
2532 {
2533 device_t dev = pf->dev;
2534 struct i40e_hw *hw = &pf->hw;
2535
2536 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2537 struct sysctl_oid_list *ctx_list =
2538 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2539
2540 struct sysctl_oid *debug_node;
2541 struct sysctl_oid_list *debug_list;
2542
2543 struct sysctl_oid *fec_node;
2544 struct sysctl_oid_list *fec_list;
2545 struct sysctl_oid *eee_node;
2546 struct sysctl_oid_list *eee_list;
2547
2548 /* Set up sysctls */
2549 SYSCTL_ADD_PROC(ctx, ctx_list,
2550 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2551 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2552
2553 SYSCTL_ADD_PROC(ctx, ctx_list,
2554 OID_AUTO, "advertise_speed",
2555 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2556 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2557
2558 SYSCTL_ADD_PROC(ctx, ctx_list,
2559 OID_AUTO, "supported_speeds",
2560 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2561 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2562
2563 SYSCTL_ADD_PROC(ctx, ctx_list,
2564 OID_AUTO, "current_speed",
2565 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2566 ixl_sysctl_current_speed, "A", "Current Port Speed");
2567
2568 SYSCTL_ADD_PROC(ctx, ctx_list,
2569 OID_AUTO, "fw_version",
2570 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2571 ixl_sysctl_show_fw, "A", "Firmware version");
2572
2573 SYSCTL_ADD_PROC(ctx, ctx_list,
2574 OID_AUTO, "unallocated_queues",
2575 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2576 ixl_sysctl_unallocated_queues, "I",
2577 "Queues not allocated to a PF or VF");
2578
2579 SYSCTL_ADD_PROC(ctx, ctx_list,
2580 OID_AUTO, "tx_itr",
2581 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2582 ixl_sysctl_pf_tx_itr, "I",
2583 "Immediately set TX ITR value for all queues");
2584
2585 SYSCTL_ADD_PROC(ctx, ctx_list,
2586 OID_AUTO, "rx_itr",
2587 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2588 ixl_sysctl_pf_rx_itr, "I",
2589 "Immediately set RX ITR value for all queues");
2590
2591 SYSCTL_ADD_INT(ctx, ctx_list,
2592 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2593 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2594
2595 SYSCTL_ADD_INT(ctx, ctx_list,
2596 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2597 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2598
2599 /* Add FEC sysctls for 25G adapters */
2600 if (i40e_is_25G_device(hw->device_id)) {
2601 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2602 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2603 "FEC Sysctls");
2604 fec_list = SYSCTL_CHILDREN(fec_node);
2605
2606 SYSCTL_ADD_PROC(ctx, fec_list,
2607 OID_AUTO, "fc_ability",
2608 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2609 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2610
2611 SYSCTL_ADD_PROC(ctx, fec_list,
2612 OID_AUTO, "rs_ability",
2613 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2614 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2615
2616 SYSCTL_ADD_PROC(ctx, fec_list,
2617 OID_AUTO, "fc_requested",
2618 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2619 ixl_sysctl_fec_fc_request, "I",
2620 "FC FEC mode requested on link");
2621
2622 SYSCTL_ADD_PROC(ctx, fec_list,
2623 OID_AUTO, "rs_requested",
2624 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2625 ixl_sysctl_fec_rs_request, "I",
2626 "RS FEC mode requested on link");
2627
2628 SYSCTL_ADD_PROC(ctx, fec_list,
2629 OID_AUTO, "auto_fec_enabled",
2630 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2631 ixl_sysctl_fec_auto_enable, "I",
2632 "Let FW decide FEC ability/request modes");
2633 }
2634
2635 SYSCTL_ADD_PROC(ctx, ctx_list,
2636 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2637 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2638
2639 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2640 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2641 "Energy Efficient Ethernet (EEE) Sysctls");
2642 eee_list = SYSCTL_CHILDREN(eee_node);
2643
2644 SYSCTL_ADD_PROC(ctx, eee_list,
2645 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2646 pf, 0, ixl_sysctl_eee_enable, "I",
2647 "Enable Energy Efficient Ethernet (EEE)");
2648
2649 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2650 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2651 "TX LPI status");
2652
2653 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2654 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2655 "RX LPI status");
2656
2657 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2658 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2659 "TX LPI count");
2660
2661 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2662 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2663 "RX LPI count");
2664
2665 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2666 "link_active_on_if_down",
2667 CTLTYPE_INT | CTLFLAG_RWTUN,
2668 pf, 0, ixl_sysctl_set_link_active, "I",
2669 IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2670
2671 /* Add sysctls meant to print debug information, but don't list them
2672 * in "sysctl -a" output. */
2673 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2674 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2675 "Debug Sysctls");
2676 debug_list = SYSCTL_CHILDREN(debug_node);
2677
2678 SYSCTL_ADD_UINT(ctx, debug_list,
2679 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2680 &pf->hw.debug_mask, 0, "Shared code debug message level");
2681
2682 SYSCTL_ADD_UINT(ctx, debug_list,
2683 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2684 &pf->dbg_mask, 0, "Non-shared code debug message level");
2685
2686 SYSCTL_ADD_PROC(ctx, debug_list,
2687 OID_AUTO, "link_status",
2688 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2689 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2690
2691 SYSCTL_ADD_PROC(ctx, debug_list,
2692 OID_AUTO, "phy_abilities_init",
2693 CTLTYPE_STRING | CTLFLAG_RD,
2694 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2695
2696 SYSCTL_ADD_PROC(ctx, debug_list,
2697 OID_AUTO, "phy_abilities",
2698 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2699 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2700
2701 SYSCTL_ADD_PROC(ctx, debug_list,
2702 OID_AUTO, "filter_list",
2703 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2704 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2705
2706 SYSCTL_ADD_PROC(ctx, debug_list,
2707 OID_AUTO, "hw_res_alloc",
2708 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2709 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2710
2711 SYSCTL_ADD_PROC(ctx, debug_list,
2712 OID_AUTO, "switch_config",
2713 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2714 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2715
2716 SYSCTL_ADD_PROC(ctx, debug_list,
2717 OID_AUTO, "switch_vlans",
2718 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2719 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2720
2721 SYSCTL_ADD_PROC(ctx, debug_list,
2722 OID_AUTO, "rss_key",
2723 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2724 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2725
2726 SYSCTL_ADD_PROC(ctx, debug_list,
2727 OID_AUTO, "rss_lut",
2728 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2729 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2730
2731 SYSCTL_ADD_PROC(ctx, debug_list,
2732 OID_AUTO, "rss_hena",
2733 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2734 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2735
2736 SYSCTL_ADD_PROC(ctx, debug_list,
2737 OID_AUTO, "disable_fw_link_management",
2738 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2739 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2740
2741 SYSCTL_ADD_PROC(ctx, debug_list,
2742 OID_AUTO, "dump_debug_data",
2743 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2744 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2745
2746 SYSCTL_ADD_PROC(ctx, debug_list,
2747 OID_AUTO, "do_pf_reset",
2748 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2749 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2750
2751 SYSCTL_ADD_PROC(ctx, debug_list,
2752 OID_AUTO, "do_core_reset",
2753 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2754 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2755
2756 SYSCTL_ADD_PROC(ctx, debug_list,
2757 OID_AUTO, "do_global_reset",
2758 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2759 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2760
2761 SYSCTL_ADD_PROC(ctx, debug_list,
2762 OID_AUTO, "queue_interrupt_table",
2763 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2764 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2765
2766 SYSCTL_ADD_PROC(ctx, debug_list,
2767 OID_AUTO, "phy_statistics", CTLTYPE_STRING | CTLFLAG_RD,
2768 pf, 0, ixl_sysctl_phy_statistics, "A", "PHY Statistics");
2769
2770 if (pf->has_i2c) {
2771 SYSCTL_ADD_PROC(ctx, debug_list,
2772 OID_AUTO, "read_i2c_byte",
2773 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2774 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2775
2776 SYSCTL_ADD_PROC(ctx, debug_list,
2777 OID_AUTO, "write_i2c_byte",
2778 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2779 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2780
2781 SYSCTL_ADD_PROC(ctx, debug_list,
2782 OID_AUTO, "read_i2c_diag_data",
2783 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2784 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2785 }
2786 }
2787
2788 /*
2789 * Primarily for finding out, at runtime, how many queues
2790 * can be assigned to VFs.
2791 */
2792 static int
2793 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2794 {
2795 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2796 int queues;
2797
2798 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2799
2800 return sysctl_handle_int(oidp, NULL, queues, req);
2801 }
2802
2803 static const char *
2804 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2805 {
2806 const char * link_speed_str[] = {
2807 "Unknown",
2808 "100 Mbps",
2809 "1 Gbps",
2810 "10 Gbps",
2811 "40 Gbps",
2812 "20 Gbps",
2813 "25 Gbps",
2814 "2.5 Gbps",
2815 "5 Gbps"
2816 };
2817 int index;
2818
2819 switch (link_speed) {
2820 case I40E_LINK_SPEED_100MB:
2821 index = 1;
2822 break;
2823 case I40E_LINK_SPEED_1GB:
2824 index = 2;
2825 break;
2826 case I40E_LINK_SPEED_10GB:
2827 index = 3;
2828 break;
2829 case I40E_LINK_SPEED_40GB:
2830 index = 4;
2831 break;
2832 case I40E_LINK_SPEED_20GB:
2833 index = 5;
2834 break;
2835 case I40E_LINK_SPEED_25GB:
2836 index = 6;
2837 break;
2838 case I40E_LINK_SPEED_2_5GB:
2839 index = 7;
2840 break;
2841 case I40E_LINK_SPEED_5GB:
2842 index = 8;
2843 break;
2844 case I40E_LINK_SPEED_UNKNOWN:
2845 default:
2846 index = 0;
2847 break;
2848 }
2849
2850 return (link_speed_str[index]);
2851 }
2852
2853 int
2854 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2855 {
2856 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2857 struct i40e_hw *hw = &pf->hw;
2858 int error = 0;
2859
2860 ixl_update_link_status(pf);
2861
2862 error = sysctl_handle_string(oidp,
2863 __DECONST(void *,
2864 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2865 8, req);
2866
2867 return (error);
2868 }
2869
2870 /*
2871 * Converts an 8-bit speed bitmap between sysctl flags and
2872 * Admin Queue flags.
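 * Example: sysctl flags 0x6 (1G | 10G) convert to the AQ value
 * (I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB), and back again.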
2873 */
2874 static u8
2875 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2876 {
2877 #define SPEED_MAP_SIZE 8
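	/* Each entry packs an AQ link-speed flag in the low byte and its sysctl flag in the high byte */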
2878 static u16 speedmap[SPEED_MAP_SIZE] = {
2879 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2880 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
2881 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
2882 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
2883 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
2884 (I40E_LINK_SPEED_40GB | (0x20 << 8)),
2885 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2886 (I40E_LINK_SPEED_5GB | (0x80 << 8)),
2887 };
2888 u8 retval = 0;
2889
2890 for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2891 if (to_aq)
2892 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2893 else
2894 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2895 }
2896
2897 return (retval);
2898 }
2899
2900 int
2901 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2902 {
2903 struct i40e_hw *hw = &pf->hw;
2904 device_t dev = pf->dev;
2905 struct i40e_aq_get_phy_abilities_resp abilities;
2906 struct i40e_aq_set_phy_config config;
2907 enum i40e_status_code aq_error = 0;
2908
2909 /* Get current capability information */
2910 aq_error = i40e_aq_get_phy_capabilities(hw,
2911 FALSE, FALSE, &abilities, NULL);
2912 if (aq_error) {
2913 device_printf(dev,
2914 "%s: Error getting phy capabilities %d,"
2915 " aq error: %d\n", __func__, aq_error,
2916 hw->aq.asq_last_status);
2917 return (EIO);
2918 }
2919
2920 /* Prepare new config */
2921 bzero(&config, sizeof(config));
2922 if (from_aq)
2923 config.link_speed = speeds;
2924 else
2925 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2926 config.phy_type = abilities.phy_type;
2927 config.phy_type_ext = abilities.phy_type_ext;
2928 config.abilities = abilities.abilities
2929 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2930 config.eee_capability = abilities.eee_capability;
2931 config.eeer = abilities.eeer_val;
2932 config.low_power_ctrl = abilities.d3_lpan;
2933 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2934 & I40E_AQ_PHY_FEC_CONFIG_MASK;
2935
2936 /* Do aq command & restart link */
2937 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2938 if (aq_error) {
2939 device_printf(dev,
2940 "%s: Error setting new phy config %d,"
2941 " aq error: %d\n", __func__, aq_error,
2942 hw->aq.asq_last_status);
2943 return (EIO);
2944 }
2945
2946 return (0);
2947 }
2948
2949 /*
2950 ** Supported link speeds
2951 ** Flags:
2952 ** 0x1 - 100 Mb
2953 ** 0x2 - 1G
2954 ** 0x4 - 10G
2955 ** 0x8 - 20G
2956 ** 0x10 - 25G
2957 ** 0x20 - 40G
2958 ** 0x40 - 2.5G
2959 ** 0x80 - 5G
2960 */
2961 static int
2962 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2963 {
2964 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2965 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2966
2967 return sysctl_handle_int(oidp, NULL, supported, req);
2968 }
2969
2970 /*
2971 ** Control link advertise speed:
2972 ** Flags:
2973 ** 0x1 - advertise 100 Mb
2974 ** 0x2 - advertise 1G
2975 ** 0x4 - advertise 10G
2976 ** 0x8 - advertise 20G
2977 ** 0x10 - advertise 25G
2978 ** 0x20 - advertise 40G
2979 ** 0x40 - advertise 2.5G
2980 ** 0x80 - advertise 5G
2981 **
2982 ** Set to 0 to disable link
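** e.g. a sysctl value of 0x14 advertises 10G and 25G only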
2983 */
2984 int
2985 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2986 {
2987 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2988 device_t dev = pf->dev;
2989 u8 converted_speeds;
2990 int requested_ls = 0;
2991 int error = 0;
2992
2993 /* Read in new mode */
2994 requested_ls = pf->advertised_speed;
2995 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
2996 if ((error) || (req->newptr == NULL))
2997 return (error);
2998 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2999 device_printf(dev, "Interface is currently in FW recovery mode. "
3000 "Setting advertise speed not supported\n");
3001 return (EINVAL);
3002 }
3003
3004 /* Error out if bits outside of possible flag range are set */
3005 if ((requested_ls & ~((u8)0xFF)) != 0) {
3006 device_printf(dev, "Input advertised speed out of range; "
3007 "valid flags are: 0x%02x\n",
3008 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3009 return (EINVAL);
3010 }
3011
3012 /* Check if adapter supports input value */
3013 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3014 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3015 device_printf(dev, "Invalid advertised speed; "
3016 "valid flags are: 0x%02x\n",
3017 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3018 return (EINVAL);
3019 }
3020
3021 error = ixl_set_advertised_speeds(pf, requested_ls, false);
3022 if (error)
3023 return (error);
3024
3025 pf->advertised_speed = requested_ls;
3026 ixl_update_link_status(pf);
3027 return (0);
3028 }
3029
3030 /*
3031 * Input: bitmap of enum i40e_aq_link_speed
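 * Output: the highest speed present in the bitmap, in bits per second.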
3032 */
3033 u64
3034 ixl_max_aq_speed_to_value(u8 link_speeds)
3035 {
3036 if (link_speeds & I40E_LINK_SPEED_40GB)
3037 return IF_Gbps(40);
3038 if (link_speeds & I40E_LINK_SPEED_25GB)
3039 return IF_Gbps(25);
3040 if (link_speeds & I40E_LINK_SPEED_20GB)
3041 return IF_Gbps(20);
3042 if (link_speeds & I40E_LINK_SPEED_10GB)
3043 return IF_Gbps(10);
3044 if (link_speeds & I40E_LINK_SPEED_5GB)
3045 return IF_Gbps(5);
3046 if (link_speeds & I40E_LINK_SPEED_2_5GB)
3047 return IF_Mbps(2500);
3048 if (link_speeds & I40E_LINK_SPEED_1GB)
3049 return IF_Gbps(1);
3050 /* 100 Mb is the minimum supported link speed */
3051 return IF_Mbps(100);
3055 }
3056
3057 /*
3058 ** Get the width and transaction speed of
3059 ** the bus this adapter is plugged into.
3060 */
3061 void
3062 ixl_get_bus_info(struct ixl_pf *pf)
3063 {
3064 struct i40e_hw *hw = &pf->hw;
3065 device_t dev = pf->dev;
3066 u16 link;
3067 u32 offset, num_ports;
3068 u64 max_speed;
3069
3070 /* Some devices don't use PCIE */
3071 if (hw->mac.type == I40E_MAC_X722)
3072 return;
3073
3074 /* Read PCI Express Capabilities Link Status Register */
3075 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3076 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3077
3078 /* Fill out hw struct with PCIE info */
3079 i40e_set_pci_config_data(hw, link);
3080
3081 /* Use info to print out bandwidth messages */
3082 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3083 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3084 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3085 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3086 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3087 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3088 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3089 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3090 ("Unknown"));
3091
3092 /*
3093 * If the adapter is in a slot running at its maximum supported
3094 * speed and width, no warning message needs to be printed.
3095 */
3096 if (hw->bus.speed >= i40e_bus_speed_8000
3097 && hw->bus.width >= i40e_bus_width_pcie_x8)
3098 return;
3099
3100 num_ports = bitcount32(hw->func_caps.valid_functions);
3101 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3102
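	/*
	 * Rough check: compare the aggregate bandwidth the ports may need
	 * (in Mb/s) against the per-lane transfer rate times the link width.
	 */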
3103 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3104 device_printf(dev, "PCI-Express bandwidth available"
3105 " for this device may be insufficient for"
3106 " optimal performance.\n");
3107 device_printf(dev, "Please move the device to a different"
3108 " PCI-e link with more lanes and/or higher"
3109 " transfer rate.\n");
3110 }
3111 }
3112
3113 static int
3114 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3115 {
3116 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3117 struct i40e_hw *hw = &pf->hw;
3118 struct sbuf *sbuf;
3119
3120 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3121 ixl_nvm_version_str(hw, sbuf);
3122 sbuf_finish(sbuf);
3123 sbuf_delete(sbuf);
3124
3125 return (0);
3126 }
3127
3128 void
3129 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3130 {
3131 u8 nvma_ptr = nvma->config & 0xFF;
3132 u8 nvma_flags = (nvma->config & 0xF00) >> 8;
3133 const char * cmd_str;
3134
3135 switch (nvma->command) {
3136 case I40E_NVM_READ:
3137 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
3138 nvma->offset == 0 && nvma->data_size == 1) {
3139 device_printf(dev, "NVMUPD: Get Driver Status Command\n");
3140 return;
3141 }
3142 cmd_str = "READ ";
3143 break;
3144 case I40E_NVM_WRITE:
3145 cmd_str = "WRITE";
3146 break;
3147 default:
3148 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
3149 return;
3150 }
3151 device_printf(dev,
3152 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
3153 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
3154 }
3155
3156 int
3157 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3158 {
3159 struct i40e_hw *hw = &pf->hw;
3160 struct i40e_nvm_access *nvma;
3161 device_t dev = pf->dev;
3162 enum i40e_status_code status = 0;
3163 size_t nvma_size, ifd_len, exp_len;
3164 int err, perrno;
3165
3166 DEBUGFUNC("ixl_handle_nvmupd_cmd");
3167
3168 /* Sanity checks */
3169 nvma_size = sizeof(struct i40e_nvm_access);
3170 ifd_len = ifd->ifd_len;
3171
3172 if (ifd_len < nvma_size ||
3173 ifd->ifd_data == NULL) {
3174 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3175 __func__);
3176 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3177 __func__, ifd_len, nvma_size);
3178 device_printf(dev, "%s: data pointer: %p\n", __func__,
3179 ifd->ifd_data);
3180 return (EINVAL);
3181 }
3182
3183 nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3184 err = copyin(ifd->ifd_data, nvma, ifd_len);
3185 if (err) {
3186 device_printf(dev, "%s: Cannot get request from user space\n",
3187 __func__);
3188 free(nvma, M_IXL);
3189 return (err);
3190 }
3191
3192 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3193 ixl_print_nvm_cmd(dev, nvma);
3194
3195 if (IXL_PF_IS_RESETTING(pf)) {
3196 int count = 0;
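		/* Poll for up to 10 seconds (100 * 100ms) for the reset to finish */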
3197 while (count++ < 100) {
3198 i40e_msec_delay(100);
3199 if (!(IXL_PF_IS_RESETTING(pf)))
3200 break;
3201 }
3202 }
3203
3204 if (IXL_PF_IS_RESETTING(pf)) {
3205 device_printf(dev,
3206 "%s: timeout waiting for EMP reset to finish\n",
3207 __func__);
3208 free(nvma, M_IXL);
3209 return (-EBUSY);
3210 }
3211
3212 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3213 device_printf(dev,
3214 "%s: invalid request, data size not in supported range\n",
3215 __func__);
3216 free(nvma, M_IXL);
3217 return (EINVAL);
3218 }
3219
3220 /*
3221 * Older versions of the NVM update tool don't set ifd_len to the size
3222 * of the entire buffer passed to the ioctl. Check the data_size field
3223 * in the contained i40e_nvm_access struct and ensure everything is
3224 * copied in from userspace.
3225 */
3226 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3227
3228 if (ifd_len < exp_len) {
3229 ifd_len = exp_len;
3230 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3231 err = copyin(ifd->ifd_data, nvma, ifd_len);
3232 if (err) {
3233 device_printf(dev, "%s: Cannot get request from user space\n",
3234 __func__);
3235 free(nvma, M_IXL);
3236 return (err);
3237 }
3238 }
3239
3240 // TODO: Might need a different lock here
3241 // IXL_PF_LOCK(pf);
3242 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3243 // IXL_PF_UNLOCK(pf);
3244
3245 err = copyout(nvma, ifd->ifd_data, ifd_len);
3246 free(nvma, M_IXL);
3247 if (err) {
3248 device_printf(dev, "%s: Cannot return data to user space\n",
3249 __func__);
3250 return (err);
3251 }
3252
3253 /* Let the nvmupdate report errors, show them only when debug is enabled */
3254 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3255 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3256 i40e_stat_str(hw, status), perrno);
3257
3258 /*
3259 * -EPERM is actually ERESTART, which the kernel interprets as a request
3260 * to run this ioctl again, so return -EACCES in place of -EPERM.
3261 */
3262 if (perrno == -EPERM)
3263 return (-EACCES);
3264 else
3265 return (perrno);
3266 }
3267
3268 int
3269 ixl_find_i2c_interface(struct ixl_pf *pf)
3270 {
3271 struct i40e_hw *hw = &pf->hw;
3272 bool i2c_en, port_matched;
3273 u32 reg;
3274
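	/* Check each of the four MDIO/I2C select registers for one routed to this port */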
3275 for (int i = 0; i < 4; i++) {
3276 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3277 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3278 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3279 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3280 & BIT(hw->port);
3281 if (i2c_en && port_matched)
3282 return (i);
3283 }
3284
3285 return (-1);
3286 }
3287
3288 void
3289 ixl_set_link(struct ixl_pf *pf, bool enable)
3290 {
3291 struct i40e_hw *hw = &pf->hw;
3292 device_t dev = pf->dev;
3293 struct i40e_aq_get_phy_abilities_resp abilities;
3294 struct i40e_aq_set_phy_config config;
3295 enum i40e_status_code aq_error = 0;
3296 u32 phy_type, phy_type_ext;
3297
3298 /* Get initial capability information */
3299 aq_error = i40e_aq_get_phy_capabilities(hw,
3300 FALSE, TRUE, &abilities, NULL);
3301 if (aq_error) {
3302 device_printf(dev,
3303 "%s: Error getting phy capabilities %d,"
3304 " aq error: %d\n", __func__, aq_error,
3305 hw->aq.asq_last_status);
3306 return;
3307 }
3308
3309 phy_type = abilities.phy_type;
3310 phy_type_ext = abilities.phy_type_ext;
3311
3312 /* Get current capability information */
3313 aq_error = i40e_aq_get_phy_capabilities(hw,
3314 FALSE, FALSE, &abilities, NULL);
3315 if (aq_error) {
3316 device_printf(dev,
3317 "%s: Error getting phy capabilities %d,"
3318 " aq error: %d\n", __func__, aq_error,
3319 hw->aq.asq_last_status);
3320 return;
3321 }
3322
3323 /* Prepare new config */
3324 memset(&config, 0, sizeof(config));
3325 config.link_speed = abilities.link_speed;
3326 config.abilities = abilities.abilities;
3327 config.eee_capability = abilities.eee_capability;
3328 config.eeer = abilities.eeer_val;
3329 config.low_power_ctrl = abilities.d3_lpan;
3330 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3331 & I40E_AQ_PHY_FEC_CONFIG_MASK;
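	/* PHY type masks left at zero keep the link down; they are restored below when enabling */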
3332 config.phy_type = 0;
3333 config.phy_type_ext = 0;
3334
3335 config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3336 I40E_AQ_PHY_FLAG_PAUSE_RX);
3337
3338 switch (pf->fc) {
3339 case I40E_FC_FULL:
3340 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3341 I40E_AQ_PHY_FLAG_PAUSE_RX;
3342 break;
3343 case I40E_FC_RX_PAUSE:
3344 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3345 break;
3346 case I40E_FC_TX_PAUSE:
3347 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3348 break;
3349 default:
3350 break;
3351 }
3352
3353 if (enable) {
3354 config.phy_type = phy_type;
3355 config.phy_type_ext = phy_type_ext;
3356
3357 }
3358
3359 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3360 if (aq_error) {
3361 device_printf(dev,
3362 "%s: Error setting new phy config %d,"
3363 " aq error: %d\n", __func__, aq_error,
3364 hw->aq.asq_last_status);
3365 return;
3366 }
3367
3368 aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3369 if (aq_error) {
3370 device_printf(dev,
3371 "%s: Error set link config %d,"
3372 " aq error: %d\n", __func__, aq_error,
3373 hw->aq.asq_last_status);
3374 return;
3375 }
3376 }
3377
3378 static char *
3379 ixl_phy_type_string(u32 bit_pos, bool ext)
3380 {
3381 static char * phy_types_str[32] = {
3382 "SGMII",
3383 "1000BASE-KX",
3384 "10GBASE-KX4",
3385 "10GBASE-KR",
3386 "40GBASE-KR4",
3387 "XAUI",
3388 "XFI",
3389 "SFI",
3390 "XLAUI",
3391 "XLPPI",
3392 "40GBASE-CR4",
3393 "10GBASE-CR1",
3394 "SFP+ Active DA",
3395 "QSFP+ Active DA",
3396 "Reserved (14)",
3397 "Reserved (15)",
3398 "Reserved (16)",
3399 "100BASE-TX",
3400 "1000BASE-T",
3401 "10GBASE-T",
3402 "10GBASE-SR",
3403 "10GBASE-LR",
3404 "10GBASE-SFP+Cu",
3405 "10GBASE-CR1",
3406 "40GBASE-CR4",
3407 "40GBASE-SR4",
3408 "40GBASE-LR4",
3409 "1000BASE-SX",
3410 "1000BASE-LX",
3411 "1000BASE-T Optical",
3412 "20GBASE-KR2",
3413 "Reserved (31)"
3414 };
3415 static char * ext_phy_types_str[8] = {
3416 "25GBASE-KR",
3417 "25GBASE-CR",
3418 "25GBASE-SR",
3419 "25GBASE-LR",
3420 "25GBASE-AOC",
3421 "25GBASE-ACC",
3422 "2.5GBASE-T",
3423 "5GBASE-T"
3424 };
3425
3426 if (ext && bit_pos > 7) return "Invalid_Ext";
3427 if (bit_pos > 31) return "Invalid";
3428
3429 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3430 }
3431
3432 /* TODO: ERJ: I don't think this is necessary anymore. */
3433 int
3434 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3435 {
3436 device_t dev = pf->dev;
3437 struct i40e_hw *hw = &pf->hw;
3438 struct i40e_aq_desc desc;
3439 enum i40e_status_code status;
3440
3441 struct i40e_aqc_get_link_status *aq_link_status =
3442 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3443
3444 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3445 aq_link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); /* set the flags on the descriptor actually sent */
3446 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3447 if (status) {
3448 device_printf(dev,
3449 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3450 __func__, i40e_stat_str(hw, status),
3451 i40e_aq_str(hw, hw->aq.asq_last_status));
3452 return (EIO);
3453 }
3454
3455 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3456 return (0);
3457 }
3458
3459 static char *
3460 ixl_phy_type_string_ls(u8 val)
3461 {
3462 if (val >= 0x1F)
3463 return ixl_phy_type_string(val - 0x1F, true);
3464 else
3465 return ixl_phy_type_string(val, false);
3466 }
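
/*
 * Example (illustrative): the link-status AQ response encodes extended
 * PHY types at and above 0x1F, so a phy_type value of 0x20 maps to
 * extended bit 1, "25GBASE-CR", while 0x12 (18) stays in the base
 * table as "1000BASE-T".
 */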
3467
3468 static int
3469 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3470 {
3471 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3472 device_t dev = pf->dev;
3473 struct sbuf *buf;
3474 int error = 0;
3475
3476 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3477 if (!buf) {
3478 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3479 return (ENOMEM);
3480 }
3481
3482 struct i40e_aqc_get_link_status link_status;
3483 error = ixl_aq_get_link_status(pf, &link_status);
3484 if (error) {
3485 sbuf_delete(buf);
3486 return (error);
3487 }
3488
3489 sbuf_printf(buf, "\n"
3490 "PHY Type : 0x%02x<%s>\n"
3491 "Speed : 0x%02x\n"
3492 "Link info: 0x%02x\n"
3493 "AN info : 0x%02x\n"
3494 "Ext info : 0x%02x\n"
3495 "Loopback : 0x%02x\n"
3496 "Max Frame: %d\n"
3497 "Config : 0x%02x\n"
3498 "Power : 0x%02x",
3499 link_status.phy_type,
3500 ixl_phy_type_string_ls(link_status.phy_type),
3501 link_status.link_speed,
3502 link_status.link_info,
3503 link_status.an_info,
3504 link_status.ext_info,
3505 link_status.loopback,
3506 link_status.max_frame_size,
3507 link_status.config,
3508 link_status.power_desc);
3509
3510 error = sbuf_finish(buf);
3511 if (error)
3512 device_printf(dev, "Error finishing sbuf: %d\n", error);
3513
3514 sbuf_delete(buf);
3515 return (error);
3516 }
3517
3518 static int
3519 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3520 {
3521 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3522 struct i40e_hw *hw = &pf->hw;
3523 device_t dev = pf->dev;
3524 enum i40e_status_code status;
3525 struct i40e_aq_get_phy_abilities_resp abilities;
3526 struct sbuf *buf;
3527 int error = 0;
3528
3529 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3530 if (!buf) {
3531 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3532 return (ENOMEM);
3533 }
3534
3535 status = i40e_aq_get_phy_capabilities(hw,
3536 FALSE, arg2 != 0, &abilities, NULL);
3537 if (status) {
3538 device_printf(dev,
3539 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3540 __func__, i40e_stat_str(hw, status),
3541 i40e_aq_str(hw, hw->aq.asq_last_status));
3542 sbuf_delete(buf);
3543 return (EIO);
3544 }
3545
3546 sbuf_printf(buf, "\n"
3547 "PHY Type : %08x",
3548 abilities.phy_type);
3549
3550 if (abilities.phy_type != 0) {
3551 sbuf_printf(buf, "<");
3552 for (int i = 0; i < 32; i++)
3553 if ((1 << i) & abilities.phy_type)
3554 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3555 sbuf_printf(buf, ">");
3556 }
3557
3558 sbuf_printf(buf, "\nPHY Ext : %02x",
3559 abilities.phy_type_ext);
3560
3561 if (abilities.phy_type_ext != 0) {
3562 sbuf_printf(buf, "<");
3563 for (int i = 0; i < 4; i++)
3564 if ((1 << i) & abilities.phy_type_ext)
3565 sbuf_printf(buf, "%s,",
3566 ixl_phy_type_string(i, true));
3567 sbuf_printf(buf, ">");
3568 }
3569
3570 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
3571 if (abilities.link_speed != 0) {
3572 u8 link_speed;
3573 sbuf_printf(buf, " <");
3574 for (int i = 0; i < 8; i++) {
3575 link_speed = (1 << i) & abilities.link_speed;
3576 if (link_speed)
3577 sbuf_printf(buf, "%s, ",
3578 ixl_link_speed_string(link_speed));
3579 }
3580 sbuf_printf(buf, ">");
3581 }
3582
3583 sbuf_printf(buf, "\n"
3584 "Abilities: %02x\n"
3585 "EEE cap : %04x\n"
3586 "EEER reg : %08x\n"
3587 "D3 Lpan : %02x\n"
3588 "ID : %02x %02x %02x %02x\n"
3589 "ModType : %02x %02x %02x\n"
3590 "ModType E: %01x\n"
3591 "FEC Cfg : %02x\n"
3592 "Ext CC : %02x",
3593 abilities.abilities, abilities.eee_capability,
3594 abilities.eeer_val, abilities.d3_lpan,
3595 abilities.phy_id[0], abilities.phy_id[1],
3596 abilities.phy_id[2], abilities.phy_id[3],
3597 abilities.module_type[0], abilities.module_type[1],
3598 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3599 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3600 abilities.ext_comp_code);
3601
3602 error = sbuf_finish(buf);
3603 if (error)
3604 device_printf(dev, "Error finishing sbuf: %d\n", error);
3605
3606 sbuf_delete(buf);
3607 return (error);
3608 }
3609
3610 static int
3611 ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS)
3612 {
3613 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3614 struct i40e_hw *hw = &pf->hw;
3615 device_t dev = pf->dev;
3616 struct sbuf *buf;
3617 int error = 0;
3618
3619 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3620 if (buf == NULL) {
3621 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3622 return (ENOMEM);
3623 }
3624
3625 if (hw->mac.type == I40E_MAC_X722) {
3626 sbuf_printf(buf, "\n"
3627 "PCS Link Control Register: unavailable\n"
3628 "PCS Link Status 1: unavailable\n"
3629 "PCS Link Status 2: unavailable\n"
3630 "XGMII FIFO Status: unavailable\n"
3631 "Auto-Negotiation (AN) Status: unavailable\n"
3632 "KR PCS Status: unavailable\n"
3633 "KR FEC Status 1 – FEC Correctable Blocks Counter: unavailable\n"
3634 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: unavailable"
3635 );
3636 } else {
3637 sbuf_printf(buf, "\n"
3638 "PCS Link Control Register: %#010X\n"
3639 "PCS Link Status 1: %#010X\n"
3640 "PCS Link Status 2: %#010X\n"
3641 "XGMII FIFO Status: %#010X\n"
3642 "Auto-Negotiation (AN) Status: %#010X\n"
3643 "KR PCS Status: %#010X\n"
3644 "KR FEC Status 1 – FEC Correctable Blocks Counter: %#010X\n"
3645 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: %#010X",
3646 rd32(hw, I40E_PRTMAC_PCS_LINK_CTRL),
3647 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1(0)),
3648 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS2),
3649 rd32(hw, I40E_PRTMAC_PCS_XGMII_FIFO_STATUS),
3650 rd32(hw, I40E_PRTMAC_PCS_AN_LP_STATUS),
3651 rd32(hw, I40E_PRTMAC_PCS_KR_STATUS),
3652 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS1),
3653 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS2)
3654 );
3655 }
3656
3657 error = sbuf_finish(buf);
3658 if (error)
3659 device_printf(dev, "Error finishing sbuf: %d\n", error);
3660
3661 sbuf_delete(buf);
3662 return (error);
3663 }
3664
3665 static int
3666 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3667 {
3668 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3669 struct ixl_vsi *vsi = &pf->vsi;
3670 struct ixl_mac_filter *f;
3671 device_t dev = pf->dev;
3672 int error = 0, ftl_len = 0, ftl_counter = 0;
3673
3674 struct sbuf *buf;
3675
3676 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3677 if (!buf) {
3678 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3679 return (ENOMEM);
3680 }
3681
3682 sbuf_printf(buf, "\n");
3683
3684 /* Print MAC filters */
3685 sbuf_printf(buf, "PF Filters:\n");
3686 LIST_FOREACH(f, &vsi->ftl, ftle)
3687 ftl_len++;
3688
3689 if (ftl_len < 1)
3690 sbuf_printf(buf, "(none)\n");
3691 else {
3692 LIST_FOREACH(f, &vsi->ftl, ftle) {
3693 sbuf_printf(buf,
3694 MAC_FORMAT ", vlan %4d, flags %#06x",
3695 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3696 /* don't print '\n' for last entry */
3697 if (++ftl_counter != ftl_len)
3698 sbuf_printf(buf, "\n");
3699 }
3700 }
3701
3702 #ifdef PCI_IOV
3703 /* TODO: Give each VF its own filter list sysctl */
3704 struct ixl_vf *vf;
3705 if (pf->num_vfs > 0) {
3706 sbuf_printf(buf, "\n\n");
3707 for (int i = 0; i < pf->num_vfs; i++) {
3708 vf = &pf->vfs[i];
3709 if (!(vf->vf_flags & VF_FLAG_ENABLED))
3710 continue;
3711
3712 vsi = &vf->vsi;
3713 ftl_len = 0, ftl_counter = 0;
3714 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3715 LIST_FOREACH(f, &vsi->ftl, ftle)
3716 ftl_len++;
3717
3718 if (ftl_len < 1)
3719 sbuf_printf(buf, "(none)\n");
3720 else {
3721 LIST_FOREACH(f, &vsi->ftl, ftle) {
3722 sbuf_printf(buf,
3723 MAC_FORMAT ", vlan %4d, flags %#06x\n",
3724 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3725 }
3726 }
3727 }
3728 }
3729 #endif
3730
3731 error = sbuf_finish(buf);
3732 if (error)
3733 device_printf(dev, "Error finishing sbuf: %d\n", error);
3734 sbuf_delete(buf);
3735
3736 return (error);
3737 }
3738
3739 #define IXL_SW_RES_SIZE 0x14
3740 int
3741 ixl_res_alloc_cmp(const void *a, const void *b)
3742 {
3743 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3744 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3745 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3746
3747 return ((int)one->resource_type - (int)two->resource_type);
3748 }
3749
3750 /*
3751 * Longest string length: 25
3752 */
3753 const char *
3754 ixl_switch_res_type_string(u8 type)
3755 {
3756 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3757 "VEB",
3758 "VSI",
3759 "Perfect Match MAC address",
3760 "S-tag",
3761 "(Reserved)",
3762 "Multicast hash entry",
3763 "Unicast hash entry",
3764 "VLAN",
3765 "VSI List entry",
3766 "(Reserved)",
3767 "VLAN Statistic Pool",
3768 "Mirror Rule",
3769 "Queue Set",
3770 "Inner VLAN Forward filter",
3771 "(Reserved)",
3772 "Inner MAC",
3773 "IP",
3774 "GRE/VN1 Key",
3775 "VN2 Key",
3776 "Tunneling Port"
3777 };
3778
3779 if (type < IXL_SW_RES_SIZE)
3780 return ixl_switch_res_type_strings[type];
3781 else
3782 return "(Reserved)";
3783 }
3784
3785 static int
3786 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3787 {
3788 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3789 struct i40e_hw *hw = &pf->hw;
3790 device_t dev = pf->dev;
3791 struct sbuf *buf;
3792 enum i40e_status_code status;
3793 int error = 0;
3794
3795 u8 num_entries;
3796 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3797
3798 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3799 if (!buf) {
3800 device_printf(dev, "Could not allocate sbuf for output.\n");
3801 return (ENOMEM);
3802 }
3803
3804 bzero(resp, sizeof(resp));
3805 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3806 resp,
3807 IXL_SW_RES_SIZE,
3808 NULL);
3809 if (status) {
3810 device_printf(dev,
3811 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3812 __func__, i40e_stat_str(hw, status),
3813 i40e_aq_str(hw, hw->aq.asq_last_status));
3814 sbuf_delete(buf);
3815 return (EIO);
3816 }
3817
3818 /* Sort entries by type for display */
3819 qsort(resp, num_entries,
3820 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3821 &ixl_res_alloc_cmp);
3822
3823 sbuf_cat(buf, "\n");
3824 sbuf_printf(buf, "# of entries: %d\n", num_entries);
3825 sbuf_printf(buf,
3826 " Type | Guaranteed | Total | Used | Un-allocated\n"
3827 " | (this) | (all) | (this) | (all) \n");
3828 for (int i = 0; i < num_entries; i++) {
3829 sbuf_printf(buf,
3830 "%25s | %10d %5d %6d %12d",
3831 ixl_switch_res_type_string(resp[i].resource_type),
3832 resp[i].guaranteed,
3833 resp[i].total,
3834 resp[i].used,
3835 resp[i].total_unalloced);
3836 if (i < num_entries - 1)
3837 sbuf_cat(buf, "\n");
3838 }
3839
3840 error = sbuf_finish(buf);
3841 if (error)
3842 device_printf(dev, "Error finishing sbuf: %d\n", error);
3843
3844 sbuf_delete(buf);
3845 return (error);
3846 }
3847
3848 enum ixl_sw_seid_offset {
3849 IXL_SW_SEID_EMP = 1,
3850 IXL_SW_SEID_MAC_START = 2,
3851 IXL_SW_SEID_MAC_END = 5,
3852 IXL_SW_SEID_PF_START = 16,
3853 IXL_SW_SEID_PF_END = 31,
3854 IXL_SW_SEID_VF_START = 32,
3855 IXL_SW_SEID_VF_END = 159,
3856 };
3857
3858 /*
3859 * Caller must init and delete sbuf; this function will clear and
3860 * finish it for caller.
3861 *
3862 * Note: The SEID argument only applies for elements defined by FW at
3863 * power-on; these include the EMP, Ports, PFs and VFs.
3864 */
3865 static char *
3866 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3867 {
3868 sbuf_clear(s);
3869
3870 /* If SEID is in certain ranges, then we can infer the
3871 * mapping of SEID to switch element.
3872 */
3873 if (seid == IXL_SW_SEID_EMP) {
3874 sbuf_cat(s, "EMP");
3875 goto out;
3876 } else if (seid >= IXL_SW_SEID_MAC_START &&
3877 seid <= IXL_SW_SEID_MAC_END) {
3878 sbuf_printf(s, "MAC %2d",
3879 seid - IXL_SW_SEID_MAC_START);
3880 goto out;
3881 } else if (seid >= IXL_SW_SEID_PF_START &&
3882 seid <= IXL_SW_SEID_PF_END) {
3883 sbuf_printf(s, "PF %3d",
3884 seid - IXL_SW_SEID_PF_START);
3885 goto out;
3886 } else if (seid >= IXL_SW_SEID_VF_START &&
3887 seid <= IXL_SW_SEID_VF_END) {
3888 sbuf_printf(s, "VF %3d",
3889 seid - IXL_SW_SEID_VF_START);
3890 goto out;
3891 }
3892
3893 switch (element_type) {
3894 case I40E_AQ_SW_ELEM_TYPE_BMC:
3895 sbuf_cat(s, "BMC");
3896 break;
3897 case I40E_AQ_SW_ELEM_TYPE_PV:
3898 sbuf_cat(s, "PV");
3899 break;
3900 case I40E_AQ_SW_ELEM_TYPE_VEB:
3901 sbuf_cat(s, "VEB");
3902 break;
3903 case I40E_AQ_SW_ELEM_TYPE_PA:
3904 sbuf_cat(s, "PA");
3905 break;
3906 case I40E_AQ_SW_ELEM_TYPE_VSI:
3907 sbuf_printf(s, "VSI");
3908 break;
3909 default:
3910 sbuf_cat(s, "?");
3911 break;
3912 }
3913
3914 out:
3915 sbuf_finish(s);
3916 return sbuf_data(s);
3917 }
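
/*
 * Example (illustrative): with the fixed SEID ranges above, SEID 1
 * prints as "EMP", SEID 18 as PF 2 (18 - IXL_SW_SEID_PF_START) and
 * SEID 34 as VF 2; element_type is only consulted for SEIDs outside
 * those ranges, e.g. VEBs created at runtime.
 */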
3918
3919 static int
3920 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3921 {
3922 const struct i40e_aqc_switch_config_element_resp *one, *two;
3923 one = (const struct i40e_aqc_switch_config_element_resp *)a;
3924 two = (const struct i40e_aqc_switch_config_element_resp *)b;
3925
3926 return ((int)one->seid - (int)two->seid);
3927 }
3928
3929 static int
3930 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3931 {
3932 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3933 struct i40e_hw *hw = &pf->hw;
3934 device_t dev = pf->dev;
3935 struct sbuf *buf;
3936 struct sbuf *nmbuf;
3937 enum i40e_status_code status;
3938 int error = 0;
3939 u16 next = 0;
3940 u8 aq_buf[I40E_AQ_LARGE_BUF];
3941
3942 struct i40e_aqc_switch_config_element_resp *elem;
3943 struct i40e_aqc_get_switch_config_resp *sw_config;
3944 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3945
3946 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3947 if (!buf) {
3948 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3949 return (ENOMEM);
3950 }
3951
3952 status = i40e_aq_get_switch_config(hw, sw_config,
3953 sizeof(aq_buf), &next, NULL);
3954 if (status) {
3955 device_printf(dev,
3956 "%s: aq_get_switch_config() error %s, aq error %s\n",
3957 __func__, i40e_stat_str(hw, status),
3958 i40e_aq_str(hw, hw->aq.asq_last_status));
3959 sbuf_delete(buf);
3960 return (EIO);
3961 }
3962 if (next)
3963 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3964 __func__, next);
3965
3966 nmbuf = sbuf_new_auto();
3967 if (!nmbuf) {
3968 device_printf(dev, "Could not allocate sbuf for name output.\n");
3969 sbuf_delete(buf);
3970 return (ENOMEM);
3971 }
3972
3973 /* Sort entries by SEID for display */
3974 qsort(sw_config->element, sw_config->header.num_reported,
3975 sizeof(struct i40e_aqc_switch_config_element_resp),
3976 &ixl_sw_cfg_elem_seid_cmp);
3977
3978 sbuf_cat(buf, "\n");
3979 /* Assuming <= 255 elements in switch */
3980 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3981 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3982 /* Exclude:
3983 * Revision -- all elements are revision 1 for now
3984 */
3985 sbuf_printf(buf,
3986 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n"
3987 " | | | (uplink)\n");
3988 for (int i = 0; i < sw_config->header.num_reported; i++) {
3989 elem = &sw_config->element[i];
3990
3991 // "%4d (%8s) | %8s %8s %#8x",
3992 sbuf_printf(buf, "%4d", elem->seid);
3993 sbuf_cat(buf, " ");
3994 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3995 elem->element_type, elem->seid));
3996 sbuf_cat(buf, " | ");
3997 sbuf_printf(buf, "%4d", elem->uplink_seid);
3998 sbuf_cat(buf, " ");
3999 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4000 0, elem->uplink_seid));
4001 sbuf_cat(buf, " | ");
4002 sbuf_printf(buf, "%4d", elem->downlink_seid);
4003 sbuf_cat(buf, " ");
4004 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4005 0, elem->downlink_seid));
4006 sbuf_cat(buf, " | ");
4007 sbuf_printf(buf, "%8d", elem->connection_type);
4008 if (i < sw_config->header.num_reported - 1)
4009 sbuf_cat(buf, "\n");
4010 }
4011 sbuf_delete(nmbuf);
4012
4013 error = sbuf_finish(buf);
4014 if (error)
4015 device_printf(dev, "Error finishing sbuf: %d\n", error);
4016
4017 sbuf_delete(buf);
4018
4019 return (error);
4020 }
4021
4022 static int
4023 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
4024 {
4025 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4026 struct i40e_hw *hw = &pf->hw;
4027 device_t dev = pf->dev;
4028 int requested_vlan = -1;
4029 enum i40e_status_code status = 0;
4030 int error = 0;
4031
4032 error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
4033 if ((error) || (req->newptr == NULL))
4034 return (error);
4035
4036 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
4037 device_printf(dev, "Flags disallow setting of vlans\n");
4038 return (ENODEV);
4039 }
4040
4041 hw->switch_tag = requested_vlan;
4042 device_printf(dev,
4043 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
4044 hw->switch_tag, hw->first_tag, hw->second_tag);
4045 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
4046 if (status) {
4047 device_printf(dev,
4048 "%s: aq_set_switch_config() error %s, aq error %s\n",
4049 __func__, i40e_stat_str(hw, status),
4050 i40e_aq_str(hw, hw->aq.asq_last_status));
4051 return (status);
4052 }
4053 return (0);
4054 }
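
/*
 * Example usage (illustrative; the sysctl node name is an assumption,
 * it is attached elsewhere in the driver): selecting the 802.1ad
 * S-tag TPID 0x88A8 as the switch tag on an NVM that allows it:
 *   # sysctl dev.ixl.0.debug.switch_vlans=34984
 * (34984 == 0x88A8; the write fails with ENODEV when the
 * I40E_HW_FLAG_802_1AD_CAPABLE flag is not set.)
 */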
4055
4056 static int
4057 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4058 {
4059 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4060 struct i40e_hw *hw = &pf->hw;
4061 device_t dev = pf->dev;
4062 struct sbuf *buf;
4063 int error = 0;
4064 enum i40e_status_code status;
4065 u32 reg;
4066
4067 struct i40e_aqc_get_set_rss_key_data key_data;
4068
4069 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4070 if (!buf) {
4071 device_printf(dev, "Could not allocate sbuf for output.\n");
4072 return (ENOMEM);
4073 }
4074
4075 bzero(&key_data, sizeof(key_data));
4076
4077 sbuf_cat(buf, "\n");
4078 if (hw->mac.type == I40E_MAC_X722) {
4079 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4080 if (status)
4081 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4082 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4083 } else {
4084 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4085 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4086 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
4087 }
4088 }
4089
4090 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4091
4092 error = sbuf_finish(buf);
4093 if (error)
4094 device_printf(dev, "Error finishing sbuf: %d\n", error);
4095 sbuf_delete(buf);
4096
4097 return (error);
4098 }
4099
4100 static void
4101 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4102 {
4103 int i, j, k, width;
4104 char c;
4105
4106 if (length < 1 || buf == NULL) return;
4107
4108 int byte_stride = 16;
4109 int lines = length / byte_stride;
4110 int rem = length % byte_stride;
4111 if (rem > 0)
4112 lines++;
4113
4114 for (i = 0; i < lines; i++) {
4115 width = (rem > 0 && i == lines - 1)
4116 ? rem : byte_stride;
4117
4118 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4119
4120 for (j = 0; j < width; j++)
4121 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4122
4123 if (width < byte_stride) {
4124 for (k = 0; k < (byte_stride - width); k++)
4125 sbuf_printf(sb, " ");
4126 }
4127
4128 if (!text) {
4129 sbuf_printf(sb, "\n");
4130 continue;
4131 }
4132
4133 for (j = 0; j < width; j++) {
4134 c = (char)buf[i * byte_stride + j];
4135 if (c < 32 || c > 126)
4136 sbuf_printf(sb, ".");
4137 else
4138 sbuf_printf(sb, "%c", c);
4139
4140 if (j == width - 1)
4141 sbuf_printf(sb, "\n");
4142 }
4143 }
4144 }
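
/*
 * Illustrative output of ixl_sbuf_print_bytes() for a hypothetical
 * 20-byte buffer with label_offset 0 and text == true:
 *    0 | 49 58 4c 20 62 75 66 66 65 72 20 63 6f 6e 74 2e  IXL buffer cont.
 *   16 | 01 02 03 04                                      ....
 * Each row label is label_offset plus the byte index of the row.
 */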
4145
4146 static int
4147 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4148 {
4149 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4150 struct i40e_hw *hw = &pf->hw;
4151 device_t dev = pf->dev;
4152 struct sbuf *buf;
4153 int error = 0;
4154 enum i40e_status_code status;
4155 u8 hlut[512];
4156 u32 reg;
4157
4158 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4159 if (!buf) {
4160 device_printf(dev, "Could not allocate sbuf for output.\n");
4161 return (ENOMEM);
4162 }
4163
4164 bzero(hlut, sizeof(hlut));
4165 sbuf_cat(buf, "\n");
4166 if (hw->mac.type == I40E_MAC_X722) {
4167 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4168 if (status)
4169 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4170 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4171 } else {
4172 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4173 reg = rd32(hw, I40E_PFQF_HLUT(i));
4174 bcopy(&reg, &hlut[i << 2], 4);
4175 }
4176 }
4177 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4178
4179 error = sbuf_finish(buf);
4180 if (error)
4181 device_printf(dev, "Error finishing sbuf: %d\n", error);
4182 sbuf_delete(buf);
4183
4184 return (error);
4185 }
4186
4187 static int
4188 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4189 {
4190 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4191 struct i40e_hw *hw = &pf->hw;
4192 u64 hena;
4193
4194 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4195 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4196
4197 return sysctl_handle_long(oidp, NULL, hena, req);
4198 }
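
/*
 * Note (per the controller datasheet): each bit in the 64-bit HENA
 * value enables RSS hashing for the packet classifier type (PCTYPE)
 * with that index; e.g. setting the bit numbered
 * I40E_FILTER_PCTYPE_NONF_IPV4_TCP turns on hashing of IPv4/TCP flows.
 */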
4199
4200 /*
4201 * Sysctl to disable firmware's link management
4202 *
4203 * 1 - Disable link management on this port
4204 * 0 - Re-enable link management
4205 *
4206 * On normal NVMs, firmware manages link by default.
4207 */
4208 static int
4209 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4210 {
4211 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4212 struct i40e_hw *hw = &pf->hw;
4213 device_t dev = pf->dev;
4214 int requested_mode = -1;
4215 enum i40e_status_code status = 0;
4216 int error = 0;
4217
4218 /* Read in new mode */
4219 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4220 if ((error) || (req->newptr == NULL))
4221 return (error);
4222 /* Check for sane value */
4223 if (requested_mode < 0 || requested_mode > 1) {
4224 device_printf(dev, "Valid modes are 0 or 1\n");
4225 return (EINVAL);
4226 }
4227
4228 /* Set new mode */
4229 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4230 if (status) {
4231 device_printf(dev,
4232 "%s: Error setting new phy debug mode %s,"
4233 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4234 i40e_aq_str(hw, hw->aq.asq_last_status));
4235 return (EIO);
4236 }
4237
4238 return (0);
4239 }
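
/*
 * Example usage (illustrative; the sysctl node name is an assumption):
 *   # sysctl dev.ixl.0.debug.disable_fw_link_management=1
 * Writing 1 sets bit 4 of the PHY debug command, which tells firmware
 * to stop managing the link on this port; writing 0 clears it again.
 */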
4240
4241 /*
4242 * Read some diagnostic data from a (Q)SFP+ module
4243 *
4244 * SFP A2 QSFP Lower Page
4245 * Temperature 96-97 22-23
4246 * Vcc 98-99 26-27
4247 * TX power 102-103 34-35..40-41
4248 * RX power 104-105 50-51..56-57
4249 */
4250 static int
4251 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4252 {
4253 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4254 device_t dev = pf->dev;
4255 struct sbuf *sbuf;
4256 int error = 0;
4257 u8 output;
4258
4259 if (req->oldptr == NULL) {
4260 error = SYSCTL_OUT(req, 0, 128);
4261 return (error);
4262 }
4263
4264 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4265 if (error) {
4266 device_printf(dev, "Error reading from i2c\n");
4267 return (error);
4268 }
4269
4270 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4271 if (output == 0x3) {
4272 /*
4273 * Check for:
4274 * - Internally calibrated data
4275 * - Diagnostic monitoring is implemented
4276 */
4277 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4278 if (!(output & 0x60)) {
4279 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4280 return (0);
4281 }
4282
4283 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4284
4285 for (u8 offset = 96; offset < 100; offset++) {
4286 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4287 sbuf_printf(sbuf, "%02X ", output);
4288 }
4289 for (u8 offset = 102; offset < 106; offset++) {
4290 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4291 sbuf_printf(sbuf, "%02X ", output);
4292 }
4293 } else if (output == 0xD || output == 0x11) {
4294 /*
4295 * QSFP+ modules are always internally calibrated, and must indicate
4296 * what types of diagnostic monitoring are implemented
4297 */
4298 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4299
4300 for (u8 offset = 22; offset < 24; offset++) {
4301 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4302 sbuf_printf(sbuf, "%02X ", output);
4303 }
4304 for (u8 offset = 26; offset < 28; offset++) {
4305 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4306 sbuf_printf(sbuf, "%02X ", output);
4307 }
4308 /* Read the data from the first lane */
4309 for (u8 offset = 34; offset < 36; offset++) {
4310 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4311 sbuf_printf(sbuf, "%02X ", output);
4312 }
4313 for (u8 offset = 50; offset < 52; offset++) {
4314 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4315 sbuf_printf(sbuf, "%02X ", output);
4316 }
4317 } else {
4318 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4319 return (0);
4320 }
4321
4322 sbuf_finish(sbuf);
4323 sbuf_delete(sbuf);
4324
4325 return (0);
4326 }
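
/*
 * Interpreting the output (per SFF-8472/SFF-8636, assuming internally
 * calibrated modules): the eight bytes printed are big-endian 16-bit
 * raw values in the order temperature, Vcc, TX power, RX power.
 * Temperature is a signed value in 1/256 degree C units, Vcc is in
 * 100 uV units, and TX/RX power are in 0.1 uW units.
 */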
4327
4328 /*
4329 * Sysctl to read a byte from I2C bus.
4330 *
4331 * Input: 32-bit value:
4332 * bits 0-7: device address (0xA0 or 0xA2)
4333 * bits 8-15: offset (0-255)
4334 * bits 16-31: unused
4335 * Output: 8-bit value read
4336 */
4337 static int
4338 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4339 {
4340 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4341 device_t dev = pf->dev;
4342 int input = -1, error = 0;
4343 u8 dev_addr, offset, output;
4344
4345 /* Read in I2C read parameters */
4346 error = sysctl_handle_int(oidp, &input, 0, req);
4347 if ((error) || (req->newptr == NULL))
4348 return (error);
4349 /* Validate device address */
4350 dev_addr = input & 0xFF;
4351 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4352 return (EINVAL);
4353 }
4354 offset = (input >> 8) & 0xFF;
4355
4356 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4357 if (error)
4358 return (error);
4359
4360 device_printf(dev, "%02X\n", output);
4361 return (0);
4362 }
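
/*
 * Example (illustrative; the sysctl node name is an assumption):
 * reading byte 0x60 from the SFP diagnostic page at device address
 * 0xA2 packs the input as (offset << 8) | dev_addr:
 *   (0x60 << 8) | 0xA2 == 0x60A2 == 24738
 *   # sysctl dev.ixl.0.debug.read_i2c_byte=24738
 * The byte read is reported via device_printf() in the system log.
 */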
4363
4364 /*
4365 * Sysctl to write a byte to the I2C bus.
4366 *
4367 * Input: 32-bit value:
4368 * bits 0-7: device address (0xA0 or 0xA2)
4369 * bits 8-15: offset (0-255)
4370 * bits 16-23: value to write
4371 * bits 24-31: unused
4372 * Output: 8-bit value written
4373 */
4374 static int
4375 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4376 {
4377 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4378 device_t dev = pf->dev;
4379 int input = -1, error = 0;
4380 u8 dev_addr, offset, value;
4381
4382 /* Read in I2C write parameters */
4383 error = sysctl_handle_int(oidp, &input, 0, req);
4384 if ((error) || (req->newptr == NULL))
4385 return (error);
4386 /* Validate device address */
4387 dev_addr = input & 0xFF;
4388 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4389 return (EINVAL);
4390 }
4391 offset = (input >> 8) & 0xFF;
4392 value = (input >> 16) & 0xFF;
4393
4394 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4395 if (error)
4396 return (error);
4397
4398 device_printf(dev, "%02X written\n", value);
4399 return (0);
4400 }
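
/*
 * Example (illustrative; the sysctl node name is an assumption):
 * writing the value 0xAB to offset 0x7B at device address 0xA2 packs
 * the input as (value << 16) | (offset << 8) | dev_addr:
 *   (0xAB << 16) | (0x7B << 8) | 0xA2 == 0xAB7BA2 == 11238306
 *   # sysctl dev.ixl.0.debug.write_i2c_byte=11238306
 */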
4401
4402 static int
4403 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4404 u8 bit_pos, int *is_set)
4405 {
4406 device_t dev = pf->dev;
4407 struct i40e_hw *hw = &pf->hw;
4408 enum i40e_status_code status;
4409
4410 if (IXL_PF_IN_RECOVERY_MODE(pf))
4411 return (EIO);
4412
4413 status = i40e_aq_get_phy_capabilities(hw,
4414 FALSE, FALSE, abilities, NULL);
4415 if (status) {
4416 device_printf(dev,
4417 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4418 __func__, i40e_stat_str(hw, status),
4419 i40e_aq_str(hw, hw->aq.asq_last_status));
4420 return (EIO);
4421 }
4422
4423 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4424 return (0);
4425 }
4426
4427 static int
4428 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4429 u8 bit_pos, int set)
4430 {
4431 device_t dev = pf->dev;
4432 struct i40e_hw *hw = &pf->hw;
4433 struct i40e_aq_set_phy_config config;
4434 enum i40e_status_code status;
4435
4436 /* Set new PHY config */
4437 memset(&config, 0, sizeof(config));
4438 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4439 if (set)
4440 config.fec_config |= bit_pos;
4441 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4442 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4443 config.phy_type = abilities->phy_type;
4444 config.phy_type_ext = abilities->phy_type_ext;
4445 config.link_speed = abilities->link_speed;
4446 config.eee_capability = abilities->eee_capability;
4447 config.eeer = abilities->eeer_val;
4448 config.low_power_ctrl = abilities->d3_lpan;
4449 status = i40e_aq_set_phy_config(hw, &config, NULL);
4450
4451 if (status) {
4452 device_printf(dev,
4453 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4454 __func__, i40e_stat_str(hw, status),
4455 i40e_aq_str(hw, hw->aq.asq_last_status));
4456 return (EIO);
4457 }
4458 }
4459
4460 return (0);
4461 }
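
/*
 * The five FEC sysctl handlers below share one read-modify-write
 * pattern: fetch the current FEC configuration bit with
 * ixl_get_fec_config(), let sysctl_handle_int() report or replace it,
 * then apply just that bit with ixl_set_fec_config().
 */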
4462
4463 static int
4464 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4465 {
4466 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4467 int mode, error = 0;
4468
4469 struct i40e_aq_get_phy_abilities_resp abilities;
4470 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4471 if (error)
4472 return (error);
4473 /* Read in new mode */
4474 error = sysctl_handle_int(oidp, &mode, 0, req);
4475 if ((error) || (req->newptr == NULL))
4476 return (error);
4477
4478 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4479 }
4480
4481 static int
4482 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4483 {
4484 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4485 int mode, error = 0;
4486
4487 struct i40e_aq_get_phy_abilities_resp abilities;
4488 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4489 if (error)
4490 return (error);
4491 /* Read in new mode */
4492 error = sysctl_handle_int(oidp, &mode, 0, req);
4493 if ((error) || (req->newptr == NULL))
4494 return (error);
4495
4496 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4497 }
4498
4499 static int
4500 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4501 {
4502 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4503 int mode, error = 0;
4504
4505 struct i40e_aq_get_phy_abilities_resp abilities;
4506 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4507 if (error)
4508 return (error);
4509 /* Read in new mode */
4510 error = sysctl_handle_int(oidp, &mode, 0, req);
4511 if ((error) || (req->newptr == NULL))
4512 return (error);
4513
4514 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4515 }
4516
4517 static int
4518 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4519 {
4520 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4521 int mode, error = 0;
4522
4523 struct i40e_aq_get_phy_abilities_resp abilities;
4524 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4525 if (error)
4526 return (error);
4527 /* Read in new mode */
4528 error = sysctl_handle_int(oidp, &mode, 0, req);
4529 if ((error) || (req->newptr == NULL))
4530 return (error);
4531
4532 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4533 }
4534
4535 static int
4536 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4537 {
4538 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4539 int mode, error = 0;
4540
4541 struct i40e_aq_get_phy_abilities_resp abilities;
4542 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4543 if (error)
4544 return (error);
4545 /* Read in new mode */
4546 error = sysctl_handle_int(oidp, &mode, 0, req);
4547 if ((error) || (req->newptr == NULL))
4548 return (error);
4549
4550 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4551 }
4552
4553 static int
4554 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4555 {
4556 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4557 struct i40e_hw *hw = &pf->hw;
4558 device_t dev = pf->dev;
4559 struct sbuf *buf;
4560 int error = 0;
4561 enum i40e_status_code status;
4562
4563 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4564 if (!buf) {
4565 device_printf(dev, "Could not allocate sbuf for output.\n");
4566 return (ENOMEM);
4567 }
4568
4569 u8 *final_buff;
4570 /* This amount is only necessary if reading the entire cluster into memory */
4571 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
4572 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4573 if (final_buff == NULL) {
4574 device_printf(dev, "Could not allocate memory for output.\n");
4575 goto out;
4576 }
4577 int final_buff_len = 0;
4578
4579 u8 cluster_id = 1;
4580 bool more = true;
4581
4582 u8 dump_buf[4096];
4583 u16 curr_buff_size = 4096;
4584 u8 curr_next_table = 0;
4585 u32 curr_next_index = 0;
4586
4587 u16 ret_buff_size;
4588 u8 ret_next_table;
4589 u32 ret_next_index;
4590
4591 sbuf_cat(buf, "\n");
4592
4593 while (more) {
4594 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4595 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4596 if (status) {
4597 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4598 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4599 goto free_out;
4600 }
4601
4602 /* copy info out of temp buffer */
4603 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4604 final_buff_len += ret_buff_size;
4605
4606 if (ret_next_table != curr_next_table) {
4607 /* We're done with the current table; we can dump out read data. */
4608 sbuf_printf(buf, "%d:", curr_next_table);
4609 int bytes_printed = 0;
4610 while (bytes_printed <= final_buff_len) {
4611 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4612 bytes_printed += 16;
4613 }
4614 sbuf_cat(buf, "\n");
4615
4616 /* The entire cluster has been read; we're finished */
4617 if (ret_next_table == 0xFF)
4618 break;
4619
4620 /* Otherwise clear the output buffer and continue reading */
4621 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4622 final_buff_len = 0;
4623 }
4624
4625 if (ret_next_index == 0xFFFFFFFF)
4626 ret_next_index = 0;
4627
4628 bzero(dump_buf, sizeof(dump_buf));
4629 curr_next_table = ret_next_table;
4630 curr_next_index = ret_next_index;
4631 }
4632
4633 free_out:
4634 free(final_buff, M_IXL);
4635 out:
4636 error = sbuf_finish(buf);
4637 if (error)
4638 device_printf(dev, "Error finishing sbuf: %d\n", error);
4639 sbuf_delete(buf);
4640
4641 return (error);
4642 }
4643
4644 static int
4645 ixl_start_fw_lldp(struct ixl_pf *pf)
4646 {
4647 struct i40e_hw *hw = &pf->hw;
4648 enum i40e_status_code status;
4649
4650 status = i40e_aq_start_lldp(hw, false, NULL);
4651 if (status != I40E_SUCCESS) {
4652 switch (hw->aq.asq_last_status) {
4653 case I40E_AQ_RC_EEXIST:
4654 device_printf(pf->dev,
4655 "FW LLDP agent is already running\n");
4656 break;
4657 case I40E_AQ_RC_EPERM:
4658 device_printf(pf->dev,
4659 "Device configuration forbids SW from starting "
4660 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4661 "attribute to \"Enabled\" to use this sysctl\n");
4662 return (EINVAL);
4663 default:
4664 device_printf(pf->dev,
4665 "Starting FW LLDP agent failed: error: %s, %s\n",
4666 i40e_stat_str(hw, status),
4667 i40e_aq_str(hw, hw->aq.asq_last_status));
4668 return (EINVAL);
4669 }
4670 }
4671
4672 ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4673 return (0);
4674 }
4675
4676 static int
4677 ixl_stop_fw_lldp(struct ixl_pf *pf)
4678 {
4679 struct i40e_hw *hw = &pf->hw;
4680 device_t dev = pf->dev;
4681 enum i40e_status_code status;
4682
4683 if (hw->func_caps.npar_enable != 0) {
4684 device_printf(dev,
4685 "Disabling FW LLDP agent is not supported on this device\n");
4686 return (EINVAL);
4687 }
4688
4689 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4690 device_printf(dev,
4691 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4692 return (EINVAL);
4693 }
4694
4695 status = i40e_aq_stop_lldp(hw, true, false, NULL);
4696 if (status != I40E_SUCCESS) {
4697 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4698 device_printf(dev,
4699 "Disabling FW LLDP agent failed: error: %s, %s\n",
4700 i40e_stat_str(hw, status),
4701 i40e_aq_str(hw, hw->aq.asq_last_status));
4702 return (EINVAL);
4703 }
4704
4705 device_printf(dev, "FW LLDP agent is already stopped\n");
4706 }
4707
4708 i40e_aq_set_dcb_parameters(hw, true, NULL);
4709 ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4710 return (0);
4711 }
4712
4713 static int
4714 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4715 {
4716 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4717 int state, new_state, error = 0;
4718
4719 state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4720
4721 /* Read in new mode */
4722 error = sysctl_handle_int(oidp, &new_state, 0, req);
4723 if ((error) || (req->newptr == NULL))
4724 return (error);
4725
4726 /* Already in requested state */
4727 if (new_state == state)
4728 return (error);
4729
4730 if (new_state == 0)
4731 return ixl_stop_fw_lldp(pf);
4732
4733 return ixl_start_fw_lldp(pf);
4734 }
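
/*
 * Example usage (illustrative; the sysctl node name is an assumption):
 *   # sysctl dev.ixl.0.fw_lldp=0
 * stops the firmware LLDP agent (and applies the DCB parameters AQ
 * command) so a software LLDP/DCBX agent can run; writing 1 restarts it.
 */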
4735
4736 static int
4737 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4738 {
4739 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4740 int state, new_state;
4741 int sysctl_handle_status = 0;
4742 enum i40e_status_code cmd_status;
4743
4744 /* Init states' values */
4745 state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED);
4746
4747 /* Get requested mode */
4748 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4749 if ((sysctl_handle_status) || (req->newptr == NULL))
4750 return (sysctl_handle_status);
4751
4752 /* Check if state has changed */
4753 if (new_state == state)
4754 return (0);
4755
4756 /* Set new state */
4757 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4758
4759 /* Save new state or report error */
4760 if (!cmd_status) {
4761 if (new_state == 0)
4762 ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);
4763 else
4764 ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
4765 } else if (cmd_status == I40E_ERR_CONFIG)
4766 return (EPERM);
4767 else
4768 return (EIO);
4769
4770 return (0);
4771 }
4772
4773 static int
4774 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4775 {
4776 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4777 int error, state;
4778
4779 state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4780
4781 error = sysctl_handle_int(oidp, &state, 0, req);
4782 if ((error) || (req->newptr == NULL))
4783 return (error);
4784
4785 if (state == 0)
4786 ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4787 else
4788 ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4789
4790 return (0);
4791 }
4792
4793
4794 int
4795 ixl_attach_get_link_status(struct ixl_pf *pf)
4796 {
4797 struct i40e_hw *hw = &pf->hw;
4798 device_t dev = pf->dev;
4799 enum i40e_status_code status;
4800
4801 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4802 (hw->aq.fw_maj_ver < 4)) {
4803 i40e_msec_delay(75);
4804 status = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4805 if (status != I40E_SUCCESS) {
4806 device_printf(dev,
4807 "%s link restart failed status: %s, aq_err=%s\n",
4808 __func__, i40e_stat_str(hw, status),
4809 i40e_aq_str(hw, hw->aq.asq_last_status));
4810 return (EINVAL);
4811 }
4812 }
4813
4814 /* Determine link state */
4815 hw->phy.get_link_info = TRUE;
4816 status = i40e_get_link_status(hw, &pf->link_up);
4817 if (status != I40E_SUCCESS) {
4818 device_printf(dev,
4819 "%s get link status, status: %s aq_err=%s\n",
4820 __func__, i40e_stat_str(hw, status),
4821 i40e_aq_str(hw, hw->aq.asq_last_status));
4822 /*
4823 * Most probably FW has not finished configuring PHY.
4824 * Retry periodically in a timer callback.
4825 */
4826 ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
4827 pf->link_poll_start = getsbinuptime();
4828 return (EAGAIN);
4829 }
4830 ixl_dbg_link(pf, "%s link_up: %d\n", __func__, pf->link_up);
4831
4832 /* Flow Control mode not set by user, read current FW settings */
4833 if (pf->fc == -1)
4834 pf->fc = hw->fc.current_mode;
4835
4836 return (0);
4837 }
4838
4839 static int
4840 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4841 {
4842 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4843 int requested = 0, error = 0;
4844
4845 /* Read in new mode */
4846 error = sysctl_handle_int(oidp, &requested, 0, req);
4847 if ((error) || (req->newptr == NULL))
4848 return (error);
4849
4850 /* Initiate the PF reset later in the admin task */
4851 ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ);
4852
4853 return (error);
4854 }
4855
4856 static int
4857 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4858 {
4859 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4860 struct i40e_hw *hw = &pf->hw;
4861 int requested = 0, error = 0;
4862
4863 /* Read in new mode */
4864 error = sysctl_handle_int(oidp, &requested, 0, req);
4865 if ((error) || (req->newptr == NULL))
4866 return (error);
4867
4868 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4869
4870 return (error);
4871 }
4872
4873 static int
4874 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4875 {
4876 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4877 struct i40e_hw *hw = &pf->hw;
4878 int requested = 0, error = 0;
4879
4880 /* Read in new mode */
4881 error = sysctl_handle_int(oidp, &requested, 0, req);
4882 if ((error) || (req->newptr == NULL))
4883 return (error);
4884
4885 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4886
4887 return (error);
4888 }
4889
4890 /*
4891 * Print out mapping of TX queue indexes and Rx queue indexes
4892 * to MSI-X vectors.
4893 */
4894 static int
4895 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4896 {
4897 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4898 struct ixl_vsi *vsi = &pf->vsi;
4899 struct i40e_hw *hw = vsi->hw;
4900 device_t dev = pf->dev;
4901 struct sbuf *buf;
4902 int error = 0;
4903
4904 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4905 struct ixl_tx_queue *tx_que = vsi->tx_queues;
4906
4907 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4908 if (!buf) {
4909 device_printf(dev, "Could not allocate sbuf for output.\n");
4910 return (ENOMEM);
4911 }
4912
4913 sbuf_cat(buf, "\n");
4914 for (int i = 0; i < vsi->num_rx_queues; i++) {
4915 rx_que = &vsi->rx_queues[i];
4916 sbuf_printf(buf,
4917 "(rxq %3d): %d LNKLSTN: %08x QINT_RQCTL: %08x\n",
4918 i, rx_que->msix,
4919 rd32(hw, I40E_PFINT_LNKLSTN(rx_que->msix - 1)),
4920 rd32(hw, I40E_QINT_RQCTL(rx_que->msix - 1)));
4921 }
4922 for (int i = 0; i < vsi->num_tx_queues; i++) {
4923 tx_que = &vsi->tx_queues[i];
4924 sbuf_printf(buf, "(txq %3d): %d QINT_TQCTL: %08x\n",
4925 i, tx_que->msix,
4926 rd32(hw, I40E_QINT_TQCTL(tx_que->msix - 1)));
4927 }
4928
4929 error = sbuf_finish(buf);
4930 if (error)
4931 device_printf(dev, "Error finishing sbuf: %d\n", error);
4932 sbuf_delete(buf);
4933
4934 return (error);
4935 }
4936
4937 static int
4938 ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS)
4939 {
4940 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4941 struct ixl_vsi *vsi = &pf->vsi;
4942 struct i40e_hw *hw = vsi->hw;
4943 device_t dev = pf->dev;
4944 struct sbuf *buf;
4945 int error = 0;
4946
4947 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4948
4949 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4950 if (!buf) {
4951 device_printf(dev, "Could not allocate sbuf for output.\n");
4952 return (ENOMEM);
4953 }
4954
4955 sbuf_cat(buf, "\n");
4956 for (int i = 0; i < vsi->num_rx_queues; i++) {
4957 rx_que = &vsi->rx_queues[i];
4958 sbuf_printf(buf,
4959 "(rxq %3d): %d PFINT_DYN_CTLN: %08x\n",
4960 i, rx_que->msix,
4961 rd32(hw, I40E_PFINT_DYN_CTLN(rx_que->msix - 1)));
4962 }
4963
4964 error = sbuf_finish(buf);
4965 if (error)
4966 device_printf(dev, "Error finishing sbuf: %d\n", error);
4967 sbuf_delete(buf);
4968
4969 return (error);
4970 }
4971