1 /******************************************************************************
2
3 Copyright (c) 2013-2018, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33
34
35 #include "ixl_pf.h"
36
37 #ifdef PCI_IOV
38 #include "ixl_pf_iov.h"
39 #endif
40
41 #ifdef IXL_IW
42 #include "ixl_iw.h"
43 #include "ixl_iw_int.h"
44 #endif
45
46 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
47 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
48 static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
49 static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
50 static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
51 static char * ixl_switch_element_string(struct sbuf *, u8, u16);
52 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
53
54 /* Sysctls */
55 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
56 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
59 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
60 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
62
63 static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
65
66 /* Debug Sysctls */
67 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
85 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
87 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
88
/* Debug Sysctls: reset controls and queue interrupt state */
90 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
91 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
92 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
93 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
94 static int ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS);
95 #ifdef IXL_DEBUG
96 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
97 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
98 #endif
99
100 #ifdef IXL_IW
101 extern int ixl_enable_iwarp;
102 extern int ixl_limit_iwarp_msix;
103 #endif
104
/*
 * Flow-control mode names. Indices 0-3 (None/Rx/Tx/Full) are selected from
 * the negotiated pause bits in ixl_link_up_msg(); "Priority" and "Default"
 * are presumably used by the flow-control sysctl handlers — not visible here.
 */
static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

/* FEC mode names: [0] = RS-FEC, [1] = FC-FEC/BASE-R, [2] = no FEC */
static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};
119
120 /* Functions for setting and checking driver state. Note the functions take
121 * bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32
122 * operations require bitmasks. This can easily lead to programming error, so
123 * we provide wrapper functions to avoid this.
124 */
125
126 /**
127 * ixl_set_state - Set the specified state
128 * @s: the state bitmap
129 * @bit: the state to set
130 *
131 * Atomically update the state bitmap with the specified bit set.
132 */
133 inline void
ixl_set_state(volatile u32 * s,enum ixl_state bit)134 ixl_set_state(volatile u32 *s, enum ixl_state bit)
135 {
136 /* atomic_set_32 expects a bitmask */
137 atomic_set_32(s, BIT(bit));
138 }
139
140 /**
141 * ixl_clear_state - Clear the specified state
142 * @s: the state bitmap
143 * @bit: the state to clear
144 *
145 * Atomically update the state bitmap with the specified bit cleared.
146 */
147 inline void
ixl_clear_state(volatile u32 * s,enum ixl_state bit)148 ixl_clear_state(volatile u32 *s, enum ixl_state bit)
149 {
150 /* atomic_clear_32 expects a bitmask */
151 atomic_clear_32(s, BIT(bit));
152 }
153
154 /**
155 * ixl_test_state - Test the specified state
156 * @s: the state bitmap
157 * @bit: the bit to test
158 *
159 * Return true if the state is set, false otherwise. Use this only if the flow
160 * does not need to update the state. If you must update the state as well,
161 * prefer ixl_testandset_state.
162 */
163 inline bool
ixl_test_state(volatile u32 * s,enum ixl_state bit)164 ixl_test_state(volatile u32 *s, enum ixl_state bit)
165 {
166 return !!(*s & BIT(bit));
167 }
168
169 /**
170 * ixl_testandset_state - Test and set the specified state
171 * @s: the state bitmap
172 * @bit: the bit to test
173 *
174 * Atomically update the state bitmap, setting the specified bit. Returns the
175 * previous value of the bit.
176 */
177 inline u32
ixl_testandset_state(volatile u32 * s,enum ixl_state bit)178 ixl_testandset_state(volatile u32 *s, enum ixl_state bit)
179 {
180 /* atomic_testandset_32 expects a bit position, as opposed to bitmask
181 expected by other atomic functions */
182 return atomic_testandset_32(s, bit);
183 }
184
185 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
186
187 /*
188 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
189 */
190 void
ixl_nvm_version_str(struct i40e_hw * hw,struct sbuf * buf)191 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
192 {
193 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
194 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
195 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
196
197 sbuf_printf(buf,
198 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
199 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
200 hw->aq.api_maj_ver, hw->aq.api_min_ver,
201 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
202 IXL_NVM_VERSION_HI_SHIFT,
203 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
204 IXL_NVM_VERSION_LO_SHIFT,
205 hw->nvm.eetrack,
206 oem_ver, oem_build, oem_patch);
207 }
208
209 void
ixl_print_nvm_version(struct ixl_pf * pf)210 ixl_print_nvm_version(struct ixl_pf *pf)
211 {
212 struct i40e_hw *hw = &pf->hw;
213 device_t dev = pf->dev;
214 struct sbuf *sbuf;
215
216 sbuf = sbuf_new_auto();
217 ixl_nvm_version_str(hw, sbuf);
218 sbuf_finish(sbuf);
219 device_printf(dev, "%s\n", sbuf_data(sbuf));
220 sbuf_delete(sbuf);
221 }
222
223 /**
224 * ixl_get_fw_mode - Check the state of FW
225 * @hw: device hardware structure
226 *
227 * Identify state of FW. It might be in a recovery mode
228 * which limits functionality and requires special handling
229 * from the driver.
230 *
231 * @returns FW mode (normal, recovery, unexpected EMP reset)
232 */
233 static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf * pf)234 ixl_get_fw_mode(struct ixl_pf *pf)
235 {
236 struct i40e_hw *hw = &pf->hw;
237 enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
238 u32 fwsts;
239
240 #ifdef IXL_DEBUG
241 if (pf->recovery_mode)
242 return IXL_FW_MODE_RECOVERY;
243 #endif
244 fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
245
246 /* Is set and has one of expected values */
247 if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
248 fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
249 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
250 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
251 fw_mode = IXL_FW_MODE_RECOVERY;
252 else {
253 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
254 fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
255 fw_mode = IXL_FW_MODE_UEMPR;
256 }
257 return (fw_mode);
258 }
259
260 /**
261 * ixl_pf_reset - Reset the PF
262 * @pf: PF structure
263 *
264 * Ensure that FW is in the right state and do the reset
265 * if needed.
266 *
267 * @returns zero on success, or an error code on failure.
268 */
269 int
ixl_pf_reset(struct ixl_pf * pf)270 ixl_pf_reset(struct ixl_pf *pf)
271 {
272 struct i40e_hw *hw = &pf->hw;
273 enum i40e_status_code status;
274 enum ixl_fw_mode fw_mode;
275
276 fw_mode = ixl_get_fw_mode(pf);
277 ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
278 if (fw_mode == IXL_FW_MODE_RECOVERY) {
279 ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
280 /* Don't try to reset device if it's in recovery mode */
281 return (0);
282 }
283
284 status = i40e_pf_reset(hw);
285 if (status == I40E_SUCCESS)
286 return (0);
287
288 /* Check FW mode again in case it has changed while
289 * waiting for reset to complete */
290 fw_mode = ixl_get_fw_mode(pf);
291 ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
292 if (fw_mode == IXL_FW_MODE_RECOVERY) {
293 ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
294 return (0);
295 }
296
297 if (fw_mode == IXL_FW_MODE_UEMPR)
298 device_printf(pf->dev,
299 "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
300 else
301 device_printf(pf->dev, "PF reset failure %s\n",
302 i40e_stat_str(hw, status));
303 return (EIO);
304 }
305
306 /**
307 * ixl_setup_hmc - Setup LAN Host Memory Cache
308 * @pf: PF structure
309 *
310 * Init and configure LAN Host Memory Cache
311 *
312 * @returns 0 on success, EIO on error
313 */
314 int
ixl_setup_hmc(struct ixl_pf * pf)315 ixl_setup_hmc(struct ixl_pf *pf)
316 {
317 struct i40e_hw *hw = &pf->hw;
318 enum i40e_status_code status;
319
320 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
321 hw->func_caps.num_rx_qp, 0, 0);
322 if (status) {
323 device_printf(pf->dev, "init_lan_hmc failed: %s\n",
324 i40e_stat_str(hw, status));
325 return (EIO);
326 }
327
328 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
329 if (status) {
330 device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
331 i40e_stat_str(hw, status));
332 return (EIO);
333 }
334
335 return (0);
336 }
337
338 /**
339 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
340 * @pf: PF structure
341 *
342 * Shutdown Host Memory Cache if configured.
343 *
344 */
345 void
ixl_shutdown_hmc(struct ixl_pf * pf)346 ixl_shutdown_hmc(struct ixl_pf *pf)
347 {
348 struct i40e_hw *hw = &pf->hw;
349 enum i40e_status_code status;
350
351 /* HMC not configured, no need to shutdown */
352 if (hw->hmc.hmc_obj == NULL)
353 return;
354
355 status = i40e_shutdown_lan_hmc(hw);
356 if (status)
357 device_printf(pf->dev,
358 "Shutdown LAN HMC failed with code %s\n",
359 i40e_stat_str(hw, status));
360 }
/*
 * Write PF ITR values to queue ITR registers.
 * Applies the PF's Tx ITR first, then the Rx ITR, to every LAN VSI queue.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}
370
371 /*********************************************************************
372 *
373 * Get the hardware capabilities
374 *
375 **********************************************************************/
376
/*
 * ixl_get_hw_capabilities - discover device/function capabilities via the AQ
 * @pf: PF structure
 *
 * Populates hw->func_caps through i40e_aq_discover_capabilities(), probes
 * for an I2C interface, selects the driver's I2C access functions, and
 * prints a capability summary.
 *
 * Returns 0 on success, ENOMEM on allocation failure, ENODEV if capability
 * discovery fails.
 */
int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	/* In recovery mode only a minimal feature set is available */
	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	/* Initial guess: room for 40 capability elements */
	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	/*
	 * If the firmware reports the buffer was too small, retry exactly
	 * once with the size it told us it needs ('needed').
	 */
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		/* Prefer AQ-mediated access when the FW supports it */
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Keep link active by default */
	ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}
470
471 /* For the set_advertise sysctl */
472 void
ixl_set_initial_advertised_speeds(struct ixl_pf * pf)473 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
474 {
475 device_t dev = pf->dev;
476 int err;
477
478 /* Make sure to initialize the device to the complete list of
479 * supported speeds on driver load, to ensure unloading and
480 * reloading the driver will restore this value.
481 */
482 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
483 if (err) {
484 /* Non-fatal error */
485 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
486 __func__, err);
487 return;
488 }
489
490 pf->advertised_speed =
491 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
492 }
493
494 int
ixl_teardown_hw_structs(struct ixl_pf * pf)495 ixl_teardown_hw_structs(struct ixl_pf *pf)
496 {
497 enum i40e_status_code status = 0;
498 struct i40e_hw *hw = &pf->hw;
499 device_t dev = pf->dev;
500
501 /* Shutdown LAN HMC */
502 if (hw->hmc.hmc_obj) {
503 status = i40e_shutdown_lan_hmc(hw);
504 if (status) {
505 device_printf(dev,
506 "init: LAN HMC shutdown failure; status %s\n",
507 i40e_stat_str(hw, status));
508 goto err_out;
509 }
510 }
511
512 /* Shutdown admin queue */
513 ixl_disable_intr0(hw);
514 status = i40e_shutdown_adminq(hw);
515 if (status)
516 device_printf(dev,
517 "init: Admin Queue shutdown failure; status %s\n",
518 i40e_stat_str(hw, status));
519
520 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
521 err_out:
522 return (status);
523 }
524
525 /*
526 ** Creates new filter with given MAC address and VLAN ID
527 */
528 static struct ixl_mac_filter *
ixl_new_filter(struct ixl_ftl_head * headp,const u8 * macaddr,s16 vlan)529 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
530 {
531 struct ixl_mac_filter *f;
532
533 /* create a new empty filter */
534 f = malloc(sizeof(struct ixl_mac_filter),
535 M_IXL, M_NOWAIT | M_ZERO);
536 if (f) {
537 LIST_INSERT_HEAD(headp, f, ftle);
538 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
539 f->vlan = vlan;
540 }
541
542 return (f);
543 }
544
545 /**
546 * ixl_free_filters - Free all filters in given list
547 * headp - pointer to list head
548 *
549 * Frees memory used by each entry in the list.
550 * Does not remove filters from HW.
551 */
552 void
ixl_free_filters(struct ixl_ftl_head * headp)553 ixl_free_filters(struct ixl_ftl_head *headp)
554 {
555 struct ixl_mac_filter *f, *nf;
556
557 f = LIST_FIRST(headp);
558 while (f != NULL) {
559 nf = LIST_NEXT(f, ftle);
560 free(f, M_IXL);
561 f = nf;
562 }
563
564 LIST_INIT(headp);
565 }
566
567 static u_int
ixl_add_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)568 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
569 {
570 struct ixl_add_maddr_arg *ama = arg;
571 struct ixl_vsi *vsi = ama->vsi;
572 const u8 *macaddr = (u8*)LLADDR(sdl);
573 struct ixl_mac_filter *f;
574
575 /* Does one already exist */
576 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
577 if (f != NULL)
578 return (0);
579
580 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
581 if (f == NULL) {
582 device_printf(vsi->dev, "WARNING: no filter available!!\n");
583 return (0);
584 }
585 f->flags |= IXL_FILTER_MC;
586
587 return (1);
588 }
589
590 /*********************************************************************
591 * Filter Routines
592 *
593 * Routines for multicast and vlan filter management.
594 *
595 *********************************************************************/
596
597 /**
598 * ixl_add_multi - Add multicast filters to the hardware
599 * @vsi: The VSI structure
600 *
601 * In case number of multicast filters in the IFP exceeds 127 entries,
602 * multicast promiscuous mode will be enabled and the filters will be removed
603 * from the hardware
604 */
605 void
ixl_add_multi(struct ixl_vsi * vsi)606 ixl_add_multi(struct ixl_vsi *vsi)
607 {
608 if_t ifp = vsi->ifp;
609 int mcnt = 0;
610 struct ixl_add_maddr_arg cb_arg;
611
612 IOCTL_DEBUGOUT("ixl_add_multi: begin");
613
614 /*
615 * There is no need to check if the number of multicast addresses
616 * exceeds the MAX_MULTICAST_ADDR threshold and set promiscuous mode
617 * here, as all callers already handle this case.
618 */
619
620 cb_arg.vsi = vsi;
621 LIST_INIT(&cb_arg.to_add);
622
623 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
624 if (mcnt > 0)
625 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
626
627 IOCTL_DEBUGOUT("ixl_add_multi: end");
628 }
629
630 static u_int
ixl_match_maddr(void * arg,struct sockaddr_dl * sdl,u_int cnt)631 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
632 {
633 struct ixl_mac_filter *f = arg;
634
635 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
636 return (1);
637 else
638 return (0);
639 }
640
641 /**
642 * ixl_dis_multi_promisc - Disable multicast promiscuous mode
643 * @vsi: The VSI structure
644 * @vsi_mcnt: Number of multicast filters in the VSI
645 *
646 * Disable multicast promiscuous mode based on number of entries in the IFP
647 * and the VSI, then re-add multicast filters.
648 *
649 */
650 static void
ixl_dis_multi_promisc(struct ixl_vsi * vsi,int vsi_mcnt)651 ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt)
652 {
653 struct ifnet *ifp = vsi->ifp;
654 struct i40e_hw *hw = vsi->hw;
655 int ifp_mcnt = 0;
656 enum i40e_status_code status;
657
658 /*
659 * Check if multicast promiscuous mode was actually enabled.
660 * If promiscuous mode was not enabled, don't attempt to disable it.
661 * Also, don't disable if IFF_PROMISC or IFF_ALLMULTI is set.
662 */
663 if (!(vsi->flags & IXL_FLAGS_MC_PROMISC) ||
664 (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)))
665 return;
666
667 ifp_mcnt = if_llmaddr_count(ifp);
668 /*
669 * Equal lists or empty ifp list mean the list has not been changed
670 * and in such case avoid disabling multicast promiscuous mode as it
671 * was not previously enabled. Case where multicast promiscuous mode has
672 * been enabled is when vsi_mcnt == 0 && ifp_mcnt > 0.
673 */
674 if (ifp_mcnt == vsi_mcnt || ifp_mcnt == 0 ||
675 ifp_mcnt >= MAX_MULTICAST_ADDR)
676 return;
677
678 status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
679 FALSE, NULL);
680 if (status != I40E_SUCCESS) {
681 if_printf(ifp, "Failed to disable multicast promiscuous "
682 "mode, status: %s\n", i40e_stat_str(hw, status));
683
684 return;
685 }
686
687 /* Clear the flag since promiscuous mode is now disabled */
688 vsi->flags &= ~IXL_FLAGS_MC_PROMISC;
689 if_printf(ifp, "Disabled multicast promiscuous mode\n");
690
691 ixl_add_multi(vsi);
692 }
693
/**
 * ixl_del_multi - Delete multicast filters from the hardware
 * @vsi: The VSI structure
 * @all: Bool to determine if all the multicast filters should be removed
 *
 * Removes hardware filters for multicast addresses no longer present on the
 * interface (or all multicast filters when @all is true). When nothing was
 * removed, checks whether multicast promiscuous mode can be disabled.
 */
void
ixl_del_multi(struct ixl_vsi *vsi, bool all)
{
	int to_del_cnt = 0, vsi_mcnt = 0;
	if_t ifp = vsi->ifp;
	struct ixl_mac_filter *f, *fn;
	struct ixl_ftl_head to_del;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	LIST_INIT(&to_del);
	/* Search for removed multicast addresses */
	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
		if ((f->flags & IXL_FILTER_MC) == 0)
			continue;

		/* Count all the multicast filters in the VSI for comparison */
		vsi_mcnt++;

		/* Keep the filter if the address is still on the interface */
		if (!all && if_foreach_llmaddr(ifp, ixl_match_maddr, f) != 0)
			continue;

		/* Move the stale filter onto the removal list */
		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	/*
	 * NOTE(review): when filters were removed, the function returns
	 * without calling ixl_dis_multi_promisc() — presumably intentional
	 * (a shrinking list cannot be the promisc-enable case), but worth
	 * confirming against the promisc-enable path.
	 */
	if (to_del_cnt > 0) {
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
		return;
	}

	ixl_dis_multi_promisc(vsi, vsi_mcnt);

	IOCTL_DEBUGOUT("ixl_del_multi: end");
}
739
740 void
ixl_link_up_msg(struct ixl_pf * pf)741 ixl_link_up_msg(struct ixl_pf *pf)
742 {
743 struct i40e_hw *hw = &pf->hw;
744 if_t ifp = pf->vsi.ifp;
745 char *req_fec_string, *neg_fec_string;
746 u8 fec_abilities;
747
748 fec_abilities = hw->phy.link_info.req_fec_info;
749 /* If both RS and KR are requested, only show RS */
750 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
751 req_fec_string = ixl_fec_string[0];
752 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
753 req_fec_string = ixl_fec_string[1];
754 else
755 req_fec_string = ixl_fec_string[2];
756
757 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
758 neg_fec_string = ixl_fec_string[0];
759 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
760 neg_fec_string = ixl_fec_string[1];
761 else
762 neg_fec_string = ixl_fec_string[2];
763
764 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
765 if_name(ifp),
766 ixl_link_speed_string(hw->phy.link_info.link_speed),
767 req_fec_string, neg_fec_string,
768 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
769 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
770 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
771 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
772 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
773 ixl_fc_string[1] : ixl_fc_string[0]);
774 }
775
/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 * Programs MSI-X vector 0 (the "other" causes vector): which events may
 * raise it, its (empty) queue list, its throttle rate, and re-arms it.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
	rd32(hw, I40E_PFINT_ICR0);        /* read to clear */

	/* Enable the non-queue interrupt causes serviced on vector 0 */
	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	/* Re-arm vector 0 */
	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
814
815 void
ixl_add_ifmedia(struct ifmedia * media,u64 phy_types)816 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
817 {
818 /* Display supported media types */
819 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
820 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
821
822 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
823 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
824 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
825 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
826 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
827 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
828
829 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
830 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
831
832 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
833 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
834
835 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
836 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
837 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
838 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
839
840 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
841 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
842 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
843 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
844 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
845 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
846
847 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
848 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
849 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
850 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
851 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
852 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
853 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
854 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
855 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
856 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
857
858 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
859 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
860
861 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
862 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
863 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
864 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
865 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
866 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
867 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
868 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
869 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
870 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
871 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
872
873 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
874 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
875
876 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
877 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
878 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
879 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
880
881 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
882 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
883 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
884 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
885 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
886 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
887 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
888 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
889 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
890 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
891 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
892 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
893 }
894
895 /*********************************************************************
896 *
897 * Get Firmware Switch configuration
898 * - this will need to be more robust when more complex
899 * switch configurations are enabled.
900 *
901 **********************************************************************/
902 int
ixl_switch_config(struct ixl_pf * pf)903 ixl_switch_config(struct ixl_pf *pf)
904 {
905 struct i40e_hw *hw = &pf->hw;
906 struct ixl_vsi *vsi = &pf->vsi;
907 device_t dev = iflib_get_dev(vsi->ctx);
908 struct i40e_aqc_get_switch_config_resp *sw_config;
909 u8 aq_buf[I40E_AQ_LARGE_BUF];
910 int ret;
911 u16 next = 0;
912
913 memset(&aq_buf, 0, sizeof(aq_buf));
914 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
915 ret = i40e_aq_get_switch_config(hw, sw_config,
916 sizeof(aq_buf), &next, NULL);
917 if (ret) {
918 device_printf(dev, "aq_get_switch_config() failed, error %d,"
919 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
920 return (ret);
921 }
922 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
923 device_printf(dev,
924 "Switch config: header reported: %d in structure, %d total\n",
925 LE16_TO_CPU(sw_config->header.num_reported),
926 LE16_TO_CPU(sw_config->header.num_total));
927 for (int i = 0;
928 i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
929 device_printf(dev,
930 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
931 sw_config->element[i].element_type,
932 LE16_TO_CPU(sw_config->element[i].seid),
933 LE16_TO_CPU(sw_config->element[i].uplink_seid),
934 LE16_TO_CPU(sw_config->element[i].downlink_seid));
935 }
936 }
937 /* Simplified due to a single VSI */
938 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
939 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
940 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
941 return (ret);
942 }
943
944 void
ixl_vsi_add_sysctls(struct ixl_vsi * vsi,const char * sysctl_name,bool queues_sysctls)945 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
946 {
947 struct sysctl_oid *tree;
948 struct sysctl_oid_list *child;
949 struct sysctl_oid_list *vsi_list;
950
951 tree = device_get_sysctl_tree(vsi->dev);
952 child = SYSCTL_CHILDREN(tree);
953 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
954 CTLFLAG_RD, NULL, "VSI Number");
955
956 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
957 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
958
959 /* Copy of netstat RX errors counter for validation purposes */
960 SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
961 CTLFLAG_RD, &vsi->ierrors,
962 "RX packet errors");
963
964 if (queues_sysctls)
965 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
966 }
967
968 /*
969 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
970 * Writes to the ITR registers immediately.
971 */
972 static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)973 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
974 {
975 struct ixl_pf *pf = (struct ixl_pf *)arg1;
976 device_t dev = pf->dev;
977 int error = 0;
978 int requested_tx_itr;
979
980 requested_tx_itr = pf->tx_itr;
981 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
982 if ((error) || (req->newptr == NULL))
983 return (error);
984 if (pf->dynamic_tx_itr) {
985 device_printf(dev,
986 "Cannot set TX itr value while dynamic TX itr is enabled\n");
987 return (EINVAL);
988 }
989 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
990 device_printf(dev,
991 "Invalid TX itr value; value must be between 0 and %d\n",
992 IXL_MAX_ITR);
993 return (EINVAL);
994 }
995
996 pf->tx_itr = requested_tx_itr;
997 ixl_configure_tx_itr(pf);
998
999 return (error);
1000 }
1001
1002 /*
1003 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
1004 * Writes to the ITR registers immediately.
1005 */
1006 static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)1007 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
1008 {
1009 struct ixl_pf *pf = (struct ixl_pf *)arg1;
1010 device_t dev = pf->dev;
1011 int error = 0;
1012 int requested_rx_itr;
1013
1014 requested_rx_itr = pf->rx_itr;
1015 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
1016 if ((error) || (req->newptr == NULL))
1017 return (error);
1018 if (pf->dynamic_rx_itr) {
1019 device_printf(dev,
1020 "Cannot set RX itr value while dynamic RX itr is enabled\n");
1021 return (EINVAL);
1022 }
1023 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
1024 device_printf(dev,
1025 "Invalid RX itr value; value must be between 0 and %d\n",
1026 IXL_MAX_ITR);
1027 return (EINVAL);
1028 }
1029
1030 pf->rx_itr = requested_rx_itr;
1031 ixl_configure_rx_itr(pf);
1032
1033 return (error);
1034 }
1035
1036 void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list * ctx,struct sysctl_oid_list * child,struct i40e_hw_port_stats * stats)1037 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
1038 struct sysctl_oid_list *child,
1039 struct i40e_hw_port_stats *stats)
1040 {
1041 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
1042 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
1043 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
1044
1045 struct i40e_eth_stats *eth_stats = &stats->eth;
1046 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
1047
1048 struct ixl_sysctl_info ctls[] =
1049 {
1050 {&stats->crc_errors, "crc_errors", "CRC Errors"},
1051 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
1052 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
1053 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
1054 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
1055 /* Packet Reception Stats */
1056 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
1057 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
1058 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
1059 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
1060 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
1061 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
1062 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
1063 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
1064 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
1065 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
1066 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
1067 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
1068 /* Packet Transmission Stats */
1069 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
1070 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
1071 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
1072 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
1073 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
1074 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
1075 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
1076 /* Flow control */
1077 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
1078 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
1079 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
1080 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
1081 /* End */
1082 {0,0,0}
1083 };
1084
1085 struct ixl_sysctl_info *entry = ctls;
1086 while (entry->stat != 0)
1087 {
1088 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
1089 CTLFLAG_RD, entry->stat,
1090 entry->description);
1091 entry++;
1092 }
1093 }
1094
1095 void
ixl_set_rss_key(struct ixl_pf * pf)1096 ixl_set_rss_key(struct ixl_pf *pf)
1097 {
1098 struct i40e_hw *hw = &pf->hw;
1099 struct ixl_vsi *vsi = &pf->vsi;
1100 device_t dev = pf->dev;
1101 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
1102 enum i40e_status_code status;
1103
1104 #ifdef RSS
1105 /* Fetch the configured RSS key */
1106 rss_getkey((uint8_t *) &rss_seed);
1107 #else
1108 ixl_get_default_rss_key(rss_seed);
1109 #endif
1110 /* Fill out hash function seed */
1111 if (hw->mac.type == I40E_MAC_X722) {
1112 struct i40e_aqc_get_set_rss_key_data key_data;
1113 bcopy(rss_seed, &key_data, 52);
1114 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
1115 if (status)
1116 device_printf(dev,
1117 "i40e_aq_set_rss_key status %s, error %s\n",
1118 i40e_stat_str(hw, status),
1119 i40e_aq_str(hw, hw->aq.asq_last_status));
1120 } else {
1121 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
1122 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
1123 }
1124 }
1125
1126 /*
1127 * Configure enabled PCTYPES for RSS.
1128 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64 set_hena = 0, hena;

#ifdef RSS
	u32 rss_hash_config;

	/* Translate the kernel RSS hash configuration into the
	 * matching i40e packet classifier type (PCTYPE) bits. */
	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	/* NOTE(review): IPV6_EX maps to the FRAG_IPV6 PCTYPE here;
	 * confirm this pairing is intentional. */
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	/* Without kernel RSS, use the driver defaults, which differ
	 * between X722 and XL710 MACs. */
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	/* Read-modify-write the 64-bit HENA register pair so any
	 * already-enabled PCTYPEs are preserved. */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

}
1166
1167 /*
1168 ** Setup the PF's RSS parameters.
1169 */
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);		/* program the hash key */
	ixl_set_rss_pctypes(pf);	/* enable hashed packet types (HENA) */
	ixl_set_rss_hlut(pf);		/* fill the hash lookup table */
}
1177
1178 /*
1179 * In some firmware versions there is default MAC/VLAN filter
1180 * configured which interferes with filters managed by driver.
1181 * Make sure it's removed.
1182 */
1183 void
ixl_del_default_hw_filters(struct ixl_vsi * vsi)1184 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1185 {
1186 struct i40e_aqc_remove_macvlan_element_data e;
1187
1188 bzero(&e, sizeof(e));
1189 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1190 e.vlan_tag = 0;
1191 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1192 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1193
1194 bzero(&e, sizeof(e));
1195 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1196 e.vlan_tag = 0;
1197 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1198 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1199 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1200 }
1201
1202 /*
1203 ** Initialize filter list and add filters that the hardware
1204 ** needs to know about.
1205 **
1206 ** Requires VSI's seid to be set before calling.
1207 */
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	LIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	/* VF-owned VSIs stop here: no default-filter cleanup and no
	 * PF MAC filter is installed for them. */
	if (IXL_VSI_IS_VF(vsi))
		return;

	/* Drop any firmware-installed default filter first. */
	ixl_del_default_hw_filters(vsi);

	/* Accept frames addressed to the port MAC on any VLAN. */
	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	/* Debug builds make the drop filter opt-in via sysctl. */
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}
1241
/*
 * Re-program every filter currently tracked in SW into the HW,
 * then make sure the MAC and (if enabled) VLAN filters exist.
 * Used after events that may have wiped the HW filter state.
 */
void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_ftl_head tmp;
	int cnt;

	/*
	 * The ixl_add_hw_filters function adds filters configured
	 * in HW to a list in VSI. Move all filters to a temporary
	 * list to avoid corrupting it by concatenating to itself.
	 */
	LIST_INIT(&tmp);
	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
	cnt = vsi->num_hw_filters;
	vsi->num_hw_filters = 0;

	ixl_add_hw_filters(vsi, &tmp, cnt);

	/*
	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
	 * will be NULL. Furthermore, the ftl of such vsi already contains
	 * IXL_VLAN_ANY filter so we can skip that as well.
	 */
	if (hw == NULL)
		return;

	/* Filter could be removed if MAC address was changed */
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * VLAN HW filtering is enabled, make sure that filters
	 * for all registered VLAN tags are configured
	 */
	ixl_add_vlan_filters(vsi, hw->mac.addr);
}
1280
1281 /*
1282 * This routine adds a MAC/VLAN filter to the software filter
1283 * list, then adds that new filter to the HW if it doesn't already
1284 * exist in the SW filter list.
1285 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_pf *pf;
	device_t dev;
	struct ixl_ftl_head to_add;
	int to_add_cnt;

	pf = vsi->back;
	dev = pf->dev;
	to_add_cnt = 1;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f != NULL)
		return;

	/* Build the new filter(s) on a private list first; they are
	 * spliced into vsi->ftl by ixl_add_hw_filters on success. */
	LIST_INIT(&to_add);
	f = ixl_new_filter(&to_add, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	/* num_macs counts only the VLAN-agnostic (IXL_VLAN_ANY) filters */
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/*
	** Is this the first vlan being registered, if so we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			struct ixl_ftl_head to_del;

			/* Prepare new filter first to avoid removing
			 * VLAN_ANY filter if allocation fails */
			f = ixl_new_filter(&to_add, macaddr, 0);
			if (f == NULL) {
				device_printf(dev, "WARNING: no filter available!!\n");
				/* release the first filter allocated above */
				free(LIST_FIRST(&to_add), M_IXL);
				return;
			}
			to_add_cnt++;

			/* Remove the VLAN_ANY filter from SW list and HW */
			LIST_REMOVE(tmp, ftle);
			LIST_INIT(&to_del);
			LIST_INSERT_HEAD(&to_del, tmp, ftle);
			ixl_del_hw_filters(vsi, &to_del, 1);
		}
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}
1347
1348 /**
1349 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1350 * @vsi: pointer to VSI
1351 * @macaddr: MAC address
1352 *
1353 * Adds MAC/VLAN filter for each VLAN configured on the interface
1354 * if there is enough HW filters. Otherwise adds a single filter
1355 * for all tagged and untagged frames to allow all configured VLANs
 * to receive traffic.
1357 */
void
ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;
	struct ixl_mac_filter *f;
	int to_add_cnt = 0;
	int i, vlan = 0;

	/* No VLANs, or more VLANs than available HW filters: fall
	 * back to a single tag-ignoring filter. */
	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
		return;
	}
	LIST_INIT(&to_add);

	/* Add filter for untagged frames if it does not exist yet */
	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (f == NULL) {
		f = ixl_new_filter(&to_add, macaddr, 0);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			return;
		}
		to_add_cnt++;
	}

	/* Walk every VLAN id set in vlans_map via find-first-set */
	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
		if (vlan == -1)
			break;

		/* Does one already exist */
		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
		if (f != NULL)
			continue;

		f = ixl_new_filter(&to_add, macaddr, vlan);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			/* drop the partially-built list; nothing was
			 * installed in HW yet */
			ixl_free_filters(&to_add);
			return;
		}
		to_add_cnt++;
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}
1404
/*
 * Remove the MAC/VLAN filter from the SW list and from HW.  When
 * the last VLAN filter for the MAC is removed, the untagged (0)
 * filter is also dropped and replaced by a single IXL_VLAN_ANY
 * filter.
 */
void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head ftl_head;
	int to_del_cnt = 1;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Nothing to do if there is no matching SW filter */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f == NULL)
		return;

	/* Unlink and queue the filter for HW removal */
	LIST_REMOVE(f, ftle);
	LIST_INIT(&ftl_head);
	LIST_INSERT_HEAD(&ftl_head, f, ftle);
	/*
	 * NOTE(review): ixl_add_filter() increments num_macs only
	 * for IXL_VLAN_ANY filters and never sets IXL_FILTER_VLAN
	 * on them, so this condition looks unreachable — confirm
	 * whether the flag test is inverted.
	 */
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	/* If this is not the last vlan just remove the filter */
	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
		return;
	}

	/* It's the last vlan, we need to switch back to a non-vlan filter */
	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (tmp != NULL) {
		/* Delete the untagged filter in the same AQ call */
		LIST_REMOVE(tmp, ftle);
		LIST_INSERT_AFTER(f, tmp, ftle);
		to_del_cnt++;
	}
	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);

	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}
1443
1444 /**
1445 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1446 * @vsi: VSI which filters need to be removed
1447 * @macaddr: MAC address
1448 *
1449 * Remove all MAC/VLAN filters with a given MAC address. For multicast
1450 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY)
1451 * so skip them to speed up processing. Those filters should be removed
1452 * using ixl_del_filter function.
1453 */
1454 void
ixl_del_all_vlan_filters(struct ixl_vsi * vsi,const u8 * macaddr)1455 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1456 {
1457 struct ixl_mac_filter *f, *tmp;
1458 struct ixl_ftl_head to_del;
1459 int to_del_cnt = 0;
1460
1461 LIST_INIT(&to_del);
1462
1463 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1464 if ((f->flags & IXL_FILTER_MC) != 0 ||
1465 !ixl_ether_is_equal(f->macaddr, macaddr))
1466 continue;
1467
1468 LIST_REMOVE(f, ftle);
1469 LIST_INSERT_HEAD(&to_del, f, ftle);
1470 to_del_cnt++;
1471 }
1472
1473 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1474 "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1475 __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1476 if (to_del_cnt > 0)
1477 ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1478 }
1479
1480 /*
1481 ** Find the filter with both matching mac addr and vlan id
1482 */
1483 struct ixl_mac_filter *
ixl_find_filter(struct ixl_ftl_head * headp,const u8 * macaddr,s16 vlan)1484 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1485 {
1486 struct ixl_mac_filter *f;
1487
1488 LIST_FOREACH(f, headp, ftle) {
1489 if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1490 (f->vlan == vlan)) {
1491 return (f);
1492 }
1493 }
1494
1495 return (NULL);
1496 }
1497
1498 /*
1499 ** This routine takes additions to the vsi filter
1500 ** table and creates an Admin Queue call to create
1501 ** the filters in the hardware.
1502 */
1503 void
ixl_add_hw_filters(struct ixl_vsi * vsi,struct ixl_ftl_head * to_add,int cnt)1504 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1505 {
1506 struct i40e_aqc_add_macvlan_element_data *a, *b;
1507 struct ixl_mac_filter *f, *fn;
1508 struct ixl_pf *pf;
1509 struct i40e_hw *hw;
1510 device_t dev;
1511 enum i40e_status_code status;
1512 int j = 0;
1513
1514 pf = vsi->back;
1515 dev = vsi->dev;
1516 hw = &pf->hw;
1517
1518 ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1519
1520 if (cnt < 1) {
1521 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1522 return;
1523 }
1524
1525 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1526 M_IXL, M_NOWAIT | M_ZERO);
1527 if (a == NULL) {
1528 device_printf(dev, "add_hw_filters failed to get memory\n");
1529 return;
1530 }
1531
1532 LIST_FOREACH(f, to_add, ftle) {
1533 b = &a[j]; // a pox on fvl long names :)
1534 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1535 if (f->vlan == IXL_VLAN_ANY) {
1536 b->vlan_tag = 0;
1537 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1538 } else {
1539 b->vlan_tag = f->vlan;
1540 b->flags = 0;
1541 }
1542 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1543 /* Some FW versions do not set match method
1544 * when adding filters fails. Initialize it with
1545 * expected error value to allow detection which
1546 * filters were not added */
1547 b->match_method = I40E_AQC_MM_ERR_NO_RES;
1548 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1549 MAC_FORMAT_ARGS(f->macaddr));
1550
1551 if (++j == cnt)
1552 break;
1553 }
1554 if (j != cnt) {
1555 /* Something went wrong */
1556 device_printf(dev,
1557 "%s ERROR: list of filters to short expected: %d, found: %d\n",
1558 __func__, cnt, j);
1559 ixl_free_filters(to_add);
1560 goto out_free;
1561 }
1562
1563 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1564 if (status == I40E_SUCCESS) {
1565 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1566 vsi->num_hw_filters += j;
1567 goto out_free;
1568 }
1569
1570 device_printf(dev,
1571 "i40e_aq_add_macvlan status %s, error %s\n",
1572 i40e_stat_str(hw, status),
1573 i40e_aq_str(hw, hw->aq.asq_last_status));
1574 j = 0;
1575
1576 /* Verify which filters were actually configured in HW
1577 * and add them to the list */
1578 LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1579 LIST_REMOVE(f, ftle);
1580 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1581 ixl_dbg_filter(pf,
1582 "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1583 __func__,
1584 MAC_FORMAT_ARGS(f->macaddr),
1585 f->vlan);
1586 free(f, M_IXL);
1587 } else {
1588 LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1589 vsi->num_hw_filters++;
1590 }
1591 j++;
1592 }
1593
1594 out_free:
1595 free(a, M_IXL);
1596 }
1597
1598 /*
1599 ** This routine takes removals in the vsi filter
1600 ** table and creates an Admin Queue call to delete
1601 ** the filters in the hardware.
1602 */
void
ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	struct ixl_mac_filter *f, *f_temp;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	/* AQ element array for the remove-macvlan command */
	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	/* Translate each SW filter into an AQ element and free it;
	 * the SW entries are released regardless of whether the HW
	 * removal below succeeds. */
	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
		e = &d[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (f->vlan == IXL_VLAN_ANY) {
			e->vlan_tag = 0;
			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		} else {
			e->vlan_tag = f->vlan;
		}

		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		/* delete entry from the list */
		LIST_REMOVE(f, ftle);
		free(f, M_IXL);
		if (++j == cnt)
			break;
	}
	/* cnt must match the list length exactly */
	if (j != cnt || !LIST_EMPTY(to_del)) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_del);
		goto out_free;
	}
	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		/* Report individual elements FW flagged as errors */
		for (int i = 0; i < j; i++) {
			if (d[i].error_code == 0)
				continue;
			device_printf(dev,
			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
			    d[i].vlan_tag);
		}
	}

	vsi->num_hw_filters -= j;

out_free:
	free(d, M_IXL);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
}
1678
1679 int
ixl_enable_tx_ring(struct ixl_pf * pf,struct ixl_pf_qtag * qtag,u16 vsi_qidx)1680 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1681 {
1682 struct i40e_hw *hw = &pf->hw;
1683 int error = 0;
1684 u32 reg;
1685 u16 pf_qidx;
1686
1687 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1688
1689 ixl_dbg(pf, IXL_DBG_EN_DIS,
1690 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1691 pf_qidx, vsi_qidx);
1692
1693 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1694
1695 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1696 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1697 I40E_QTX_ENA_QENA_STAT_MASK;
1698 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1699 /* Verify the enable took */
1700 for (int j = 0; j < 10; j++) {
1701 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1702 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1703 break;
1704 i40e_usec_delay(10);
1705 }
1706 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1707 device_printf(pf->dev, "TX queue %d still disabled!\n",
1708 pf_qidx);
1709 error = ETIMEDOUT;
1710 }
1711
1712 return (error);
1713 }
1714
1715 int
ixl_enable_rx_ring(struct ixl_pf * pf,struct ixl_pf_qtag * qtag,u16 vsi_qidx)1716 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1717 {
1718 struct i40e_hw *hw = &pf->hw;
1719 int error = 0;
1720 u32 reg;
1721 u16 pf_qidx;
1722
1723 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1724
1725 ixl_dbg(pf, IXL_DBG_EN_DIS,
1726 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1727 pf_qidx, vsi_qidx);
1728
1729 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1730 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1731 I40E_QRX_ENA_QENA_STAT_MASK;
1732 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1733 /* Verify the enable took */
1734 for (int j = 0; j < 10; j++) {
1735 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1736 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1737 break;
1738 i40e_usec_delay(10);
1739 }
1740 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1741 device_printf(pf->dev, "RX queue %d still disabled!\n",
1742 pf_qidx);
1743 error = ETIMEDOUT;
1744 }
1745
1746 return (error);
1747 }
1748
1749 int
ixl_enable_ring(struct ixl_pf * pf,struct ixl_pf_qtag * qtag,u16 vsi_qidx)1750 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1751 {
1752 int error = 0;
1753
1754 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1755 /* Called function already prints error message */
1756 if (error)
1757 return (error);
1758 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1759 return (error);
1760 }
1761
1762 /*
1763 * Returns error on first ring that is detected hung.
1764 */
1765 int
ixl_disable_tx_ring(struct ixl_pf * pf,struct ixl_pf_qtag * qtag,u16 vsi_qidx)1766 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1767 {
1768 struct i40e_hw *hw = &pf->hw;
1769 int error = 0;
1770 u32 reg;
1771 u16 pf_qidx;
1772
1773 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1774
1775 ixl_dbg(pf, IXL_DBG_EN_DIS,
1776 "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1777 pf_qidx, vsi_qidx);
1778
1779 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1780 i40e_usec_delay(500);
1781
1782 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1783 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1784 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1785 /* Verify the disable took */
1786 for (int j = 0; j < 10; j++) {
1787 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1788 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1789 break;
1790 i40e_msec_delay(10);
1791 }
1792 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1793 device_printf(pf->dev, "TX queue %d still enabled!\n",
1794 pf_qidx);
1795 error = ETIMEDOUT;
1796 }
1797
1798 return (error);
1799 }
1800
1801 /*
1802 * Returns error on first ring that is detected hung.
1803 */
1804 int
ixl_disable_rx_ring(struct ixl_pf * pf,struct ixl_pf_qtag * qtag,u16 vsi_qidx)1805 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1806 {
1807 struct i40e_hw *hw = &pf->hw;
1808 int error = 0;
1809 u32 reg;
1810 u16 pf_qidx;
1811
1812 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1813
1814 ixl_dbg(pf, IXL_DBG_EN_DIS,
1815 "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1816 pf_qidx, vsi_qidx);
1817
1818 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1819 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1820 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1821 /* Verify the disable took */
1822 for (int j = 0; j < 10; j++) {
1823 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1824 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1825 break;
1826 i40e_msec_delay(10);
1827 }
1828 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1829 device_printf(pf->dev, "RX queue %d still enabled!\n",
1830 pf_qidx);
1831 error = ETIMEDOUT;
1832 }
1833
1834 return (error);
1835 }
1836
1837 int
ixl_disable_ring(struct ixl_pf * pf,struct ixl_pf_qtag * qtag,u16 vsi_qidx)1838 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1839 {
1840 int error = 0;
1841
1842 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1843 /* Called function already prints error message */
1844 if (error)
1845 return (error);
1846 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1847 return (error);
1848 }
1849
/*
 * Decode and report a TX Malicious Driver Detection event:
 * read the global event register, clear it, then determine
 * whether the PF and/or a VF caused it.
 */
static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 vf_num, queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		/* Extract PF/VF number, event code and queue index */
		pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
		    I40E_GL_MDET_TX_PF_NUM_SHIFT;
		vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
		    I40E_GL_MDET_TX_VF_NUM_SHIFT;
		event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
		    I40E_GL_MDET_TX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
		    I40E_GL_MDET_TX_QUEUE_SHIFT;
		/* all-ones write clears the latched event */
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	/* Did this PF trigger the event? */
	reg = rd32(hw, I40E_PF_MDET_TX);
	if (reg & I40E_PF_MDET_TX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
		    event, queue, pf_num, vf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on TX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "TX Malicious Driver Detection event (unknown)\n");
}
1922
/*
 * Decode and report an RX Malicious Driver Detection event.
 * Mirrors ixl_handle_tx_mdd_event(), except GL_MDET_RX does not
 * carry a VF number.
 */
static void
ixl_handle_rx_mdd_event(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	bool vf_mdd_detected = false;
	u16 queue;
	u8 pf_num, event;
	u8 pf_mdet_num, vp_mdet_num;
	u32 reg;

	/*
	 * GL_MDET_RX doesn't contain VF number information, unlike
	 * GL_MDET_TX.
	 */
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		/* Extract function number, event code and queue index */
		pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
		    I40E_GL_MDET_RX_FUNCTION_SHIFT;
		event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
		    I40E_GL_MDET_RX_EVENT_SHIFT;
		queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
		    I40E_GL_MDET_RX_QUEUE_SHIFT;
		/* all-ones write clears the latched event */
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (!mdd_detected)
		return;

	/* Did this PF trigger the event? */
	reg = rd32(hw, I40E_PF_MDET_RX);
	if (reg & I40E_PF_MDET_RX_VALID_MASK) {
		wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
		pf_mdet_num = hw->pf_id;
		pf_mdd_detected = true;
	}

	/* Check if MDD was caused by a VF */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &(pf->vfs[i]);
		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vp_mdet_num = i;
			vf->num_mdd_events++;
			vf_mdd_detected = true;
		}
	}

	/* Print out an error message */
	if (vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
		    event, queue, pf_num, pf_mdet_num, vp_mdet_num);
	else if (vf_mdd_detected && !pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d, (VF-%d)\n",
		    event, queue, pf_num, vp_mdet_num);
	else if (!vf_mdd_detected && pf_mdd_detected)
		device_printf(dev,
		    "Malicious Driver Detection event %d"
		    " on RX queue %d, pf number %d (PF-%d)\n",
		    event, queue, pf_num, pf_mdet_num);
	/* Theoretically shouldn't happen */
	else
		device_printf(dev,
		    "RX Malicious Driver Detection event (unknown)\n");
}
1996
1997 /**
1998 * ixl_handle_mdd_event
1999 *
2000 * Called from interrupt handler to identify possibly malicious vfs
2001 * (But also detects events from the PF, as well)
2002 **/
2003 void
ixl_handle_mdd_event(struct ixl_pf * pf)2004 ixl_handle_mdd_event(struct ixl_pf *pf)
2005 {
2006 struct i40e_hw *hw = &pf->hw;
2007 u32 reg;
2008
2009 /*
2010 * Handle both TX/RX because it's possible they could
2011 * both trigger in the same interrupt.
2012 */
2013 ixl_handle_tx_mdd_event(pf);
2014 ixl_handle_rx_mdd_event(pf);
2015
2016 ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING);
2017
2018 /* re-enable mdd interrupt cause */
2019 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2020 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2021 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2022 ixl_flush(hw);
2023 }
2024
2025 void
ixl_enable_intr0(struct i40e_hw * hw)2026 ixl_enable_intr0(struct i40e_hw *hw)
2027 {
2028 u32 reg;
2029
2030 /* Use IXL_ITR_NONE so ITR isn't updated here */
2031 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2032 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2033 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2034 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2035 }
2036
2037 void
ixl_disable_intr0(struct i40e_hw * hw)2038 ixl_disable_intr0(struct i40e_hw *hw)
2039 {
2040 u32 reg;
2041
2042 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2043 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2044 ixl_flush(hw);
2045 }
2046
2047 void
ixl_enable_queue(struct i40e_hw * hw,int id)2048 ixl_enable_queue(struct i40e_hw *hw, int id)
2049 {
2050 u32 reg;
2051
2052 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2053 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2054 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2055 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2056 }
2057
2058 void
ixl_disable_queue(struct i40e_hw * hw,int id)2059 ixl_disable_queue(struct i40e_hw *hw, int id)
2060 {
2061 u32 reg;
2062
2063 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2064 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2065 }
2066
/**
 * ixl_handle_empr_reset
 *
 * Drive the driver-side recovery after an EMP/Core reset: quiesce the
 * device, issue a PF reset if still required, detect firmware recovery
 * mode, and rebuild the HW structures, restoring the interface to its
 * previous up/down state.
 */
void
ixl_handle_empr_reset(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	/* Remember whether the interface was running so it can be restored */
	bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);

	ixl_prepare_for_reset(pf, is_up);
	/*
	 * i40e_pf_reset checks the type of reset and acts
	 * accordingly. If EMP or Core reset was performed
	 * doing PF reset is not necessary and it sometimes
	 * fails.
	 */
	ixl_pf_reset(pf);

	/*
	 * If FW dropped into recovery mode as a result of the reset, flag
	 * it, warn the user, and force the link state down.
	 */
	if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
	    ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		device_printf(pf->dev,
		    "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		pf->link_up = FALSE;
		ixl_update_link_status(pf);
	}

	ixl_rebuild_hw_structs_after_reset(pf, is_up);

	ixl_clear_state(&pf->state, IXL_STATE_RESETTING);
}
2095
/**
 * ixl_update_stats_counters
 *
 * Read the port-level statistics registers and refresh the software
 * counters in pf->stats, then update VSI-level and per-VF stats.
 * Device counters are not cleared by a PF reset, so each value is
 * reported relative to the offset captured on the first read (the
 * stat_offsets_loaded / *_offsets mechanism).
 */
void
ixl_update_stats_counters(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_vf *vf;
	/* Snapshot XOFF count to detect pause frames received this interval */
	u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;

	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;

	/* Update hw stats */
	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);
	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	ixl_stat_update48(hw, I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	ixl_stat_update48(hw, I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	ixl_stat_update48(hw, I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	ixl_stat_update48(hw, I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);
	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);
	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* Flow control (LFC) stats */
	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/*
	 * For watchdog management we need to know if we have been paused
	 * during the last interval, so capture that here.
	 */
	if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
		vsi->shared->isc_pause_frames = 1;

	/* Packet size stats rx */
	ixl_stat_update48(hw, I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Packet size stats tx */
	ixl_stat_update48(hw, I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	ixl_stat_update48(hw, I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	ixl_stat_update48(hw, I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	ixl_stat_update48(hw, I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	ixl_stat_update48(hw, I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	ixl_stat_update48(hw, I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);

	u64 rx_roc;
	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &rx_roc);

	/*
	 * Read from RXERR1 register to get the count for the packets
	 * larger than RX MAX and include that in total rx_oversize count.
	 *
	 * Also need to add BIT(7) to hw->port value while indexing
	 * I40E_GL_RXERR1 register as indexes 0..127 are for VFs when
	 * SR-IOV is enabled. Indexes 128..143 are for PFs.
	 */
	u64 rx_err1;
	ixl_stat_update64(hw,
			   I40E_GL_RXERR1L(hw->pf_id + BIT(7)),
			   pf->stat_offsets_loaded,
			   &osd->rx_err1,
			   &rx_err1);

	/* Oversize total combines ROC and the RXERR1 "too large" count */
	nsd->rx_oversize = rx_roc + rx_err1;

	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);
	/* EEE */
	i40e_get_phy_lpi_status(hw, nsd);

	i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
			  &osd->tx_lpi_count, &nsd->tx_lpi_count,
			  &osd->rx_lpi_count, &nsd->rx_lpi_count);

	/* First read complete: later reads are relative to these offsets */
	pf->stat_offsets_loaded = true;
	/* End hw stats */

	/* Update vsi stats */
	ixl_update_vsi_stats(vsi);

	/* Update per-VF eth stats, but only for enabled VFs */
	for (int i = 0; i < pf->num_vfs; i++) {
		vf = &pf->vfs[i];
		if (vf->vf_flags & VF_FLAG_ENABLED)
			ixl_update_eth_stats(&pf->vfs[i].vsi);
	}
}
2284
2285 /**
2286 * Update VSI-specific ethernet statistics counters.
2287 **/
2288 void
ixl_update_eth_stats(struct ixl_vsi * vsi)2289 ixl_update_eth_stats(struct ixl_vsi *vsi)
2290 {
2291 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2292 struct i40e_hw *hw = &pf->hw;
2293 struct i40e_eth_stats *es;
2294 struct i40e_eth_stats *oes;
2295 u16 stat_idx = vsi->info.stat_counter_idx;
2296
2297 es = &vsi->eth_stats;
2298 oes = &vsi->eth_stats_offsets;
2299
2300 /* Gather up the stats that the hw collects */
2301 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2302 vsi->stat_offsets_loaded,
2303 &oes->tx_errors, &es->tx_errors);
2304 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2305 vsi->stat_offsets_loaded,
2306 &oes->rx_discards, &es->rx_discards);
2307
2308 ixl_stat_update48(hw, I40E_GLV_GORCL(stat_idx),
2309 vsi->stat_offsets_loaded,
2310 &oes->rx_bytes, &es->rx_bytes);
2311 ixl_stat_update48(hw, I40E_GLV_UPRCL(stat_idx),
2312 vsi->stat_offsets_loaded,
2313 &oes->rx_unicast, &es->rx_unicast);
2314 ixl_stat_update48(hw, I40E_GLV_MPRCL(stat_idx),
2315 vsi->stat_offsets_loaded,
2316 &oes->rx_multicast, &es->rx_multicast);
2317 ixl_stat_update48(hw, I40E_GLV_BPRCL(stat_idx),
2318 vsi->stat_offsets_loaded,
2319 &oes->rx_broadcast, &es->rx_broadcast);
2320
2321 ixl_stat_update48(hw, I40E_GLV_GOTCL(stat_idx),
2322 vsi->stat_offsets_loaded,
2323 &oes->tx_bytes, &es->tx_bytes);
2324 ixl_stat_update48(hw, I40E_GLV_UPTCL(stat_idx),
2325 vsi->stat_offsets_loaded,
2326 &oes->tx_unicast, &es->tx_unicast);
2327 ixl_stat_update48(hw, I40E_GLV_MPTCL(stat_idx),
2328 vsi->stat_offsets_loaded,
2329 &oes->tx_multicast, &es->tx_multicast);
2330 ixl_stat_update48(hw, I40E_GLV_BPTCL(stat_idx),
2331 vsi->stat_offsets_loaded,
2332 &oes->tx_broadcast, &es->tx_broadcast);
2333 vsi->stat_offsets_loaded = true;
2334 }
2335
/**
 * ixl_update_vsi_stats
 *
 * Refresh the VSI's HW ethernet counters and fold them, together with
 * selected port-level counters, into the ifnet statistics.
 */
void
ixl_update_vsi_stats(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf;
	struct i40e_eth_stats *es;
	u64 tx_discards, csum_errs;

	struct i40e_hw_port_stats *nsd;

	pf = vsi->back;
	es = &vsi->eth_stats;
	nsd = &pf->stats;

	ixl_update_eth_stats(vsi);

	/* TX drops include packets dropped while the link was down */
	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;

	/* Sum per-RX-queue checksum errors into the port-level counter */
	csum_errs = 0;
	for (int i = 0; i < vsi->num_rx_queues; i++)
		csum_errs += vsi->rx_queues[i].rxr.csum_errs;
	nsd->checksum_error = csum_errs;

	/* Update ifnet stats */
	IXL_SET_IPACKETS(vsi, es->rx_unicast +
	                   es->rx_multicast +
			   es->rx_broadcast);
	IXL_SET_OPACKETS(vsi, es->tx_unicast +
	                   es->tx_multicast +
			   es->tx_broadcast);
	IXL_SET_IBYTES(vsi, es->rx_bytes);
	IXL_SET_OBYTES(vsi, es->tx_bytes);
	IXL_SET_IMCASTS(vsi, es->rx_multicast);
	IXL_SET_OMCASTS(vsi, es->tx_multicast);

	/* Input errors aggregate every RX error class the HW counts */
	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
	    nsd->checksum_error + nsd->rx_length_errors +
	    nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
	    nsd->rx_jabber);
	IXL_SET_OERRORS(vsi, es->tx_errors);
	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
	IXL_SET_OQDROPS(vsi, tx_discards);
	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
	IXL_SET_COLLISIONS(vsi, 0);
}
2380
2381 /**
2382 * Reset all of the stats for the given pf
2383 **/
2384 void
ixl_pf_reset_stats(struct ixl_pf * pf)2385 ixl_pf_reset_stats(struct ixl_pf *pf)
2386 {
2387 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2388 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2389 pf->stat_offsets_loaded = false;
2390 }
2391
2392 /**
2393 * Resets all stats of the given vsi
2394 **/
2395 void
ixl_vsi_reset_stats(struct ixl_vsi * vsi)2396 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2397 {
2398 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2399 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2400 vsi->stat_offsets_loaded = false;
2401 }
2402
2403 /**
2404 * Helper function for reading and updating 48/64 bit stats from the hw
2405 *
2406 * Since the device stats are not reset at PFReset, they likely will not
2407 * be zeroed when the driver starts. We'll save the first values read
2408 * and use them as offsets to be subtracted from the raw values in order
2409 * to report stats that count from zero.
2410 **/
2411 static void
_ixl_stat_update_helper(struct i40e_hw * hw,u32 reg,bool offset_loaded,u64 mask,u64 * offset,u64 * stat)2412 _ixl_stat_update_helper(struct i40e_hw *hw, u32 reg,
2413 bool offset_loaded, u64 mask, u64 *offset, u64 *stat)
2414 {
2415 u64 new_data = rd64(hw, reg);
2416
2417 if (!offset_loaded)
2418 *offset = new_data;
2419 if (new_data >= *offset)
2420 *stat = new_data - *offset;
2421 else
2422 *stat = (new_data + mask) - *offset + 1;
2423 *stat &= mask;
2424 }
2425
2426 /**
2427 * Read and update a 48 bit stat from the hw
2428 **/
2429 void
ixl_stat_update48(struct i40e_hw * hw,u32 reg,bool offset_loaded,u64 * offset,u64 * stat)2430 ixl_stat_update48(struct i40e_hw *hw, u32 reg,
2431 bool offset_loaded, u64 *offset, u64 *stat)
2432 {
2433 _ixl_stat_update_helper(hw,
2434 reg,
2435 offset_loaded,
2436 0xFFFFFFFFFFFFULL,
2437 offset,
2438 stat);
2439 }
2440
2441 /**
2442 * ixl_stat_update64 - read and update a 64 bit stat from the chip.
2443 **/
2444 void
ixl_stat_update64(struct i40e_hw * hw,u32 reg,bool offset_loaded,u64 * offset,u64 * stat)2445 ixl_stat_update64(struct i40e_hw *hw, u32 reg,
2446 bool offset_loaded, u64 *offset, u64 *stat)
2447 {
2448 _ixl_stat_update_helper(hw,
2449 reg,
2450 offset_loaded,
2451 0xFFFFFFFFFFFFFFFFULL,
2452 offset,
2453 stat);
2454 }
2455
2456 /**
2457 * Read and update a 32 bit stat from the hw
2458 **/
2459 void
ixl_stat_update32(struct i40e_hw * hw,u32 reg,bool offset_loaded,u64 * offset,u64 * stat)2460 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2461 bool offset_loaded, u64 *offset, u64 *stat)
2462 {
2463 u32 new_data;
2464
2465 new_data = rd32(hw, reg);
2466 if (!offset_loaded)
2467 *offset = new_data;
2468 if (new_data >= *offset)
2469 *stat = (u32)(new_data - *offset);
2470 else
2471 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2472 }
2473
2474 /**
2475 * Add subset of device sysctls safe to use in recovery mode
2476 */
2477 void
ixl_add_sysctls_recovery_mode(struct ixl_pf * pf)2478 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2479 {
2480 device_t dev = pf->dev;
2481
2482 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2483 struct sysctl_oid_list *ctx_list =
2484 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2485
2486 struct sysctl_oid *debug_node;
2487 struct sysctl_oid_list *debug_list;
2488
2489 SYSCTL_ADD_PROC(ctx, ctx_list,
2490 OID_AUTO, "fw_version",
2491 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2492 ixl_sysctl_show_fw, "A", "Firmware version");
2493
2494 /* Add sysctls meant to print debug information, but don't list them
2495 * in "sysctl -a" output. */
2496 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2497 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2498 "Debug Sysctls");
2499 debug_list = SYSCTL_CHILDREN(debug_node);
2500
2501 SYSCTL_ADD_UINT(ctx, debug_list,
2502 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2503 &pf->hw.debug_mask, 0, "Shared code debug message level");
2504
2505 SYSCTL_ADD_UINT(ctx, debug_list,
2506 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2507 &pf->dbg_mask, 0, "Non-shared code debug message level");
2508
2509 SYSCTL_ADD_PROC(ctx, debug_list,
2510 OID_AUTO, "dump_debug_data",
2511 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2512 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2513
2514 SYSCTL_ADD_PROC(ctx, debug_list,
2515 OID_AUTO, "do_pf_reset",
2516 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2517 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2518
2519 SYSCTL_ADD_PROC(ctx, debug_list,
2520 OID_AUTO, "do_core_reset",
2521 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2522 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2523
2524 SYSCTL_ADD_PROC(ctx, debug_list,
2525 OID_AUTO, "do_global_reset",
2526 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2527 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2528
2529 SYSCTL_ADD_PROC(ctx, debug_list,
2530 OID_AUTO, "queue_interrupt_table",
2531 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2532 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2533
2534 SYSCTL_ADD_PROC(ctx, debug_list,
2535 OID_AUTO, "queue_int_ctln",
2536 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2537 pf, 0, ixl_sysctl_debug_queue_int_ctln, "A",
2538 "View MSI-X control registers for RX queues");
2539 }
2540
/**
 * ixl_add_device_sysctls
 *
 * Register the full set of per-device sysctl nodes: link and
 * flow-control configuration, ITR tuning, FEC controls (25G parts
 * only), FW LLDP, EEE status/counters, and a hidden "debug" subtree of
 * diagnostic handlers (plus I2C access when the part supports it).
 */
void
ixl_add_device_sysctls(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid_list *ctx_list =
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	struct sysctl_oid *debug_node;
	struct sysctl_oid_list *debug_list;

	struct sysctl_oid *fec_node;
	struct sysctl_oid_list *fec_list;
	struct sysctl_oid *eee_node;
	struct sysctl_oid_list *eee_list;

	/* Set up sysctls */
	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "advertise_speed",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "supported_speeds",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "current_speed",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_current_speed, "A", "Current Port Speed");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_show_fw, "A", "Firmware version");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "unallocated_queues",
	    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_unallocated_queues, "I",
	    "Queues not allocated to a PF or VF");

	/* Interrupt throttling (ITR) knobs */
	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "tx_itr",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_pf_tx_itr, "I",
	    "Immediately set TX ITR value for all queues");

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "rx_itr",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
	    ixl_sysctl_pf_rx_itr, "I",
	    "Immediately set RX ITR value for all queues");

	SYSCTL_ADD_INT(ctx, ctx_list,
	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
	    &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");

	SYSCTL_ADD_INT(ctx, ctx_list,
	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
	    &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");

	/* Add FEC sysctls for 25G adapters */
	if (i40e_is_25G_device(hw->device_id)) {
		fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
		    OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "FEC Sysctls");
		fec_list = SYSCTL_CHILDREN(fec_node);

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "fc_ability",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "rs_ability",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "fc_requested",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_fc_request, "I",
		    "FC FEC mode requested on link");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "rs_requested",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_rs_request, "I",
		    "RS FEC mode requested on link");

		SYSCTL_ADD_PROC(ctx, fec_list,
		    OID_AUTO, "auto_fec_enabled",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
		    ixl_sysctl_fec_auto_enable, "I",
		    "Let FW decide FEC ability/request modes");
	}

	SYSCTL_ADD_PROC(ctx, ctx_list,
	    OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);

	/* Energy Efficient Ethernet node: enable knob plus LPI stats */
	eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "Energy Efficient Ethernet (EEE) Sysctls");
	eee_list = SYSCTL_CHILDREN(eee_node);

	SYSCTL_ADD_PROC(ctx, eee_list,
	    OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    pf, 0, ixl_sysctl_eee_enable, "I",
	    "Enable Energy Efficient Ethernet (EEE)");

	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
	    "TX LPI status");

	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
	    "RX LPI status");

	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
	    "TX LPI count");

	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
	    "RX LPI count");

	SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
	    "link_active_on_if_down",
	    CTLTYPE_INT | CTLFLAG_RWTUN,
	    pf, 0, ixl_sysctl_set_link_active, "I",
	    IXL_SYSCTL_HELP_SET_LINK_ACTIVE);

	/* Add sysctls meant to print debug information, but don't list them
	 * in "sysctl -a" output. */
	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
	    OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
	    "Debug Sysctls");
	debug_list = SYSCTL_CHILDREN(debug_node);

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
	    &pf->hw.debug_mask, 0, "Shared code debug message level");

	SYSCTL_ADD_UINT(ctx, debug_list,
	    OID_AUTO, "core_debug_mask", CTLFLAG_RW,
	    &pf->dbg_mask, 0, "Non-shared code debug message level");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "link_status",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_abilities_init",
	    CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_abilities",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "filter_list",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "hw_res_alloc",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_config",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "switch_vlans",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_key",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hkey, "A", "View RSS key");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_lut",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "rss_hena",
	    CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "disable_fw_link_management",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "dump_debug_data",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");

	/* Manual reset triggers (PF / CORE / GLOBAL) */
	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_pf_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_core_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "do_global_reset",
	    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "queue_interrupt_table",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
	    pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");

	SYSCTL_ADD_PROC(ctx, debug_list,
	    OID_AUTO, "phy_statistics", CTLTYPE_STRING | CTLFLAG_RD,
	    pf, 0, ixl_sysctl_phy_statistics, "A", "PHY Statistics");

	/* I2C access handlers, only when the part exposes an I2C interface */
	if (pf->has_i2c) {
		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "read_i2c_byte",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);

		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "write_i2c_byte",
		    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);

		SYSCTL_ADD_PROC(ctx, debug_list,
		    OID_AUTO, "read_i2c_diag_data",
		    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
		    pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
	}
}
2798
2799 /*
2800 * Primarily for finding out how many queues can be assigned to VFs,
2801 * at runtime.
2802 */
2803 static int
ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)2804 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2805 {
2806 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2807 int queues;
2808
2809 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2810
2811 return sysctl_handle_int(oidp, NULL, queues, req);
2812 }
2813
2814 static const char *
ixl_link_speed_string(enum i40e_aq_link_speed link_speed)2815 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2816 {
2817 const char * link_speed_str[] = {
2818 "Unknown",
2819 "100 Mbps",
2820 "1 Gbps",
2821 "10 Gbps",
2822 "40 Gbps",
2823 "20 Gbps",
2824 "25 Gbps",
2825 "2.5 Gbps",
2826 "5 Gbps"
2827 };
2828 int index;
2829
2830 switch (link_speed) {
2831 case I40E_LINK_SPEED_100MB:
2832 index = 1;
2833 break;
2834 case I40E_LINK_SPEED_1GB:
2835 index = 2;
2836 break;
2837 case I40E_LINK_SPEED_10GB:
2838 index = 3;
2839 break;
2840 case I40E_LINK_SPEED_40GB:
2841 index = 4;
2842 break;
2843 case I40E_LINK_SPEED_20GB:
2844 index = 5;
2845 break;
2846 case I40E_LINK_SPEED_25GB:
2847 index = 6;
2848 break;
2849 case I40E_LINK_SPEED_2_5GB:
2850 index = 7;
2851 break;
2852 case I40E_LINK_SPEED_5GB:
2853 index = 8;
2854 break;
2855 case I40E_LINK_SPEED_UNKNOWN:
2856 default:
2857 index = 0;
2858 break;
2859 }
2860
2861 return (link_speed_str[index]);
2862 }
2863
2864 int
ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)2865 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2866 {
2867 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2868 struct i40e_hw *hw = &pf->hw;
2869 int error = 0;
2870
2871 ixl_update_link_status(pf);
2872
2873 error = sysctl_handle_string(oidp,
2874 __DECONST(void *,
2875 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2876 8, req);
2877
2878 return (error);
2879 }
2880
2881 /*
2882 * Converts 8-bit speeds value to and from sysctl flags and
2883 * Admin Queue flags.
2884 */
2885 static u8
ixl_convert_sysctl_aq_link_speed(u8 speeds,bool to_aq)2886 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2887 {
2888 #define SPEED_MAP_SIZE 8
2889 static u16 speedmap[SPEED_MAP_SIZE] = {
2890 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2891 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
2892 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
2893 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
2894 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
2895 (I40E_LINK_SPEED_40GB | (0x20 << 8)),
2896 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2897 (I40E_LINK_SPEED_5GB | (0x80 << 8)),
2898 };
2899 u8 retval = 0;
2900
2901 for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2902 if (to_aq)
2903 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2904 else
2905 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2906 }
2907
2908 return (retval);
2909 }
2910
/*
 * Apply a new set of advertised link speeds to the port's PHY.
 *
 * 'speeds' is in AQ link-speed format when from_aq is true, otherwise it
 * is in sysctl flag format and is converted to AQ format first.  All
 * other PHY settings are carried over from the currently reported
 * capabilities so only the advertised speeds change.
 *
 * Returns 0 on success or EIO if either admin queue command fails.
 */
int
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EIO);
	}

	/* Prepare new config */
	bzero(&config, sizeof(config));
	if (from_aq)
		config.link_speed = speeds;
	else
		config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
	/* Preserve all current settings other than the advertised speeds. */
	config.phy_type = abilities.phy_type;
	config.phy_type_ext = abilities.phy_type_ext;
	config.abilities = abilities.abilities
	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
	    & I40E_AQ_PHY_FEC_CONFIG_MASK;

	/* Do aq command & restart link */
	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return (EIO);
	}

	return (0);
}
2959
2960 /*
2961 ** Supported link speeds
2962 ** Flags:
2963 ** 0x1 - 100 Mb
2964 ** 0x2 - 1G
2965 ** 0x4 - 10G
2966 ** 0x8 - 20G
2967 ** 0x10 - 25G
2968 ** 0x20 - 40G
2969 ** 0x40 - 2.5G
2970 ** 0x80 - 5G
2971 */
2972 static int
ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)2973 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2974 {
2975 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2976 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2977
2978 return sysctl_handle_int(oidp, NULL, supported, req);
2979 }
2980
2981 /*
2982 ** Control link advertise speed:
2983 ** Flags:
2984 ** 0x1 - advertise 100 Mb
2985 ** 0x2 - advertise 1G
2986 ** 0x4 - advertise 10G
2987 ** 0x8 - advertise 20G
2988 ** 0x10 - advertise 25G
2989 ** 0x20 - advertise 40G
2990 ** 0x40 - advertise 2.5G
2991 ** 0x80 - advertise 5G
2992 **
2993 ** Set to 0 to disable link
2994 */
2995 int
ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)2996 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2997 {
2998 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2999 device_t dev = pf->dev;
3000 u8 converted_speeds;
3001 int requested_ls = 0;
3002 int error = 0;
3003
3004 /* Read in new mode */
3005 requested_ls = pf->advertised_speed;
3006 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3007 if ((error) || (req->newptr == NULL))
3008 return (error);
3009 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
3010 device_printf(dev, "Interface is currently in FW recovery mode. "
3011 "Setting advertise speed not supported\n");
3012 return (EINVAL);
3013 }
3014
3015 /* Error out if bits outside of possible flag range are set */
3016 if ((requested_ls & ~((u8)0xFF)) != 0) {
3017 device_printf(dev, "Input advertised speed out of range; "
3018 "valid flags are: 0x%02x\n",
3019 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3020 return (EINVAL);
3021 }
3022
3023 /* Check if adapter supports input value */
3024 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3025 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3026 device_printf(dev, "Invalid advertised speed; "
3027 "valid flags are: 0x%02x\n",
3028 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3029 return (EINVAL);
3030 }
3031
3032 error = ixl_set_advertised_speeds(pf, requested_ls, false);
3033 if (error)
3034 return (error);
3035
3036 pf->advertised_speed = requested_ls;
3037 ixl_update_link_status(pf);
3038 return (0);
3039 }
3040
3041 /*
3042 * Input: bitmap of enum i40e_aq_link_speed
3043 */
3044 u64
ixl_max_aq_speed_to_value(u8 link_speeds)3045 ixl_max_aq_speed_to_value(u8 link_speeds)
3046 {
3047 if (link_speeds & I40E_LINK_SPEED_40GB)
3048 return IF_Gbps(40);
3049 if (link_speeds & I40E_LINK_SPEED_25GB)
3050 return IF_Gbps(25);
3051 if (link_speeds & I40E_LINK_SPEED_20GB)
3052 return IF_Gbps(20);
3053 if (link_speeds & I40E_LINK_SPEED_10GB)
3054 return IF_Gbps(10);
3055 if (link_speeds & I40E_LINK_SPEED_5GB)
3056 return IF_Gbps(5);
3057 if (link_speeds & I40E_LINK_SPEED_2_5GB)
3058 return IF_Mbps(2500);
3059 if (link_speeds & I40E_LINK_SPEED_1GB)
3060 return IF_Gbps(1);
3061 if (link_speeds & I40E_LINK_SPEED_100MB)
3062 return IF_Mbps(100);
3063 else
3064 /* Minimum supported link speed */
3065 return IF_Mbps(100);
3066 }
3067
3068 /*
3069 ** Get the width and transaction speed of
3070 ** the bus this adapter is plugged into.
3071 */
3072 void
ixl_get_bus_info(struct ixl_pf * pf)3073 ixl_get_bus_info(struct ixl_pf *pf)
3074 {
3075 struct i40e_hw *hw = &pf->hw;
3076 device_t dev = pf->dev;
3077 u16 link;
3078 u32 offset, num_ports;
3079 u64 max_speed;
3080
3081 /* Some devices don't use PCIE */
3082 if (hw->mac.type == I40E_MAC_X722)
3083 return;
3084
3085 /* Read PCI Express Capabilities Link Status Register */
3086 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3087 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3088
3089 /* Fill out hw struct with PCIE info */
3090 i40e_set_pci_config_data(hw, link);
3091
3092 /* Use info to print out bandwidth messages */
3093 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3094 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3095 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3096 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3097 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3098 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3099 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3100 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3101 ("Unknown"));
3102
3103 /*
3104 * If adapter is in slot with maximum supported speed,
3105 * no warning message needs to be printed out.
3106 */
3107 if (hw->bus.speed >= i40e_bus_speed_8000
3108 && hw->bus.width >= i40e_bus_width_pcie_x8)
3109 return;
3110
3111 num_ports = bitcount32(hw->func_caps.valid_functions);
3112 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3113
3114 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3115 device_printf(dev, "PCI-Express bandwidth available"
3116 " for this device may be insufficient for"
3117 " optimal performance.\n");
3118 device_printf(dev, "Please move the device to a different"
3119 " PCI-e link with more lanes and/or higher"
3120 " transfer rate.\n");
3121 }
3122 }
3123
3124 static int
ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)3125 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3126 {
3127 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3128 struct i40e_hw *hw = &pf->hw;
3129 struct sbuf *sbuf;
3130
3131 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3132 ixl_nvm_version_str(hw, sbuf);
3133 sbuf_finish(sbuf);
3134 sbuf_delete(sbuf);
3135
3136 return (0);
3137 }
3138
3139 void
ixl_print_nvm_cmd(device_t dev,struct i40e_nvm_access * nvma)3140 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3141 {
3142 u8 nvma_ptr = nvma->config & 0xFF;
3143 u8 nvma_flags = (nvma->config & 0xF00) >> 8;
3144 const char * cmd_str;
3145
3146 switch (nvma->command) {
3147 case I40E_NVM_READ:
3148 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
3149 nvma->offset == 0 && nvma->data_size == 1) {
3150 device_printf(dev, "NVMUPD: Get Driver Status Command\n");
3151 return;
3152 }
3153 cmd_str = "READ ";
3154 break;
3155 case I40E_NVM_WRITE:
3156 cmd_str = "WRITE";
3157 break;
3158 default:
3159 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
3160 return;
3161 }
3162 device_printf(dev,
3163 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
3164 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
3165 }
3166
/*
 * Handle an NVM update request from the nvmupdate tool (delivered via a
 * driver-specific ioctl).  The i40e_nvm_access request is copied in from
 * user space, forwarded to the shared-code NVM update engine, and the
 * (possibly modified) buffer is copied back out.
 *
 * Returns 0 or an errno from the copy/validation steps; otherwise the
 * perrno produced by i40e_nvmupd_command() is returned directly (with
 * -EPERM remapped, see below).
 */
int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_nvm_access *nvma;
	device_t dev = pf->dev;
	enum i40e_status_code status = 0;
	size_t nvma_size, ifd_len, exp_len;
	int err, perrno;

	DEBUGFUNC("ixl_handle_nvmupd_cmd");

	/* Sanity checks */
	nvma_size = sizeof(struct i40e_nvm_access);
	ifd_len = ifd->ifd_len;

	/* The request must at least hold the fixed-size header struct. */
	if (ifd_len < nvma_size ||
	    ifd->ifd_data == NULL) {
		device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
		    __func__);
		device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
		    __func__, ifd_len, nvma_size);
		device_printf(dev, "%s: data pointer: %p\n", __func__,
		    ifd->ifd_data);
		return (EINVAL);
	}

	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
	err = copyin(ifd->ifd_data, nvma, ifd_len);
	if (err) {
		device_printf(dev, "%s: Cannot get request from user space\n",
		    __func__);
		free(nvma, M_IXL);
		return (err);
	}

	if (pf->dbg_mask & IXL_DBG_NVMUPD)
		ixl_print_nvm_cmd(dev, nvma);

	/* Wait up to ~10 s (100 x 100 ms) for a pending reset to finish. */
	if (IXL_PF_IS_RESETTING(pf)) {
		int count = 0;
		while (count++ < 100) {
			i40e_msec_delay(100);
			if (!(IXL_PF_IS_RESETTING(pf)))
				break;
		}
	}

	if (IXL_PF_IS_RESETTING(pf)) {
		device_printf(dev,
		    "%s: timeout waiting for EMP reset to finish\n",
		    __func__);
		free(nvma, M_IXL);
		return (-EBUSY);
	}

	/* Bound the payload size before trusting it below. */
	if (nvma->data_size < 1 || nvma->data_size > 4096) {
		device_printf(dev,
		    "%s: invalid request, data size not in supported range\n",
		    __func__);
		free(nvma, M_IXL);
		return (EINVAL);
	}

	/*
	 * Older versions of the NVM update tool don't set ifd_len to the size
	 * of the entire buffer passed to the ioctl. Check the data_size field
	 * in the contained i40e_nvm_access struct and ensure everything is
	 * copied in from userspace.
	 */
	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */

	if (ifd_len < exp_len) {
		/* Grow the buffer and re-copy the full request. */
		ifd_len = exp_len;
		nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
		err = copyin(ifd->ifd_data, nvma, ifd_len);
		if (err) {
			device_printf(dev, "%s: Cannot get request from user space\n",
			    __func__);
			free(nvma, M_IXL);
			return (err);
		}
	}

	// TODO: Might need a different lock here
	// IXL_PF_LOCK(pf);
	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
	// IXL_PF_UNLOCK(pf);

	err = copyout(nvma, ifd->ifd_data, ifd_len);
	free(nvma, M_IXL);
	if (err) {
		device_printf(dev, "%s: Cannot return data to user space\n",
		    __func__);
		return (err);
	}

	/* Let the nvmupdate report errors, show them only when debug is enabled */
	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
		device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
		    i40e_stat_str(hw, status), perrno);

	/*
	 * -EPERM is actually ERESTART, which the kernel interprets as it needing
	 * to run this ioctl again. So use -EACCES for -EPERM instead.
	 */
	if (perrno == -EPERM)
		return (-EACCES);
	else
		return (perrno);
}
3278
3279 int
ixl_find_i2c_interface(struct ixl_pf * pf)3280 ixl_find_i2c_interface(struct ixl_pf *pf)
3281 {
3282 struct i40e_hw *hw = &pf->hw;
3283 bool i2c_en, port_matched;
3284 u32 reg;
3285
3286 for (int i = 0; i < 4; i++) {
3287 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3288 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3289 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3290 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3291 & BIT(hw->port);
3292 if (i2c_en && port_matched)
3293 return (i);
3294 }
3295
3296 return (-1);
3297 }
3298
/*
 * Enable or disable link on this port.
 *
 * When enabling, the PHY types advertised are restored from the initial
 * (power-on) capabilities; when disabling, phy_type/phy_type_ext are left
 * zero so no link can be established.  The flow control mode from pf->fc
 * is re-applied in either case, and auto-negotiation is restarted to make
 * the new configuration take effect.  Errors are logged but not returned.
 */
void
ixl_set_link(struct ixl_pf *pf, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code aq_error = 0;
	u32 phy_type, phy_type_ext;

	/* Get initial capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, TRUE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}

	/* Save the power-on PHY types before 'abilities' is reused below. */
	phy_type = abilities.phy_type;
	phy_type_ext = abilities.phy_type_ext;

	/* Get current capability information */
	aq_error = i40e_aq_get_phy_capabilities(hw,
	    FALSE, FALSE, &abilities, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error getting phy capabilities %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}

	/* Prepare new config */
	memset(&config, 0, sizeof(config));
	config.link_speed = abilities.link_speed;
	config.abilities = abilities.abilities;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
	    & I40E_AQ_PHY_FEC_CONFIG_MASK;
	/* Zeroed PHY types keep the link down unless re-set below. */
	config.phy_type = 0;
	config.phy_type_ext = 0;

	config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
	    I40E_AQ_PHY_FLAG_PAUSE_RX);

	/* Re-apply the configured flow control mode. */
	switch (pf->fc) {
	case I40E_FC_FULL:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
		    I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	if (enable) {
		config.phy_type = phy_type;
		config.phy_type_ext = phy_type_ext;

	}

	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error setting new phy config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}

	/* Restart auto-negotiation so the new config takes effect. */
	aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
	if (aq_error) {
		device_printf(dev,
		    "%s: Error set link config %d,"
		    " aq error: %d\n", __func__, aq_error,
		    hw->aq.asq_last_status);
		return;
	}
}
3388
3389 static char *
ixl_phy_type_string(u32 bit_pos,bool ext)3390 ixl_phy_type_string(u32 bit_pos, bool ext)
3391 {
3392 static char * phy_types_str[32] = {
3393 "SGMII",
3394 "1000BASE-KX",
3395 "10GBASE-KX4",
3396 "10GBASE-KR",
3397 "40GBASE-KR4",
3398 "XAUI",
3399 "XFI",
3400 "SFI",
3401 "XLAUI",
3402 "XLPPI",
3403 "40GBASE-CR4",
3404 "10GBASE-CR1",
3405 "SFP+ Active DA",
3406 "QSFP+ Active DA",
3407 "Reserved (14)",
3408 "Reserved (15)",
3409 "Reserved (16)",
3410 "100BASE-TX",
3411 "1000BASE-T",
3412 "10GBASE-T",
3413 "10GBASE-SR",
3414 "10GBASE-LR",
3415 "10GBASE-SFP+Cu",
3416 "10GBASE-CR1",
3417 "40GBASE-CR4",
3418 "40GBASE-SR4",
3419 "40GBASE-LR4",
3420 "1000BASE-SX",
3421 "1000BASE-LX",
3422 "1000BASE-T Optical",
3423 "20GBASE-KR2",
3424 "Reserved (31)"
3425 };
3426 static char * ext_phy_types_str[8] = {
3427 "25GBASE-KR",
3428 "25GBASE-CR",
3429 "25GBASE-SR",
3430 "25GBASE-LR",
3431 "25GBASE-AOC",
3432 "25GBASE-ACC",
3433 "2.5GBASE-T",
3434 "5GBASE-T"
3435 };
3436
3437 if (ext && bit_pos > 7) return "Invalid_Ext";
3438 if (bit_pos > 31) return "Invalid";
3439
3440 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3441 }
3442
3443 /* TODO: ERJ: I don't this is necessary anymore. */
3444 int
ixl_aq_get_link_status(struct ixl_pf * pf,struct i40e_aqc_get_link_status * link_status)3445 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3446 {
3447 device_t dev = pf->dev;
3448 struct i40e_hw *hw = &pf->hw;
3449 struct i40e_aq_desc desc;
3450 enum i40e_status_code status;
3451
3452 struct i40e_aqc_get_link_status *aq_link_status =
3453 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3454
3455 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3456 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3457 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3458 if (status) {
3459 device_printf(dev,
3460 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3461 __func__, i40e_stat_str(hw, status),
3462 i40e_aq_str(hw, hw->aq.asq_last_status));
3463 return (EIO);
3464 }
3465
3466 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3467 return (0);
3468 }
3469
3470 static char *
ixl_phy_type_string_ls(u8 val)3471 ixl_phy_type_string_ls(u8 val)
3472 {
3473 if (val >= 0x1F)
3474 return ixl_phy_type_string(val - 0x1F, true);
3475 else
3476 return ixl_phy_type_string(val, false);
3477 }
3478
3479 static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)3480 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3481 {
3482 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3483 device_t dev = pf->dev;
3484 struct sbuf *buf;
3485 int error = 0;
3486
3487 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3488 if (!buf) {
3489 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3490 return (ENOMEM);
3491 }
3492
3493 struct i40e_aqc_get_link_status link_status;
3494 error = ixl_aq_get_link_status(pf, &link_status);
3495 if (error) {
3496 sbuf_delete(buf);
3497 return (error);
3498 }
3499
3500 sbuf_printf(buf, "\n"
3501 "PHY Type : 0x%02x<%s>\n"
3502 "Speed : 0x%02x\n"
3503 "Link info: 0x%02x\n"
3504 "AN info : 0x%02x\n"
3505 "Ext info : 0x%02x\n"
3506 "Loopback : 0x%02x\n"
3507 "Max Frame: %d\n"
3508 "Config : 0x%02x\n"
3509 "Power : 0x%02x",
3510 link_status.phy_type,
3511 ixl_phy_type_string_ls(link_status.phy_type),
3512 link_status.link_speed,
3513 link_status.link_info,
3514 link_status.an_info,
3515 link_status.ext_info,
3516 link_status.loopback,
3517 link_status.max_frame_size,
3518 link_status.config,
3519 link_status.power_desc);
3520
3521 error = sbuf_finish(buf);
3522 if (error)
3523 device_printf(dev, "Error finishing sbuf: %d\n", error);
3524
3525 sbuf_delete(buf);
3526 return (error);
3527 }
3528
/*
 * Sysctl handler that dumps the PHY abilities reported by firmware:
 * supported PHY types (base and extended), link speeds, and assorted
 * capability/FEC/module fields.
 *
 * NOTE(review): arg2 is passed as the second boolean to
 * i40e_aq_get_phy_capabilities(); it appears to select reporting of the
 * initial (power-on) capabilities vs. current ones — confirm against the
 * shared-code prototype.
 */
static int
ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	status = i40e_aq_get_phy_capabilities(hw,
	    FALSE, arg2 != 0, &abilities, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		sbuf_delete(buf);
		return (EIO);
	}

	sbuf_printf(buf, "\n"
	    "PHY Type : %08x",
	    abilities.phy_type);

	/* Decode each set bit of the base PHY type bitmap into a name. */
	if (abilities.phy_type != 0) {
		sbuf_printf(buf, "<");
		for (int i = 0; i < 32; i++)
			if ((1 << i) & abilities.phy_type)
				sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
		sbuf_printf(buf, ">");
	}

	sbuf_printf(buf, "\nPHY Ext : %02x",
	    abilities.phy_type_ext);

	/* Decode each set bit of the extended PHY type bitmap. */
	if (abilities.phy_type_ext != 0) {
		sbuf_printf(buf, "<");
		for (int i = 0; i < 4; i++)
			if ((1 << i) & abilities.phy_type_ext)
				sbuf_printf(buf, "%s,",
				    ixl_phy_type_string(i, true));
		sbuf_printf(buf, ">");
	}

	/* Decode each set speed flag into its human-readable name. */
	sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
	if (abilities.link_speed != 0) {
		u8 link_speed;
		sbuf_printf(buf, " <");
		for (int i = 0; i < 8; i++) {
			link_speed = (1 << i) & abilities.link_speed;
			if (link_speed)
				sbuf_printf(buf, "%s, ",
				    ixl_link_speed_string(link_speed));
		}
		sbuf_printf(buf, ">");
	}

	sbuf_printf(buf, "\n"
	    "Abilities: %02x\n"
	    "EEE cap : %04x\n"
	    "EEER reg : %08x\n"
	    "D3 Lpan : %02x\n"
	    "ID : %02x %02x %02x %02x\n"
	    "ModType : %02x %02x %02x\n"
	    "ModType E: %01x\n"
	    "FEC Cfg : %02x\n"
	    "Ext CC : %02x",
	    abilities.abilities, abilities.eee_capability,
	    abilities.eeer_val, abilities.d3_lpan,
	    abilities.phy_id[0], abilities.phy_id[1],
	    abilities.phy_id[2], abilities.phy_id[3],
	    abilities.module_type[0], abilities.module_type[1],
	    abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
	    abilities.fec_cfg_curr_mod_ext_info & 0x1F,
	    abilities.ext_comp_code);

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}
3620
/*
 * Sysctl handler that dumps MAC/PCS link statistics registers.
 *
 * On X722 the values are reported as unavailable — the PRTMAC_PCS_*
 * registers read below are not accessed on that MAC type.
 */
static int
ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *buf;
	int error = 0;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (buf == NULL) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	if (hw->mac.type == I40E_MAC_X722) {
		sbuf_printf(buf, "\n"
		    "PCS Link Control Register: unavailable\n"
		    "PCS Link Status 1: unavailable\n"
		    "PCS Link Status 2: unavailable\n"
		    "XGMII FIFO Status: unavailable\n"
		    "Auto-Negotiation (AN) Status: unavailable\n"
		    "KR PCS Status: unavailable\n"
		    "KR FEC Status 1 – FEC Correctable Blocks Counter: unavailable\n"
		    "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: unavailable"
		);
	} else {
		sbuf_printf(buf, "\n"
		    "PCS Link Control Register: %#010X\n"
		    "PCS Link Status 1: %#010X\n"
		    "PCS Link Status 2: %#010X\n"
		    "XGMII FIFO Status: %#010X\n"
		    "Auto-Negotiation (AN) Status: %#010X\n"
		    "KR PCS Status: %#010X\n"
		    "KR FEC Status 1 – FEC Correctable Blocks Counter: %#010X\n"
		    "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: %#010X",
		    rd32(hw, I40E_PRTMAC_PCS_LINK_CTRL),
		    rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1(0)),
		    rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS2),
		    rd32(hw, I40E_PRTMAC_PCS_XGMII_FIFO_STATUS),
		    rd32(hw, I40E_PRTMAC_PCS_AN_LP_STATUS),
		    rd32(hw, I40E_PRTMAC_PCS_KR_STATUS),
		    rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS1),
		    rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS2)
		);
	}

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);

	sbuf_delete(buf);
	return (error);
}
3675
/*
 * Sysctl handler that lists the MAC/VLAN filters installed on the PF's
 * VSI and, when SR-IOV is compiled in, on each enabled VF's VSI.
 */
static int
ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_mac_filter *f;
	device_t dev = pf->dev;
	int error = 0, ftl_len = 0, ftl_counter = 0;

	struct sbuf *buf;

	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (!buf) {
		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
		return (ENOMEM);
	}

	sbuf_printf(buf, "\n");

	/* Print MAC filters */
	sbuf_printf(buf, "PF Filters:\n");
	/* First pass counts entries so the last line can omit its '\n'. */
	LIST_FOREACH(f, &vsi->ftl, ftle)
		ftl_len++;

	if (ftl_len < 1)
		sbuf_printf(buf, "(none)\n");
	else {
		LIST_FOREACH(f, &vsi->ftl, ftle) {
			sbuf_printf(buf,
			    MAC_FORMAT ", vlan %4d, flags %#06x",
			    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
			/* don't print '\n' for last entry */
			if (++ftl_counter != ftl_len)
				sbuf_printf(buf, "\n");
		}
	}

#ifdef PCI_IOV
	/* TODO: Give each VF its own filter list sysctl */
	struct ixl_vf *vf;
	if (pf->num_vfs > 0) {
		sbuf_printf(buf, "\n\n");
		for (int i = 0; i < pf->num_vfs; i++) {
			vf = &pf->vfs[i];
			if (!(vf->vf_flags & VF_FLAG_ENABLED))
				continue;

			vsi = &vf->vsi;
			ftl_len = 0, ftl_counter = 0;
			sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
			LIST_FOREACH(f, &vsi->ftl, ftle)
				ftl_len++;

			if (ftl_len < 1)
				sbuf_printf(buf, "(none)\n");
			else {
				LIST_FOREACH(f, &vsi->ftl, ftle) {
					sbuf_printf(buf,
					    MAC_FORMAT ", vlan %4d, flags %#06x\n",
					    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
				}
			}
		}
	}
#endif

	error = sbuf_finish(buf);
	if (error)
		device_printf(dev, "Error finishing sbuf: %d\n", error);
	sbuf_delete(buf);

	return (error);
}
3749
3750 #define IXL_SW_RES_SIZE 0x14
3751 int
ixl_res_alloc_cmp(const void * a,const void * b)3752 ixl_res_alloc_cmp(const void *a, const void *b)
3753 {
3754 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3755 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3756 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3757
3758 return ((int)one->resource_type - (int)two->resource_type);
3759 }
3760
3761 /*
3762 * Longest string length: 25
3763 */
3764 const char *
ixl_switch_res_type_string(u8 type)3765 ixl_switch_res_type_string(u8 type)
3766 {
3767 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3768 "VEB",
3769 "VSI",
3770 "Perfect Match MAC address",
3771 "S-tag",
3772 "(Reserved)",
3773 "Multicast hash entry",
3774 "Unicast hash entry",
3775 "VLAN",
3776 "VSI List entry",
3777 "(Reserved)",
3778 "VLAN Statistic Pool",
3779 "Mirror Rule",
3780 "Queue Set",
3781 "Inner VLAN Forward filter",
3782 "(Reserved)",
3783 "Inner MAC",
3784 "IP",
3785 "GRE/VN1 Key",
3786 "VN2 Key",
3787 "Tunneling Port"
3788 };
3789
3790 if (type < IXL_SW_RES_SIZE)
3791 return ixl_switch_res_type_strings[type];
3792 else
3793 return "(Reserved)";
3794 }
3795
3796 static int
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)3797 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3798 {
3799 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3800 struct i40e_hw *hw = &pf->hw;
3801 device_t dev = pf->dev;
3802 struct sbuf *buf;
3803 enum i40e_status_code status;
3804 int error = 0;
3805
3806 u8 num_entries;
3807 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3808
3809 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3810 if (!buf) {
3811 device_printf(dev, "Could not allocate sbuf for output.\n");
3812 return (ENOMEM);
3813 }
3814
3815 bzero(resp, sizeof(resp));
3816 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3817 resp,
3818 IXL_SW_RES_SIZE,
3819 NULL);
3820 if (status) {
3821 device_printf(dev,
3822 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3823 __func__, i40e_stat_str(hw, status),
3824 i40e_aq_str(hw, hw->aq.asq_last_status));
3825 sbuf_delete(buf);
3826 return (error);
3827 }
3828
3829 /* Sort entries by type for display */
3830 qsort(resp, num_entries,
3831 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3832 &ixl_res_alloc_cmp);
3833
3834 sbuf_cat(buf, "\n");
3835 sbuf_printf(buf, "# of entries: %d\n", num_entries);
3836 sbuf_printf(buf,
3837 " Type | Guaranteed | Total | Used | Un-allocated\n"
3838 " | (this) | (all) | (this) | (all) \n");
3839 for (int i = 0; i < num_entries; i++) {
3840 sbuf_printf(buf,
3841 "%25s | %10d %5d %6d %12d",
3842 ixl_switch_res_type_string(resp[i].resource_type),
3843 resp[i].guaranteed,
3844 resp[i].total,
3845 resp[i].used,
3846 resp[i].total_unalloced);
3847 if (i < num_entries - 1)
3848 sbuf_cat(buf, "\n");
3849 }
3850
3851 error = sbuf_finish(buf);
3852 if (error)
3853 device_printf(dev, "Error finishing sbuf: %d\n", error);
3854
3855 sbuf_delete(buf);
3856 return (error);
3857 }
3858
/*
 * SEID ranges that firmware assigns to switch elements present at
 * power-on; used by ixl_switch_element_string() to infer an element's
 * identity directly from its SEID.
 */
enum ixl_sw_seid_offset {
	IXL_SW_SEID_EMP = 1,		/* Embedded Management Processor */
	IXL_SW_SEID_MAC_START = 2,	/* MAC ports occupy SEIDs 2..5 */
	IXL_SW_SEID_MAC_END = 5,
	IXL_SW_SEID_PF_START = 16,	/* PFs occupy SEIDs 16..31 */
	IXL_SW_SEID_PF_END = 31,
	IXL_SW_SEID_VF_START = 32,	/* VFs occupy SEIDs 32..159 */
	IXL_SW_SEID_VF_END = 159,
};
3868
3869 /*
3870 * Caller must init and delete sbuf; this function will clear and
3871 * finish it for caller.
3872 *
3873 * Note: The SEID argument only applies for elements defined by FW at
3874 * power-on; these include the EMP, Ports, PFs and VFs.
3875 */
3876 static char *
ixl_switch_element_string(struct sbuf * s,u8 element_type,u16 seid)3877 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3878 {
3879 sbuf_clear(s);
3880
3881 /* If SEID is in certain ranges, then we can infer the
3882 * mapping of SEID to switch element.
3883 */
3884 if (seid == IXL_SW_SEID_EMP) {
3885 sbuf_cat(s, "EMP");
3886 goto out;
3887 } else if (seid >= IXL_SW_SEID_MAC_START &&
3888 seid <= IXL_SW_SEID_MAC_END) {
3889 sbuf_printf(s, "MAC %2d",
3890 seid - IXL_SW_SEID_MAC_START);
3891 goto out;
3892 } else if (seid >= IXL_SW_SEID_PF_START &&
3893 seid <= IXL_SW_SEID_PF_END) {
3894 sbuf_printf(s, "PF %3d",
3895 seid - IXL_SW_SEID_PF_START);
3896 goto out;
3897 } else if (seid >= IXL_SW_SEID_VF_START &&
3898 seid <= IXL_SW_SEID_VF_END) {
3899 sbuf_printf(s, "VF %3d",
3900 seid - IXL_SW_SEID_VF_START);
3901 goto out;
3902 }
3903
3904 switch (element_type) {
3905 case I40E_AQ_SW_ELEM_TYPE_BMC:
3906 sbuf_cat(s, "BMC");
3907 break;
3908 case I40E_AQ_SW_ELEM_TYPE_PV:
3909 sbuf_cat(s, "PV");
3910 break;
3911 case I40E_AQ_SW_ELEM_TYPE_VEB:
3912 sbuf_cat(s, "VEB");
3913 break;
3914 case I40E_AQ_SW_ELEM_TYPE_PA:
3915 sbuf_cat(s, "PA");
3916 break;
3917 case I40E_AQ_SW_ELEM_TYPE_VSI:
3918 sbuf_printf(s, "VSI");
3919 break;
3920 default:
3921 sbuf_cat(s, "?");
3922 break;
3923 }
3924
3925 out:
3926 sbuf_finish(s);
3927 return sbuf_data(s);
3928 }
3929
3930 static int
ixl_sw_cfg_elem_seid_cmp(const void * a,const void * b)3931 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3932 {
3933 const struct i40e_aqc_switch_config_element_resp *one, *two;
3934 one = (const struct i40e_aqc_switch_config_element_resp *)a;
3935 two = (const struct i40e_aqc_switch_config_element_resp *)b;
3936
3937 return ((int)one->seid - (int)two->seid);
3938 }
3939
3940 static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)3941 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3942 {
3943 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3944 struct i40e_hw *hw = &pf->hw;
3945 device_t dev = pf->dev;
3946 struct sbuf *buf;
3947 struct sbuf *nmbuf;
3948 enum i40e_status_code status;
3949 int error = 0;
3950 u16 next = 0;
3951 u8 aq_buf[I40E_AQ_LARGE_BUF];
3952
3953 struct i40e_aqc_switch_config_element_resp *elem;
3954 struct i40e_aqc_get_switch_config_resp *sw_config;
3955 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3956
3957 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3958 if (!buf) {
3959 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3960 return (ENOMEM);
3961 }
3962
3963 status = i40e_aq_get_switch_config(hw, sw_config,
3964 sizeof(aq_buf), &next, NULL);
3965 if (status) {
3966 device_printf(dev,
3967 "%s: aq_get_switch_config() error %s, aq error %s\n",
3968 __func__, i40e_stat_str(hw, status),
3969 i40e_aq_str(hw, hw->aq.asq_last_status));
3970 sbuf_delete(buf);
3971 return error;
3972 }
3973 if (next)
3974 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3975 __func__, next);
3976
3977 nmbuf = sbuf_new_auto();
3978 if (!nmbuf) {
3979 device_printf(dev, "Could not allocate sbuf for name output.\n");
3980 sbuf_delete(buf);
3981 return (ENOMEM);
3982 }
3983
3984 /* Sort entries by SEID for display */
3985 qsort(sw_config->element, sw_config->header.num_reported,
3986 sizeof(struct i40e_aqc_switch_config_element_resp),
3987 &ixl_sw_cfg_elem_seid_cmp);
3988
3989 sbuf_cat(buf, "\n");
3990 /* Assuming <= 255 elements in switch */
3991 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3992 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3993 /* Exclude:
3994 * Revision -- all elements are revision 1 for now
3995 */
3996 sbuf_printf(buf,
3997 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n"
3998 " | | | (uplink)\n");
3999 for (int i = 0; i < sw_config->header.num_reported; i++) {
4000 elem = &sw_config->element[i];
4001
4002 // "%4d (%8s) | %8s %8s %#8x",
4003 sbuf_printf(buf, "%4d", elem->seid);
4004 sbuf_cat(buf, " ");
4005 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4006 elem->element_type, elem->seid));
4007 sbuf_cat(buf, " | ");
4008 sbuf_printf(buf, "%4d", elem->uplink_seid);
4009 sbuf_cat(buf, " ");
4010 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4011 0, elem->uplink_seid));
4012 sbuf_cat(buf, " | ");
4013 sbuf_printf(buf, "%4d", elem->downlink_seid);
4014 sbuf_cat(buf, " ");
4015 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4016 0, elem->downlink_seid));
4017 sbuf_cat(buf, " | ");
4018 sbuf_printf(buf, "%8d", elem->connection_type);
4019 if (i < sw_config->header.num_reported - 1)
4020 sbuf_cat(buf, "\n");
4021 }
4022 sbuf_delete(nmbuf);
4023
4024 error = sbuf_finish(buf);
4025 if (error)
4026 device_printf(dev, "Error finishing sbuf: %d\n", error);
4027
4028 sbuf_delete(buf);
4029
4030 return (error);
4031 }
4032
4033 static int
ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)4034 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
4035 {
4036 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4037 struct i40e_hw *hw = &pf->hw;
4038 device_t dev = pf->dev;
4039 int requested_vlan = -1;
4040 enum i40e_status_code status = 0;
4041 int error = 0;
4042
4043 error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
4044 if ((error) || (req->newptr == NULL))
4045 return (error);
4046
4047 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
4048 device_printf(dev, "Flags disallow setting of vlans\n");
4049 return (ENODEV);
4050 }
4051
4052 hw->switch_tag = requested_vlan;
4053 device_printf(dev,
4054 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
4055 hw->switch_tag, hw->first_tag, hw->second_tag);
4056 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
4057 if (status) {
4058 device_printf(dev,
4059 "%s: aq_set_switch_config() error %s, aq error %s\n",
4060 __func__, i40e_stat_str(hw, status),
4061 i40e_aq_str(hw, hw->aq.asq_last_status));
4062 return (status);
4063 }
4064 return (0);
4065 }
4066
4067 static int
ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)4068 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4069 {
4070 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4071 struct i40e_hw *hw = &pf->hw;
4072 device_t dev = pf->dev;
4073 struct sbuf *buf;
4074 int error = 0;
4075 enum i40e_status_code status;
4076 u32 reg;
4077
4078 struct i40e_aqc_get_set_rss_key_data key_data;
4079
4080 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4081 if (!buf) {
4082 device_printf(dev, "Could not allocate sbuf for output.\n");
4083 return (ENOMEM);
4084 }
4085
4086 bzero(&key_data, sizeof(key_data));
4087
4088 sbuf_cat(buf, "\n");
4089 if (hw->mac.type == I40E_MAC_X722) {
4090 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4091 if (status)
4092 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4093 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4094 } else {
4095 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4096 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4097 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4);
4098 }
4099 }
4100
4101 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4102
4103 error = sbuf_finish(buf);
4104 if (error)
4105 device_printf(dev, "Error finishing sbuf: %d\n", error);
4106 sbuf_delete(buf);
4107
4108 return (error);
4109 }
4110
4111 static void
ixl_sbuf_print_bytes(struct sbuf * sb,u8 * buf,int length,int label_offset,bool text)4112 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4113 {
4114 int i, j, k, width;
4115 char c;
4116
4117 if (length < 1 || buf == NULL) return;
4118
4119 int byte_stride = 16;
4120 int lines = length / byte_stride;
4121 int rem = length % byte_stride;
4122 if (rem > 0)
4123 lines++;
4124
4125 for (i = 0; i < lines; i++) {
4126 width = (rem > 0 && i == lines - 1)
4127 ? rem : byte_stride;
4128
4129 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4130
4131 for (j = 0; j < width; j++)
4132 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4133
4134 if (width < byte_stride) {
4135 for (k = 0; k < (byte_stride - width); k++)
4136 sbuf_printf(sb, " ");
4137 }
4138
4139 if (!text) {
4140 sbuf_printf(sb, "\n");
4141 continue;
4142 }
4143
4144 for (j = 0; j < width; j++) {
4145 c = (char)buf[i * byte_stride + j];
4146 if (c < 32 || c > 126)
4147 sbuf_printf(sb, ".");
4148 else
4149 sbuf_printf(sb, "%c", c);
4150
4151 if (j == width - 1)
4152 sbuf_printf(sb, "\n");
4153 }
4154 }
4155 }
4156
4157 static int
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)4158 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4159 {
4160 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4161 struct i40e_hw *hw = &pf->hw;
4162 device_t dev = pf->dev;
4163 struct sbuf *buf;
4164 int error = 0;
4165 enum i40e_status_code status;
4166 u8 hlut[512];
4167 u32 reg;
4168
4169 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4170 if (!buf) {
4171 device_printf(dev, "Could not allocate sbuf for output.\n");
4172 return (ENOMEM);
4173 }
4174
4175 bzero(hlut, sizeof(hlut));
4176 sbuf_cat(buf, "\n");
4177 if (hw->mac.type == I40E_MAC_X722) {
4178 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4179 if (status)
4180 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4181 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4182 } else {
4183 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4184 reg = rd32(hw, I40E_PFQF_HLUT(i));
4185 bcopy(®, &hlut[i << 2], 4);
4186 }
4187 }
4188 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4189
4190 error = sbuf_finish(buf);
4191 if (error)
4192 device_printf(dev, "Error finishing sbuf: %d\n", error);
4193 sbuf_delete(buf);
4194
4195 return (error);
4196 }
4197
4198 static int
ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)4199 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4200 {
4201 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4202 struct i40e_hw *hw = &pf->hw;
4203 u64 hena;
4204
4205 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4206 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4207
4208 return sysctl_handle_long(oidp, NULL, hena, req);
4209 }
4210
4211 /*
4212 * Sysctl to disable firmware's link management
4213 *
4214 * 1 - Disable link management on this port
4215 * 0 - Re-enable link management
4216 *
4217 * On normal NVMs, firmware manages link by default.
4218 */
4219 static int
ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)4220 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4221 {
4222 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4223 struct i40e_hw *hw = &pf->hw;
4224 device_t dev = pf->dev;
4225 int requested_mode = -1;
4226 enum i40e_status_code status = 0;
4227 int error = 0;
4228
4229 /* Read in new mode */
4230 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4231 if ((error) || (req->newptr == NULL))
4232 return (error);
4233 /* Check for sane value */
4234 if (requested_mode < 0 || requested_mode > 1) {
4235 device_printf(dev, "Valid modes are 0 or 1\n");
4236 return (EINVAL);
4237 }
4238
4239 /* Set new mode */
4240 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4241 if (status) {
4242 device_printf(dev,
4243 "%s: Error setting new phy debug mode %s,"
4244 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4245 i40e_aq_str(hw, hw->aq.asq_last_status));
4246 return (EIO);
4247 }
4248
4249 return (0);
4250 }
4251
4252 /*
4253 * Read some diagnostic data from a (Q)SFP+ module
4254 *
4255 * SFP A2 QSFP Lower Page
4256 * Temperature 96-97 22-23
4257 * Vcc 98-99 26-27
4258 * TX power 102-103 34-35..40-41
4259 * RX power 104-105 50-51..56-57
4260 */
4261 static int
ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)4262 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4263 {
4264 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4265 device_t dev = pf->dev;
4266 struct sbuf *sbuf;
4267 int error = 0;
4268 u8 output;
4269
4270 if (req->oldptr == NULL) {
4271 error = SYSCTL_OUT(req, 0, 128);
4272 return (0);
4273 }
4274
4275 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4276 if (error) {
4277 device_printf(dev, "Error reading from i2c\n");
4278 return (error);
4279 }
4280
4281 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4282 if (output == 0x3) {
4283 /*
4284 * Check for:
4285 * - Internally calibrated data
4286 * - Diagnostic monitoring is implemented
4287 */
4288 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4289 if (!(output & 0x60)) {
4290 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4291 return (0);
4292 }
4293
4294 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4295
4296 for (u8 offset = 96; offset < 100; offset++) {
4297 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4298 sbuf_printf(sbuf, "%02X ", output);
4299 }
4300 for (u8 offset = 102; offset < 106; offset++) {
4301 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4302 sbuf_printf(sbuf, "%02X ", output);
4303 }
4304 } else if (output == 0xD || output == 0x11) {
4305 /*
4306 * QSFP+ modules are always internally calibrated, and must indicate
4307 * what types of diagnostic monitoring are implemented
4308 */
4309 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4310
4311 for (u8 offset = 22; offset < 24; offset++) {
4312 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4313 sbuf_printf(sbuf, "%02X ", output);
4314 }
4315 for (u8 offset = 26; offset < 28; offset++) {
4316 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4317 sbuf_printf(sbuf, "%02X ", output);
4318 }
4319 /* Read the data from the first lane */
4320 for (u8 offset = 34; offset < 36; offset++) {
4321 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4322 sbuf_printf(sbuf, "%02X ", output);
4323 }
4324 for (u8 offset = 50; offset < 52; offset++) {
4325 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4326 sbuf_printf(sbuf, "%02X ", output);
4327 }
4328 } else {
4329 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4330 return (0);
4331 }
4332
4333 sbuf_finish(sbuf);
4334 sbuf_delete(sbuf);
4335
4336 return (0);
4337 }
4338
4339 /*
4340 * Sysctl to read a byte from I2C bus.
4341 *
4342 * Input: 32-bit value:
4343 * bits 0-7: device address (0xA0 or 0xA2)
4344 * bits 8-15: offset (0-255)
4345 * bits 16-31: unused
4346 * Output: 8-bit value read
4347 */
4348 static int
ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)4349 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4350 {
4351 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4352 device_t dev = pf->dev;
4353 int input = -1, error = 0;
4354 u8 dev_addr, offset, output;
4355
4356 /* Read in I2C read parameters */
4357 error = sysctl_handle_int(oidp, &input, 0, req);
4358 if ((error) || (req->newptr == NULL))
4359 return (error);
4360 /* Validate device address */
4361 dev_addr = input & 0xFF;
4362 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4363 return (EINVAL);
4364 }
4365 offset = (input >> 8) & 0xFF;
4366
4367 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4368 if (error)
4369 return (error);
4370
4371 device_printf(dev, "%02X\n", output);
4372 return (0);
4373 }
4374
4375 /*
4376 * Sysctl to write a byte to the I2C bus.
4377 *
4378 * Input: 32-bit value:
4379 * bits 0-7: device address (0xA0 or 0xA2)
4380 * bits 8-15: offset (0-255)
4381 * bits 16-23: value to write
4382 * bits 24-31: unused
4383 * Output: 8-bit value written
4384 */
4385 static int
ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)4386 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4387 {
4388 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4389 device_t dev = pf->dev;
4390 int input = -1, error = 0;
4391 u8 dev_addr, offset, value;
4392
4393 /* Read in I2C write parameters */
4394 error = sysctl_handle_int(oidp, &input, 0, req);
4395 if ((error) || (req->newptr == NULL))
4396 return (error);
4397 /* Validate device address */
4398 dev_addr = input & 0xFF;
4399 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4400 return (EINVAL);
4401 }
4402 offset = (input >> 8) & 0xFF;
4403 value = (input >> 16) & 0xFF;
4404
4405 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4406 if (error)
4407 return (error);
4408
4409 device_printf(dev, "%02X written\n", value);
4410 return (0);
4411 }
4412
4413 static int
ixl_get_fec_config(struct ixl_pf * pf,struct i40e_aq_get_phy_abilities_resp * abilities,u8 bit_pos,int * is_set)4414 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4415 u8 bit_pos, int *is_set)
4416 {
4417 device_t dev = pf->dev;
4418 struct i40e_hw *hw = &pf->hw;
4419 enum i40e_status_code status;
4420
4421 if (IXL_PF_IN_RECOVERY_MODE(pf))
4422 return (EIO);
4423
4424 status = i40e_aq_get_phy_capabilities(hw,
4425 FALSE, FALSE, abilities, NULL);
4426 if (status) {
4427 device_printf(dev,
4428 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4429 __func__, i40e_stat_str(hw, status),
4430 i40e_aq_str(hw, hw->aq.asq_last_status));
4431 return (EIO);
4432 }
4433
4434 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4435 return (0);
4436 }
4437
4438 static int
ixl_set_fec_config(struct ixl_pf * pf,struct i40e_aq_get_phy_abilities_resp * abilities,u8 bit_pos,int set)4439 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4440 u8 bit_pos, int set)
4441 {
4442 device_t dev = pf->dev;
4443 struct i40e_hw *hw = &pf->hw;
4444 struct i40e_aq_set_phy_config config;
4445 enum i40e_status_code status;
4446
4447 /* Set new PHY config */
4448 memset(&config, 0, sizeof(config));
4449 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4450 if (set)
4451 config.fec_config |= bit_pos;
4452 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4453 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4454 config.phy_type = abilities->phy_type;
4455 config.phy_type_ext = abilities->phy_type_ext;
4456 config.link_speed = abilities->link_speed;
4457 config.eee_capability = abilities->eee_capability;
4458 config.eeer = abilities->eeer_val;
4459 config.low_power_ctrl = abilities->d3_lpan;
4460 status = i40e_aq_set_phy_config(hw, &config, NULL);
4461
4462 if (status) {
4463 device_printf(dev,
4464 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4465 __func__, i40e_stat_str(hw, status),
4466 i40e_aq_str(hw, hw->aq.asq_last_status));
4467 return (EIO);
4468 }
4469 }
4470
4471 return (0);
4472 }
4473
4474 static int
ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)4475 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4476 {
4477 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4478 int mode, error = 0;
4479
4480 struct i40e_aq_get_phy_abilities_resp abilities;
4481 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4482 if (error)
4483 return (error);
4484 /* Read in new mode */
4485 error = sysctl_handle_int(oidp, &mode, 0, req);
4486 if ((error) || (req->newptr == NULL))
4487 return (error);
4488
4489 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4490 }
4491
4492 static int
ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)4493 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4494 {
4495 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4496 int mode, error = 0;
4497
4498 struct i40e_aq_get_phy_abilities_resp abilities;
4499 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4500 if (error)
4501 return (error);
4502 /* Read in new mode */
4503 error = sysctl_handle_int(oidp, &mode, 0, req);
4504 if ((error) || (req->newptr == NULL))
4505 return (error);
4506
4507 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4508 }
4509
4510 static int
ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)4511 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4512 {
4513 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4514 int mode, error = 0;
4515
4516 struct i40e_aq_get_phy_abilities_resp abilities;
4517 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4518 if (error)
4519 return (error);
4520 /* Read in new mode */
4521 error = sysctl_handle_int(oidp, &mode, 0, req);
4522 if ((error) || (req->newptr == NULL))
4523 return (error);
4524
4525 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4526 }
4527
4528 static int
ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)4529 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4530 {
4531 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4532 int mode, error = 0;
4533
4534 struct i40e_aq_get_phy_abilities_resp abilities;
4535 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4536 if (error)
4537 return (error);
4538 /* Read in new mode */
4539 error = sysctl_handle_int(oidp, &mode, 0, req);
4540 if ((error) || (req->newptr == NULL))
4541 return (error);
4542
4543 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4544 }
4545
4546 static int
ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)4547 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4548 {
4549 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4550 int mode, error = 0;
4551
4552 struct i40e_aq_get_phy_abilities_resp abilities;
4553 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4554 if (error)
4555 return (error);
4556 /* Read in new mode */
4557 error = sysctl_handle_int(oidp, &mode, 0, req);
4558 if ((error) || (req->newptr == NULL))
4559 return (error);
4560
4561 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4562 }
4563
4564 static int
ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)4565 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4566 {
4567 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4568 struct i40e_hw *hw = &pf->hw;
4569 device_t dev = pf->dev;
4570 struct sbuf *buf;
4571 int error = 0;
4572 enum i40e_status_code status;
4573
4574 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4575 if (!buf) {
4576 device_printf(dev, "Could not allocate sbuf for output.\n");
4577 return (ENOMEM);
4578 }
4579
4580 u8 *final_buff;
4581 /* This amount is only necessary if reading the entire cluster into memory */
4582 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
4583 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4584 if (final_buff == NULL) {
4585 device_printf(dev, "Could not allocate memory for output.\n");
4586 goto out;
4587 }
4588 int final_buff_len = 0;
4589
4590 u8 cluster_id = 1;
4591 bool more = true;
4592
4593 u8 dump_buf[4096];
4594 u16 curr_buff_size = 4096;
4595 u8 curr_next_table = 0;
4596 u32 curr_next_index = 0;
4597
4598 u16 ret_buff_size;
4599 u8 ret_next_table;
4600 u32 ret_next_index;
4601
4602 sbuf_cat(buf, "\n");
4603
4604 while (more) {
4605 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4606 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4607 if (status) {
4608 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4609 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4610 goto free_out;
4611 }
4612
4613 /* copy info out of temp buffer */
4614 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4615 final_buff_len += ret_buff_size;
4616
4617 if (ret_next_table != curr_next_table) {
4618 /* We're done with the current table; we can dump out read data. */
4619 sbuf_printf(buf, "%d:", curr_next_table);
4620 int bytes_printed = 0;
4621 while (bytes_printed <= final_buff_len) {
4622 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4623 bytes_printed += 16;
4624 }
4625 sbuf_cat(buf, "\n");
4626
4627 /* The entire cluster has been read; we're finished */
4628 if (ret_next_table == 0xFF)
4629 break;
4630
4631 /* Otherwise clear the output buffer and continue reading */
4632 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4633 final_buff_len = 0;
4634 }
4635
4636 if (ret_next_index == 0xFFFFFFFF)
4637 ret_next_index = 0;
4638
4639 bzero(dump_buf, sizeof(dump_buf));
4640 curr_next_table = ret_next_table;
4641 curr_next_index = ret_next_index;
4642 }
4643
4644 free_out:
4645 free(final_buff, M_IXL);
4646 out:
4647 error = sbuf_finish(buf);
4648 if (error)
4649 device_printf(dev, "Error finishing sbuf: %d\n", error);
4650 sbuf_delete(buf);
4651
4652 return (error);
4653 }
4654
4655 static int
ixl_start_fw_lldp(struct ixl_pf * pf)4656 ixl_start_fw_lldp(struct ixl_pf *pf)
4657 {
4658 struct i40e_hw *hw = &pf->hw;
4659 enum i40e_status_code status;
4660
4661 status = i40e_aq_start_lldp(hw, false, NULL);
4662 if (status != I40E_SUCCESS) {
4663 switch (hw->aq.asq_last_status) {
4664 case I40E_AQ_RC_EEXIST:
4665 device_printf(pf->dev,
4666 "FW LLDP agent is already running\n");
4667 break;
4668 case I40E_AQ_RC_EPERM:
4669 device_printf(pf->dev,
4670 "Device configuration forbids SW from starting "
4671 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4672 "attribute to \"Enabled\" to use this sysctl\n");
4673 return (EINVAL);
4674 default:
4675 device_printf(pf->dev,
4676 "Starting FW LLDP agent failed: error: %s, %s\n",
4677 i40e_stat_str(hw, status),
4678 i40e_aq_str(hw, hw->aq.asq_last_status));
4679 return (EINVAL);
4680 }
4681 }
4682
4683 ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4684 return (0);
4685 }
4686
4687 static int
ixl_stop_fw_lldp(struct ixl_pf * pf)4688 ixl_stop_fw_lldp(struct ixl_pf *pf)
4689 {
4690 struct i40e_hw *hw = &pf->hw;
4691 device_t dev = pf->dev;
4692 enum i40e_status_code status;
4693
4694 if (hw->func_caps.npar_enable != 0) {
4695 device_printf(dev,
4696 "Disabling FW LLDP agent is not supported on this device\n");
4697 return (EINVAL);
4698 }
4699
4700 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4701 device_printf(dev,
4702 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4703 return (EINVAL);
4704 }
4705
4706 status = i40e_aq_stop_lldp(hw, true, false, NULL);
4707 if (status != I40E_SUCCESS) {
4708 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4709 device_printf(dev,
4710 "Disabling FW LLDP agent failed: error: %s, %s\n",
4711 i40e_stat_str(hw, status),
4712 i40e_aq_str(hw, hw->aq.asq_last_status));
4713 return (EINVAL);
4714 }
4715
4716 device_printf(dev, "FW LLDP agent is already stopped\n");
4717 }
4718
4719 i40e_aq_set_dcb_parameters(hw, true, NULL);
4720 ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4721 return (0);
4722 }
4723
4724 static int
ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)4725 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4726 {
4727 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4728 int state, new_state, error = 0;
4729
4730 state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4731
4732 /* Read in new mode */
4733 error = sysctl_handle_int(oidp, &new_state, 0, req);
4734 if ((error) || (req->newptr == NULL))
4735 return (error);
4736
4737 /* Already in requested state */
4738 if (new_state == state)
4739 return (error);
4740
4741 if (new_state == 0)
4742 return ixl_stop_fw_lldp(pf);
4743
4744 return ixl_start_fw_lldp(pf);
4745 }
4746
4747 static int
ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)4748 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4749 {
4750 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4751 int state, new_state;
4752 int sysctl_handle_status = 0;
4753 enum i40e_status_code cmd_status;
4754
4755 /* Init states' values */
4756 state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED);
4757
4758 /* Get requested mode */
4759 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4760 if ((sysctl_handle_status) || (req->newptr == NULL))
4761 return (sysctl_handle_status);
4762
4763 /* Check if state has changed */
4764 if (new_state == state)
4765 return (0);
4766
4767 /* Set new state */
4768 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4769
4770 /* Save new state or report error */
4771 if (!cmd_status) {
4772 if (new_state == 0)
4773 ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);
4774 else
4775 ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
4776 } else if (cmd_status == I40E_ERR_CONFIG)
4777 return (EPERM);
4778 else
4779 return (EIO);
4780
4781 return (0);
4782 }
4783
4784 static int
ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)4785 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4786 {
4787 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4788 int error, state;
4789
4790 state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4791
4792 error = sysctl_handle_int(oidp, &state, 0, req);
4793 if ((error) || (req->newptr == NULL))
4794 return (error);
4795
4796 if (state == 0)
4797 ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4798 else
4799 ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4800
4801 return (0);
4802 }
4803
4804
/*
 * Kick off link auto-negotiation on old firmware and read the initial
 * link state during attach.
 *
 * Returns 0 on success, EINVAL if the link-restart AQ command fails, or
 * EAGAIN when link status is not yet available (polling state is set and
 * a timer callback retries later).
 */
int
ixl_attach_get_link_status(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;

	/*
	 * On firmware older than 4.33, restart auto-negotiation manually
	 * (after a short delay) so that link comes up.
	 */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		status = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
		if (status != I40E_SUCCESS) {
			device_printf(dev,
			    "%s link restart failed status: %s, aq_err=%s\n",
			    __func__, i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			return (EINVAL);
		}
	}

	/* Determine link state */
	hw->phy.get_link_info = TRUE;
	status = i40e_get_link_status(hw, &pf->link_up);
	if (status != I40E_SUCCESS) {
		device_printf(dev,
		    "%s get link status, status: %s aq_err=%s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		/*
		 * Most probably FW has not finished configuring PHY.
		 * Retry periodically in a timer callback.
		 */
		ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
		pf->link_poll_start = getsbinuptime();
		return (EAGAIN);
	}
	ixl_dbg_link(pf, "%s link_up: %d\n", __func__, pf->link_up);

	/* Flow Control mode not set by user, read current FW settings */
	if (pf->fc == -1)
		pf->fc = hw->fc.current_mode;

	return (0);
}
4849
4850 static int
ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)4851 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4852 {
4853 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4854 int requested = 0, error = 0;
4855
4856 /* Read in new mode */
4857 error = sysctl_handle_int(oidp, &requested, 0, req);
4858 if ((error) || (req->newptr == NULL))
4859 return (error);
4860
4861 /* Initiate the PF reset later in the admin task */
4862 ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ);
4863
4864 return (error);
4865 }
4866
4867 static int
ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)4868 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4869 {
4870 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4871 struct i40e_hw *hw = &pf->hw;
4872 int requested = 0, error = 0;
4873
4874 /* Read in new mode */
4875 error = sysctl_handle_int(oidp, &requested, 0, req);
4876 if ((error) || (req->newptr == NULL))
4877 return (error);
4878
4879 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4880
4881 return (error);
4882 }
4883
4884 static int
ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)4885 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4886 {
4887 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4888 struct i40e_hw *hw = &pf->hw;
4889 int requested = 0, error = 0;
4890
4891 /* Read in new mode */
4892 error = sysctl_handle_int(oidp, &requested, 0, req);
4893 if ((error) || (req->newptr == NULL))
4894 return (error);
4895
4896 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4897
4898 return (error);
4899 }
4900
4901 /*
4902 * Print out mapping of TX queue indexes and Rx queue indexes
4903 * to MSI-X vectors.
4904 */
4905 static int
ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)4906 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4907 {
4908 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4909 struct ixl_vsi *vsi = &pf->vsi;
4910 struct i40e_hw *hw = vsi->hw;
4911 device_t dev = pf->dev;
4912 struct sbuf *buf;
4913 int error = 0;
4914
4915 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4916 struct ixl_tx_queue *tx_que = vsi->tx_queues;
4917
4918 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4919 if (!buf) {
4920 device_printf(dev, "Could not allocate sbuf for output.\n");
4921 return (ENOMEM);
4922 }
4923
4924 sbuf_cat(buf, "\n");
4925 for (int i = 0; i < vsi->num_rx_queues; i++) {
4926 rx_que = &vsi->rx_queues[i];
4927 sbuf_printf(buf,
4928 "(rxq %3d): %d LNKLSTN: %08x QINT_RQCTL: %08x\n",
4929 i, rx_que->msix,
4930 rd32(hw, I40E_PFINT_LNKLSTN(rx_que->msix - 1)),
4931 rd32(hw, I40E_QINT_RQCTL(rx_que->msix - 1)));
4932 }
4933 for (int i = 0; i < vsi->num_tx_queues; i++) {
4934 tx_que = &vsi->tx_queues[i];
4935 sbuf_printf(buf, "(txq %3d): %d QINT_TQCTL: %08x\n",
4936 i, tx_que->msix,
4937 rd32(hw, I40E_QINT_TQCTL(tx_que->msix - 1)));
4938 }
4939
4940 error = sbuf_finish(buf);
4941 if (error)
4942 device_printf(dev, "Error finishing sbuf: %d\n", error);
4943 sbuf_delete(buf);
4944
4945 return (error);
4946 }
4947
4948 static int
ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS)4949 ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS)
4950 {
4951 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4952 struct ixl_vsi *vsi = &pf->vsi;
4953 struct i40e_hw *hw = vsi->hw;
4954 device_t dev = pf->dev;
4955 struct sbuf *buf;
4956 int error = 0;
4957
4958 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4959
4960 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4961 if (!buf) {
4962 device_printf(dev, "Could not allocate sbuf for output.\n");
4963 return (ENOMEM);
4964 }
4965
4966 sbuf_cat(buf, "\n");
4967 for (int i = 0; i < vsi->num_rx_queues; i++) {
4968 rx_que = &vsi->rx_queues[i];
4969 sbuf_printf(buf,
4970 "(rxq %3d): %d PFINT_DYN_CTLN: %08x\n",
4971 i, rx_que->msix,
4972 rd32(hw, I40E_PFINT_DYN_CTLN(rx_que->msix - 1)));
4973 }
4974
4975 error = sbuf_finish(buf);
4976 if (error)
4977 device_printf(dev, "Error finishing sbuf: %d\n", error);
4978 sbuf_delete(buf);
4979
4980 return (error);
4981 }
4982