/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/


#include "ixl_pf.h"

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

static u8	ixl_convert_sysctl_aq_link_speed(u8, bool);
static void	ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
static const char *	ixl_link_speed_string(enum i40e_aq_link_speed);
static u_int	ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
static u_int	ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
static char *	ixl_switch_element_string(struct sbuf *, u8, u16);
static enum ixl_fw_mode	ixl_get_fw_mode(struct ixl_pf *);
/* Sysctls */
static int	ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);

static int	ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);

/* Debug Sysctls */
static int	ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int	ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

#ifdef IXL_IW
extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif

static const char * const ixl_fc_string[6] = {
	"None",
	"Rx",
	"Tx",
	"Full",
	"Priority",
	"Default"
};

static char *ixl_fec_string[3] = {
	"CL108 RS-FEC",
	"CL74 FC-FEC/BASE-R",
	"None"
};

/* Functions for setting and checking driver state. Note the functions take
 * bit positions, not bitmasks. The atomic_set_32 and atomic_clear_32
 * operations require bitmasks. This can easily lead to programming error, so
 * we provide wrapper functions to avoid this.
 */
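
/*
 * A minimal usage sketch of the wrappers below (the state bit and message
 * are illustrative; both names are taken from elsewhere in this file):
 *
 *	ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
 *	if (ixl_test_state(&pf->state, IXL_STATE_RECOVERY_MODE))
 *		device_printf(pf->dev, "FW is in recovery mode\n");
 *	ixl_clear_state(&pf->state, IXL_STATE_RECOVERY_MODE);
 */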

/**
 * ixl_set_state - Set the specified state
 * @s: the state bitmap
 * @bit: the state to set
 *
 * Atomically update the state bitmap with the specified bit set.
 */
inline void
ixl_set_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_set_32 expects a bitmask */
	atomic_set_32(s, BIT(bit));
}

/**
 * ixl_clear_state - Clear the specified state
 * @s: the state bitmap
 * @bit: the state to clear
 *
 * Atomically update the state bitmap with the specified bit cleared.
 */
inline void
ixl_clear_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_clear_32 expects a bitmask */
	atomic_clear_32(s, BIT(bit));
}

/**
 * ixl_test_state - Test the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Return true if the state is set, false otherwise. Use this only if the flow
 * does not need to update the state. If you must update the state as well,
 * prefer ixl_testandset_state.
 */
inline bool
ixl_test_state(volatile u32 *s, enum ixl_state bit)
{
	return !!(*s & BIT(bit));
}

/**
 * ixl_testandset_state - Test and set the specified state
 * @s: the state bitmap
 * @bit: the bit to test
 *
 * Atomically update the state bitmap, setting the specified bit. Returns the
 * previous value of the bit.
 */
inline u32
ixl_testandset_state(volatile u32 *s, enum ixl_state bit)
{
	/* atomic_testandset_32 expects a bit position, as opposed to the
	 * bitmask expected by the other atomic functions */
	return atomic_testandset_32(s, bit);
}

MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");

/*
** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
*/
void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
	u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
	u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
	u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);

	sbuf_printf(buf,
	    "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
	    IXL_NVM_VERSION_HI_SHIFT,
	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
	    IXL_NVM_VERSION_LO_SHIFT,
	    hw->nvm.eetrack,
	    oem_ver, oem_build, oem_patch);
}

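/*
 * Print the NVM version string, as built by ixl_nvm_version_str(),
 * to the kernel message buffer.
 */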
void
ixl_print_nvm_version(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct sbuf *sbuf;

	sbuf = sbuf_new_auto();
	ixl_nvm_version_str(hw, sbuf);
	sbuf_finish(sbuf);
	device_printf(dev, "%s\n", sbuf_data(sbuf));
	sbuf_delete(sbuf);
}

/**
 * ixl_get_fw_mode - Check the state of FW
 * @pf: PF structure
 *
 * Identify the state of the FW. It might be in a recovery mode
 * which limits functionality and requires special handling
 * from the driver.
 *
 * @returns FW mode (normal, recovery, unexpected EMP reset)
 */
static enum ixl_fw_mode
ixl_get_fw_mode(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
	u32 fwsts;

#ifdef IXL_DEBUG
	if (pf->recovery_mode)
		return IXL_FW_MODE_RECOVERY;
#endif
	fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;

	/* Is set and has one of expected values */
	if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
	    fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
	    fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
		fw_mode = IXL_FW_MODE_RECOVERY;
	else {
		if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
		    fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
			fw_mode = IXL_FW_MODE_UEMPR;
	}
	return (fw_mode);
}

/**
 * ixl_pf_reset - Reset the PF
 * @pf: PF structure
 *
 * Ensure that FW is in the right state and do the reset
 * if needed.
 *
 * @returns zero on success, or an error code on failure.
 */
int
ixl_pf_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;
	enum ixl_fw_mode fw_mode;

	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		/* Don't try to reset device if it's in recovery mode */
		return (0);
	}

	status = i40e_pf_reset(hw);
	if (status == I40E_SUCCESS)
		return (0);

	/* Check FW mode again in case it has changed while
	 * waiting for reset to complete */
	fw_mode = ixl_get_fw_mode(pf);
	ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
	if (fw_mode == IXL_FW_MODE_RECOVERY) {
		ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
		return (0);
	}

	if (fw_mode == IXL_FW_MODE_UEMPR)
		device_printf(pf->dev,
		    "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
	else
		device_printf(pf->dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
	return (EIO);
}

/**
 * ixl_setup_hmc - Setup LAN Host Memory Cache
 * @pf: PF structure
 *
 * Init and configure LAN Host Memory Cache
 *
 * @returns 0 on success, EIO on error
 */
int
ixl_setup_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(pf->dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		return (EIO);
	}

	return (0);
}

/**
 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
 * @pf: PF structure
 *
 * Shutdown Host Memory Cache if configured.
 */
void
ixl_shutdown_hmc(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code status;

	/* HMC not configured, no need to shutdown */
	if (hw->hmc.hmc_obj == NULL)
		return;

	status = i40e_shutdown_lan_hmc(hw);
	if (status)
		device_printf(pf->dev,
		    "Shutdown LAN HMC failed with code %s\n",
		    i40e_stat_str(hw, status));
}

/*
 * Write PF ITR values to queue ITR registers.
 */
void
ixl_configure_itr(struct ixl_pf *pf)
{
	ixl_configure_tx_itr(pf);
	ixl_configure_rx_itr(pf);
}

/*********************************************************************
 *
 *  Get the hardware capabilities
 *
 **********************************************************************/

int
ixl_get_hw_capabilities(struct ixl_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code status;
	int len, i2c_intfc_num;
	bool again = TRUE;
	u16 needed;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		hw->func_caps.iwarp = 0;
		return (0);
	}

	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
	    malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate cap memory\n");
		return (ENOMEM);
	}

	/* This populates the hw struct */
	status = i40e_aq_discover_capabilities(hw, buf, len,
	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
	free(buf, M_IXL);
	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
	    (again == TRUE)) {
		/* retry once with a larger buffer */
		again = FALSE;
		len = needed;
		goto retry;
	} else if (status != I40E_SUCCESS) {
		device_printf(dev, "capability discovery failed; status %s, error %s\n",
		    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
		return (ENODEV);
	}

	/*
	 * Some devices have both MDIO and I2C; since this isn't reported
	 * by the FW, check registers to see if an I2C interface exists.
	 */
	i2c_intfc_num = ixl_find_i2c_interface(pf);
	if (i2c_intfc_num != -1)
		pf->has_i2c = true;

	/* Determine functions to use for driver I2C accesses */
	switch (pf->i2c_access_method) {
	case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
		if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
			pf->read_i2c_byte = ixl_read_i2c_byte_aq;
			pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		} else {
			pf->read_i2c_byte = ixl_read_i2c_byte_reg;
			pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		}
		break;
	}
	case IXL_I2C_ACCESS_METHOD_AQ:
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:
		/* Should not happen */
		device_printf(dev, "Error setting I2C access functions\n");
		break;
	}

	/* Keep link active by default */
	ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);

	/* Print a subset of the capability information. */
	device_printf(dev,
	    "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
	    hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
	    hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
	    (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
	    (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
	    (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
	    "MDIO shared");

	return (0);
}

/* For the set_advertise sysctl */
void
ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	int err;

	/* Make sure to initialize the device to the complete list of
	 * supported speeds on driver load, to ensure unloading and
	 * reloading the driver will restore this value.
	 */
	err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
	if (err) {
		/* Non-fatal error */
		device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
		    __func__, err);
		return;
	}

	pf->advertised_speed =
	    ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
}

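/*
 * Tear down the HW structures set up at init time: shut down the LAN HMC
 * (if configured) and the Admin Queue, then release the PF's queue
 * allocations from the queue manager.
 */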
int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
	enum i40e_status_code status = 0;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	/* Shutdown LAN HMC */
	if (hw->hmc.hmc_obj) {
		status = i40e_shutdown_lan_hmc(hw);
		if (status) {
			device_printf(dev,
			    "init: LAN HMC shutdown failure; status %s\n",
			    i40e_stat_str(hw, status));
			goto err_out;
		}
	}

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "init: Admin Queue shutdown failure; status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
err_out:
	return (status);
}

/*
** Creates a new filter with the given MAC address and VLAN ID
*/
static struct ixl_mac_filter *
ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	/* create a new empty filter */
	f = malloc(sizeof(struct ixl_mac_filter),
	    M_IXL, M_NOWAIT | M_ZERO);
	if (f) {
		LIST_INSERT_HEAD(headp, f, ftle);
		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
		f->vlan = vlan;
	}

	return (f);
}

/**
 * ixl_free_filters - Free all filters in given list
 * @headp: pointer to the list head
 *
 * Frees memory used by each entry in the list.
 * Does not remove filters from HW.
 */
void
ixl_free_filters(struct ixl_ftl_head *headp)
{
	struct ixl_mac_filter *f, *nf;

	f = LIST_FIRST(headp);
	while (f != NULL) {
		nf = LIST_NEXT(f, ftle);
		free(f, M_IXL);
		f = nf;
	}

	LIST_INIT(headp);
}

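/*
 * if_foreach_llmaddr() callback: allocate a new multicast filter in the
 * to_add list for the given link-level address, unless a matching filter
 * already exists in the VSI filter list. Returns 1 when a filter was
 * added and 0 otherwise, so the caller gets a running count.
 */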
static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_add_maddr_arg *ama = arg;
	struct ixl_vsi *vsi = ama->vsi;
	const u8 *macaddr = (u8*)LLADDR(sdl);
	struct ixl_mac_filter *f;

	/* Does one already exist */
	f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
	if (f != NULL)
		return (0);

	f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
	if (f == NULL) {
		device_printf(vsi->dev, "WARNING: no filter available!!\n");
		return (0);
	}
	f->flags |= IXL_FILTER_MC;

	return (1);
}

/*********************************************************************
 *	Filter Routines
 *
 *	Routines for multicast and vlan filter management.
 *
 *********************************************************************/

/**
 * ixl_add_multi - Add multicast filters to the hardware
 * @vsi: The VSI structure
 *
 * In case the number of multicast filters in the IFP exceeds 127 entries,
 * multicast promiscuous mode will be enabled and the filters will be removed
 * from the hardware.
 */
void
ixl_add_multi(struct ixl_vsi *vsi)
{
	if_t ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;
	int mcnt = 0;
	struct ixl_add_maddr_arg cb_arg;
	enum i40e_status_code status;

	IOCTL_DEBUGOUT("ixl_add_multi: begin");

	mcnt = if_llmaddr_count(ifp);
	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
		status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
		    TRUE, NULL);
		if (status != I40E_SUCCESS)
			if_printf(ifp, "Failed to enable multicast promiscuous "
			    "mode, status: %s\n", i40e_stat_str(hw, status));
		else
			if_printf(ifp, "Enabled multicast promiscuous mode\n");
		/* Delete all existing MC filters */
		ixl_del_multi(vsi, true);
		return;
	}

	cb_arg.vsi = vsi;
	LIST_INIT(&cb_arg.to_add);

	mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
	if (mcnt > 0)
		ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);

	IOCTL_DEBUGOUT("ixl_add_multi: end");
}

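/*
 * if_foreach_llmaddr() callback: return 1 if the interface's link-level
 * address matches the MAC address of the filter passed in arg, 0 otherwise.
 */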
static u_int
ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct ixl_mac_filter *f = arg;

	if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
		return (1);
	else
		return (0);
}

/**
 * ixl_dis_multi_promisc - Disable multicast promiscuous mode
 * @vsi: The VSI structure
 * @vsi_mcnt: Number of multicast filters in the VSI
 *
 * Disable multicast promiscuous mode based on the number of entries in the
 * IFP and the VSI, then re-add the multicast filters.
 */
static void
ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt)
{
	struct ifnet *ifp = vsi->ifp;
	struct i40e_hw *hw = vsi->hw;
	int ifp_mcnt = 0;
	enum i40e_status_code status;

	ifp_mcnt = if_llmaddr_count(ifp);
	/*
	 * Equal lists or an empty ifp list mean the list has not been
	 * changed; in such a case avoid disabling multicast promiscuous
	 * mode, as it was not previously enabled. The case where multicast
	 * promiscuous mode has been enabled is when
	 * vsi_mcnt == 0 && ifp_mcnt > 0.
	 */
	if (ifp_mcnt == vsi_mcnt || ifp_mcnt == 0 ||
	    ifp_mcnt >= MAX_MULTICAST_ADDR)
		return;

	status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
	    FALSE, NULL);
	if (status != I40E_SUCCESS) {
		if_printf(ifp, "Failed to disable multicast promiscuous "
		    "mode, status: %s\n", i40e_stat_str(hw, status));

		return;
	}

	if_printf(ifp, "Disabled multicast promiscuous mode\n");

	ixl_add_multi(vsi);
}

/**
 * ixl_del_multi - Delete multicast filters from the hardware
 * @vsi: The VSI structure
 * @all: Bool to determine if all the multicast filters should be removed
 *
 * In case the number of multicast filters in the IFP drops to 127 entries,
 * multicast promiscuous mode will be disabled and the filters will be
 * reapplied to the hardware.
 */
void
ixl_del_multi(struct ixl_vsi *vsi, bool all)
{
	int to_del_cnt = 0, vsi_mcnt = 0;
	if_t ifp = vsi->ifp;
	struct ixl_mac_filter *f, *fn;
	struct ixl_ftl_head to_del;

	IOCTL_DEBUGOUT("ixl_del_multi: begin");

	LIST_INIT(&to_del);
	/* Search for removed multicast addresses */
	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
		if ((f->flags & IXL_FILTER_MC) == 0)
			continue;

		/* Count all the multicast filters in the VSI for comparison */
		vsi_mcnt++;

		if (!all && if_foreach_llmaddr(ifp, ixl_match_maddr, f) != 0)
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	if (to_del_cnt > 0) {
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
		return;
	}

	ixl_dis_multi_promisc(vsi, vsi_mcnt);

	IOCTL_DEBUGOUT("ixl_del_multi: end");
}

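/*
 * Log a LOG_NOTICE message describing the new link state: speed, duplex,
 * requested and negotiated FEC, autonegotiation status, and flow control.
 */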
void
ixl_link_up_msg(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	if_t ifp = pf->vsi.ifp;
	char *req_fec_string, *neg_fec_string;
	u8 fec_abilities;

	fec_abilities = hw->phy.link_info.req_fec_info;
	/* If both RS and KR are requested, only show RS */
	if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
		req_fec_string = ixl_fec_string[0];
	else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
		req_fec_string = ixl_fec_string[1];
	else
		req_fec_string = ixl_fec_string[2];

	if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
		neg_fec_string = ixl_fec_string[0];
	else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
		neg_fec_string = ixl_fec_string[1];
	else
		neg_fec_string = ixl_fec_string[2];

	log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
	    if_name(ifp),
	    ixl_link_speed_string(hw->phy.link_info.link_speed),
	    req_fec_string, neg_fec_string,
	    (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
	    (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
	    hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
	    ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
	    ixl_fc_string[1] : ixl_fc_string[0]);
}

/*
 * Configure admin queue/misc interrupt cause registers in hardware.
 */
void
ixl_configure_intr0_msix(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* First set up the adminq - vector 0 */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_GRST_MASK |
	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	/*
	 * 0x7FF is the end of the queue list.
	 * This means we won't use MSI-X vector 0 for a queue interrupt
	 * in MSI-X mode.
	 */
	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	/* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);

	wr32(hw, I40E_PFINT_DYN_CTL0,
	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

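/*
 * Translate the PHY types reported by the hardware into ifmedia(4)
 * entries on the given media list.
 */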
void
ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
	/* Display supported media types */
	if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
		ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
		ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
	    phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
	    phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
		ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
		ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
		ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);

	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
		ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
	if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
		ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}

/*********************************************************************
 *
 *  Get Firmware Switch configuration
 *	- this will need to be more robust when more complex
 *	  switch configurations are enabled.
 *
 **********************************************************************/
int
ixl_switch_config(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = iflib_get_dev(vsi->ctx);
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u8 aq_buf[I40E_AQ_LARGE_BUF];
	int ret;
	u16 next = 0;

	memset(&aq_buf, 0, sizeof(aq_buf));
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	ret = i40e_aq_get_switch_config(hw, sw_config,
	    sizeof(aq_buf), &next, NULL);
	if (ret) {
		device_printf(dev, "aq_get_switch_config() failed, error %d,"
		    " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
		return (ret);
	}
	if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
		device_printf(dev,
		    "Switch config: header reported: %d in structure, %d total\n",
		    LE16_TO_CPU(sw_config->header.num_reported),
		    LE16_TO_CPU(sw_config->header.num_total));
		for (int i = 0;
		    i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
			device_printf(dev,
			    "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
			    sw_config->element[i].element_type,
			    LE16_TO_CPU(sw_config->element[i].seid),
			    LE16_TO_CPU(sw_config->element[i].uplink_seid),
			    LE16_TO_CPU(sw_config->element[i].downlink_seid));
		}
	}
	/* Simplified due to a single VSI */
	vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
	vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
	vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
	return (ret);
}

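/*
 * Create a sysctl node for the given VSI under the device's sysctl tree
 * and attach its Ethernet statistics, plus per-queue statistics when
 * requested by the caller.
 */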
void
ixl_vsi_add_sysctls(struct ixl_vsi *vsi, const char *sysctl_name, bool queues_sysctls)
{
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;
	struct sysctl_oid_list *vsi_list;

	tree = device_get_sysctl_tree(vsi->dev);
	child = SYSCTL_CHILDREN(tree);
	vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
	    CTLFLAG_RD, NULL, "VSI Number");

	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
	ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);

	/* Copy of netstat RX errors counter for validation purposes */
	SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
	    CTLFLAG_RD, &vsi->ierrors,
	    "RX packet errors");

	if (queues_sysctls)
		ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}

/*
 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_tx_itr;

	requested_tx_itr = pf->tx_itr;
	error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_tx_itr) {
		device_printf(dev,
		    "Cannot set TX itr value while dynamic TX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid TX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->tx_itr = requested_tx_itr;
	ixl_configure_tx_itr(pf);

	return (error);
}

/*
 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
 * Writes to the ITR registers immediately.
 */
static int
ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg1;
	device_t dev = pf->dev;
	int error = 0;
	int requested_rx_itr;

	requested_rx_itr = pf->rx_itr;
	error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);
	if (pf->dynamic_rx_itr) {
		device_printf(dev,
		    "Cannot set RX itr value while dynamic RX itr is enabled\n");
		return (EINVAL);
	}
	if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
		device_printf(dev,
		    "Invalid RX itr value; value must be between 0 and %d\n",
		    IXL_MAX_ITR);
		return (EINVAL);
	}

	pf->rx_itr = requested_rx_itr;
	ixl_configure_rx_itr(pf);

	return (error);
}

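/*
 * Attach MAC-level port statistics (CRC/length errors, RX/TX frame size
 * histograms, and flow control counters) under a "mac" sysctl node.
 */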
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
	struct sysctl_oid_list *child,
	struct i40e_hw_port_stats *stats)
{
	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
	    "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);

	struct i40e_eth_stats *eth_stats = &stats->eth;
	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);

	struct ixl_sysctl_info ctls[] =
	{
		{&stats->crc_errors, "crc_errors", "CRC Errors"},
		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
		/* Packet Reception Stats */
		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
		/* Packet Transmission Stats */
		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
		/* Flow control */
		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
		/* End */
		{0,0,0}
	};

	struct ixl_sysctl_info *entry = ctls;
	while (entry->stat != 0)
	{
		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
		    CTLFLAG_RD, entry->stat,
		    entry->description);
		entry++;
	}
}

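/*
 * Program the RSS hash key, using either the key configured by the
 * kernel's RSS support or the driver default. The X722 takes the key
 * through an admin queue command; other MACs use the PFQF_HKEY registers.
 */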
void
ixl_set_rss_key(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
	enum i40e_status_code status;

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey((uint8_t *) &rss_seed);
#else
	ixl_get_default_rss_key(rss_seed);
#endif
	/* Fill out hash function seed */
	if (hw->mac.type == I40E_MAC_X722) {
		struct i40e_aqc_get_set_rss_key_data key_data;
		bcopy(rss_seed, &key_data, 52);
		status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
		if (status)
			device_printf(dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
	} else {
		for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
			i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
	}
}

/*
 * Configure enabled PCTYPES for RSS.
 */
void
ixl_set_rss_pctypes(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u64 set_hena = 0, hena;

#ifdef RSS
	u32 rss_hash_config;

	rss_hash_config = rss_gethashconfig();
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
	if (hw->mac.type == I40E_MAC_X722)
		set_hena = IXL_DEFAULT_RSS_HENA_X722;
	else
		set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
	    ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= set_hena;
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
}

/*
** Setup the PF's RSS parameters.
*/
void
ixl_config_rss(struct ixl_pf *pf)
{
	ixl_set_rss_key(pf);
	ixl_set_rss_pctypes(pf);
	ixl_set_rss_hlut(pf);
}

/*
 * In some firmware versions there is a default MAC/VLAN filter
 * configured which interferes with the filters managed by the driver.
 * Make sure it's removed.
 */
void
ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
	struct i40e_aqc_remove_macvlan_element_data e;

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);

	bzero(&e, sizeof(e));
	bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
	e.vlan_tag = 0;
	e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
	    I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
}

/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;

	ixl_dbg_filter(pf, "%s: start\n", __func__);

	/* Initialize mac filter list for VSI */
	LIST_INIT(&vsi->ftl);
	vsi->num_hw_filters = 0;

	/* Receive broadcast Ethernet frames */
	i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);

	if (IXL_VSI_IS_VF(vsi))
		return;

	ixl_del_default_hw_filters(vsi);

	ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);

	/*
	 * Prevent Tx flow control frames from being sent out by
	 * non-firmware transmitters.
	 * This affects every VSI in the PF.
	 */
#ifndef IXL_DEBUG_FC
	i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#else
	if (pf->enable_tx_fc_filter)
		i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
#endif
}

void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_ftl_head tmp;
	int cnt;

	/*
	 * The ixl_add_hw_filters function adds filters configured
	 * in HW to a list in VSI. Move all filters to a temporary
	 * list to avoid corrupting it by concatenating to itself.
	 */
	LIST_INIT(&tmp);
	LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
	cnt = vsi->num_hw_filters;
	vsi->num_hw_filters = 0;

	ixl_add_hw_filters(vsi, &tmp, cnt);

	/*
	 * When the vsi is allocated for the VFs, both vsi->hw and vsi->ifp
	 * will be NULL. Furthermore, the ftl of such vsi already contains
	 * the IXL_VLAN_ANY filter so we can skip that as well.
	 */
	if (hw == NULL)
		return;

	/* Filter could be removed if MAC address was changed */
	ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);

	if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;
	/*
	 * VLAN HW filtering is enabled, make sure that filters
	 * for all registered VLAN tags are configured
	 */
	ixl_add_vlan_filters(vsi, hw->mac.addr);
}

/*
 * This routine adds a MAC/VLAN filter to the software filter
 * list, then adds that new filter to the HW if it doesn't already
 * exist in the SW filter list.
 */
void
ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_pf *pf;
	device_t dev;
	struct ixl_ftl_head to_add;
	int to_add_cnt;

	pf = vsi->back;
	dev = pf->dev;
	to_add_cnt = 1;

	ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	/* Does one already exist */
	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f != NULL)
		return;

	LIST_INIT(&to_add);
	f = ixl_new_filter(&to_add, macaddr, vlan);
	if (f == NULL) {
		device_printf(dev, "WARNING: no filter available!!\n");
		return;
	}
	if (f->vlan != IXL_VLAN_ANY)
		f->flags |= IXL_FILTER_VLAN;
	else
		vsi->num_macs++;

	/*
	** Is this the first vlan being registered? If so, we
	** need to remove the ANY filter that indicates we are
	** not in a vlan, and replace that with a 0 filter.
	*/
	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
		tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
		if (tmp != NULL) {
			struct ixl_ftl_head to_del;

			/* Prepare new filter first to avoid removing
			 * VLAN_ANY filter if allocation fails */
			f = ixl_new_filter(&to_add, macaddr, 0);
			if (f == NULL) {
				device_printf(dev, "WARNING: no filter available!!\n");
				free(LIST_FIRST(&to_add), M_IXL);
				return;
			}
			to_add_cnt++;

			LIST_REMOVE(tmp, ftle);
			LIST_INIT(&to_del);
			LIST_INSERT_HEAD(&to_del, tmp, ftle);
			ixl_del_hw_filters(vsi, &to_del, 1);
		}
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

/**
 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
 * @vsi: pointer to VSI
 * @macaddr: MAC address
 *
 * Adds a MAC/VLAN filter for each VLAN configured on the interface
 * if there are enough HW filters. Otherwise adds a single filter
 * for all tagged and untagged frames to allow all configured VLANs
 * to receive traffic.
 */
void
ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_ftl_head to_add;
	struct ixl_mac_filter *f;
	int to_add_cnt = 0;
	int i, vlan = 0;

	if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
		return;
	}
	LIST_INIT(&to_add);

	/* Add filter for untagged frames if it does not exist yet */
	f = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (f == NULL) {
		f = ixl_new_filter(&to_add, macaddr, 0);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			return;
		}
		to_add_cnt++;
	}

	for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
		bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
		if (vlan == -1)
			break;

		/* Does one already exist */
		f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
		if (f != NULL)
			continue;

		f = ixl_new_filter(&to_add, macaddr, vlan);
		if (f == NULL) {
			device_printf(vsi->dev, "WARNING: no filter available!!\n");
			ixl_free_filters(&to_add);
			return;
		}
		to_add_cnt++;
	}

	ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
}

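/*
 * Remove the given MAC/VLAN filter from the SW list and the HW. When the
 * last VLAN filter for the MAC is removed, the matching untagged (VLAN 0)
 * filter is removed as well and a single IXL_VLAN_ANY filter is restored.
 */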
void
ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head ftl_head;
	int to_del_cnt = 1;

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
	    MAC_FORMAT_ARGS(macaddr), vlan);

	f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
	if (f == NULL)
		return;

	LIST_REMOVE(f, ftle);
	LIST_INIT(&ftl_head);
	LIST_INSERT_HEAD(&ftl_head, f, ftle);
	if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
		vsi->num_macs--;

	/* If this is not the last vlan just remove the filter */
	if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
		ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
		return;
	}

	/* It's the last vlan, we need to switch back to a non-vlan filter */
	tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
	if (tmp != NULL) {
		LIST_REMOVE(tmp, ftle);
		LIST_INSERT_AFTER(f, tmp, ftle);
		to_del_cnt++;
	}
	ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);

	ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
}

/**
 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
 * @vsi: VSI which filters need to be removed
 * @macaddr: MAC address
 *
 * Remove all MAC/VLAN filters with a given MAC address. For multicast
 * addresses there is always a single filter for all VLANs used (IXL_VLAN_ANY)
 * so skip them to speed up processing. Those filters should be removed
 * using the ixl_del_filter function.
 */
void
ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
{
	struct ixl_mac_filter *f, *tmp;
	struct ixl_ftl_head to_del;
	int to_del_cnt = 0;

	LIST_INIT(&to_del);

	LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
		if ((f->flags & IXL_FILTER_MC) != 0 ||
		    !ixl_ether_is_equal(f->macaddr, macaddr))
			continue;

		LIST_REMOVE(f, ftle);
		LIST_INSERT_HEAD(&to_del, f, ftle);
		to_del_cnt++;
	}

	ixl_dbg_filter((struct ixl_pf *)vsi->back,
	    "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
	    __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
	if (to_del_cnt > 0)
		ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
}

/*
** Find the filter with both matching mac addr and vlan id
*/
struct ixl_mac_filter *
ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter *f;

	LIST_FOREACH(f, headp, ftle) {
		if (ixl_ether_is_equal(f->macaddr, macaddr) &&
		    (f->vlan == vlan)) {
			return (f);
		}
	}

	return (NULL);
}

/*
** This routine takes additions to the vsi filter
** table and creates an Admin Queue call to create
** the filters in the hardware.
*/
void
ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
{
	struct i40e_aqc_add_macvlan_element_data *a, *b;
	struct ixl_mac_filter *f, *fn;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	dev = vsi->dev;
	hw = &pf->hw;

	ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);

	if (cnt < 1) {
		ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
		return;
	}

	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (a == NULL) {
		device_printf(dev, "add_hw_filters failed to get memory\n");
		return;
	}

	LIST_FOREACH(f, to_add, ftle) {
		b = &a[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
		if (f->vlan == IXL_VLAN_ANY) {
			b->vlan_tag = 0;
			b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
		} else {
			b->vlan_tag = f->vlan;
			b->flags = 0;
		}
		b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
		/* Some FW versions do not set match method
		 * when adding filters fails. Initialize it with
		 * the expected error value to allow detecting which
		 * filters were not added */
		b->match_method = I40E_AQC_MM_ERR_NO_RES;
		ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		if (++j == cnt)
			break;
	}
	if (j != cnt) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: list of filters too short; expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_add);
		goto out_free;
	}

	status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
	if (status == I40E_SUCCESS) {
		LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
		vsi->num_hw_filters += j;
		goto out_free;
	}

	device_printf(dev,
	    "i40e_aq_add_macvlan status %s, error %s\n",
	    i40e_stat_str(hw, status),
	    i40e_aq_str(hw, hw->aq.asq_last_status));
	j = 0;

	/* Verify which filters were actually configured in HW
	 * and add them to the list */
	LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
		LIST_REMOVE(f, ftle);
		if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
			ixl_dbg_filter(pf,
			    "%s filter " MAC_FORMAT " VTAG: %d not added\n",
			    __func__,
			    MAC_FORMAT_ARGS(f->macaddr),
			    f->vlan);
			free(f, M_IXL);
		} else {
			LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
			vsi->num_hw_filters++;
		}
		j++;
	}

out_free:
	free(a, M_IXL);
}

/*
** This routine takes removals in the vsi filter
** table and creates an Admin Queue call to delete
** the filters in the hardware.
*/
void
ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
{
	struct i40e_aqc_remove_macvlan_element_data *d, *e;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	device_t dev;
	struct ixl_mac_filter *f, *f_temp;
	enum i40e_status_code status;
	int j = 0;

	pf = vsi->back;
	hw = &pf->hw;
	dev = vsi->dev;

	ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);

	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
	    M_IXL, M_NOWAIT | M_ZERO);
	if (d == NULL) {
		device_printf(dev, "%s: failed to get memory\n", __func__);
		return;
	}

	LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
		e = &d[j]; // a pox on fvl long names :)
		bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
		e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		if (f->vlan == IXL_VLAN_ANY) {
			e->vlan_tag = 0;
			e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
		} else {
			e->vlan_tag = f->vlan;
		}

		ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
		    MAC_FORMAT_ARGS(f->macaddr));

		/* delete entry from the list */
		LIST_REMOVE(f, ftle);
		free(f, M_IXL);
		if (++j == cnt)
			break;
	}
	if (j != cnt || !LIST_EMPTY(to_del)) {
		/* Something went wrong */
		device_printf(dev,
		    "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
		    __func__, cnt, j);
		ixl_free_filters(to_del);
		goto out_free;
	}
	status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
	if (status) {
		device_printf(dev,
		    "%s: i40e_aq_remove_macvlan status %s, error %s\n",
		    __func__, i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		for (int i = 0; i < j; i++) {
			if (d[i].error_code == 0)
				continue;
			device_printf(dev,
			    "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
			    __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
			    d[i].vlan_tag);
		}
	}

	vsi->num_hw_filters -= j;

out_free:
	free(d, M_IXL);

	ixl_dbg_filter(pf, "%s: end\n", __func__);
}

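/*
 * Enable the given TX ring and poll until the hardware reports the
 * queue as enabled. Returns ETIMEDOUT if the enable does not take.
 */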
int
ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
{
	struct i40e_hw *hw = &pf->hw;
	int error = 0;
	u32 reg;
	u16 pf_qidx;

	pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);

	ixl_dbg(pf, IXL_DBG_EN_DIS,
	    "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
	    pf_qidx, vsi_qidx);

	i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);

	reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
	reg |= I40E_QTX_ENA_QENA_REQ_MASK |
	    I40E_QTX_ENA_QENA_STAT_MASK;
	wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
	/* Verify the enable took */
	for (int j = 0; j < 10; j++) {
		reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
		if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
			break;
		i40e_usec_delay(10);
	}
	if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
		device_printf(pf->dev, "TX queue %d still disabled!\n",
		    pf_qidx);
		error = ETIMEDOUT;
	}

	return (error);
}

1714 int
1715 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1716 {
1717 struct i40e_hw *hw = &pf->hw;
1718 int error = 0;
1719 u32 reg;
1720 u16 pf_qidx;
1721
1722 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1723
1724 ixl_dbg(pf, IXL_DBG_EN_DIS,
1725 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1726 pf_qidx, vsi_qidx);
1727
1728 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1729 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1730 I40E_QRX_ENA_QENA_STAT_MASK;
1731 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1732 /* Verify the enable took */
1733 for (int j = 0; j < 10; j++) {
1734 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1735 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1736 break;
1737 i40e_usec_delay(10);
1738 }
1739 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1740 device_printf(pf->dev, "RX queue %d still disabled!\n",
1741 pf_qidx);
1742 error = ETIMEDOUT;
1743 }
1744
1745 return (error);
1746 }
1747
1748 int
1749 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1750 {
1751 int error = 0;
1752
1753 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1754 /* Called function already prints error message */
1755 if (error)
1756 return (error);
1757 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1758 return (error);
1759 }
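/*
 * Usage sketch (hypothetical): bringing up all queue pairs of the PF
 * VSI is a loop over ixl_enable_ring(), stopping at the first failure
 * since each helper already prints its own error message.
 *
 *	int error = 0;
 *	for (u16 i = 0; i < vsi->num_tx_queues && error == 0; i++)
 *		error = ixl_enable_ring(pf, &pf->qtag, i);
 */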
1760
1761 /*
1762  * Returns an error on the first ring that is detected hung.
1763 */
1764 int
1765 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1766 {
1767 struct i40e_hw *hw = &pf->hw;
1768 int error = 0;
1769 u32 reg;
1770 u16 pf_qidx;
1771
1772 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1773
1774 ixl_dbg(pf, IXL_DBG_EN_DIS,
1775 "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1776 pf_qidx, vsi_qidx);
1777
1778 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1779 i40e_usec_delay(500);
1780
1781 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1782 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1783 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1784 /* Verify the disable took */
1785 for (int j = 0; j < 10; j++) {
1786 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1787 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1788 break;
1789 i40e_msec_delay(10);
1790 }
1791 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1792 device_printf(pf->dev, "TX queue %d still enabled!\n",
1793 pf_qidx);
1794 error = ETIMEDOUT;
1795 }
1796
1797 return (error);
1798 }
1799
1800 /*
1801  * Returns an error on the first ring that is detected hung.
1802 */
1803 int
1804 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1805 {
1806 struct i40e_hw *hw = &pf->hw;
1807 int error = 0;
1808 u32 reg;
1809 u16 pf_qidx;
1810
1811 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1812
1813 ixl_dbg(pf, IXL_DBG_EN_DIS,
1814 "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1815 pf_qidx, vsi_qidx);
1816
1817 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1818 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1819 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1820 /* Verify the disable took */
1821 for (int j = 0; j < 10; j++) {
1822 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1823 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1824 break;
1825 i40e_msec_delay(10);
1826 }
1827 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1828 device_printf(pf->dev, "RX queue %d still enabled!\n",
1829 pf_qidx);
1830 error = ETIMEDOUT;
1831 }
1832
1833 return (error);
1834 }
1835
1836 int
1837 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1838 {
1839 int error = 0;
1840
1841 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1842 /* Called function already prints error message */
1843 if (error)
1844 return (error);
1845 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
1846 return (error);
1847 }
1848
1849 static void
1850 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1851 {
1852 struct i40e_hw *hw = &pf->hw;
1853 device_t dev = pf->dev;
1854 struct ixl_vf *vf;
1855 bool mdd_detected = false;
1856 bool pf_mdd_detected = false;
1857 bool vf_mdd_detected = false;
1858 u16 vf_num, queue;
1859 u8 pf_num, event;
1860 u8 pf_mdet_num, vp_mdet_num;
1861 u32 reg;
1862
1863 /* find what triggered the MDD event */
1864 reg = rd32(hw, I40E_GL_MDET_TX);
1865 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1866 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1867 I40E_GL_MDET_TX_PF_NUM_SHIFT;
1868 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1869 I40E_GL_MDET_TX_VF_NUM_SHIFT;
1870 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1871 I40E_GL_MDET_TX_EVENT_SHIFT;
1872 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1873 I40E_GL_MDET_TX_QUEUE_SHIFT;
1874 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1875 mdd_detected = true;
1876 }
1877
1878 if (!mdd_detected)
1879 return;
1880
1881 reg = rd32(hw, I40E_PF_MDET_TX);
1882 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1883 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1884 pf_mdet_num = hw->pf_id;
1885 pf_mdd_detected = true;
1886 }
1887
1888 /* Check if MDD was caused by a VF */
1889 for (int i = 0; i < pf->num_vfs; i++) {
1890 vf = &(pf->vfs[i]);
1891 reg = rd32(hw, I40E_VP_MDET_TX(i));
1892 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1893 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1894 vp_mdet_num = i;
1895 vf->num_mdd_events++;
1896 vf_mdd_detected = true;
1897 }
1898 }
1899
1900 /* Print out an error message */
1901 if (vf_mdd_detected && pf_mdd_detected)
1902 device_printf(dev,
1903 "Malicious Driver Detection event %d"
1904 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1905 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1906 else if (vf_mdd_detected && !pf_mdd_detected)
1907 device_printf(dev,
1908 "Malicious Driver Detection event %d"
1909 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1910 event, queue, pf_num, vf_num, vp_mdet_num);
1911 else if (!vf_mdd_detected && pf_mdd_detected)
1912 device_printf(dev,
1913 "Malicious Driver Detection event %d"
1914 " on TX queue %d, pf number %d (PF-%d)\n",
1915 event, queue, pf_num, pf_mdet_num);
1916 /* Theoretically shouldn't happen */
1917 else
1918 device_printf(dev,
1919 "TX Malicious Driver Detection event (unknown)\n");
1920 }
1921
1922 static void
1923 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1924 {
1925 struct i40e_hw *hw = &pf->hw;
1926 device_t dev = pf->dev;
1927 struct ixl_vf *vf;
1928 bool mdd_detected = false;
1929 bool pf_mdd_detected = false;
1930 bool vf_mdd_detected = false;
1931 u16 queue;
1932 u8 pf_num, event;
1933 u8 pf_mdet_num, vp_mdet_num;
1934 u32 reg;
1935
1936 /*
1937 * GL_MDET_RX doesn't contain VF number information, unlike
1938 * GL_MDET_TX.
1939 */
1940 reg = rd32(hw, I40E_GL_MDET_RX);
1941 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1942 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1943 I40E_GL_MDET_RX_FUNCTION_SHIFT;
1944 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1945 I40E_GL_MDET_RX_EVENT_SHIFT;
1946 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1947 I40E_GL_MDET_RX_QUEUE_SHIFT;
1948 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1949 mdd_detected = true;
1950 }
1951
1952 if (!mdd_detected)
1953 return;
1954
1955 reg = rd32(hw, I40E_PF_MDET_RX);
1956 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1957 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1958 pf_mdet_num = hw->pf_id;
1959 pf_mdd_detected = true;
1960 }
1961
1962 /* Check if MDD was caused by a VF */
1963 for (int i = 0; i < pf->num_vfs; i++) {
1964 vf = &(pf->vfs[i]);
1965 reg = rd32(hw, I40E_VP_MDET_RX(i));
1966 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1967 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1968 vp_mdet_num = i;
1969 vf->num_mdd_events++;
1970 vf_mdd_detected = true;
1971 }
1972 }
1973
1974 /* Print out an error message */
1975 if (vf_mdd_detected && pf_mdd_detected)
1976 device_printf(dev,
1977 "Malicious Driver Detection event %d"
1978 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1979 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1980 else if (vf_mdd_detected && !pf_mdd_detected)
1981 device_printf(dev,
1982 "Malicious Driver Detection event %d"
1983 " on RX queue %d, pf number %d, (VF-%d)\n",
1984 event, queue, pf_num, vp_mdet_num);
1985 else if (!vf_mdd_detected && pf_mdd_detected)
1986 device_printf(dev,
1987 "Malicious Driver Detection event %d"
1988 " on RX queue %d, pf number %d (PF-%d)\n",
1989 event, queue, pf_num, pf_mdet_num);
1990 /* Theoretically shouldn't happen */
1991 else
1992 device_printf(dev,
1993 "RX Malicious Driver Detection event (unknown)\n");
1994 }
1995
1996 /**
1997  * ixl_handle_mdd_event
1998  *
1999  * Called from the interrupt handler to identify possibly malicious VFs
2000  * (events from the PF are detected as well).
2001  **/
2002 void
2003 ixl_handle_mdd_event(struct ixl_pf *pf)
2004 {
2005 struct i40e_hw *hw = &pf->hw;
2006 u32 reg;
2007
2008 /*
2009 * Handle both TX/RX because it's possible they could
2010 * both trigger in the same interrupt.
2011 */
2012 ixl_handle_tx_mdd_event(pf);
2013 ixl_handle_rx_mdd_event(pf);
2014
2015 ixl_clear_state(&pf->state, IXL_STATE_MDD_PENDING);
2016
2017 /* re-enable mdd interrupt cause */
2018 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2019 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
2020 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2021 ixl_flush(hw);
2022 }
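/*
 * Calling pattern (assumed from the IXL_STATE_MDD_PENDING handling
 * above): the ICR0 interrupt handler does not process the event
 * inline; it masks the cause bit, records the pending state, and a
 * later task invokes ixl_handle_mdd_event(), which re-enables the
 * cause once the registers have been read and cleared.
 *
 *	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
 *		icr0_ena &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
 *		ixl_set_state(&pf->state, IXL_STATE_MDD_PENDING);
 *	}
 */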
2023
2024 void
2025 ixl_enable_intr0(struct i40e_hw *hw)
2026 {
2027 u32 reg;
2028
2029 /* Use IXL_ITR_NONE so ITR isn't updated here */
2030 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
2031 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2032 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2033 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2034 }
2035
2036 void
2037 ixl_disable_intr0(struct i40e_hw *hw)
2038 {
2039 u32 reg;
2040
2041 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
2042 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
2043 ixl_flush(hw);
2044 }
2045
2046 void
2047 ixl_enable_queue(struct i40e_hw *hw, int id)
2048 {
2049 u32 reg;
2050
2051 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2052 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2053 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2054 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2055 }
2056
2057 void
2058 ixl_disable_queue(struct i40e_hw *hw, int id)
2059 {
2060 u32 reg;
2061
2062 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2063 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
2064 }
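/*
 * Typical use (sketch with assumed names): an iflib queue-interrupt
 * enable callback maps a VSI RX queue back to its MSI-X vector and
 * re-arms it with ixl_enable_queue(). The vector index is offset by
 * one because vector 0 belongs to the admin queue.
 *
 *	static int
 *	example_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
 *	{
 *		struct ixl_pf *pf = iflib_get_softc(ctx);
 *		struct ixl_vsi *vsi = &pf->vsi;
 *		struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid];
 *
 *		ixl_enable_queue(&pf->hw, rx_que->msix - 1);
 *		return (0);
 *	}
 */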
2065
2066 void
2067 ixl_handle_empr_reset(struct ixl_pf *pf)
2068 {
2069 struct ixl_vsi *vsi = &pf->vsi;
2070 bool is_up = !!(if_getdrvflags(vsi->ifp) & IFF_DRV_RUNNING);
2071
2072 ixl_prepare_for_reset(pf, is_up);
2073 	/*
2074 	 * i40e_pf_reset checks the type of reset and acts
2075 	 * accordingly. If an EMP or Core reset was performed,
2076 	 * doing a PF reset is not necessary and it can
2077 	 * sometimes fail.
2078 	 */
2079 ixl_pf_reset(pf);
2080
2081 if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
2082 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
2083 ixl_set_state(&pf->state, IXL_STATE_RECOVERY_MODE);
2084 device_printf(pf->dev,
2085 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
2086 pf->link_up = FALSE;
2087 ixl_update_link_status(pf);
2088 }
2089
2090 ixl_rebuild_hw_structs_after_reset(pf, is_up);
2091
2092 ixl_clear_state(&pf->state, IXL_STATE_RESETTING);
2093 }
2094
2095 void
2096 ixl_update_stats_counters(struct ixl_pf *pf)
2097 {
2098 struct i40e_hw *hw = &pf->hw;
2099 struct ixl_vsi *vsi = &pf->vsi;
2100 struct ixl_vf *vf;
2101 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
2102
2103 struct i40e_hw_port_stats *nsd = &pf->stats;
2104 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
2105
2106 /* Update hw stats */
2107 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
2108 pf->stat_offsets_loaded,
2109 &osd->crc_errors, &nsd->crc_errors);
2110 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
2111 pf->stat_offsets_loaded,
2112 &osd->illegal_bytes, &nsd->illegal_bytes);
2113 ixl_stat_update48(hw, I40E_GLPRT_GORCL(hw->port),
2114 pf->stat_offsets_loaded,
2115 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
2116 ixl_stat_update48(hw, I40E_GLPRT_GOTCL(hw->port),
2117 pf->stat_offsets_loaded,
2118 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
2119 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
2120 pf->stat_offsets_loaded,
2121 &osd->eth.rx_discards,
2122 &nsd->eth.rx_discards);
2123 ixl_stat_update48(hw, I40E_GLPRT_UPRCL(hw->port),
2124 pf->stat_offsets_loaded,
2125 &osd->eth.rx_unicast,
2126 &nsd->eth.rx_unicast);
2127 ixl_stat_update48(hw, I40E_GLPRT_UPTCL(hw->port),
2128 pf->stat_offsets_loaded,
2129 &osd->eth.tx_unicast,
2130 &nsd->eth.tx_unicast);
2131 ixl_stat_update48(hw, I40E_GLPRT_MPRCL(hw->port),
2132 pf->stat_offsets_loaded,
2133 &osd->eth.rx_multicast,
2134 &nsd->eth.rx_multicast);
2135 ixl_stat_update48(hw, I40E_GLPRT_MPTCL(hw->port),
2136 pf->stat_offsets_loaded,
2137 &osd->eth.tx_multicast,
2138 &nsd->eth.tx_multicast);
2139 ixl_stat_update48(hw, I40E_GLPRT_BPRCL(hw->port),
2140 pf->stat_offsets_loaded,
2141 &osd->eth.rx_broadcast,
2142 &nsd->eth.rx_broadcast);
2143 ixl_stat_update48(hw, I40E_GLPRT_BPTCL(hw->port),
2144 pf->stat_offsets_loaded,
2145 &osd->eth.tx_broadcast,
2146 &nsd->eth.tx_broadcast);
2147
2148 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2149 pf->stat_offsets_loaded,
2150 &osd->tx_dropped_link_down,
2151 &nsd->tx_dropped_link_down);
2152 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2153 pf->stat_offsets_loaded,
2154 &osd->mac_local_faults,
2155 &nsd->mac_local_faults);
2156 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2157 pf->stat_offsets_loaded,
2158 &osd->mac_remote_faults,
2159 &nsd->mac_remote_faults);
2160 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2161 pf->stat_offsets_loaded,
2162 &osd->rx_length_errors,
2163 &nsd->rx_length_errors);
2164
2165 /* Flow control (LFC) stats */
2166 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2167 pf->stat_offsets_loaded,
2168 &osd->link_xon_rx, &nsd->link_xon_rx);
2169 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2170 pf->stat_offsets_loaded,
2171 &osd->link_xon_tx, &nsd->link_xon_tx);
2172 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2173 pf->stat_offsets_loaded,
2174 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2175 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2176 pf->stat_offsets_loaded,
2177 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2178
2179 /*
2180 * For watchdog management we need to know if we have been paused
2181 * during the last interval, so capture that here.
2182 */
2183 if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2184 vsi->shared->isc_pause_frames = 1;
2185
2186 /* Packet size stats rx */
2187 ixl_stat_update48(hw, I40E_GLPRT_PRC64L(hw->port),
2188 pf->stat_offsets_loaded,
2189 &osd->rx_size_64, &nsd->rx_size_64);
2190 ixl_stat_update48(hw, I40E_GLPRT_PRC127L(hw->port),
2191 pf->stat_offsets_loaded,
2192 &osd->rx_size_127, &nsd->rx_size_127);
2193 ixl_stat_update48(hw, I40E_GLPRT_PRC255L(hw->port),
2194 pf->stat_offsets_loaded,
2195 &osd->rx_size_255, &nsd->rx_size_255);
2196 ixl_stat_update48(hw, I40E_GLPRT_PRC511L(hw->port),
2197 pf->stat_offsets_loaded,
2198 &osd->rx_size_511, &nsd->rx_size_511);
2199 ixl_stat_update48(hw, I40E_GLPRT_PRC1023L(hw->port),
2200 pf->stat_offsets_loaded,
2201 &osd->rx_size_1023, &nsd->rx_size_1023);
2202 ixl_stat_update48(hw, I40E_GLPRT_PRC1522L(hw->port),
2203 pf->stat_offsets_loaded,
2204 &osd->rx_size_1522, &nsd->rx_size_1522);
2205 ixl_stat_update48(hw, I40E_GLPRT_PRC9522L(hw->port),
2206 pf->stat_offsets_loaded,
2207 &osd->rx_size_big, &nsd->rx_size_big);
2208
2209 /* Packet size stats tx */
2210 ixl_stat_update48(hw, I40E_GLPRT_PTC64L(hw->port),
2211 pf->stat_offsets_loaded,
2212 &osd->tx_size_64, &nsd->tx_size_64);
2213 ixl_stat_update48(hw, I40E_GLPRT_PTC127L(hw->port),
2214 pf->stat_offsets_loaded,
2215 &osd->tx_size_127, &nsd->tx_size_127);
2216 ixl_stat_update48(hw, I40E_GLPRT_PTC255L(hw->port),
2217 pf->stat_offsets_loaded,
2218 &osd->tx_size_255, &nsd->tx_size_255);
2219 ixl_stat_update48(hw, I40E_GLPRT_PTC511L(hw->port),
2220 pf->stat_offsets_loaded,
2221 &osd->tx_size_511, &nsd->tx_size_511);
2222 ixl_stat_update48(hw, I40E_GLPRT_PTC1023L(hw->port),
2223 pf->stat_offsets_loaded,
2224 &osd->tx_size_1023, &nsd->tx_size_1023);
2225 ixl_stat_update48(hw, I40E_GLPRT_PTC1522L(hw->port),
2226 pf->stat_offsets_loaded,
2227 &osd->tx_size_1522, &nsd->tx_size_1522);
2228 ixl_stat_update48(hw, I40E_GLPRT_PTC9522L(hw->port),
2229 pf->stat_offsets_loaded,
2230 &osd->tx_size_big, &nsd->tx_size_big);
2231
2232 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2233 pf->stat_offsets_loaded,
2234 &osd->rx_undersize, &nsd->rx_undersize);
2235 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2236 pf->stat_offsets_loaded,
2237 &osd->rx_fragments, &nsd->rx_fragments);
2238
2239 u64 rx_roc;
2240 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2241 pf->stat_offsets_loaded,
2242 &osd->rx_oversize, &rx_roc);
2243
2244 	/*
2245 	 * Read the RXERR1 register to get the count of packets
2246 	 * larger than RX MAX, and include it in the total rx_oversize count.
2247 	 *
2248 	 * Also need to add BIT(7) to the hw->pf_id value while indexing
2249 	 * the I40E_GL_RXERR1 register, as indexes 0..127 are for VFs when
2250 	 * SR-IOV is enabled; indexes 128..143 are for PFs.
2251 	 */
2252 u64 rx_err1;
2253 ixl_stat_update64(hw,
2254 I40E_GL_RXERR1L(hw->pf_id + BIT(7)),
2255 pf->stat_offsets_loaded,
2256 &osd->rx_err1,
2257 &rx_err1);
2258
2259 nsd->rx_oversize = rx_roc + rx_err1;
2260
2261 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2262 pf->stat_offsets_loaded,
2263 &osd->rx_jabber, &nsd->rx_jabber);
2264 /* EEE */
2265 i40e_get_phy_lpi_status(hw, nsd);
2266
2267 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2268 &osd->tx_lpi_count, &nsd->tx_lpi_count,
2269 &osd->rx_lpi_count, &nsd->rx_lpi_count);
2270
2271 pf->stat_offsets_loaded = true;
2272 /* End hw stats */
2273
2274 /* Update vsi stats */
2275 ixl_update_vsi_stats(vsi);
2276
2277 for (int i = 0; i < pf->num_vfs; i++) {
2278 vf = &pf->vfs[i];
2279 if (vf->vf_flags & VF_FLAG_ENABLED)
2280 ixl_update_eth_stats(&pf->vfs[i].vsi);
2281 }
2282 }
2283
2284 /**
2285 * Update VSI-specific ethernet statistics counters.
2286 **/
2287 void
2288 ixl_update_eth_stats(struct ixl_vsi *vsi)
2289 {
2290 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2291 struct i40e_hw *hw = &pf->hw;
2292 struct i40e_eth_stats *es;
2293 struct i40e_eth_stats *oes;
2294 u16 stat_idx = vsi->info.stat_counter_idx;
2295
2296 es = &vsi->eth_stats;
2297 oes = &vsi->eth_stats_offsets;
2298
2299 /* Gather up the stats that the hw collects */
2300 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2301 vsi->stat_offsets_loaded,
2302 &oes->tx_errors, &es->tx_errors);
2303 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2304 vsi->stat_offsets_loaded,
2305 &oes->rx_discards, &es->rx_discards);
2306
2307 ixl_stat_update48(hw, I40E_GLV_GORCL(stat_idx),
2308 vsi->stat_offsets_loaded,
2309 &oes->rx_bytes, &es->rx_bytes);
2310 ixl_stat_update48(hw, I40E_GLV_UPRCL(stat_idx),
2311 vsi->stat_offsets_loaded,
2312 &oes->rx_unicast, &es->rx_unicast);
2313 ixl_stat_update48(hw, I40E_GLV_MPRCL(stat_idx),
2314 vsi->stat_offsets_loaded,
2315 &oes->rx_multicast, &es->rx_multicast);
2316 ixl_stat_update48(hw, I40E_GLV_BPRCL(stat_idx),
2317 vsi->stat_offsets_loaded,
2318 &oes->rx_broadcast, &es->rx_broadcast);
2319
2320 ixl_stat_update48(hw, I40E_GLV_GOTCL(stat_idx),
2321 vsi->stat_offsets_loaded,
2322 &oes->tx_bytes, &es->tx_bytes);
2323 ixl_stat_update48(hw, I40E_GLV_UPTCL(stat_idx),
2324 vsi->stat_offsets_loaded,
2325 &oes->tx_unicast, &es->tx_unicast);
2326 ixl_stat_update48(hw, I40E_GLV_MPTCL(stat_idx),
2327 vsi->stat_offsets_loaded,
2328 &oes->tx_multicast, &es->tx_multicast);
2329 ixl_stat_update48(hw, I40E_GLV_BPTCL(stat_idx),
2330 vsi->stat_offsets_loaded,
2331 &oes->tx_broadcast, &es->tx_broadcast);
2332 vsi->stat_offsets_loaded = true;
2333 }
2334
2335 void
2336 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2337 {
2338 struct ixl_pf *pf;
2339 struct i40e_eth_stats *es;
2340 u64 tx_discards, csum_errs;
2341
2342 struct i40e_hw_port_stats *nsd;
2343
2344 pf = vsi->back;
2345 es = &vsi->eth_stats;
2346 nsd = &pf->stats;
2347
2348 ixl_update_eth_stats(vsi);
2349
2350 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2351
2352 csum_errs = 0;
2353 for (int i = 0; i < vsi->num_rx_queues; i++)
2354 csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2355 nsd->checksum_error = csum_errs;
2356
2357 /* Update ifnet stats */
2358 IXL_SET_IPACKETS(vsi, es->rx_unicast +
2359 es->rx_multicast +
2360 es->rx_broadcast);
2361 IXL_SET_OPACKETS(vsi, es->tx_unicast +
2362 es->tx_multicast +
2363 es->tx_broadcast);
2364 IXL_SET_IBYTES(vsi, es->rx_bytes);
2365 IXL_SET_OBYTES(vsi, es->tx_bytes);
2366 IXL_SET_IMCASTS(vsi, es->rx_multicast);
2367 IXL_SET_OMCASTS(vsi, es->tx_multicast);
2368
2369 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2370 nsd->checksum_error + nsd->rx_length_errors +
2371 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2372 nsd->rx_jabber);
2373 IXL_SET_OERRORS(vsi, es->tx_errors);
2374 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2375 IXL_SET_OQDROPS(vsi, tx_discards);
2376 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2377 IXL_SET_COLLISIONS(vsi, 0);
2378 }
2379
2380 /**
2381 * Reset all of the stats for the given pf
2382 **/
2383 void
2384 ixl_pf_reset_stats(struct ixl_pf *pf)
2385 {
2386 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2387 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2388 pf->stat_offsets_loaded = false;
2389 }
2390
2391 /**
2392 * Resets all stats of the given vsi
2393 **/
2394 void
2395 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2396 {
2397 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2398 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2399 vsi->stat_offsets_loaded = false;
2400 }
2401
2402 /**
2403 * Helper function for reading and updating 48/64 bit stats from the hw
2404 *
2405 * Since the device stats are not reset at PFReset, they likely will not
2406 * be zeroed when the driver starts. We'll save the first values read
2407 * and use them as offsets to be subtracted from the raw values in order
2408 * to report stats that count from zero.
2409 **/
2410 static void
2411 _ixl_stat_update_helper(struct i40e_hw *hw, u32 reg,
2412 bool offset_loaded, u64 mask, u64 *offset, u64 *stat)
2413 {
2414 u64 new_data = rd64(hw, reg);
2415
2416 if (!offset_loaded)
2417 *offset = new_data;
2418 if (new_data >= *offset)
2419 *stat = new_data - *offset;
2420 else
2421 *stat = (new_data + mask) - *offset + 1;
2422 *stat &= mask;
2423 }
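/*
 * Worked example with the 48-bit mask: if the first read latched
 * *offset = 0xFFFFFFFFFF00 and a later read returns
 * new_data = 0x000000000100, the counter has wrapped, so the reported
 * value is (new_data + mask) - *offset + 1 = 0x100 + 0xFF + 1 = 0x200:
 * 0x100 counts before the wrap plus 0x100 after it.
 */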
2424
2425 /**
2426 * Read and update a 48 bit stat from the hw
2427 **/
2428 void
2429 ixl_stat_update48(struct i40e_hw *hw, u32 reg,
2430 bool offset_loaded, u64 *offset, u64 *stat)
2431 {
2432 _ixl_stat_update_helper(hw,
2433 reg,
2434 offset_loaded,
2435 0xFFFFFFFFFFFFULL,
2436 offset,
2437 stat);
2438 }
2439
2440 /**
2441 * ixl_stat_update64 - read and update a 64 bit stat from the chip.
2442 **/
2443 void
2444 ixl_stat_update64(struct i40e_hw *hw, u32 reg,
2445 bool offset_loaded, u64 *offset, u64 *stat)
2446 {
2447 _ixl_stat_update_helper(hw,
2448 reg,
2449 offset_loaded,
2450 0xFFFFFFFFFFFFFFFFULL,
2451 offset,
2452 stat);
2453 }
2454
2455 /**
2456 * Read and update a 32 bit stat from the hw
2457 **/
2458 void
2459 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2460 bool offset_loaded, u64 *offset, u64 *stat)
2461 {
2462 u32 new_data;
2463
2464 new_data = rd32(hw, reg);
2465 if (!offset_loaded)
2466 *offset = new_data;
2467 if (new_data >= *offset)
2468 *stat = (u32)(new_data - *offset);
2469 else
2470 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2471 }
2472
2473 /**
2474  * Add the subset of device sysctls that are safe to use in recovery mode
2475  */
2476 void
2477 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2478 {
2479 device_t dev = pf->dev;
2480
2481 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2482 struct sysctl_oid_list *ctx_list =
2483 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2484
2485 struct sysctl_oid *debug_node;
2486 struct sysctl_oid_list *debug_list;
2487
2488 SYSCTL_ADD_PROC(ctx, ctx_list,
2489 OID_AUTO, "fw_version",
2490 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2491 ixl_sysctl_show_fw, "A", "Firmware version");
2492
2493 /* Add sysctls meant to print debug information, but don't list them
2494 * in "sysctl -a" output. */
2495 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2496 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2497 "Debug Sysctls");
2498 debug_list = SYSCTL_CHILDREN(debug_node);
2499
2500 SYSCTL_ADD_UINT(ctx, debug_list,
2501 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2502 &pf->hw.debug_mask, 0, "Shared code debug message level");
2503
2504 SYSCTL_ADD_UINT(ctx, debug_list,
2505 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2506 &pf->dbg_mask, 0, "Non-shared code debug message level");
2507
2508 SYSCTL_ADD_PROC(ctx, debug_list,
2509 OID_AUTO, "dump_debug_data",
2510 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2511 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2512
2513 SYSCTL_ADD_PROC(ctx, debug_list,
2514 OID_AUTO, "do_pf_reset",
2515 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2516 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2517
2518 SYSCTL_ADD_PROC(ctx, debug_list,
2519 OID_AUTO, "do_core_reset",
2520 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2521 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2522
2523 SYSCTL_ADD_PROC(ctx, debug_list,
2524 OID_AUTO, "do_global_reset",
2525 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2526 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2527
2528 SYSCTL_ADD_PROC(ctx, debug_list,
2529 OID_AUTO, "queue_interrupt_table",
2530 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2531 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2532
2533 SYSCTL_ADD_PROC(ctx, debug_list,
2534 OID_AUTO, "queue_int_ctln",
2535 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2536 pf, 0, ixl_sysctl_debug_queue_int_ctln, "A",
2537 "View MSI-X control registers for RX queues");
2538 }
2539
2540 void
2541 ixl_add_device_sysctls(struct ixl_pf *pf)
2542 {
2543 device_t dev = pf->dev;
2544 struct i40e_hw *hw = &pf->hw;
2545
2546 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2547 struct sysctl_oid_list *ctx_list =
2548 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2549
2550 struct sysctl_oid *debug_node;
2551 struct sysctl_oid_list *debug_list;
2552
2553 struct sysctl_oid *fec_node;
2554 struct sysctl_oid_list *fec_list;
2555 struct sysctl_oid *eee_node;
2556 struct sysctl_oid_list *eee_list;
2557
2558 /* Set up sysctls */
2559 SYSCTL_ADD_PROC(ctx, ctx_list,
2560 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2561 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2562
2563 SYSCTL_ADD_PROC(ctx, ctx_list,
2564 OID_AUTO, "advertise_speed",
2565 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2566 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2567
2568 SYSCTL_ADD_PROC(ctx, ctx_list,
2569 OID_AUTO, "supported_speeds",
2570 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2571 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2572
2573 SYSCTL_ADD_PROC(ctx, ctx_list,
2574 OID_AUTO, "current_speed",
2575 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2576 ixl_sysctl_current_speed, "A", "Current Port Speed");
2577
2578 SYSCTL_ADD_PROC(ctx, ctx_list,
2579 OID_AUTO, "fw_version",
2580 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2581 ixl_sysctl_show_fw, "A", "Firmware version");
2582
2583 SYSCTL_ADD_PROC(ctx, ctx_list,
2584 OID_AUTO, "unallocated_queues",
2585 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2586 ixl_sysctl_unallocated_queues, "I",
2587 "Queues not allocated to a PF or VF");
2588
2589 SYSCTL_ADD_PROC(ctx, ctx_list,
2590 OID_AUTO, "tx_itr",
2591 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2592 ixl_sysctl_pf_tx_itr, "I",
2593 "Immediately set TX ITR value for all queues");
2594
2595 SYSCTL_ADD_PROC(ctx, ctx_list,
2596 OID_AUTO, "rx_itr",
2597 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2598 ixl_sysctl_pf_rx_itr, "I",
2599 "Immediately set RX ITR value for all queues");
2600
2601 SYSCTL_ADD_INT(ctx, ctx_list,
2602 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2603 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2604
2605 SYSCTL_ADD_INT(ctx, ctx_list,
2606 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2607 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2608
2609 /* Add FEC sysctls for 25G adapters */
2610 if (i40e_is_25G_device(hw->device_id)) {
2611 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2612 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2613 "FEC Sysctls");
2614 fec_list = SYSCTL_CHILDREN(fec_node);
2615
2616 SYSCTL_ADD_PROC(ctx, fec_list,
2617 OID_AUTO, "fc_ability",
2618 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2619 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2620
2621 SYSCTL_ADD_PROC(ctx, fec_list,
2622 OID_AUTO, "rs_ability",
2623 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2624 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2625
2626 SYSCTL_ADD_PROC(ctx, fec_list,
2627 OID_AUTO, "fc_requested",
2628 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2629 ixl_sysctl_fec_fc_request, "I",
2630 "FC FEC mode requested on link");
2631
2632 SYSCTL_ADD_PROC(ctx, fec_list,
2633 OID_AUTO, "rs_requested",
2634 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2635 ixl_sysctl_fec_rs_request, "I",
2636 "RS FEC mode requested on link");
2637
2638 SYSCTL_ADD_PROC(ctx, fec_list,
2639 OID_AUTO, "auto_fec_enabled",
2640 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2641 ixl_sysctl_fec_auto_enable, "I",
2642 "Let FW decide FEC ability/request modes");
2643 }
2644
2645 SYSCTL_ADD_PROC(ctx, ctx_list,
2646 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2647 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
2648
2649 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2650 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2651 "Energy Efficient Ethernet (EEE) Sysctls");
2652 eee_list = SYSCTL_CHILDREN(eee_node);
2653
2654 SYSCTL_ADD_PROC(ctx, eee_list,
2655 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2656 pf, 0, ixl_sysctl_eee_enable, "I",
2657 "Enable Energy Efficient Ethernet (EEE)");
2658
2659 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2660 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2661 "TX LPI status");
2662
2663 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2664 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2665 "RX LPI status");
2666
2667 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2668 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2669 "TX LPI count");
2670
2671 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2672 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2673 "RX LPI count");
2674
2675 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2676 "link_active_on_if_down",
2677 CTLTYPE_INT | CTLFLAG_RWTUN,
2678 pf, 0, ixl_sysctl_set_link_active, "I",
2679 IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2680
2681 /* Add sysctls meant to print debug information, but don't list them
2682 * in "sysctl -a" output. */
2683 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2684 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2685 "Debug Sysctls");
2686 debug_list = SYSCTL_CHILDREN(debug_node);
2687
2688 SYSCTL_ADD_UINT(ctx, debug_list,
2689 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2690 &pf->hw.debug_mask, 0, "Shared code debug message level");
2691
2692 SYSCTL_ADD_UINT(ctx, debug_list,
2693 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2694 &pf->dbg_mask, 0, "Non-shared code debug message level");
2695
2696 SYSCTL_ADD_PROC(ctx, debug_list,
2697 OID_AUTO, "link_status",
2698 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2699 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2700
2701 SYSCTL_ADD_PROC(ctx, debug_list,
2702 OID_AUTO, "phy_abilities_init",
2703 CTLTYPE_STRING | CTLFLAG_RD,
2704 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2705
2706 SYSCTL_ADD_PROC(ctx, debug_list,
2707 OID_AUTO, "phy_abilities",
2708 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2709 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2710
2711 SYSCTL_ADD_PROC(ctx, debug_list,
2712 OID_AUTO, "filter_list",
2713 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2714 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2715
2716 SYSCTL_ADD_PROC(ctx, debug_list,
2717 OID_AUTO, "hw_res_alloc",
2718 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2719 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2720
2721 SYSCTL_ADD_PROC(ctx, debug_list,
2722 OID_AUTO, "switch_config",
2723 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2724 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2725
2726 SYSCTL_ADD_PROC(ctx, debug_list,
2727 OID_AUTO, "switch_vlans",
2728 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2729 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2730
2731 SYSCTL_ADD_PROC(ctx, debug_list,
2732 OID_AUTO, "rss_key",
2733 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2734 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2735
2736 SYSCTL_ADD_PROC(ctx, debug_list,
2737 OID_AUTO, "rss_lut",
2738 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2739 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2740
2741 SYSCTL_ADD_PROC(ctx, debug_list,
2742 OID_AUTO, "rss_hena",
2743 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2744 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2745
2746 SYSCTL_ADD_PROC(ctx, debug_list,
2747 OID_AUTO, "disable_fw_link_management",
2748 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2749 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2750
2751 SYSCTL_ADD_PROC(ctx, debug_list,
2752 OID_AUTO, "dump_debug_data",
2753 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2754 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2755
2756 SYSCTL_ADD_PROC(ctx, debug_list,
2757 OID_AUTO, "do_pf_reset",
2758 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2759 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2760
2761 SYSCTL_ADD_PROC(ctx, debug_list,
2762 OID_AUTO, "do_core_reset",
2763 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2764 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2765
2766 SYSCTL_ADD_PROC(ctx, debug_list,
2767 OID_AUTO, "do_global_reset",
2768 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2769 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2770
2771 SYSCTL_ADD_PROC(ctx, debug_list,
2772 OID_AUTO, "queue_interrupt_table",
2773 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2774 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
2775
2776 SYSCTL_ADD_PROC(ctx, debug_list,
2777 OID_AUTO, "phy_statistics", CTLTYPE_STRING | CTLFLAG_RD,
2778 pf, 0, ixl_sysctl_phy_statistics, "A", "PHY Statistics");
2779
2780 if (pf->has_i2c) {
2781 SYSCTL_ADD_PROC(ctx, debug_list,
2782 OID_AUTO, "read_i2c_byte",
2783 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2784 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2785
2786 SYSCTL_ADD_PROC(ctx, debug_list,
2787 OID_AUTO, "write_i2c_byte",
2788 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2789 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2790
2791 SYSCTL_ADD_PROC(ctx, debug_list,
2792 OID_AUTO, "read_i2c_diag_data",
2793 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2794 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2795 }
2796 }
2797
2798 /*
2799  * Primarily used to find out how many queues can be assigned
2800  * to VFs at runtime.
2801  */
2802 static int
2803 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2804 {
2805 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2806 int queues;
2807
2808 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2809
2810 return sysctl_handle_int(oidp, NULL, queues, req);
2811 }
2812
2813 static const char *
2814 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2815 {
2816 const char * link_speed_str[] = {
2817 "Unknown",
2818 "100 Mbps",
2819 "1 Gbps",
2820 "10 Gbps",
2821 "40 Gbps",
2822 "20 Gbps",
2823 "25 Gbps",
2824 "2.5 Gbps",
2825 "5 Gbps"
2826 };
2827 int index;
2828
2829 switch (link_speed) {
2830 case I40E_LINK_SPEED_100MB:
2831 index = 1;
2832 break;
2833 case I40E_LINK_SPEED_1GB:
2834 index = 2;
2835 break;
2836 case I40E_LINK_SPEED_10GB:
2837 index = 3;
2838 break;
2839 case I40E_LINK_SPEED_40GB:
2840 index = 4;
2841 break;
2842 case I40E_LINK_SPEED_20GB:
2843 index = 5;
2844 break;
2845 case I40E_LINK_SPEED_25GB:
2846 index = 6;
2847 break;
2848 case I40E_LINK_SPEED_2_5GB:
2849 index = 7;
2850 break;
2851 case I40E_LINK_SPEED_5GB:
2852 index = 8;
2853 break;
2854 case I40E_LINK_SPEED_UNKNOWN:
2855 default:
2856 index = 0;
2857 break;
2858 }
2859
2860 return (link_speed_str[index]);
2861 }
2862
2863 int
2864 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2865 {
2866 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2867 struct i40e_hw *hw = &pf->hw;
2868 int error = 0;
2869
2870 ixl_update_link_status(pf);
2871
2872 error = sysctl_handle_string(oidp,
2873 __DECONST(void *,
2874 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2875 8, req);
2876
2877 return (error);
2878 }
2879
2880 /*
2881  * Converts an 8-bit speed bitmap between the sysctl flag
2882  * format and the Admin Queue flag format.
2883  */
2884 static u8
2885 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2886 {
2887 #define SPEED_MAP_SIZE 8
2888 static u16 speedmap[SPEED_MAP_SIZE] = {
2889 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2890 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
2891 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
2892 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
2893 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
2894 (I40E_LINK_SPEED_40GB | (0x20 << 8)),
2895 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2896 (I40E_LINK_SPEED_5GB | (0x80 << 8)),
2897 };
2898 u8 retval = 0;
2899
2900 for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2901 if (to_aq)
2902 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2903 else
2904 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
2905 }
2906
2907 return (retval);
2908 }
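/*
 * Example of the mapping: sysctl flag 0x4 (10G) converts to the AQ
 * flag I40E_LINK_SPEED_10GB and back.
 *
 *	u8 aq = ixl_convert_sysctl_aq_link_speed(0x4, true);
 *	u8 flags = ixl_convert_sysctl_aq_link_speed(aq, false);
 *	// aq == I40E_LINK_SPEED_10GB, flags == 0x4
 */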
2909
2910 int
2911 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2912 {
2913 struct i40e_hw *hw = &pf->hw;
2914 device_t dev = pf->dev;
2915 struct i40e_aq_get_phy_abilities_resp abilities;
2916 struct i40e_aq_set_phy_config config;
2917 enum i40e_status_code aq_error = 0;
2918
2919 /* Get current capability information */
2920 aq_error = i40e_aq_get_phy_capabilities(hw,
2921 FALSE, FALSE, &abilities, NULL);
2922 if (aq_error) {
2923 device_printf(dev,
2924 "%s: Error getting phy capabilities %d,"
2925 " aq error: %d\n", __func__, aq_error,
2926 hw->aq.asq_last_status);
2927 return (EIO);
2928 }
2929
2930 /* Prepare new config */
2931 bzero(&config, sizeof(config));
2932 if (from_aq)
2933 config.link_speed = speeds;
2934 else
2935 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2936 config.phy_type = abilities.phy_type;
2937 config.phy_type_ext = abilities.phy_type_ext;
2938 config.abilities = abilities.abilities
2939 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2940 config.eee_capability = abilities.eee_capability;
2941 config.eeer = abilities.eeer_val;
2942 config.low_power_ctrl = abilities.d3_lpan;
2943 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2944 & I40E_AQ_PHY_FEC_CONFIG_MASK;
2945
2946 /* Do aq command & restart link */
2947 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2948 if (aq_error) {
2949 device_printf(dev,
2950 "%s: Error setting new phy config %d,"
2951 " aq error: %d\n", __func__, aq_error,
2952 hw->aq.asq_last_status);
2953 return (EIO);
2954 }
2955
2956 return (0);
2957 }
2958
2959 /*
2960 ** Supported link speeds
2961 ** Flags:
2962 ** 0x1 - 100 Mb
2963 ** 0x2 - 1G
2964 ** 0x4 - 10G
2965 ** 0x8 - 20G
2966 ** 0x10 - 25G
2967 ** 0x20 - 40G
2968 ** 0x40 - 2.5G
2969 ** 0x80 - 5G
2970 */
2971 static int
2972 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2973 {
2974 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2975 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2976
2977 return sysctl_handle_int(oidp, NULL, supported, req);
2978 }
2979
2980 /*
2981 ** Control link advertise speed:
2982 ** Flags:
2983 ** 0x1 - advertise 100 Mb
2984 ** 0x2 - advertise 1G
2985 ** 0x4 - advertise 10G
2986 ** 0x8 - advertise 20G
2987 ** 0x10 - advertise 25G
2988 ** 0x20 - advertise 40G
2989 ** 0x40 - advertise 2.5G
2990 ** 0x80 - advertise 5G
2991 **
2992 ** Set to 0 to disable link
2993 */
2994 int
2995 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2996 {
2997 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2998 device_t dev = pf->dev;
2999 u8 converted_speeds;
3000 int requested_ls = 0;
3001 int error = 0;
3002
3003 /* Read in new mode */
3004 requested_ls = pf->advertised_speed;
3005 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
3006 if ((error) || (req->newptr == NULL))
3007 return (error);
3008 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
3009 device_printf(dev, "Interface is currently in FW recovery mode. "
3010 "Setting advertise speed not supported\n");
3011 return (EINVAL);
3012 }
3013
3014 /* Error out if bits outside of possible flag range are set */
3015 if ((requested_ls & ~((u8)0xFF)) != 0) {
3016 device_printf(dev, "Input advertised speed out of range; "
3017 "valid flags are: 0x%02x\n",
3018 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3019 return (EINVAL);
3020 }
3021
3022 /* Check if adapter supports input value */
3023 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
3024 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
3025 device_printf(dev, "Invalid advertised speed; "
3026 "valid flags are: 0x%02x\n",
3027 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
3028 return (EINVAL);
3029 }
3030
3031 error = ixl_set_advertised_speeds(pf, requested_ls, false);
3032 if (error)
3033 return (error);
3034
3035 pf->advertised_speed = requested_ls;
3036 ixl_update_link_status(pf);
3037 return (0);
3038 }
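/*
 * Administration sketch (sysctl path assumed): on a 40G-capable port,
 * restricting advertisement to 40G plus 10G combines the flags
 * documented above, e.g.
 *
 *	# sysctl dev.ixl.0.advertise_speed=0x24	(0x20 | 0x4)
 *
 * while writing 0 disables the link entirely.
 */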
3039
3040 /*
3041 * Input: bitmap of enum i40e_aq_link_speed
3042 */
3043 u64
3044 ixl_max_aq_speed_to_value(u8 link_speeds)
3045 {
3046 if (link_speeds & I40E_LINK_SPEED_40GB)
3047 return IF_Gbps(40);
3048 if (link_speeds & I40E_LINK_SPEED_25GB)
3049 return IF_Gbps(25);
3050 if (link_speeds & I40E_LINK_SPEED_20GB)
3051 return IF_Gbps(20);
3052 if (link_speeds & I40E_LINK_SPEED_10GB)
3053 return IF_Gbps(10);
3054 if (link_speeds & I40E_LINK_SPEED_5GB)
3055 return IF_Gbps(5);
3056 if (link_speeds & I40E_LINK_SPEED_2_5GB)
3057 return IF_Mbps(2500);
3058 if (link_speeds & I40E_LINK_SPEED_1GB)
3059 return IF_Gbps(1);
3060 if (link_speeds & I40E_LINK_SPEED_100MB)
3061 return IF_Mbps(100);
3062 else
3063 /* Minimum supported link speed */
3064 return IF_Mbps(100);
3065 }
3066
3067 /*
3068 ** Get the width and transaction speed of
3069 ** the bus this adapter is plugged into.
3070 */
3071 void
3072 ixl_get_bus_info(struct ixl_pf *pf)
3073 {
3074 struct i40e_hw *hw = &pf->hw;
3075 device_t dev = pf->dev;
3076 u16 link;
3077 u32 offset, num_ports;
3078 u64 max_speed;
3079
3080 /* Some devices don't use PCIE */
3081 if (hw->mac.type == I40E_MAC_X722)
3082 return;
3083
3084 /* Read PCI Express Capabilities Link Status Register */
3085 pci_find_cap(dev, PCIY_EXPRESS, &offset);
3086 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
3087
3088 /* Fill out hw struct with PCIE info */
3089 i40e_set_pci_config_data(hw, link);
3090
3091 /* Use info to print out bandwidth messages */
3092 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
3093 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
3094 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
3095 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
3096 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
3097 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
3098 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
3099 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
3100 ("Unknown"));
3101
3102 	/*
3103 	 * If the adapter is in a slot running at its maximum supported
3104 	 * speed and width, no warning message needs to be printed.
3105 	 */
3106 if (hw->bus.speed >= i40e_bus_speed_8000
3107 && hw->bus.width >= i40e_bus_width_pcie_x8)
3108 return;
3109
3110 num_ports = bitcount32(hw->func_caps.valid_functions);
3111 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
3112
3113 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
3114 device_printf(dev, "PCI-Express bandwidth available"
3115 " for this device may be insufficient for"
3116 " optimal performance.\n");
3117 device_printf(dev, "Please move the device to a different"
3118 " PCI-e link with more lanes and/or higher"
3119 " transfer rate.\n");
3120 }
3121 }
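/*
 * Worked example of the check above (enum values assumed to equal the
 * literal rates): a 4-port adapter capped at 40G per port needs
 * num_ports * max_speed = 4 * 40000 = 160000, while a Gen3 x8 slot
 * provides hw->bus.speed * hw->bus.width = 8000 * 8 = 64000, so the
 * insufficient-bandwidth warning is printed.
 */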
3122
3123 static int
3124 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
3125 {
3126 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3127 struct i40e_hw *hw = &pf->hw;
3128 struct sbuf *sbuf;
3129
3130 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3131 ixl_nvm_version_str(hw, sbuf);
3132 sbuf_finish(sbuf);
3133 sbuf_delete(sbuf);
3134
3135 return (0);
3136 }
3137
3138 void
3139 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
3140 {
3141 u8 nvma_ptr = nvma->config & 0xFF;
3142 u8 nvma_flags = (nvma->config & 0xF00) >> 8;
3143 const char * cmd_str;
3144
3145 switch (nvma->command) {
3146 case I40E_NVM_READ:
3147 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
3148 nvma->offset == 0 && nvma->data_size == 1) {
3149 device_printf(dev, "NVMUPD: Get Driver Status Command\n");
3150 return;
3151 }
3152 cmd_str = "READ ";
3153 break;
3154 case I40E_NVM_WRITE:
3155 cmd_str = "WRITE";
3156 break;
3157 default:
3158 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
3159 return;
3160 }
3161 device_printf(dev,
3162 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
3163 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
3164 }
3165
3166 int
3167 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
3168 {
3169 struct i40e_hw *hw = &pf->hw;
3170 struct i40e_nvm_access *nvma;
3171 device_t dev = pf->dev;
3172 enum i40e_status_code status = 0;
3173 size_t nvma_size, ifd_len, exp_len;
3174 int err, perrno;
3175
3176 DEBUGFUNC("ixl_handle_nvmupd_cmd");
3177
3178 /* Sanity checks */
3179 nvma_size = sizeof(struct i40e_nvm_access);
3180 ifd_len = ifd->ifd_len;
3181
3182 if (ifd_len < nvma_size ||
3183 ifd->ifd_data == NULL) {
3184 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3185 __func__);
3186 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3187 __func__, ifd_len, nvma_size);
3188 device_printf(dev, "%s: data pointer: %p\n", __func__,
3189 ifd->ifd_data);
3190 return (EINVAL);
3191 }
3192
3193 nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3194 err = copyin(ifd->ifd_data, nvma, ifd_len);
3195 if (err) {
3196 device_printf(dev, "%s: Cannot get request from user space\n",
3197 __func__);
3198 free(nvma, M_IXL);
3199 return (err);
3200 }
3201
3202 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3203 ixl_print_nvm_cmd(dev, nvma);
3204
3205 if (IXL_PF_IS_RESETTING(pf)) {
3206 int count = 0;
3207 while (count++ < 100) {
3208 i40e_msec_delay(100);
3209 if (!(IXL_PF_IS_RESETTING(pf)))
3210 break;
3211 }
3212 }
3213
3214 if (IXL_PF_IS_RESETTING(pf)) {
3215 device_printf(dev,
3216 "%s: timeout waiting for EMP reset to finish\n",
3217 __func__);
3218 free(nvma, M_IXL);
3219 return (-EBUSY);
3220 }
3221
3222 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3223 device_printf(dev,
3224 "%s: invalid request, data size not in supported range\n",
3225 __func__);
3226 free(nvma, M_IXL);
3227 return (EINVAL);
3228 }
3229
3230 /*
3231 * Older versions of the NVM update tool don't set ifd_len to the size
3232 * of the entire buffer passed to the ioctl. Check the data_size field
3233 * in the contained i40e_nvm_access struct and ensure everything is
3234 * copied in from userspace.
3235 */
3236 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
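	/*
	 * For example, a request with data_size = 4096 implies a full
	 * buffer of sizeof(struct i40e_nvm_access) + 4095 bytes, since
	 * the struct's trailing one-byte data member covers the first byte.
	 */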
3237
3238 if (ifd_len < exp_len) {
3239 ifd_len = exp_len;
3240 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3241 err = copyin(ifd->ifd_data, nvma, ifd_len);
3242 if (err) {
3243 device_printf(dev, "%s: Cannot get request from user space\n",
3244 __func__);
3245 free(nvma, M_IXL);
3246 return (err);
3247 }
3248 }
3249
3250 // TODO: Might need a different lock here
3251 // IXL_PF_LOCK(pf);
3252 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3253 // IXL_PF_UNLOCK(pf);
3254
3255 err = copyout(nvma, ifd->ifd_data, ifd_len);
3256 free(nvma, M_IXL);
3257 if (err) {
3258 device_printf(dev, "%s: Cannot return data to user space\n",
3259 __func__);
3260 return (err);
3261 }
3262
3263 /* Let the nvmupdate report errors, show them only when debug is enabled */
3264 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3265 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3266 i40e_stat_str(hw, status), perrno);
3267
3268 	/*
3269 	 * -EPERM is actually ERESTART, which the kernel interprets as a
3270 	 * request to run this ioctl again. So return -EACCES instead of -EPERM.
3271 	 */
3272 if (perrno == -EPERM)
3273 return (-EACCES);
3274 else
3275 return (perrno);
3276 }
3277
3278 int
3279 ixl_find_i2c_interface(struct ixl_pf *pf)
3280 {
3281 struct i40e_hw *hw = &pf->hw;
3282 bool i2c_en, port_matched;
3283 u32 reg;
3284
3285 for (int i = 0; i < 4; i++) {
3286 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3287 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3288 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3289 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3290 & BIT(hw->port);
3291 if (i2c_en && port_matched)
3292 return (i);
3293 }
3294
3295 return (-1);
3296 }
3297
3298 void
3299 ixl_set_link(struct ixl_pf *pf, bool enable)
3300 {
3301 struct i40e_hw *hw = &pf->hw;
3302 device_t dev = pf->dev;
3303 struct i40e_aq_get_phy_abilities_resp abilities;
3304 struct i40e_aq_set_phy_config config;
3305 enum i40e_status_code aq_error = 0;
3306 u32 phy_type, phy_type_ext;
3307
3308 /* Get initial capability information */
3309 aq_error = i40e_aq_get_phy_capabilities(hw,
3310 FALSE, TRUE, &abilities, NULL);
3311 if (aq_error) {
3312 device_printf(dev,
3313 "%s: Error getting phy capabilities %d,"
3314 " aq error: %d\n", __func__, aq_error,
3315 hw->aq.asq_last_status);
3316 return;
3317 }
3318
3319 phy_type = abilities.phy_type;
3320 phy_type_ext = abilities.phy_type_ext;
3321
3322 /* Get current capability information */
3323 aq_error = i40e_aq_get_phy_capabilities(hw,
3324 FALSE, FALSE, &abilities, NULL);
3325 if (aq_error) {
3326 device_printf(dev,
3327 "%s: Error getting phy capabilities %d,"
3328 " aq error: %d\n", __func__, aq_error,
3329 hw->aq.asq_last_status);
3330 return;
3331 }
3332
3333 /* Prepare new config */
3334 memset(&config, 0, sizeof(config));
3335 config.link_speed = abilities.link_speed;
3336 config.abilities = abilities.abilities;
3337 config.eee_capability = abilities.eee_capability;
3338 config.eeer = abilities.eeer_val;
3339 config.low_power_ctrl = abilities.d3_lpan;
3340 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3341 & I40E_AQ_PHY_FEC_CONFIG_MASK;
3342 config.phy_type = 0;
3343 config.phy_type_ext = 0;
3344
3345 config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3346 I40E_AQ_PHY_FLAG_PAUSE_RX);
3347
3348 switch (pf->fc) {
3349 case I40E_FC_FULL:
3350 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3351 I40E_AQ_PHY_FLAG_PAUSE_RX;
3352 break;
3353 case I40E_FC_RX_PAUSE:
3354 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3355 break;
3356 case I40E_FC_TX_PAUSE:
3357 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3358 break;
3359 default:
3360 break;
3361 }
3362
3363 if (enable) {
3364 config.phy_type = phy_type;
3365 config.phy_type_ext = phy_type_ext;
3367 }
3368
3369 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3370 if (aq_error) {
3371 device_printf(dev,
3372 "%s: Error setting new phy config %d,"
3373 " aq error: %d\n", __func__, aq_error,
3374 hw->aq.asq_last_status);
3375 return;
3376 }
3377
3378 aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3379 if (aq_error) {
3380 device_printf(dev,
3381 "%s: Error set link config %d,"
3382 " aq error: %d\n", __func__, aq_error,
3383 hw->aq.asq_last_status);
3384 return;
3385 }
3386 }
3387
3388 static char *
3389 ixl_phy_type_string(u32 bit_pos, bool ext)
3390 {
3391 static char * phy_types_str[32] = {
3392 "SGMII",
3393 "1000BASE-KX",
3394 "10GBASE-KX4",
3395 "10GBASE-KR",
3396 "40GBASE-KR4",
3397 "XAUI",
3398 "XFI",
3399 "SFI",
3400 "XLAUI",
3401 "XLPPI",
3402 "40GBASE-CR4",
3403 "10GBASE-CR1",
3404 "SFP+ Active DA",
3405 "QSFP+ Active DA",
3406 "Reserved (14)",
3407 "Reserved (15)",
3408 "Reserved (16)",
3409 "100BASE-TX",
3410 "1000BASE-T",
3411 "10GBASE-T",
3412 "10GBASE-SR",
3413 "10GBASE-LR",
3414 "10GBASE-SFP+Cu",
3415 "10GBASE-CR1",
3416 "40GBASE-CR4",
3417 "40GBASE-SR4",
3418 "40GBASE-LR4",
3419 "1000BASE-SX",
3420 "1000BASE-LX",
3421 "1000BASE-T Optical",
3422 "20GBASE-KR2",
3423 "Reserved (31)"
3424 };
3425 static char * ext_phy_types_str[8] = {
3426 "25GBASE-KR",
3427 "25GBASE-CR",
3428 "25GBASE-SR",
3429 "25GBASE-LR",
3430 "25GBASE-AOC",
3431 "25GBASE-ACC",
3432 "2.5GBASE-T",
3433 "5GBASE-T"
3434 };
3435
3436 if (ext && bit_pos > 7) return "Invalid_Ext";
3437 if (bit_pos > 31) return "Invalid";
3438
3439 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3440 }
3441
3442 /* TODO: ERJ: I don't think this is necessary anymore. */
3443 int
3444 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3445 {
3446 device_t dev = pf->dev;
3447 struct i40e_hw *hw = &pf->hw;
3448 struct i40e_aq_desc desc;
3449 enum i40e_status_code status;
3450
3451 struct i40e_aqc_get_link_status *aq_link_status =
3452 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3453
3454 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3455 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3456 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3457 if (status) {
3458 device_printf(dev,
3459 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3460 __func__, i40e_stat_str(hw, status),
3461 i40e_aq_str(hw, hw->aq.asq_last_status));
3462 return (EIO);
3463 }
3464
3465 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
3466 return (0);
3467 }
3468
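/*
 * Map a PHY type value from a link status response to a name string;
 * values at or above 0x1F index the extended (25G and 2.5G/5G) table.
 */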
3469 static char *
3470 ixl_phy_type_string_ls(u8 val)
3471 {
3472 if (val >= 0x1F)
3473 return ixl_phy_type_string(val - 0x1F, true);
3474 else
3475 return ixl_phy_type_string(val, false);
3476 }
3477
3478 static int
3479 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3480 {
3481 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3482 device_t dev = pf->dev;
3483 struct sbuf *buf;
3484 int error = 0;
3485
3486 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3487 if (!buf) {
3488 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3489 return (ENOMEM);
3490 }
3491
3492 struct i40e_aqc_get_link_status link_status;
3493 error = ixl_aq_get_link_status(pf, &link_status);
3494 if (error) {
3495 sbuf_delete(buf);
3496 return (error);
3497 }
3498
3499 sbuf_printf(buf, "\n"
3500 "PHY Type : 0x%02x<%s>\n"
3501 "Speed : 0x%02x\n"
3502 "Link info: 0x%02x\n"
3503 "AN info : 0x%02x\n"
3504 "Ext info : 0x%02x\n"
3505 "Loopback : 0x%02x\n"
3506 "Max Frame: %d\n"
3507 "Config : 0x%02x\n"
3508 "Power : 0x%02x",
3509 link_status.phy_type,
3510 ixl_phy_type_string_ls(link_status.phy_type),
3511 link_status.link_speed,
3512 link_status.link_info,
3513 link_status.an_info,
3514 link_status.ext_info,
3515 link_status.loopback,
3516 link_status.max_frame_size,
3517 link_status.config,
3518 link_status.power_desc);
3519
3520 error = sbuf_finish(buf);
3521 if (error)
3522 device_printf(dev, "Error finishing sbuf: %d\n", error);
3523
3524 sbuf_delete(buf);
3525 return (error);
3526 }
3527
3528 static int
3529 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3530 {
3531 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3532 struct i40e_hw *hw = &pf->hw;
3533 device_t dev = pf->dev;
3534 enum i40e_status_code status;
3535 struct i40e_aq_get_phy_abilities_resp abilities;
3536 struct sbuf *buf;
3537 int error = 0;
3538
3539 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3540 if (!buf) {
3541 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3542 return (ENOMEM);
3543 }
3544
3545 status = i40e_aq_get_phy_capabilities(hw,
3546 FALSE, arg2 != 0, &abilities, NULL);
3547 if (status) {
3548 device_printf(dev,
3549 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3550 __func__, i40e_stat_str(hw, status),
3551 i40e_aq_str(hw, hw->aq.asq_last_status));
3552 sbuf_delete(buf);
3553 return (EIO);
3554 }
3555
3556 sbuf_printf(buf, "\n"
3557 "PHY Type : %08x",
3558 abilities.phy_type);
3559
3560 if (abilities.phy_type != 0) {
3561 sbuf_printf(buf, "<");
3562 for (int i = 0; i < 32; i++)
3563 if ((1 << i) & abilities.phy_type)
3564 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3565 sbuf_printf(buf, ">");
3566 }
3567
3568 sbuf_printf(buf, "\nPHY Ext : %02x",
3569 abilities.phy_type_ext);
3570
3571 if (abilities.phy_type_ext != 0) {
3572 sbuf_printf(buf, "<");
3573 for (int i = 0; i < 4; i++)
3574 if ((1 << i) & abilities.phy_type_ext)
3575 sbuf_printf(buf, "%s,",
3576 ixl_phy_type_string(i, true));
3577 sbuf_printf(buf, ">");
3578 }
3579
3580 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
3581 if (abilities.link_speed != 0) {
3582 u8 link_speed;
3583 sbuf_printf(buf, " <");
3584 for (int i = 0; i < 8; i++) {
3585 link_speed = (1 << i) & abilities.link_speed;
3586 if (link_speed)
3587 sbuf_printf(buf, "%s, ",
3588 ixl_link_speed_string(link_speed));
3589 }
3590 sbuf_printf(buf, ">");
3591 }
3592
3593 sbuf_printf(buf, "\n"
3594 "Abilities: %02x\n"
3595 "EEE cap : %04x\n"
3596 "EEER reg : %08x\n"
3597 "D3 Lpan : %02x\n"
3598 "ID : %02x %02x %02x %02x\n"
3599 "ModType : %02x %02x %02x\n"
3600 "ModType E: %01x\n"
3601 "FEC Cfg : %02x\n"
3602 "Ext CC : %02x",
3603 abilities.abilities, abilities.eee_capability,
3604 abilities.eeer_val, abilities.d3_lpan,
3605 abilities.phy_id[0], abilities.phy_id[1],
3606 abilities.phy_id[2], abilities.phy_id[3],
3607 abilities.module_type[0], abilities.module_type[1],
3608 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3609 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3610 abilities.ext_comp_code);
3611
3612 error = sbuf_finish(buf);
3613 if (error)
3614 device_printf(dev, "Error finishing sbuf: %d\n", error);
3615
3616 sbuf_delete(buf);
3617 return (error);
3618 }
3619
3620 static int
3621 ixl_sysctl_phy_statistics(SYSCTL_HANDLER_ARGS)
3622 {
3623 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3624 struct i40e_hw *hw = &pf->hw;
3625 device_t dev = pf->dev;
3626 struct sbuf *buf;
3627 int error = 0;
3628
3629 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3630 if (buf == NULL) {
3631 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3632 return (ENOMEM);
3633 }
3634
3635 if (hw->mac.type == I40E_MAC_X722) {
3636 sbuf_printf(buf, "\n"
3637 "PCS Link Control Register: unavailable\n"
3638 "PCS Link Status 1: unavailable\n"
3639 "PCS Link Status 2: unavailable\n"
3640 "XGMII FIFO Status: unavailable\n"
3641 "Auto-Negotiation (AN) Status: unavailable\n"
3642 "KR PCS Status: unavailable\n"
3643 "KR FEC Status 1 – FEC Correctable Blocks Counter: unavailable\n"
3644 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: unavailable"
3645 );
3646 } else {
3647 sbuf_printf(buf, "\n"
3648 "PCS Link Control Register: %#010X\n"
3649 "PCS Link Status 1: %#010X\n"
3650 "PCS Link Status 2: %#010X\n"
3651 "XGMII FIFO Status: %#010X\n"
3652 "Auto-Negotiation (AN) Status: %#010X\n"
3653 "KR PCS Status: %#010X\n"
3654 "KR FEC Status 1 – FEC Correctable Blocks Counter: %#010X\n"
3655 "KR FEC Status 2 – FEC Uncorrectable Blocks Counter: %#010X",
3656 rd32(hw, I40E_PRTMAC_PCS_LINK_CTRL),
3657 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS1(0)),
3658 rd32(hw, I40E_PRTMAC_PCS_LINK_STATUS2),
3659 rd32(hw, I40E_PRTMAC_PCS_XGMII_FIFO_STATUS),
3660 rd32(hw, I40E_PRTMAC_PCS_AN_LP_STATUS),
3661 rd32(hw, I40E_PRTMAC_PCS_KR_STATUS),
3662 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS1),
3663 rd32(hw, I40E_PRTMAC_PCS_FEC_KR_STATUS2)
3664 );
3665 }
3666
3667 error = sbuf_finish(buf);
3668 if (error)
3669 device_printf(dev, "Error finishing sbuf: %d\n", error);
3670
3671 sbuf_delete(buf);
3672 return (error);
3673 }
3674
3675 static int
3676 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3677 {
3678 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3679 struct ixl_vsi *vsi = &pf->vsi;
3680 struct ixl_mac_filter *f;
3681 device_t dev = pf->dev;
3682 int error = 0, ftl_len = 0, ftl_counter = 0;
3683
3684 struct sbuf *buf;
3685
3686 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3687 if (!buf) {
3688 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3689 return (ENOMEM);
3690 }
3691
3692 sbuf_printf(buf, "\n");
3693
3694 /* Print MAC filters */
3695 sbuf_printf(buf, "PF Filters:\n");
3696 LIST_FOREACH(f, &vsi->ftl, ftle)
3697 ftl_len++;
3698
3699 if (ftl_len < 1)
3700 sbuf_printf(buf, "(none)\n");
3701 else {
3702 LIST_FOREACH(f, &vsi->ftl, ftle) {
3703 sbuf_printf(buf,
3704 MAC_FORMAT ", vlan %4d, flags %#06x",
3705 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3706 /* don't print '\n' for last entry */
3707 if (++ftl_counter != ftl_len)
3708 sbuf_printf(buf, "\n");
3709 }
3710 }
3711
3712 #ifdef PCI_IOV
3713 /* TODO: Give each VF its own filter list sysctl */
3714 struct ixl_vf *vf;
3715 if (pf->num_vfs > 0) {
3716 sbuf_printf(buf, "\n\n");
3717 for (int i = 0; i < pf->num_vfs; i++) {
3718 vf = &pf->vfs[i];
3719 if (!(vf->vf_flags & VF_FLAG_ENABLED))
3720 continue;
3721
3722 vsi = &vf->vsi;
3723 ftl_len = 0, ftl_counter = 0;
3724 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3725 LIST_FOREACH(f, &vsi->ftl, ftle)
3726 ftl_len++;
3727
3728 if (ftl_len < 1)
3729 sbuf_printf(buf, "(none)\n");
3730 else {
3731 LIST_FOREACH(f, &vsi->ftl, ftle) {
3732 sbuf_printf(buf,
3733 MAC_FORMAT ", vlan %4d, flags %#06x\n",
3734 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3735 }
3736 }
3737 }
3738 }
3739 #endif
3740
3741 error = sbuf_finish(buf);
3742 if (error)
3743 device_printf(dev, "Error finishing sbuf: %d\n", error);
3744 sbuf_delete(buf);
3745
3746 return (error);
3747 }
3748
3749 #define IXL_SW_RES_SIZE 0x14
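/* qsort(3) comparator: orders switch resource entries by resource_type. */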
3750 int
3751 ixl_res_alloc_cmp(const void *a, const void *b)
3752 {
3753 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3754 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3755 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3756
3757 return ((int)one->resource_type - (int)two->resource_type);
3758 }
3759
3760 /*
3761 * Longest string length: 25
3762 */
3763 const char *
3764 ixl_switch_res_type_string(u8 type)
3765 {
3766 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3767 "VEB",
3768 "VSI",
3769 "Perfect Match MAC address",
3770 "S-tag",
3771 "(Reserved)",
3772 "Multicast hash entry",
3773 "Unicast hash entry",
3774 "VLAN",
3775 "VSI List entry",
3776 "(Reserved)",
3777 "VLAN Statistic Pool",
3778 "Mirror Rule",
3779 "Queue Set",
3780 "Inner VLAN Forward filter",
3781 "(Reserved)",
3782 "Inner MAC",
3783 "IP",
3784 "GRE/VN1 Key",
3785 "VN2 Key",
3786 "Tunneling Port"
3787 };
3788
3789 if (type < IXL_SW_RES_SIZE)
3790 return ixl_switch_res_type_strings[type];
3791 else
3792 return "(Reserved)";
3793 }
3794
3795 static int
3796 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3797 {
3798 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3799 struct i40e_hw *hw = &pf->hw;
3800 device_t dev = pf->dev;
3801 struct sbuf *buf;
3802 enum i40e_status_code status;
3803 int error = 0;
3804
3805 u8 num_entries;
3806 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3807
3808 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3809 if (!buf) {
3810 device_printf(dev, "Could not allocate sbuf for output.\n");
3811 return (ENOMEM);
3812 }
3813
3814 bzero(resp, sizeof(resp));
3815 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3816 resp,
3817 IXL_SW_RES_SIZE,
3818 NULL);
3819 if (status) {
3820 device_printf(dev,
3821 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3822 __func__, i40e_stat_str(hw, status),
3823 i40e_aq_str(hw, hw->aq.asq_last_status));
3824 sbuf_delete(buf);
3825 		return (EIO);
3826 }
3827
3828 /* Sort entries by type for display */
3829 qsort(resp, num_entries,
3830 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3831 &ixl_res_alloc_cmp);
3832
3833 sbuf_cat(buf, "\n");
3834 sbuf_printf(buf, "# of entries: %d\n", num_entries);
3835 sbuf_printf(buf,
3836 " Type | Guaranteed | Total | Used | Un-allocated\n"
3837 " | (this) | (all) | (this) | (all) \n");
3838 for (int i = 0; i < num_entries; i++) {
3839 sbuf_printf(buf,
3840 "%25s | %10d %5d %6d %12d",
3841 ixl_switch_res_type_string(resp[i].resource_type),
3842 resp[i].guaranteed,
3843 resp[i].total,
3844 resp[i].used,
3845 resp[i].total_unalloced);
3846 if (i < num_entries - 1)
3847 sbuf_cat(buf, "\n");
3848 }
3849
3850 error = sbuf_finish(buf);
3851 if (error)
3852 device_printf(dev, "Error finishing sbuf: %d\n", error);
3853
3854 sbuf_delete(buf);
3855 return (error);
3856 }
3857
3858 enum ixl_sw_seid_offset {
3859 IXL_SW_SEID_EMP = 1,
3860 IXL_SW_SEID_MAC_START = 2,
3861 IXL_SW_SEID_MAC_END = 5,
3862 IXL_SW_SEID_PF_START = 16,
3863 IXL_SW_SEID_PF_END = 31,
3864 IXL_SW_SEID_VF_START = 32,
3865 IXL_SW_SEID_VF_END = 159,
3866 };
3867
3868 /*
3869 * Caller must init and delete sbuf; this function will clear and
3870 * finish it for caller.
3871 *
3872 * Note: The SEID argument only applies for elements defined by FW at
3873 * power-on; these include the EMP, Ports, PFs and VFs.
3874 */
3875 static char *
3876 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3877 {
3878 sbuf_clear(s);
3879
3880 /* If SEID is in certain ranges, then we can infer the
3881 * mapping of SEID to switch element.
3882 */
3883 if (seid == IXL_SW_SEID_EMP) {
3884 sbuf_cat(s, "EMP");
3885 goto out;
3886 } else if (seid >= IXL_SW_SEID_MAC_START &&
3887 seid <= IXL_SW_SEID_MAC_END) {
3888 sbuf_printf(s, "MAC %2d",
3889 seid - IXL_SW_SEID_MAC_START);
3890 goto out;
3891 } else if (seid >= IXL_SW_SEID_PF_START &&
3892 seid <= IXL_SW_SEID_PF_END) {
3893 sbuf_printf(s, "PF %3d",
3894 seid - IXL_SW_SEID_PF_START);
3895 goto out;
3896 } else if (seid >= IXL_SW_SEID_VF_START &&
3897 seid <= IXL_SW_SEID_VF_END) {
3898 sbuf_printf(s, "VF %3d",
3899 seid - IXL_SW_SEID_VF_START);
3900 goto out;
3901 }
3902
3903 switch (element_type) {
3904 case I40E_AQ_SW_ELEM_TYPE_BMC:
3905 sbuf_cat(s, "BMC");
3906 break;
3907 case I40E_AQ_SW_ELEM_TYPE_PV:
3908 sbuf_cat(s, "PV");
3909 break;
3910 case I40E_AQ_SW_ELEM_TYPE_VEB:
3911 sbuf_cat(s, "VEB");
3912 break;
3913 case I40E_AQ_SW_ELEM_TYPE_PA:
3914 sbuf_cat(s, "PA");
3915 break;
3916 case I40E_AQ_SW_ELEM_TYPE_VSI:
3917 sbuf_printf(s, "VSI");
3918 break;
3919 default:
3920 sbuf_cat(s, "?");
3921 break;
3922 }
3923
3924 out:
3925 sbuf_finish(s);
3926 return sbuf_data(s);
3927 }
3928
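/* qsort(3) comparator: orders switch config elements by SEID. */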
3929 static int
3930 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3931 {
3932 const struct i40e_aqc_switch_config_element_resp *one, *two;
3933 one = (const struct i40e_aqc_switch_config_element_resp *)a;
3934 two = (const struct i40e_aqc_switch_config_element_resp *)b;
3935
3936 return ((int)one->seid - (int)two->seid);
3937 }
3938
3939 static int
3940 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3941 {
3942 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3943 struct i40e_hw *hw = &pf->hw;
3944 device_t dev = pf->dev;
3945 struct sbuf *buf;
3946 struct sbuf *nmbuf;
3947 enum i40e_status_code status;
3948 int error = 0;
3949 u16 next = 0;
3950 u8 aq_buf[I40E_AQ_LARGE_BUF];
3951
3952 struct i40e_aqc_switch_config_element_resp *elem;
3953 struct i40e_aqc_get_switch_config_resp *sw_config;
3954 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3955
3956 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3957 if (!buf) {
3958 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3959 return (ENOMEM);
3960 }
3961
3962 status = i40e_aq_get_switch_config(hw, sw_config,
3963 sizeof(aq_buf), &next, NULL);
3964 if (status) {
3965 device_printf(dev,
3966 "%s: aq_get_switch_config() error %s, aq error %s\n",
3967 __func__, i40e_stat_str(hw, status),
3968 i40e_aq_str(hw, hw->aq.asq_last_status));
3969 sbuf_delete(buf);
3970 		return (EIO);
3971 }
3972 if (next)
3973 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3974 __func__, next);
3975
3976 nmbuf = sbuf_new_auto();
3977 if (!nmbuf) {
3978 device_printf(dev, "Could not allocate sbuf for name output.\n");
3979 sbuf_delete(buf);
3980 return (ENOMEM);
3981 }
3982
3983 /* Sort entries by SEID for display */
3984 qsort(sw_config->element, sw_config->header.num_reported,
3985 sizeof(struct i40e_aqc_switch_config_element_resp),
3986 &ixl_sw_cfg_elem_seid_cmp);
3987
3988 sbuf_cat(buf, "\n");
3989 /* Assuming <= 255 elements in switch */
3990 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3991 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3992 /* Exclude:
3993 * Revision -- all elements are revision 1 for now
3994 */
3995 sbuf_printf(buf,
3996 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n"
3997 " | | | (uplink)\n");
3998 for (int i = 0; i < sw_config->header.num_reported; i++) {
3999 elem = &sw_config->element[i];
4000
4001 // "%4d (%8s) | %8s %8s %#8x",
4002 sbuf_printf(buf, "%4d", elem->seid);
4003 sbuf_cat(buf, " ");
4004 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4005 elem->element_type, elem->seid));
4006 sbuf_cat(buf, " | ");
4007 sbuf_printf(buf, "%4d", elem->uplink_seid);
4008 sbuf_cat(buf, " ");
4009 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4010 0, elem->uplink_seid));
4011 sbuf_cat(buf, " | ");
4012 sbuf_printf(buf, "%4d", elem->downlink_seid);
4013 sbuf_cat(buf, " ");
4014 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
4015 0, elem->downlink_seid));
4016 sbuf_cat(buf, " | ");
4017 sbuf_printf(buf, "%8d", elem->connection_type);
4018 if (i < sw_config->header.num_reported - 1)
4019 sbuf_cat(buf, "\n");
4020 }
4021 sbuf_delete(nmbuf);
4022
4023 error = sbuf_finish(buf);
4024 if (error)
4025 device_printf(dev, "Error finishing sbuf: %d\n", error);
4026
4027 sbuf_delete(buf);
4028
4029 return (error);
4030 }
4031
4032 static int
4033 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
4034 {
4035 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4036 struct i40e_hw *hw = &pf->hw;
4037 device_t dev = pf->dev;
4038 int requested_vlan = -1;
4039 enum i40e_status_code status = 0;
4040 int error = 0;
4041
4042 error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
4043 if ((error) || (req->newptr == NULL))
4044 return (error);
4045
4046 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
4047 device_printf(dev, "Flags disallow setting of vlans\n");
4048 return (ENODEV);
4049 }
4050
4051 hw->switch_tag = requested_vlan;
4052 device_printf(dev,
4053 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
4054 hw->switch_tag, hw->first_tag, hw->second_tag);
4055 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
4056 if (status) {
4057 device_printf(dev,
4058 "%s: aq_set_switch_config() error %s, aq error %s\n",
4059 __func__, i40e_stat_str(hw, status),
4060 i40e_aq_str(hw, hw->aq.asq_last_status));
4061 		return (EIO);
4062 }
4063 return (0);
4064 }
4065
4066 static int
4067 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
4068 {
4069 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4070 struct i40e_hw *hw = &pf->hw;
4071 device_t dev = pf->dev;
4072 struct sbuf *buf;
4073 int error = 0;
4074 enum i40e_status_code status;
4075 u32 reg;
4076
4077 struct i40e_aqc_get_set_rss_key_data key_data;
4078
4079 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4080 if (!buf) {
4081 device_printf(dev, "Could not allocate sbuf for output.\n");
4082 return (ENOMEM);
4083 }
4084
4085 bzero(&key_data, sizeof(key_data));
4086
4087 sbuf_cat(buf, "\n");
4088 if (hw->mac.type == I40E_MAC_X722) {
4089 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
4090 if (status)
4091 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
4092 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4093 } else {
4094 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4095 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
4096 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4);
4097 }
4098 }
4099
4100 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
4101
4102 error = sbuf_finish(buf);
4103 if (error)
4104 device_printf(dev, "Error finishing sbuf: %d\n", error);
4105 sbuf_delete(buf);
4106
4107 return (error);
4108 }
4109
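/*
 * Hex-dump 'length' bytes from 'buf' into 'sb', 16 bytes per line, each
 * line prefixed with 'label_offset' plus the line's starting byte offset.
 * When 'text' is true, an ASCII column is appended with non-printable
 * bytes rendered as '.'.
 */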
4110 static void
4111 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
4112 {
4113 int i, j, k, width;
4114 char c;
4115
4116 if (length < 1 || buf == NULL) return;
4117
4118 int byte_stride = 16;
4119 int lines = length / byte_stride;
4120 int rem = length % byte_stride;
4121 if (rem > 0)
4122 lines++;
4123
4124 for (i = 0; i < lines; i++) {
4125 width = (rem > 0 && i == lines - 1)
4126 ? rem : byte_stride;
4127
4128 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
4129
4130 for (j = 0; j < width; j++)
4131 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
4132
4133 if (width < byte_stride) {
4134 for (k = 0; k < (byte_stride - width); k++)
4135 sbuf_printf(sb, " ");
4136 }
4137
4138 if (!text) {
4139 sbuf_printf(sb, "\n");
4140 continue;
4141 }
4142
4143 for (j = 0; j < width; j++) {
4144 c = (char)buf[i * byte_stride + j];
4145 if (c < 32 || c > 126)
4146 sbuf_printf(sb, ".");
4147 else
4148 sbuf_printf(sb, "%c", c);
4149
4150 if (j == width - 1)
4151 sbuf_printf(sb, "\n");
4152 }
4153 }
4154 }
4155
4156 static int
4157 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
4158 {
4159 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4160 struct i40e_hw *hw = &pf->hw;
4161 device_t dev = pf->dev;
4162 struct sbuf *buf;
4163 int error = 0;
4164 enum i40e_status_code status;
4165 u8 hlut[512];
4166 u32 reg;
4167
4168 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4169 if (!buf) {
4170 device_printf(dev, "Could not allocate sbuf for output.\n");
4171 return (ENOMEM);
4172 }
4173
4174 bzero(hlut, sizeof(hlut));
4175 sbuf_cat(buf, "\n");
4176 if (hw->mac.type == I40E_MAC_X722) {
4177 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
4178 if (status)
4179 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
4180 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4181 } else {
4182 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
4183 reg = rd32(hw, I40E_PFQF_HLUT(i));
4184 bcopy(®, &hlut[i << 2], 4);
4185 }
4186 }
4187 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
4188
4189 error = sbuf_finish(buf);
4190 if (error)
4191 device_printf(dev, "Error finishing sbuf: %d\n", error);
4192 sbuf_delete(buf);
4193
4194 return (error);
4195 }
4196
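/*
 * Report the 64-bit RSS hash-enable (HENA) bitmask, assembled from the
 * two 32-bit I40E_PFQF_HENA registers.
 */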
4197 static int
4198 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
4199 {
4200 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4201 struct i40e_hw *hw = &pf->hw;
4202 u64 hena;
4203
4204 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
4205 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
4206
4207 return sysctl_handle_long(oidp, NULL, hena, req);
4208 }
4209
4210 /*
4211 * Sysctl to disable firmware's link management
4212 *
4213 * 1 - Disable link management on this port
4214 * 0 - Re-enable link management
4215 *
4216 * On normal NVMs, firmware manages link by default.
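 *
 * Usage example (the sysctl node path shown is illustrative; it depends
 * on where the debug sysctl tree is attached):
 *   sysctl dev.ixl.0.debug.disable_fw_link_management=1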
4217 */
4218 static int
4219 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
4220 {
4221 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4222 struct i40e_hw *hw = &pf->hw;
4223 device_t dev = pf->dev;
4224 int requested_mode = -1;
4225 enum i40e_status_code status = 0;
4226 int error = 0;
4227
4228 /* Read in new mode */
4229 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4230 if ((error) || (req->newptr == NULL))
4231 return (error);
4232 /* Check for sane value */
4233 if (requested_mode < 0 || requested_mode > 1) {
4234 device_printf(dev, "Valid modes are 0 or 1\n");
4235 return (EINVAL);
4236 }
4237
4238 /* Set new mode */
4239 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4240 if (status) {
4241 device_printf(dev,
4242 "%s: Error setting new phy debug mode %s,"
4243 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4244 i40e_aq_str(hw, hw->aq.asq_last_status));
4245 return (EIO);
4246 }
4247
4248 return (0);
4249 }
4250
4251 /*
4252 * Read some diagnostic data from a (Q)SFP+ module
4253 *
4254 * SFP A2 QSFP Lower Page
4255 * Temperature 96-97 22-23
4256 * Vcc 98-99 26-27
4257 * TX power 102-103 34-35..40-41
4258 * RX power 104-105 50-51..56-57
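 *
 * Per SFF-8472/SFF-8636, temperature is a signed 16-bit value in units
 * of 1/256 degC, Vcc is unsigned in units of 100 uV, and the TX/RX
 * power fields are unsigned in units of 0.1 uW.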
4259 */
4260 static int
4261 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4262 {
4263 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4264 device_t dev = pf->dev;
4265 struct sbuf *sbuf;
4266 int error = 0;
4267 u8 output;
4268
4269 if (req->oldptr == NULL) {
4270 error = SYSCTL_OUT(req, 0, 128);
4271 return (0);
4272 }
4273
4274 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4275 if (error) {
4276 device_printf(dev, "Error reading from i2c\n");
4277 return (error);
4278 }
4279
4280 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4281 if (output == 0x3) {
4282 /*
4283 * Check for:
4284 * - Internally calibrated data
4285 * - Diagnostic monitoring is implemented
4286 */
4287 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4288 if (!(output & 0x60)) {
4289 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4290 return (0);
4291 }
4292
4293 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4294
4295 for (u8 offset = 96; offset < 100; offset++) {
4296 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4297 sbuf_printf(sbuf, "%02X ", output);
4298 }
4299 for (u8 offset = 102; offset < 106; offset++) {
4300 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4301 sbuf_printf(sbuf, "%02X ", output);
4302 }
4303 } else if (output == 0xD || output == 0x11) {
4304 /*
4305 * QSFP+ modules are always internally calibrated, and must indicate
4306 * what types of diagnostic monitoring are implemented
4307 */
4308 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4309
4310 for (u8 offset = 22; offset < 24; offset++) {
4311 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4312 sbuf_printf(sbuf, "%02X ", output);
4313 }
4314 for (u8 offset = 26; offset < 28; offset++) {
4315 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4316 sbuf_printf(sbuf, "%02X ", output);
4317 }
4318 /* Read the data from the first lane */
4319 for (u8 offset = 34; offset < 36; offset++) {
4320 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4321 sbuf_printf(sbuf, "%02X ", output);
4322 }
4323 for (u8 offset = 50; offset < 52; offset++) {
4324 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4325 sbuf_printf(sbuf, "%02X ", output);
4326 }
4327 } else {
4328 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4329 return (0);
4330 }
4331
4332 sbuf_finish(sbuf);
4333 sbuf_delete(sbuf);
4334
4335 return (0);
4336 }
4337
4338 /*
4339 * Sysctl to read a byte from I2C bus.
4340 *
4341 * Input: 32-bit value:
4342 * bits 0-7: device address (0xA0 or 0xA2)
4343 * bits 8-15: offset (0-255)
4344 * bits 16-31: unused
4345 * Output: 8-bit value read
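 *
 * Example: an input of 0x5AA2 reads offset 0x5A from device address 0xA2.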
4346 */
4347 static int
4348 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4349 {
4350 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4351 device_t dev = pf->dev;
4352 int input = -1, error = 0;
4353 u8 dev_addr, offset, output;
4354
4355 /* Read in I2C read parameters */
4356 error = sysctl_handle_int(oidp, &input, 0, req);
4357 if ((error) || (req->newptr == NULL))
4358 return (error);
4359 /* Validate device address */
4360 dev_addr = input & 0xFF;
4361 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4362 return (EINVAL);
4363 }
4364 offset = (input >> 8) & 0xFF;
4365
4366 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4367 if (error)
4368 return (error);
4369
4370 device_printf(dev, "%02X\n", output);
4371 return (0);
4372 }
4373
4374 /*
4375 * Sysctl to write a byte to the I2C bus.
4376 *
4377 * Input: 32-bit value:
4378 * bits 0-7: device address (0xA0 or 0xA2)
4379 * bits 8-15: offset (0-255)
4380 * bits 16-23: value to write
4381 * bits 24-31: unused
4382 * Output: 8-bit value written
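 *
 * Example: an input of 0x7F5AA2 writes the value 0x7F to offset 0x5A on
 * device address 0xA2.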
4383 */
4384 static int
4385 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4386 {
4387 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4388 device_t dev = pf->dev;
4389 int input = -1, error = 0;
4390 u8 dev_addr, offset, value;
4391
4392 /* Read in I2C write parameters */
4393 error = sysctl_handle_int(oidp, &input, 0, req);
4394 if ((error) || (req->newptr == NULL))
4395 return (error);
4396 /* Validate device address */
4397 dev_addr = input & 0xFF;
4398 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4399 return (EINVAL);
4400 }
4401 offset = (input >> 8) & 0xFF;
4402 value = (input >> 16) & 0xFF;
4403
4404 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4405 if (error)
4406 return (error);
4407
4408 device_printf(dev, "%02X written\n", value);
4409 return (0);
4410 }
4411
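/*
 * Read the current PHY abilities and report, via 'is_set', whether FEC
 * bit 'bit_pos' is set in fec_cfg_curr_mod_ext_info.
 */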
4412 static int
4413 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4414 u8 bit_pos, int *is_set)
4415 {
4416 device_t dev = pf->dev;
4417 struct i40e_hw *hw = &pf->hw;
4418 enum i40e_status_code status;
4419
4420 if (IXL_PF_IN_RECOVERY_MODE(pf))
4421 return (EIO);
4422
4423 status = i40e_aq_get_phy_capabilities(hw,
4424 FALSE, FALSE, abilities, NULL);
4425 if (status) {
4426 device_printf(dev,
4427 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4428 __func__, i40e_stat_str(hw, status),
4429 i40e_aq_str(hw, hw->aq.asq_last_status));
4430 return (EIO);
4431 }
4432
4433 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
4434 return (0);
4435 }
4436
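/*
 * Set or clear FEC bit 'bit_pos' in the PHY configuration, carrying the
 * remaining settings over from the current abilities; the new config is
 * only written to hardware if the FEC bits actually change.
 */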
4437 static int
4438 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4439 u8 bit_pos, int set)
4440 {
4441 device_t dev = pf->dev;
4442 struct i40e_hw *hw = &pf->hw;
4443 struct i40e_aq_set_phy_config config;
4444 enum i40e_status_code status;
4445
4446 /* Set new PHY config */
4447 memset(&config, 0, sizeof(config));
4448 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
4449 if (set)
4450 config.fec_config |= bit_pos;
4451 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4452 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4453 config.phy_type = abilities->phy_type;
4454 config.phy_type_ext = abilities->phy_type_ext;
4455 config.link_speed = abilities->link_speed;
4456 config.eee_capability = abilities->eee_capability;
4457 config.eeer = abilities->eeer_val;
4458 config.low_power_ctrl = abilities->d3_lpan;
4459 status = i40e_aq_set_phy_config(hw, &config, NULL);
4460
4461 if (status) {
4462 device_printf(dev,
4463 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4464 __func__, i40e_stat_str(hw, status),
4465 i40e_aq_str(hw, hw->aq.asq_last_status));
4466 return (EIO);
4467 }
4468 }
4469
4470 return (0);
4471 }
4472
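/*
 * The five FEC sysctls below share one pattern: read the current bit via
 * ixl_get_fec_config(), then apply the requested 0/1 value with
 * ixl_set_fec_config().
 */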
4473 static int
4474 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4475 {
4476 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4477 int mode, error = 0;
4478
4479 struct i40e_aq_get_phy_abilities_resp abilities;
4480 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4481 if (error)
4482 return (error);
4483 /* Read in new mode */
4484 error = sysctl_handle_int(oidp, &mode, 0, req);
4485 if ((error) || (req->newptr == NULL))
4486 return (error);
4487
4488 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
4489 }
4490
4491 static int
4492 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4493 {
4494 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4495 int mode, error = 0;
4496
4497 struct i40e_aq_get_phy_abilities_resp abilities;
4498 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4499 if (error)
4500 return (error);
4501 /* Read in new mode */
4502 error = sysctl_handle_int(oidp, &mode, 0, req);
4503 if ((error) || (req->newptr == NULL))
4504 return (error);
4505
4506 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
4507 }
4508
4509 static int
4510 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4511 {
4512 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4513 int mode, error = 0;
4514
4515 struct i40e_aq_get_phy_abilities_resp abilities;
4516 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4517 if (error)
4518 return (error);
4519 /* Read in new mode */
4520 error = sysctl_handle_int(oidp, &mode, 0, req);
4521 if ((error) || (req->newptr == NULL))
4522 return (error);
4523
4524 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
4525 }
4526
4527 static int
4528 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4529 {
4530 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4531 int mode, error = 0;
4532
4533 struct i40e_aq_get_phy_abilities_resp abilities;
4534 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4535 if (error)
4536 return (error);
4537 /* Read in new mode */
4538 error = sysctl_handle_int(oidp, &mode, 0, req);
4539 if ((error) || (req->newptr == NULL))
4540 return (error);
4541
4542 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
4543 }
4544
4545 static int
4546 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4547 {
4548 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4549 int mode, error = 0;
4550
4551 struct i40e_aq_get_phy_abilities_resp abilities;
4552 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4553 if (error)
4554 return (error);
4555 /* Read in new mode */
4556 error = sysctl_handle_int(oidp, &mode, 0, req);
4557 if ((error) || (req->newptr == NULL))
4558 return (error);
4559
4560 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
4561 }
4562
4563 static int
4564 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4565 {
4566 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4567 struct i40e_hw *hw = &pf->hw;
4568 device_t dev = pf->dev;
4569 struct sbuf *buf;
4570 int error = 0;
4571 enum i40e_status_code status;
4572
4573 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4574 if (!buf) {
4575 device_printf(dev, "Could not allocate sbuf for output.\n");
4576 return (ENOMEM);
4577 }
4578
4579 u8 *final_buff;
4580 /* This amount is only necessary if reading the entire cluster into memory */
4581 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
4582 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4583 if (final_buff == NULL) {
4584 device_printf(dev, "Could not allocate memory for output.\n");
4585 goto out;
4586 }
4587 int final_buff_len = 0;
4588
4589 u8 cluster_id = 1;
4590 bool more = true;
4591
4592 u8 dump_buf[4096];
4593 u16 curr_buff_size = 4096;
4594 u8 curr_next_table = 0;
4595 u32 curr_next_index = 0;
4596
4597 u16 ret_buff_size;
4598 u8 ret_next_table;
4599 u32 ret_next_index;
4600
4601 sbuf_cat(buf, "\n");
4602
4603 while (more) {
4604 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4605 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4606 if (status) {
4607 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4608 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4609 goto free_out;
4610 }
4611
4612 /* copy info out of temp buffer */
4613 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4614 final_buff_len += ret_buff_size;
4615
4616 if (ret_next_table != curr_next_table) {
4617 /* We're done with the current table; we can dump out read data. */
4618 sbuf_printf(buf, "%d:", curr_next_table);
4619 int bytes_printed = 0;
4620 			while (bytes_printed < final_buff_len) {
4621 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4622 bytes_printed += 16;
4623 }
4624 sbuf_cat(buf, "\n");
4625
4626 /* The entire cluster has been read; we're finished */
4627 if (ret_next_table == 0xFF)
4628 break;
4629
4630 /* Otherwise clear the output buffer and continue reading */
4631 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
4632 final_buff_len = 0;
4633 }
4634
4635 if (ret_next_index == 0xFFFFFFFF)
4636 ret_next_index = 0;
4637
4638 bzero(dump_buf, sizeof(dump_buf));
4639 curr_next_table = ret_next_table;
4640 curr_next_index = ret_next_index;
4641 }
4642
4643 free_out:
4644 free(final_buff, M_IXL);
4645 out:
4646 error = sbuf_finish(buf);
4647 if (error)
4648 device_printf(dev, "Error finishing sbuf: %d\n", error);
4649 sbuf_delete(buf);
4650
4651 return (error);
4652 }
4653
4654 static int
4655 ixl_start_fw_lldp(struct ixl_pf *pf)
4656 {
4657 struct i40e_hw *hw = &pf->hw;
4658 enum i40e_status_code status;
4659
4660 status = i40e_aq_start_lldp(hw, false, NULL);
4661 if (status != I40E_SUCCESS) {
4662 switch (hw->aq.asq_last_status) {
4663 case I40E_AQ_RC_EEXIST:
4664 device_printf(pf->dev,
4665 "FW LLDP agent is already running\n");
4666 break;
4667 case I40E_AQ_RC_EPERM:
4668 device_printf(pf->dev,
4669 "Device configuration forbids SW from starting "
4670 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4671 "attribute to \"Enabled\" to use this sysctl\n");
4672 return (EINVAL);
4673 default:
4674 device_printf(pf->dev,
4675 "Starting FW LLDP agent failed: error: %s, %s\n",
4676 i40e_stat_str(hw, status),
4677 i40e_aq_str(hw, hw->aq.asq_last_status));
4678 return (EINVAL);
4679 }
4680 }
4681
4682 ixl_clear_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4683 return (0);
4684 }
4685
4686 static int
4687 ixl_stop_fw_lldp(struct ixl_pf *pf)
4688 {
4689 struct i40e_hw *hw = &pf->hw;
4690 device_t dev = pf->dev;
4691 enum i40e_status_code status;
4692
4693 if (hw->func_caps.npar_enable != 0) {
4694 device_printf(dev,
4695 "Disabling FW LLDP agent is not supported on this device\n");
4696 return (EINVAL);
4697 }
4698
4699 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4700 device_printf(dev,
4701 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4702 return (EINVAL);
4703 }
4704
4705 status = i40e_aq_stop_lldp(hw, true, false, NULL);
4706 if (status != I40E_SUCCESS) {
4707 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4708 device_printf(dev,
4709 "Disabling FW LLDP agent failed: error: %s, %s\n",
4710 i40e_stat_str(hw, status),
4711 i40e_aq_str(hw, hw->aq.asq_last_status));
4712 return (EINVAL);
4713 }
4714
4715 device_printf(dev, "FW LLDP agent is already stopped\n");
4716 }
4717
4718 i40e_aq_set_dcb_parameters(hw, true, NULL);
4719 ixl_set_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4720 return (0);
4721 }
4722
4723 static int
4724 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4725 {
4726 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4727 int state, new_state, error = 0;
4728
4729 state = new_state = !ixl_test_state(&pf->state, IXL_STATE_FW_LLDP_DISABLED);
4730
4731 /* Read in new mode */
4732 error = sysctl_handle_int(oidp, &new_state, 0, req);
4733 if ((error) || (req->newptr == NULL))
4734 return (error);
4735
4736 /* Already in requested state */
4737 if (new_state == state)
4738 return (error);
4739
4740 if (new_state == 0)
4741 return ixl_stop_fw_lldp(pf);
4742
4743 return ixl_start_fw_lldp(pf);
4744 }
4745
4746 static int
4747 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4748 {
4749 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4750 int state, new_state;
4751 int sysctl_handle_status = 0;
4752 enum i40e_status_code cmd_status;
4753
4754 /* Init states' values */
4755 state = new_state = ixl_test_state(&pf->state, IXL_STATE_EEE_ENABLED);
4756
4757 /* Get requested mode */
4758 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4759 if ((sysctl_handle_status) || (req->newptr == NULL))
4760 return (sysctl_handle_status);
4761
4762 /* Check if state has changed */
4763 if (new_state == state)
4764 return (0);
4765
4766 /* Set new state */
4767 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4768
4769 /* Save new state or report error */
4770 if (!cmd_status) {
4771 if (new_state == 0)
4772 ixl_clear_state(&pf->state, IXL_STATE_EEE_ENABLED);
4773 else
4774 ixl_set_state(&pf->state, IXL_STATE_EEE_ENABLED);
4775 } else if (cmd_status == I40E_ERR_CONFIG)
4776 return (EPERM);
4777 else
4778 return (EIO);
4779
4780 return (0);
4781 }
4782
4783 static int
4784 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4785 {
4786 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4787 int error, state;
4788
4789 state = ixl_test_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4790
4791 error = sysctl_handle_int(oidp, &state, 0, req);
4792 if ((error) || (req->newptr == NULL))
4793 return (error);
4794
4795 if (state == 0)
4796 ixl_clear_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4797 else
4798 ixl_set_state(&pf->state, IXL_STATE_LINK_ACTIVE_ON_DOWN);
4799
4800 return (0);
4801 }
4802
4803
4804 int
4805 ixl_attach_get_link_status(struct ixl_pf *pf)
4806 {
4807 struct i40e_hw *hw = &pf->hw;
4808 device_t dev = pf->dev;
4809 enum i40e_status_code status;
4810
4811 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4812 (hw->aq.fw_maj_ver < 4)) {
4813 i40e_msec_delay(75);
4814 status = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4815 if (status != I40E_SUCCESS) {
4816 device_printf(dev,
4817 "%s link restart failed status: %s, aq_err=%s\n",
4818 __func__, i40e_stat_str(hw, status),
4819 i40e_aq_str(hw, hw->aq.asq_last_status));
4820 return (EINVAL);
4821 }
4822 }
4823
4824 /* Determine link state */
4825 hw->phy.get_link_info = TRUE;
4826 status = i40e_get_link_status(hw, &pf->link_up);
4827 if (status != I40E_SUCCESS) {
4828 device_printf(dev,
4829 "%s get link status, status: %s aq_err=%s\n",
4830 __func__, i40e_stat_str(hw, status),
4831 i40e_aq_str(hw, hw->aq.asq_last_status));
4832 /*
4833 * Most probably FW has not finished configuring PHY.
4834 * Retry periodically in a timer callback.
4835 */
4836 ixl_set_state(&pf->state, IXL_STATE_LINK_POLLING);
4837 pf->link_poll_start = getsbinuptime();
4838 return (EAGAIN);
4839 }
4840 ixl_dbg_link(pf, "%s link_up: %d\n", __func__, pf->link_up);
4841
4842 /* Flow Control mode not set by user, read current FW settings */
4843 if (pf->fc == -1)
4844 pf->fc = hw->fc.current_mode;
4845
4846 return (0);
4847 }
4848
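/* Debug sysctl: any write requests a PF reset, performed later by the admin task. */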
4849 static int
4850 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4851 {
4852 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4853 int requested = 0, error = 0;
4854
4855 /* Read in new mode */
4856 error = sysctl_handle_int(oidp, &requested, 0, req);
4857 if ((error) || (req->newptr == NULL))
4858 return (error);
4859
4860 /* Initiate the PF reset later in the admin task */
4861 ixl_set_state(&pf->state, IXL_STATE_PF_RESET_REQ);
4862
4863 return (error);
4864 }
4865
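/* Debug sysctl: any write triggers a CORE reset via the CORER bit in GLGEN_RTRIG. */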
4866 static int
4867 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4868 {
4869 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4870 struct i40e_hw *hw = &pf->hw;
4871 int requested = 0, error = 0;
4872
4873 /* Read in new mode */
4874 error = sysctl_handle_int(oidp, &requested, 0, req);
4875 if ((error) || (req->newptr == NULL))
4876 return (error);
4877
4878 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
4879
4880 return (error);
4881 }
4882
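/* Debug sysctl: any write triggers a global reset via the GLOBR bit in GLGEN_RTRIG. */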
4883 static int
4884 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4885 {
4886 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4887 struct i40e_hw *hw = &pf->hw;
4888 int requested = 0, error = 0;
4889
4890 /* Read in new mode */
4891 error = sysctl_handle_int(oidp, &requested, 0, req);
4892 if ((error) || (req->newptr == NULL))
4893 return (error);
4894
4895 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4896
4897 return (error);
4898 }
4899
4900 /*
4901  * Print out the mapping of TX and RX queue indexes
4902 * to MSI-X vectors.
4903 */
4904 static int
4905 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4906 {
4907 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4908 struct ixl_vsi *vsi = &pf->vsi;
4909 struct i40e_hw *hw = vsi->hw;
4910 device_t dev = pf->dev;
4911 struct sbuf *buf;
4912 int error = 0;
4913
4914 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4915 struct ixl_tx_queue *tx_que = vsi->tx_queues;
4916
4917 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4918 if (!buf) {
4919 device_printf(dev, "Could not allocate sbuf for output.\n");
4920 return (ENOMEM);
4921 }
4922
4923 sbuf_cat(buf, "\n");
4924 for (int i = 0; i < vsi->num_rx_queues; i++) {
4925 rx_que = &vsi->rx_queues[i];
4926 sbuf_printf(buf,
4927 "(rxq %3d): %d LNKLSTN: %08x QINT_RQCTL: %08x\n",
4928 i, rx_que->msix,
4929 rd32(hw, I40E_PFINT_LNKLSTN(rx_que->msix - 1)),
4930 rd32(hw, I40E_QINT_RQCTL(rx_que->msix - 1)));
4931 }
4932 for (int i = 0; i < vsi->num_tx_queues; i++) {
4933 tx_que = &vsi->tx_queues[i];
4934 sbuf_printf(buf, "(txq %3d): %d QINT_TQCTL: %08x\n",
4935 i, tx_que->msix,
4936 rd32(hw, I40E_QINT_TQCTL(tx_que->msix - 1)));
4937 }
4938
4939 error = sbuf_finish(buf);
4940 if (error)
4941 device_printf(dev, "Error finishing sbuf: %d\n", error);
4942 sbuf_delete(buf);
4943
4944 return (error);
4945 }
4946
4947 static int
4948 ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS)
4949 {
4950 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4951 struct ixl_vsi *vsi = &pf->vsi;
4952 struct i40e_hw *hw = vsi->hw;
4953 device_t dev = pf->dev;
4954 struct sbuf *buf;
4955 int error = 0;
4956
4957 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4958
4959 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4960 if (!buf) {
4961 device_printf(dev, "Could not allocate sbuf for output.\n");
4962 return (ENOMEM);
4963 }
4964
4965 sbuf_cat(buf, "\n");
4966 for (int i = 0; i < vsi->num_rx_queues; i++) {
4967 rx_que = &vsi->rx_queues[i];
4968 sbuf_printf(buf,
4969 "(rxq %3d): %d PFINT_DYN_CTLN: %08x\n",
4970 i, rx_que->msix,
4971 rd32(hw, I40E_PFINT_DYN_CTLN(rx_que->msix - 1)));
4972 }
4973
4974 error = sbuf_finish(buf);
4975 if (error)
4976 device_printf(dev, "Error finishing sbuf: %d\n", error);
4977 sbuf_delete(buf);
4978
4979 return (error);
4980 }
4981