/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

#include "i40e_sw.h"

/*
 * -------------------
 * Statistics Overview
 * -------------------
 *
 * As part of managing the driver and understanding what's going on, we keep
 * track of statistics from two different sources:
 *
 *   - Statistics from the device
 *   - Statistics maintained by the driver
 *
 * Generally, the hardware provides us with traditional IETF and MIB Ethernet
 * statistics, for example, the total packets in and out, various packet
 * errors, the negotiated status, etc. The driver, on the other hand, primarily
 * maintains statistics around driver-specific issues, such as information
 * about checksumming on receive and transmit and the data in and out of a
 * specific ring.
 *
 * We export statistics in two different forms. The first form is the required
 * GLDv3 endpoints, specifically:
 *
 *   - The general GLDv3 mc_getstat interface
 *   - The GLDv3 ring mri_stat interface
 *
 * The second form in which we export statistics is through kstats. We arrange
 * the kstats to reflect the layout of the device. Currently we have kstats
 * which capture both the IEEE and driver-implementation specific stats. There
 * are kstats for each of the following structures:
 *
 *   - Each physical function
 *   - Each VSI
 *   - Each queue
 *
 * The PF's kstat is called 'pfstats' so as not to collide with other system
 * provided kstats. Thus, for instance 0, usually the first PF, the full kstat
 * would be: i40e:0:pfstats:.
 *
 * The kstat for each VSI is called vsi_%instance. So for the first PF, which
 * is instance zero, and the first VSI, which has id 0, it will be named vsi_0
 * and the full kstat would be i40e:0:vsi_0:.
 *
 * The kstat for each queue is trqpair_tx_%queue and trqpair_rx_%queue. Note
 * that these are labeled based on their local index, which may mean that
 * different instances have overlapping sets of queues. This isn't a problem as
 * the kstats always use the instance number of the PF to distinguish them in
 * the kstat tuple.
 *
 * ---------------------
 * Hardware Arrangements
 * ---------------------
 *
 * The hardware keeps statistics at each physical function/MAC (PF) and it
 * keeps statistics on each virtual station interface (VSI).
 *
 * The hardware keeps these statistics as 32-bit and 48-bit counters. We are
 * required to read them periodically and compute the differences ourselves.
 * The 48-bit counters span more than one 32-bit register in the BAR. The
 * hardware suggests that to read them, we perform a 64-bit read of the lower
 * of the two registers that make up a 48-bit stat. The hardware guarantees
 * that such a read returns an atomic, consistent snapshot of both registers,
 * which is not a property it has for arbitrary reads of two registers.
 *
 * For every kstat we have based on this, we have a corresponding uint64_t
 * that we keep around as a base value in a separate structure. Whenever we
 * read a value, we grab the current value, calculate the difference between
 * the previously stored value and the current one, and update the kstat with
 * that difference. After that, we update the stored base value. This is all
 * encapsulated in i40e_stat_get_uint32() and i40e_stat_get_uint48().
 *
 * The only unfortunate thing here is that the hardware doesn't give us any
 * kind of overflow counter. It just tries to make sure that the 32-bit and
 * 48-bit counters are large enough to hopefully not overflow right away. This
 * isn't the most reassuring statement and we should investigate ways of
 * ensuring that if a system is active, but not actively measured, we don't
 * lose data.
 *
 * The PF kstat data is stored in the i40e_t`i40e_pf_kstat and is backed by
 * the i40e_t`i40e_pf_stat structure. Similarly, the VSI related kstats are in
 * i40e_t`i40e_vsis[idx].iv_kstats and the data is backed by
 * i40e_t`i40e_vsis[idx].iv_stats. All of this data is protected by the
 * i40e_stat_lock, which should be taken last when acquiring locks.
 */
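
/*
 * For example, the 'pfstats' kstat described above can be read from userland
 * with kstat(1M); assuming instance 0 (illustrative only):
 *
 *	kstat -p i40e:0:pfstats:rx_bytes
 *
 * The same data is also reachable programmatically via libkstat(3LIB); a
 * minimal sketch (error handling elided):
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "i40e", 0, "pfstats");
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
 *		kstat_named_t *kn = kstat_data_lookup(ksp, "rx_bytes");
 *		if (kn != NULL)
 *			(void) printf("%llu\n", (u_longlong_t)kn->value.ui64);
 *	}
 *	(void) kstat_close(kc);
 */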

static void
i40e_stat_get_uint48(i40e_t *i40e, uintptr_t reg, kstat_named_t *kstat,
    uint64_t *base, boolean_t init)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint64_t raw, delta;

	ASSERT(MUTEX_HELD(&i40e->i40e_stat_lock));

	raw = ddi_get64(i40e->i40e_osdep_space.ios_reg_handle,
	    (uint64_t *)((uintptr_t)hw->hw_addr + reg));

	if (init == B_TRUE) {
		*base = raw;
		return;
	}

	/*
	 * Check for wraparound; note that the counter is actually only 48
	 * bits, even though it is backed by two uint32_t registers.
	 */
	if (raw >= *base) {
		delta = raw - *base;
	} else {
		delta = 0x1000000000000ULL - *base + raw;
	}

	kstat->value.ui64 += delta;
	*base = raw;
}
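
/*
 * As a worked example of the wraparound handling above: if the stored base is
 * 0xFFFFFFFFFFF0 and the next raw read is 0x10, then raw < *base and the
 * delta is computed as 0x1000000000000 - 0xFFFFFFFFFFF0 + 0x10 = 0x20: the
 * 0x10 counts remaining before the 48-bit counter wrapped plus the 0x10
 * counts accumulated afterwards.
 */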

static void
i40e_stat_get_uint32(i40e_t *i40e, uintptr_t reg, kstat_named_t *kstat,
    uint64_t *base, boolean_t init)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint64_t raw, delta;

	ASSERT(MUTEX_HELD(&i40e->i40e_stat_lock));

	raw = ddi_get32(i40e->i40e_osdep_space.ios_reg_handle,
	    (uint32_t *)((uintptr_t)hw->hw_addr + reg));

	if (init == B_TRUE) {
		*base = raw;
		return;
	}

	/*
	 * Watch out for wraparound as we only have a 32-bit counter.
	 */
	if (raw >= *base) {
		delta = raw - *base;
	} else {
		delta = 0x100000000ULL - *base + raw;
	}

	kstat->value.ui64 += delta;
	*base = raw;
}

static void
i40e_stat_vsi_update(i40e_t *i40e, uint_t idx, boolean_t init)
{
	i40e_vsi_stats_t *ivs;
	i40e_vsi_kstats_t *ivk;
	uint16_t id = i40e->i40e_vsis[idx].iv_stats_id;

	ASSERT3P(i40e->i40e_vsis[idx].iv_kstats, !=, NULL);
	ivs = &i40e->i40e_vsis[idx].iv_stats;
	ivk = i40e->i40e_vsis[idx].iv_kstats->ks_data;

	mutex_enter(&i40e->i40e_stat_lock);

	i40e_stat_get_uint48(i40e, I40E_GLV_GORCL(id), &ivk->ivk_rx_bytes,
	    &ivs->ivs_rx_bytes, init);
	i40e_stat_get_uint48(i40e, I40E_GLV_UPRCL(id), &ivk->ivk_rx_unicast,
	    &ivs->ivs_rx_unicast, init);
	i40e_stat_get_uint48(i40e, I40E_GLV_MPRCL(id), &ivk->ivk_rx_multicast,
	    &ivs->ivs_rx_multicast, init);
	i40e_stat_get_uint48(i40e, I40E_GLV_BPRCL(id), &ivk->ivk_rx_broadcast,
	    &ivs->ivs_rx_broadcast, init);

	i40e_stat_get_uint32(i40e, I40E_GLV_RDPC(id), &ivk->ivk_rx_discards,
	    &ivs->ivs_rx_discards, init);
	i40e_stat_get_uint32(i40e, I40E_GLV_RUPP(id),
	    &ivk->ivk_rx_unknown_protocol,
	    &ivs->ivs_rx_unknown_protocol,
	    init);

	i40e_stat_get_uint48(i40e, I40E_GLV_GOTCL(id), &ivk->ivk_tx_bytes,
	    &ivs->ivs_tx_bytes, init);
	i40e_stat_get_uint48(i40e, I40E_GLV_UPTCL(id), &ivk->ivk_tx_unicast,
	    &ivs->ivs_tx_unicast, init);
	i40e_stat_get_uint48(i40e, I40E_GLV_MPTCL(id), &ivk->ivk_tx_multicast,
	    &ivs->ivs_tx_multicast, init);
	i40e_stat_get_uint48(i40e, I40E_GLV_BPTCL(id), &ivk->ivk_tx_broadcast,
	    &ivs->ivs_tx_broadcast, init);

	i40e_stat_get_uint32(i40e, I40E_GLV_TEPC(id), &ivk->ivk_tx_errors,
	    &ivs->ivs_tx_errors, init);

	mutex_exit(&i40e->i40e_stat_lock);

	/*
	 * We follow ixgbe's lead here: if a kstat update didn't work 100%,
	 * we mark the service as unaffected, as opposed to when fetching
	 * things for MAC directly.
	 */
	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_UNAFFECTED);
	}
}

static int
i40e_stat_vsi_kstat_update(kstat_t *ksp, int rw)
{
	i40e_t *i40e;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	i40e = ksp->ks_private;
	for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++)
		i40e_stat_vsi_update(i40e, i, B_FALSE);

	return (0);
}

void
i40e_stat_vsi_fini(i40e_t *i40e, uint_t idx)
{
	if (i40e->i40e_vsis[idx].iv_kstats != NULL) {
		kstat_delete(i40e->i40e_vsis[idx].iv_kstats);
		i40e->i40e_vsis[idx].iv_kstats = NULL;
	}
}

boolean_t
i40e_stat_vsi_init(i40e_t *i40e, uint_t idx)
{
	kstat_t *ksp;
	i40e_vsi_kstats_t *ivk;
	char buf[64];
	uint16_t vsi_id = i40e->i40e_vsis[idx].iv_seid;

	(void) snprintf(buf, sizeof (buf), "vsi_%u", vsi_id);

	ksp = kstat_create(I40E_MODULE_NAME, ddi_get_instance(i40e->i40e_dip),
	    buf, "net", KSTAT_TYPE_NAMED,
	    sizeof (i40e_vsi_kstats_t) / sizeof (kstat_named_t), 0);

	if (ksp == NULL) {
		i40e_error(i40e, "Failed to create kstats for VSI %u", vsi_id);
		return (B_FALSE);
	}

	i40e->i40e_vsis[idx].iv_kstats = ksp;
	ivk = ksp->ks_data;
	ksp->ks_update = i40e_stat_vsi_kstat_update;
	ksp->ks_private = i40e;

	kstat_named_init(&ivk->ivk_rx_bytes, "rx_bytes",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_rx_unicast, "rx_unicast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_rx_multicast, "rx_multicast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_rx_broadcast, "rx_broadcast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_rx_discards, "rx_discards",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_rx_unknown_protocol, "rx_unknown_protocol",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_tx_bytes, "tx_bytes",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_tx_unicast, "tx_unicast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_tx_multicast, "tx_multicast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_tx_broadcast, "tx_broadcast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ivk->ivk_tx_errors, "tx_errors",
	    KSTAT_DATA_UINT64);

	bzero(&i40e->i40e_vsis[idx].iv_stats, sizeof (i40e_vsi_stats_t));
	i40e_stat_vsi_update(i40e, idx, B_TRUE);
	kstat_install(i40e->i40e_vsis[idx].iv_kstats);

	return (B_TRUE);
}
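
/*
 * Since each per-VSI kstat's name embeds the VSI's SEID as created above
 * (e.g. a VSI with SEID 390 would show up as vsi_390; the value is purely
 * illustrative), the simplest way to find them from userland is to list the
 * driver's kstats by module and class, for example:
 *
 *	kstat -m i40e -i 0 -c net
 */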

static void
i40e_stat_pf_update(i40e_t *i40e, boolean_t init)
{
	i40e_pf_stats_t *ips;
	i40e_pf_kstats_t *ipk;
	int port = i40e->i40e_hw_space.port;
	int i;

	ASSERT(i40e->i40e_pf_kstat != NULL);
	ips = &i40e->i40e_pf_stat;
	ipk = i40e->i40e_pf_kstat->ks_data;

	mutex_enter(&i40e->i40e_stat_lock);

	/* 64-bit PCIe regs */
	i40e_stat_get_uint48(i40e, I40E_GLPRT_GORCL(port),
	    &ipk->ipk_rx_bytes, &ips->ips_rx_bytes, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_UPRCL(port),
	    &ipk->ipk_rx_unicast, &ips->ips_rx_unicast, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_MPRCL(port),
	    &ipk->ipk_rx_multicast, &ips->ips_rx_multicast, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_BPRCL(port),
	    &ipk->ipk_rx_broadcast, &ips->ips_rx_broadcast, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_GOTCL(port),
	    &ipk->ipk_tx_bytes, &ips->ips_tx_bytes, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_UPTCL(port),
	    &ipk->ipk_tx_unicast, &ips->ips_tx_unicast, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_MPTCL(port),
	    &ipk->ipk_tx_multicast, &ips->ips_tx_multicast, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_BPTCL(port),
	    &ipk->ipk_tx_broadcast, &ips->ips_tx_broadcast, init);

	i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC64L(port),
	    &ipk->ipk_rx_size_64, &ips->ips_rx_size_64, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC127L(port),
	    &ipk->ipk_rx_size_127, &ips->ips_rx_size_127, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC255L(port),
	    &ipk->ipk_rx_size_255, &ips->ips_rx_size_255, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC511L(port),
	    &ipk->ipk_rx_size_511, &ips->ips_rx_size_511, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC1023L(port),
	    &ipk->ipk_rx_size_1023, &ips->ips_rx_size_1023, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC1522L(port),
	    &ipk->ipk_rx_size_1522, &ips->ips_rx_size_1522, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PRC9522L(port),
	    &ipk->ipk_rx_size_9522, &ips->ips_rx_size_9522, init);

	i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC64L(port),
	    &ipk->ipk_tx_size_64, &ips->ips_tx_size_64, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC127L(port),
	    &ipk->ipk_tx_size_127, &ips->ips_tx_size_127, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC255L(port),
	    &ipk->ipk_tx_size_255, &ips->ips_tx_size_255, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC511L(port),
	    &ipk->ipk_tx_size_511, &ips->ips_tx_size_511, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC1023L(port),
	    &ipk->ipk_tx_size_1023, &ips->ips_tx_size_1023, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC1522L(port),
	    &ipk->ipk_tx_size_1522, &ips->ips_tx_size_1522, init);
	i40e_stat_get_uint48(i40e, I40E_GLPRT_PTC9522L(port),
	    &ipk->ipk_tx_size_9522, &ips->ips_tx_size_9522, init);

	/* 32-bit PCIe regs */
	i40e_stat_get_uint32(i40e, I40E_GLPRT_LXONRXC(port),
	    &ipk->ipk_link_xon_rx, &ips->ips_link_xon_rx, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_LXOFFRXC(port),
	    &ipk->ipk_link_xoff_rx, &ips->ips_link_xoff_rx, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_LXONTXC(port),
	    &ipk->ipk_link_xon_tx, &ips->ips_link_xon_tx, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_LXOFFTXC(port),
	    &ipk->ipk_link_xoff_tx, &ips->ips_link_xoff_tx, init);

	for (i = 0; i < 8; i++) {
		i40e_stat_get_uint32(i40e, I40E_GLPRT_PXONRXC(port, i),
		    &ipk->ipk_priority_xon_rx[i], &ips->ips_priority_xon_rx[i],
		    init);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_PXOFFRXC(port, i),
		    &ipk->ipk_priority_xoff_rx[i],
		    &ips->ips_priority_xoff_rx[i],
		    init);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_PXONTXC(port, i),
		    &ipk->ipk_priority_xon_tx[i], &ips->ips_priority_xon_tx[i],
		    init);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_PXOFFTXC(port, i),
		    &ipk->ipk_priority_xoff_tx[i],
		    &ips->ips_priority_xoff_tx[i],
		    init);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RXON2OFFCNT(port, i),
		    &ipk->ipk_priority_xon_2_xoff[i],
		    &ips->ips_priority_xon_2_xoff[i],
		    init);
	}

	i40e_stat_get_uint32(i40e, I40E_GLPRT_CRCERRS(port),
	    &ipk->ipk_crc_errors, &ips->ips_crc_errors, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_ILLERRC(port),
	    &ipk->ipk_illegal_bytes, &ips->ips_illegal_bytes, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_MLFC(port),
	    &ipk->ipk_mac_local_faults, &ips->ips_mac_local_faults, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_MRFC(port),
	    &ipk->ipk_mac_remote_faults,
	    &ips->ips_mac_remote_faults, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_RLEC(port),
	    &ipk->ipk_rx_length_errors, &ips->ips_rx_length_errors, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_RUC(port),
	    &ipk->ipk_rx_undersize, &ips->ips_rx_undersize, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_RFC(port),
	    &ipk->ipk_rx_fragments, &ips->ips_rx_fragments, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_ROC(port),
	    &ipk->ipk_rx_oversize, &ips->ips_rx_oversize, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_RJC(port),
	    &ipk->ipk_rx_jabber, &ips->ips_rx_jabber, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_RDPC(port),
	    &ipk->ipk_rx_discards, &ips->ips_rx_discards, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_LDPC(port),
	    &ipk->ipk_rx_vm_discards, &ips->ips_rx_vm_discards, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_MSPDC(port),
	    &ipk->ipk_rx_short_discards, &ips->ips_rx_short_discards, init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_TDOLD(port),
	    &ipk->ipk_tx_dropped_link_down, &ips->ips_tx_dropped_link_down,
	    init);
	i40e_stat_get_uint32(i40e, I40E_GLPRT_RUPP(port),
	    &ipk->ipk_rx_unknown_protocol, &ips->ips_rx_unknown_protocol, init);

	/* 64-bit */
	i40e_stat_get_uint48(i40e, I40E_GL_RXERR1_L(port), &ipk->ipk_rx_err1,
	    &ips->ips_rx_err1, init);
	i40e_stat_get_uint48(i40e, I40E_GL_RXERR2_L(port), &ipk->ipk_rx_err2,
	    &ips->ips_rx_err2, init);

	mutex_exit(&i40e->i40e_stat_lock);

	/*
	 * We follow ixgbe's lead here: if a kstat update didn't work 100%,
	 * we mark the service as unaffected, as opposed to when fetching
	 * things for MAC directly.
	 */
	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_UNAFFECTED);
	}
}

static int
i40e_stat_pf_kstat_update(kstat_t *ksp, int rw)
{
	i40e_t *i40e;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	i40e = ksp->ks_private;
	i40e_stat_pf_update(i40e, B_FALSE);
	return (0);
}

static boolean_t
i40e_stat_pf_init(i40e_t *i40e)
{
	kstat_t *ksp;
	i40e_pf_kstats_t *ipk;

	ksp = kstat_create(I40E_MODULE_NAME, ddi_get_instance(i40e->i40e_dip),
	    "pfstats", "net", KSTAT_TYPE_NAMED,
	    sizeof (i40e_pf_kstats_t) / sizeof (kstat_named_t), 0);
	if (ksp == NULL) {
		i40e_error(i40e, "Could not create kernel statistics.");
		return (B_FALSE);
	}

	i40e->i40e_pf_kstat = ksp;
	ipk = ksp->ks_data;
	ksp->ks_update = i40e_stat_pf_kstat_update;
	ksp->ks_private = i40e;

	kstat_named_init(&ipk->ipk_rx_bytes, "rx_bytes",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_unicast, "rx_unicast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_multicast, "rx_multicast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_broadcast, "rx_broadcast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_bytes, "tx_bytes",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_unicast, "tx_unicast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_multicast, "tx_multicast",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_broadcast, "tx_broadcast",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_rx_size_64, "rx_size_64",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_size_127, "rx_size_127",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_size_255, "rx_size_255",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_size_511, "rx_size_511",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_size_1023, "rx_size_1023",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_size_1522, "rx_size_1522",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_size_9522, "rx_size_9522",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_tx_size_64, "tx_size_64",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_size_127, "tx_size_127",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_size_255, "tx_size_255",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_size_511, "tx_size_511",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_size_1023, "tx_size_1023",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_size_1522, "tx_size_1522",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_size_9522, "tx_size_9522",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_link_xon_rx, "link_xon_rx",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_link_xoff_rx, "link_xoff_rx",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_link_xon_tx, "link_xon_tx",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_link_xoff_tx, "link_xoff_tx",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_priority_xon_rx[0], "priority_xon_rx[0]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_rx[0], "priority_xoff_rx[0]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_tx[0], "priority_xon_tx[0]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_tx[0], "priority_xoff_tx[0]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_2_xoff[0],
	    "priority_xon_2_xoff[0]",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_priority_xon_rx[1], "priority_xon_rx[1]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_rx[1], "priority_xoff_rx[1]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_tx[1], "priority_xon_tx[1]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_tx[1], "priority_xoff_tx[1]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_2_xoff[1],
	    "priority_xon_2_xoff[1]",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_priority_xon_rx[2], "priority_xon_rx[2]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_rx[2], "priority_xoff_rx[2]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_tx[2], "priority_xon_tx[2]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_tx[2], "priority_xoff_tx[2]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_2_xoff[2],
	    "priority_xon_2_xoff[2]",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_priority_xon_rx[3], "priority_xon_rx[3]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_rx[3], "priority_xoff_rx[3]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_tx[3], "priority_xon_tx[3]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_tx[3], "priority_xoff_tx[3]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_2_xoff[3],
	    "priority_xon_2_xoff[3]",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_priority_xon_rx[4], "priority_xon_rx[4]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_rx[4], "priority_xoff_rx[4]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_tx[4], "priority_xon_tx[4]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_tx[4], "priority_xoff_tx[4]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_2_xoff[4],
	    "priority_xon_2_xoff[4]",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_priority_xon_rx[5], "priority_xon_rx[5]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_rx[5], "priority_xoff_rx[5]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_tx[5], "priority_xon_tx[5]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_tx[5], "priority_xoff_tx[5]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_2_xoff[5],
	    "priority_xon_2_xoff[5]",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_priority_xon_rx[6], "priority_xon_rx[6]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_rx[6], "priority_xoff_rx[6]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_tx[6], "priority_xon_tx[6]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_tx[6], "priority_xoff_tx[6]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_2_xoff[6],
	    "priority_xon_2_xoff[6]",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_priority_xon_rx[7], "priority_xon_rx[7]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_rx[7], "priority_xoff_rx[7]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_tx[7], "priority_xon_tx[7]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xoff_tx[7], "priority_xoff_tx[7]",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_priority_xon_2_xoff[7],
	    "priority_xon_2_xoff[7]",
	    KSTAT_DATA_UINT64);

	kstat_named_init(&ipk->ipk_crc_errors, "crc_errors",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_illegal_bytes, "illegal_bytes",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_mac_local_faults, "mac_local_faults",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_mac_remote_faults, "mac_remote_faults",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_length_errors, "rx_length_errors",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_undersize, "rx_undersize",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_fragments, "rx_fragments",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_oversize, "rx_oversize",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_jabber, "rx_jabber",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_discards, "rx_discards",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_vm_discards, "rx_vm_discards",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_short_discards, "rx_short_discards",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_tx_dropped_link_down, "tx_dropped_link_down",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_unknown_protocol, "rx_unknown_protocol",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_err1, "rx_err1",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ipk->ipk_rx_err2, "rx_err2",
	    KSTAT_DATA_UINT64);

	bzero(&i40e->i40e_pf_stat, sizeof (i40e_pf_stats_t));
	i40e_stat_pf_update(i40e, B_TRUE);

	kstat_install(i40e->i40e_pf_kstat);

	return (B_TRUE);
}

void
i40e_stats_fini(i40e_t *i40e)
{
#ifdef DEBUG
	for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
		ASSERT3P(i40e->i40e_vsis[i].iv_kstats, ==, NULL);
	}
#endif

	if (i40e->i40e_pf_kstat != NULL) {
		kstat_delete(i40e->i40e_pf_kstat);
		i40e->i40e_pf_kstat = NULL;
	}

	mutex_destroy(&i40e->i40e_stat_lock);
}

boolean_t
i40e_stats_init(i40e_t *i40e)
{
	mutex_init(&i40e->i40e_stat_lock, NULL, MUTEX_DRIVER, NULL);
	if (i40e_stat_pf_init(i40e) == B_FALSE) {
		mutex_destroy(&i40e->i40e_stat_lock);
		return (B_FALSE);
	}

	return (B_TRUE);
}
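
/*
 * A rough sketch of the intended lifecycle, with the call sites named only
 * for illustration: i40e_stats_init() and i40e_stats_fini() bracket the life
 * of the instance (attach and detach), while each VSI's kstats come and go
 * via i40e_stat_vsi_init() and i40e_stat_vsi_fini(). As the DEBUG assertions
 * in i40e_stats_fini() make clear, every per-VSI kstat must already have been
 * removed by the time the instance-wide statistics are torn down.
 */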

/*
 * For Nemo/GLDv3.
 */
int
i40e_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	i40e_t *i40e = (i40e_t *)arg;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	int port = i40e->i40e_hw_space.port;
	i40e_pf_stats_t *ips;
	i40e_pf_kstats_t *ipk;

	ASSERT(i40e->i40e_pf_kstat != NULL);
	ips = &i40e->i40e_pf_stat;
	ipk = i40e->i40e_pf_kstat->ks_data;

	/*
	 * We need both locks, as various stats are protected by different
	 * things here.
	 */
	mutex_enter(&i40e->i40e_general_lock);

	if (i40e->i40e_state & I40E_SUSPENDED) {
		mutex_exit(&i40e->i40e_general_lock);
		return (ECANCELED);
	}

	mutex_enter(&i40e->i40e_stat_lock);

	/*
	 * Unfortunately the GLDv3 conflates two rather different things here.
	 * We're combining statistics about the physical port represented by
	 * this instance with statistics that describe the properties of the
	 * logical interface. As such, we use the various aspects of the port
	 * to describe these stats, as they represent what the physical
	 * instance is doing, even though that means some tools may be
	 * confused and that seeing the logical traffic on the interface
	 * itself, sans VNICs and the like, will require more work.
	 *
	 * Stats which are not listed in this switch statement are either
	 * unimplemented in hardware at this time or don't currently apply to
	 * the device.
	 */
	switch (stat) {
	/* MIB-II stats (RFC 1213 and RFC 1573) */
	case MAC_STAT_IFSPEED:
		*val = i40e->i40e_link_speed * 1000000ull;
		break;
	case MAC_STAT_MULTIRCV:
		i40e_stat_get_uint48(i40e, I40E_GLPRT_MPRCL(port),
		    &ipk->ipk_rx_multicast, &ips->ips_rx_multicast, B_FALSE);
		*val = ipk->ipk_rx_multicast.value.ui64;
		break;
	case MAC_STAT_BRDCSTRCV:
		i40e_stat_get_uint48(i40e, I40E_GLPRT_BPRCL(port),
		    &ipk->ipk_rx_broadcast, &ips->ips_rx_broadcast, B_FALSE);
		*val = ipk->ipk_rx_broadcast.value.ui64;
		break;
	case MAC_STAT_MULTIXMT:
		i40e_stat_get_uint48(i40e, I40E_GLPRT_MPTCL(port),
		    &ipk->ipk_tx_multicast, &ips->ips_tx_multicast, B_FALSE);
		*val = ipk->ipk_tx_multicast.value.ui64;
		break;
	case MAC_STAT_BRDCSTXMT:
		i40e_stat_get_uint48(i40e, I40E_GLPRT_BPTCL(port),
		    &ipk->ipk_tx_broadcast, &ips->ips_tx_broadcast, B_FALSE);
		*val = ipk->ipk_tx_broadcast.value.ui64;
		break;
	case MAC_STAT_NORCVBUF:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RDPC(port),
		    &ipk->ipk_rx_discards, &ips->ips_rx_discards, B_FALSE);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_LDPC(port),
		    &ipk->ipk_rx_vm_discards, &ips->ips_rx_vm_discards,
		    B_FALSE);
		*val = ipk->ipk_rx_discards.value.ui64 +
		    ipk->ipk_rx_vm_discards.value.ui64;
		break;
	/*
	 * Note that some RXERR2 stats are also duplicated by the switch
	 * filter stats; however, since we're not using those at this time, it
	 * seems reasonable to include them here.
	 */
	case MAC_STAT_IERRORS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_CRCERRS(port),
		    &ipk->ipk_crc_errors, &ips->ips_crc_errors, B_FALSE);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_ILLERRC(port),
		    &ipk->ipk_illegal_bytes, &ips->ips_illegal_bytes, B_FALSE);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RLEC(port),
		    &ipk->ipk_rx_length_errors, &ips->ips_rx_length_errors,
		    B_FALSE);
		i40e_stat_get_uint48(i40e, I40E_GL_RXERR1_L(port),
		    &ipk->ipk_rx_err1, &ips->ips_rx_err1, B_FALSE);
		i40e_stat_get_uint48(i40e, I40E_GL_RXERR2_L(port),
		    &ipk->ipk_rx_err2, &ips->ips_rx_err2, B_FALSE);

		*val = ipk->ipk_crc_errors.value.ui64 +
		    ipk->ipk_illegal_bytes.value.ui64 +
		    ipk->ipk_rx_length_errors.value.ui64 +
		    ipk->ipk_rx_err1.value.ui64 +
		    ipk->ipk_rx_err2.value.ui64;
		break;
	case MAC_STAT_UNKNOWNS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RUPP(port),
		    &ipk->ipk_rx_unknown_protocol,
		    &ips->ips_rx_unknown_protocol,
		    B_FALSE);
		*val = ipk->ipk_rx_unknown_protocol.value.ui64;
		break;
	case MAC_STAT_RBYTES:
		i40e_stat_get_uint48(i40e, I40E_GLPRT_GORCL(port),
		    &ipk->ipk_rx_bytes, &ips->ips_rx_bytes, B_FALSE);
		*val = ipk->ipk_rx_bytes.value.ui64;
		break;
	case MAC_STAT_IPACKETS:
		i40e_stat_get_uint48(i40e, I40E_GLPRT_UPRCL(port),
		    &ipk->ipk_rx_unicast, &ips->ips_rx_unicast, B_FALSE);
		i40e_stat_get_uint48(i40e, I40E_GLPRT_MPRCL(port),
		    &ipk->ipk_rx_multicast, &ips->ips_rx_multicast, B_FALSE);
		i40e_stat_get_uint48(i40e, I40E_GLPRT_BPRCL(port),
		    &ipk->ipk_rx_broadcast, &ips->ips_rx_broadcast, B_FALSE);
		*val = ipk->ipk_rx_unicast.value.ui64 +
		    ipk->ipk_rx_multicast.value.ui64 +
		    ipk->ipk_rx_broadcast.value.ui64;
		break;
	case MAC_STAT_OBYTES:
		i40e_stat_get_uint48(i40e, I40E_GLPRT_GOTCL(port),
		    &ipk->ipk_tx_bytes, &ips->ips_tx_bytes, B_FALSE);
		*val = ipk->ipk_tx_bytes.value.ui64;
		break;
	case MAC_STAT_OPACKETS:
		i40e_stat_get_uint48(i40e, I40E_GLPRT_UPTCL(port),
		    &ipk->ipk_tx_unicast, &ips->ips_tx_unicast, B_FALSE);
		i40e_stat_get_uint48(i40e, I40E_GLPRT_MPTCL(port),
		    &ipk->ipk_tx_multicast, &ips->ips_tx_multicast, B_FALSE);
		i40e_stat_get_uint48(i40e, I40E_GLPRT_BPTCL(port),
		    &ipk->ipk_tx_broadcast, &ips->ips_tx_broadcast, B_FALSE);
		*val = ipk->ipk_tx_unicast.value.ui64 +
		    ipk->ipk_tx_multicast.value.ui64 +
		    ipk->ipk_tx_broadcast.value.ui64;
		break;
	case MAC_STAT_UNDERFLOWS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RUC(port),
		    &ipk->ipk_rx_undersize, &ips->ips_rx_undersize, B_FALSE);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RFC(port),
		    &ipk->ipk_rx_fragments, &ips->ips_rx_fragments, B_FALSE);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_MSPDC(port),
		    &ipk->ipk_rx_short_discards, &ips->ips_rx_short_discards,
		    B_FALSE);
		*val = ipk->ipk_rx_undersize.value.ui64 +
		    ipk->ipk_rx_fragments.value.ui64 +
		    ipk->ipk_rx_short_discards.value.ui64;
		break;
	case MAC_STAT_OVERFLOWS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_ROC(port),
		    &ipk->ipk_rx_oversize, &ips->ips_rx_oversize, B_FALSE);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RJC(port),
		    &ipk->ipk_rx_jabber, &ips->ips_rx_jabber, B_FALSE);
		*val = ipk->ipk_rx_oversize.value.ui64 +
		    ipk->ipk_rx_jabber.value.ui64;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_FCS_ERRORS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_CRCERRS(port),
		    &ipk->ipk_crc_errors, &ips->ips_crc_errors, B_FALSE);
		*val = ipk->ipk_crc_errors.value.ui64;
		break;
	case ETHER_STAT_TOOLONG_ERRORS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_ROC(port),
		    &ipk->ipk_rx_oversize, &ips->ips_rx_oversize, B_FALSE);
		*val = ipk->ipk_rx_oversize.value.ui64;
		break;
	case ETHER_STAT_MACRCV_ERRORS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_ILLERRC(port),
		    &ipk->ipk_illegal_bytes, &ips->ips_illegal_bytes, B_FALSE);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RLEC(port),
		    &ipk->ipk_rx_length_errors, &ips->ips_rx_length_errors,
		    B_FALSE);
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RFC(port),
		    &ipk->ipk_rx_fragments, &ips->ips_rx_fragments, B_FALSE);
		*val = ipk->ipk_illegal_bytes.value.ui64 +
		    ipk->ipk_rx_length_errors.value.ui64 +
		    ipk->ipk_rx_fragments.value.ui64;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Receiver address is apparently the same as the port */
		*val = i40e->i40e_hw_space.port;
		break;
	case ETHER_STAT_XCVR_ID:
		switch (hw->phy.media_type) {
		case I40E_MEDIA_TYPE_BASET:
			/*
			 * Transform the data here into the ID. Note that
			 * generally the revision is left out.
			 */
			*val = i40e->i40e_phy.phy_id[3] << 24 |
			    i40e->i40e_phy.phy_id[2] << 16 |
			    i40e->i40e_phy.phy_id[1] << 8;
			break;
		case I40E_MEDIA_TYPE_FIBER:
		case I40E_MEDIA_TYPE_BACKPLANE:
		case I40E_MEDIA_TYPE_CX4:
		case I40E_MEDIA_TYPE_DA:
		case I40E_MEDIA_TYPE_VIRTUAL:
			*val = i40e->i40e_phy.phy_id[0] |
			    i40e->i40e_phy.phy_id[1] << 8 |
			    i40e->i40e_phy.phy_id[2] << 16;
			break;
		case I40E_MEDIA_TYPE_UNKNOWN:
		default:
			goto unimpl;
		}
		break;
	case ETHER_STAT_XCVR_INUSE:
		switch (hw->phy.link_info.phy_type) {
		case I40E_PHY_TYPE_100BASE_TX:
			*val = XCVR_100T2;
			break;
		case I40E_PHY_TYPE_1000BASE_T:
			*val = XCVR_1000T;
			break;
		default:
			*val = XCVR_UNDEFINED;
			break;
		}
		break;

	/*
	 * This group answers the question of whether we support a given speed
	 * in theory.
	 */
	case ETHER_STAT_CAP_100FDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_100MB) != 0;
		break;
	case ETHER_STAT_CAP_1000FDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_1GB) != 0;
		break;
	case ETHER_STAT_CAP_2500FDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_2_5GB) != 0;
		break;
	case ETHER_STAT_CAP_5000FDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_5GB) != 0;
		break;
	case ETHER_STAT_CAP_10GFDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_10GB) != 0;
		break;
	case ETHER_STAT_CAP_25GFDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_25GB) != 0;
		break;
	case ETHER_STAT_CAP_40GFDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_40GB) != 0;
		break;

	/*
	 * These ask whether we are currently advertising these speeds and
	 * abilities. Until we support setting these (we're working with a
	 * copper PHY), the only things we advertise are based on the PHY's
	 * link speeds. In other words, we advertise everything we support.
	 */
	case ETHER_STAT_ADV_CAP_100FDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_100MB) != 0;
		break;
	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_1GB) != 0;
		break;
	case ETHER_STAT_ADV_CAP_2500FDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_2_5GB) != 0;
		break;
	case ETHER_STAT_ADV_CAP_5000FDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_5GB) != 0;
		break;
	case ETHER_STAT_ADV_CAP_10GFDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_10GB) != 0;
		break;
	case ETHER_STAT_ADV_CAP_25GFDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_25GB) != 0;
		break;
	case ETHER_STAT_ADV_CAP_40GFDX:
		*val = (i40e->i40e_phy.link_speed & I40E_LINK_SPEED_40GB) != 0;
		break;

	/*
	 * These ask if the peer supports these speeds, i.e. what they told us
	 * during auto-negotiation. Unfortunately, the hardware doesn't appear
	 * to give us a way to determine whether or not the peer actually
	 * supports something, only what it has enabled. This means that all
	 * we can tell the user is the speed that we're currently linked at.
	 */
	case ETHER_STAT_LP_CAP_100FDX:
		*val = i40e->i40e_link_speed == 100;
		break;
	case ETHER_STAT_LP_CAP_1000FDX:
		*val = i40e->i40e_link_speed == 1000;
		break;
	case ETHER_STAT_LP_CAP_2500FDX:
		*val = i40e->i40e_link_speed == 2500;
		break;
	case ETHER_STAT_LP_CAP_5000FDX:
		*val = i40e->i40e_link_speed == 5000;
		break;
	case ETHER_STAT_LP_CAP_10GFDX:
		*val = i40e->i40e_link_speed == 10000;
		break;
	case ETHER_STAT_LP_CAP_25GFDX:
		*val = i40e->i40e_link_speed == 25000;
		break;
	case ETHER_STAT_LP_CAP_40GFDX:
		*val = i40e->i40e_link_speed == 40000;
		break;

	/*
	 * Statistics for unsupported speeds. Note that these often have the
	 * same constraints as the other groups. For example, we can't answer
	 * the ETHER_STAT_LP_CAP family because the hardware doesn't give us
	 * any way of knowing what the link partner actually supports.
	 */
	case ETHER_STAT_CAP_100HDX:
	case ETHER_STAT_CAP_1000HDX:
	case ETHER_STAT_CAP_10FDX:
	case ETHER_STAT_CAP_10HDX:
	case ETHER_STAT_CAP_100T4:
	case ETHER_STAT_CAP_100GFDX:
	case ETHER_STAT_CAP_50GFDX:
	case ETHER_STAT_ADV_CAP_1000HDX:
	case ETHER_STAT_ADV_CAP_100HDX:
	case ETHER_STAT_ADV_CAP_10FDX:
	case ETHER_STAT_ADV_CAP_10HDX:
	case ETHER_STAT_ADV_CAP_100T4:
	case ETHER_STAT_ADV_CAP_100GFDX:
	case ETHER_STAT_ADV_CAP_50GFDX:
	case ETHER_STAT_LP_CAP_1000HDX:
	case ETHER_STAT_LP_CAP_100HDX:
	case ETHER_STAT_LP_CAP_10FDX:
	case ETHER_STAT_LP_CAP_10HDX:
	case ETHER_STAT_LP_CAP_100T4:
	case ETHER_STAT_LP_CAP_100GFDX:
	case ETHER_STAT_LP_CAP_50GFDX:
		*val = 0;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = i40e->i40e_link_duplex;
		break;
	case ETHER_STAT_TOOSHORT_ERRORS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RUC(port),
		    &ipk->ipk_rx_undersize, &ips->ips_rx_undersize, B_FALSE);

		i40e_stat_get_uint32(i40e, I40E_GLPRT_MSPDC(port),
		    &ipk->ipk_rx_short_discards, &ips->ips_rx_short_discards,
		    B_FALSE);
		*val = ipk->ipk_rx_undersize.value.ui64 +
		    ipk->ipk_rx_short_discards.value.ui64;
		break;
	case ETHER_STAT_JABBER_ERRORS:
		i40e_stat_get_uint32(i40e, I40E_GLPRT_RJC(port),
		    &ipk->ipk_rx_jabber, &ips->ips_rx_jabber, B_FALSE);
		*val = ipk->ipk_rx_jabber.value.ui64;
		break;

	/*
	 * Non-link speed related capabilities.
	 */
	case ETHER_STAT_CAP_AUTONEG:
		*val = 1;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = 1;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = (hw->phy.link_info.an_info & I40E_AQ_LP_AN_ABILITY) != 0;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = 1;
		break;

	/*
	 * Note that while the hardware does support the pause functionality,
	 * at this time we do not use it at all and effectively disable it.
	 */
	case ETHER_STAT_CAP_ASMPAUSE:
		*val = (i40e->i40e_phy.abilities &
		    I40E_AQ_PHY_FLAG_PAUSE_RX) != 0;
		break;
	case ETHER_STAT_CAP_PAUSE:
		*val = (i40e->i40e_phy.abilities &
		    I40E_AQ_PHY_FLAG_PAUSE_TX) != 0;
		break;

	/*
	 * Because we don't support these at this time, they are always
	 * hard-coded to zero.
	 */
	case ETHER_STAT_ADV_CAP_ASMPAUSE:
	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = 0;
		break;

	/*
	 * Like the other LP fields, we can only answer whether we have
	 * enabled pause, not whether the other end actually supports it.
	 */
	case ETHER_STAT_LP_CAP_ASMPAUSE:
	case ETHER_STAT_LINK_ASMPAUSE:
		*val = (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) != 0;
		break;
	case ETHER_STAT_LP_CAP_PAUSE:
	case ETHER_STAT_LINK_PAUSE:
		*val = (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) != 0;
		break;

	default:
unimpl:
		mutex_exit(&i40e->i40e_stat_lock);
		mutex_exit(&i40e->i40e_general_lock);
		return (ENOTSUP);
	}

	mutex_exit(&i40e->i40e_stat_lock);
	mutex_exit(&i40e->i40e_general_lock);

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

int
i40e_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
{
	i40e_trqpair_t *itrq = (i40e_trqpair_t *)rh;
	i40e_t *i40e = itrq->itrq_i40e;

	if (i40e->i40e_state & I40E_SUSPENDED) {
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_RBYTES:
		*val = itrq->itrq_rxstat.irxs_bytes.value.ui64;
		break;
	case MAC_STAT_IPACKETS:
		*val = itrq->itrq_rxstat.irxs_packets.value.ui64;
		break;
	default:
		*val = 0;
		return (ENOTSUP);
	}

	return (0);
}

int
i40e_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
{
	i40e_trqpair_t *itrq = (i40e_trqpair_t *)rh;
	i40e_t *i40e = itrq->itrq_i40e;

	if (i40e->i40e_state & I40E_SUSPENDED) {
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_OBYTES:
		*val = itrq->itrq_txstat.itxs_bytes.value.ui64;
		break;
	case MAC_STAT_OPACKETS:
		*val = itrq->itrq_txstat.itxs_packets.value.ui64;
		break;
	default:
		*val = 0;
		return (ENOTSUP);
	}

	return (0);
}

/*
 * When we end up refactoring all of the queue assignments and have non-static
 * queue to VSI mappings, then we may need to revisit the general locking
 * strategy that we employ and have the kstat creation / deletion be part of
 * the ring start and stop routines.
 */
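
/*
 * As with the PF and VSI kstats, the per-ring counters managed below are
 * visible from userland; for example (assuming instance 0 and the first
 * queue pair, shown for illustration only):
 *
 *	kstat -p i40e:0:trqpair_tx_0:tx_bytes
 *	kstat -p i40e:0:trqpair_rx_0:rx_packets
 */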
void
i40e_stats_trqpair_fini(i40e_trqpair_t *itrq)
{
	if (itrq->itrq_txkstat != NULL) {
		kstat_delete(itrq->itrq_txkstat);
		itrq->itrq_txkstat = NULL;
	}

	if (itrq->itrq_rxkstat != NULL) {
		kstat_delete(itrq->itrq_rxkstat);
		itrq->itrq_rxkstat = NULL;
	}
}

boolean_t
i40e_stats_trqpair_init(i40e_trqpair_t *itrq)
{
	char buf[128];
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_txq_stat_t *tsp = &itrq->itrq_txstat;
	i40e_rxq_stat_t *rsp = &itrq->itrq_rxstat;

	(void) snprintf(buf, sizeof (buf), "trqpair_tx_%d", itrq->itrq_index);
	itrq->itrq_txkstat = kstat_create(I40E_MODULE_NAME,
	    ddi_get_instance(i40e->i40e_dip), buf, "net", KSTAT_TYPE_NAMED,
	    sizeof (i40e_txq_stat_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (itrq->itrq_txkstat == NULL)
		return (B_FALSE);

	(void) snprintf(buf, sizeof (buf), "trqpair_rx_%d", itrq->itrq_index);
	itrq->itrq_rxkstat = kstat_create(I40E_MODULE_NAME,
	    ddi_get_instance(i40e->i40e_dip), buf, "net", KSTAT_TYPE_NAMED,
	    sizeof (i40e_rxq_stat_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (itrq->itrq_rxkstat == NULL) {
		kstat_delete(itrq->itrq_txkstat);
		itrq->itrq_txkstat = NULL;
		return (B_FALSE);
	}

	itrq->itrq_txkstat->ks_data = &itrq->itrq_txstat;
	itrq->itrq_rxkstat->ks_data = &itrq->itrq_rxstat;

	kstat_named_init(&tsp->itxs_bytes, "tx_bytes",
	    KSTAT_DATA_UINT64);
	tsp->itxs_bytes.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_packets, "tx_packets",
	    KSTAT_DATA_UINT64);
	tsp->itxs_packets.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_descriptors, "tx_descriptors",
	    KSTAT_DATA_UINT64);
	tsp->itxs_descriptors.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_recycled, "tx_recycled",
	    KSTAT_DATA_UINT64);
	tsp->itxs_recycled.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_force_copy, "tx_force_copy",
	    KSTAT_DATA_UINT64);
	tsp->itxs_force_copy.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_tso_force_copy, "tx_tso_force_copy",
	    KSTAT_DATA_UINT64);
	tsp->itxs_tso_force_copy.value.ui64 = 0;

	kstat_named_init(&tsp->itxs_hck_meoifail, "tx_hck_meoifail",
	    KSTAT_DATA_UINT64);
	tsp->itxs_hck_meoifail.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_hck_nol2info, "tx_hck_nol2info",
	    KSTAT_DATA_UINT64);
	tsp->itxs_hck_nol2info.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_hck_nol3info, "tx_hck_nol3info",
	    KSTAT_DATA_UINT64);
	tsp->itxs_hck_nol3info.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_hck_nol4info, "tx_hck_nol4info",
	    KSTAT_DATA_UINT64);
	tsp->itxs_hck_nol4info.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_hck_badl3, "tx_hck_badl3",
	    KSTAT_DATA_UINT64);
	tsp->itxs_hck_badl3.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_hck_badl4, "tx_hck_badl4",
	    KSTAT_DATA_UINT64);
	tsp->itxs_hck_badl4.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_lso_nohck, "tx_lso_nohck",
	    KSTAT_DATA_UINT64);
	tsp->itxs_lso_nohck.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_bind_fails, "tx_bind_fails",
	    KSTAT_DATA_UINT64);
	tsp->itxs_bind_fails.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_tx_short, "tx_short",
	    KSTAT_DATA_UINT64);
	tsp->itxs_tx_short.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_err_notcb, "tx_err_notcb",
	    KSTAT_DATA_UINT64);
	tsp->itxs_err_notcb.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_err_nodescs, "tx_err_nodescs",
	    KSTAT_DATA_UINT64);
	tsp->itxs_err_nodescs.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_err_context, "tx_err_context",
	    KSTAT_DATA_UINT64);
	tsp->itxs_err_context.value.ui64 = 0;
	kstat_named_init(&tsp->itxs_num_unblocked, "tx_num_unblocked",
	    KSTAT_DATA_UINT64);
	tsp->itxs_num_unblocked.value.ui64 = 0;

	kstat_named_init(&rsp->irxs_bytes, "rx_bytes",
	    KSTAT_DATA_UINT64);
	rsp->irxs_bytes.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_packets, "rx_packets",
	    KSTAT_DATA_UINT64);
	rsp->irxs_packets.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_rx_desc_error, "rx_desc_error",
	    KSTAT_DATA_UINT64);
	rsp->irxs_rx_desc_error.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_rx_intr_limit, "rx_intr_limit",
	    KSTAT_DATA_UINT64);
	rsp->irxs_rx_intr_limit.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_rx_bind_norcb, "rx_bind_norcb",
	    KSTAT_DATA_UINT64);
	rsp->irxs_rx_bind_norcb.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_rx_bind_nomp, "rx_bind_nomp",
	    KSTAT_DATA_UINT64);
	rsp->irxs_rx_bind_nomp.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_rx_copy_nomem, "rx_copy_nomem",
	    KSTAT_DATA_UINT64);
	rsp->irxs_rx_copy_nomem.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_v4hdrok, "rx_hck_v4hdrok",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_v4hdrok.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_l4hdrok, "rx_hck_l4hdrok",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_l4hdrok.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_unknown, "rx_hck_unknown",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_unknown.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_nol3l4p, "rx_hck_nol3l4p",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_nol3l4p.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_iperr, "rx_hck_iperr",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_iperr.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_eiperr, "rx_hck_eiperr",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_eiperr.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_l4err, "rx_hck_l4err",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_l4err.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_v6skip, "rx_hck_v6skip",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_v6skip.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_set, "rx_hck_set",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_set.value.ui64 = 0;
	kstat_named_init(&rsp->irxs_hck_miss, "rx_hck_miss",
	    KSTAT_DATA_UINT64);
	rsp->irxs_hck_miss.value.ui64 = 0;

	kstat_install(itrq->itrq_txkstat);
	kstat_install(itrq->itrq_rxkstat);

	return (B_TRUE);
}