/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe, cxgbe_probe),
	DEVMETHOD(device_attach, cxgbe_attach),
	DEVMETHOD(device_detach, cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe, vcxgbe_probe),
	DEVMETHOD(device_attach, vcxgbe_attach),
	DEVMETHOD(device_detach, vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe, t5_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe, t6_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};


/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it should
 * provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
int t4_ntxq10g = -NTXQ_10G;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
int t4_nrxq10g = -NRXQ_10G;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
int t4_ntxq1g = -NTXQ_1G;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
int t4_nrxq1g = -NRXQ_1G;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -NOFLDTXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -NOFLDRXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -NOFLDTXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -NOFLDRXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_idle", &t4_toe_keepalive_idle);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_interval", &t4_toe_keepalive_interval);

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
TUNABLE_INT("hw.cxgbe.toe.keepalive_count", &t4_toe_keepalive_count);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_min", &t4_toe_rexmt_min);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_max", &t4_toe_rexmt_max);

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
TUNABLE_INT("hw.cxgbe.toe.rexmt_count", &t4_toe_rexmt_count);

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.0", &t4_toe_rexmt_backoff[0]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.1", &t4_toe_rexmt_backoff[1]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.2", &t4_toe_rexmt_backoff[2]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.3", &t4_toe_rexmt_backoff[3]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.4", &t4_toe_rexmt_backoff[4]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.5", &t4_toe_rexmt_backoff[5]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.6", &t4_toe_rexmt_backoff[6]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.7", &t4_toe_rexmt_backoff[7]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.8", &t4_toe_rexmt_backoff[8]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.9", &t4_toe_rexmt_backoff[9]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.10", &t4_toe_rexmt_backoff[10]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.11", &t4_toe_rexmt_backoff[11]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.12", &t4_toe_rexmt_backoff[12]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.13", &t4_toe_rexmt_backoff[13]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.14", &t4_toe_rexmt_backoff[14]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.15", &t4_toe_rexmt_backoff[15]);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF "default"
#define FLASH_CF "flash"
#define UWIRE_CF "uwire"
#define FPGA_CF "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);

/*
 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
 * FEC_RESERVED respectively).
 * -1 to run with the firmware default.
 * 0 to disable FEC.
 */
static int t4_fec = -1;
TUNABLE_INT("hw.cxgbe.fec", &t4_fec);

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 * 0 to disable.
 * 1 to enable.
 */
static int t4_autoneg = -1;
TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static void init_l1cfg(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
	{0x6480, "Chelsio T6225 80"},
	{0x6481, "Chelsio T62100 81"},
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint8_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * Number of VIs to create per-port.  The first VI is the "main" regular
	 * VI for the port.  The rest are additional virtual interfaces on the
	 * same physical port.  Note that the main VI does not have native
	 * netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis >= 1)
		num_vis = t4_num_vis;
	else
		num_vis = 1;
	if (num_vis > nitems(vi_mac_funcs)) {
		num_vis = nitems(vi_mac_funcs);
		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
	}

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation until
		 * pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (port_top_speed(pi) >= 10) {
			n10g++;
		} else {
			n1g++;
		}

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */
	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
		num_vis = 1;

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	if (num_vis > 1) {
		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		if (num_vis > 1) {
			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldrxq_vi;
			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldtxq_vi;
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_tx_sched(sc);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->tmr_idx = t4_tmr_idx_10g;
				vi->pktc_idx = t4_pktc_idx_10g;
				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
			} else {
				vi->tmr_idx = t4_tmr_idx_1g;
				vi->pktc_idx = t4_pktc_idx_1g;
				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
			}
			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#ifdef TCP_OFFLOAD
			vi->first_ofld_rxq = ofld_rqidx;
			vi->first_ofld_txq = ofld_tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
				    iaq.nofldtxq_vi;
			} else {
				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
				    iaq.nofldtxq_vi;
			}
			ofld_rqidx += vi->nofldrxq;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
			if (j > 0) {
				vi->first_nm_rxq = nm_rqidx;
				vi->first_nm_txq = nm_tqidx;
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
				nm_rqidx += vi->nnmrxq;
				nm_tqidx += vi->nnmtxq;
			}
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	device_delete_children(dev);

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		ifp->if_capabilities |= IFCAP_NETMAP;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this VI */
	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);

	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
		    vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	if (vi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	ifmedia_removeall(&vi->media);
	if_free(vi->ifp);
	vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);

	end_synchronized_op(sc, 0);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > MAX_MTU)
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, vi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(sc, &m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
*/ 1884 txq = &sc->sge.txq[vi->first_txq]; 1885 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 1886 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + 1887 vi->rsrv_noflowq); 1888 1889 items[0] = m; 1890 rc = mp_ring_enqueue(txq->r, items, 1, 4096); 1891 if (__predict_false(rc != 0)) 1892 m_freem(m); 1893 1894 return (rc); 1895 } 1896 1897 static void 1898 cxgbe_qflush(struct ifnet *ifp) 1899 { 1900 struct vi_info *vi = ifp->if_softc; 1901 struct sge_txq *txq; 1902 int i; 1903 1904 /* queues do not exist if !VI_INIT_DONE. */ 1905 if (vi->flags & VI_INIT_DONE) { 1906 for_each_txq(vi, i, txq) { 1907 TXQ_LOCK(txq); 1908 txq->eq.flags |= EQ_QFLUSH; 1909 TXQ_UNLOCK(txq); 1910 while (!mp_ring_is_idle(txq->r)) { 1911 mp_ring_check_drainage(txq->r, 0); 1912 pause("qflush", 1); 1913 } 1914 TXQ_LOCK(txq); 1915 txq->eq.flags &= ~EQ_QFLUSH; 1916 TXQ_UNLOCK(txq); 1917 } 1918 } 1919 if_qflush(ifp); 1920 } 1921 1922 static uint64_t 1923 vi_get_counter(struct ifnet *ifp, ift_counter c) 1924 { 1925 struct vi_info *vi = ifp->if_softc; 1926 struct fw_vi_stats_vf *s = &vi->stats; 1927 1928 vi_refresh_stats(vi->pi->adapter, vi); 1929 1930 switch (c) { 1931 case IFCOUNTER_IPACKETS: 1932 return (s->rx_bcast_frames + s->rx_mcast_frames + 1933 s->rx_ucast_frames); 1934 case IFCOUNTER_IERRORS: 1935 return (s->rx_err_frames); 1936 case IFCOUNTER_OPACKETS: 1937 return (s->tx_bcast_frames + s->tx_mcast_frames + 1938 s->tx_ucast_frames + s->tx_offload_frames); 1939 case IFCOUNTER_OERRORS: 1940 return (s->tx_drop_frames); 1941 case IFCOUNTER_IBYTES: 1942 return (s->rx_bcast_bytes + s->rx_mcast_bytes + 1943 s->rx_ucast_bytes); 1944 case IFCOUNTER_OBYTES: 1945 return (s->tx_bcast_bytes + s->tx_mcast_bytes + 1946 s->tx_ucast_bytes + s->tx_offload_bytes); 1947 case IFCOUNTER_IMCASTS: 1948 return (s->rx_mcast_frames); 1949 case IFCOUNTER_OMCASTS: 1950 return (s->tx_mcast_frames); 1951 case IFCOUNTER_OQDROPS: { 1952 uint64_t drops; 1953 1954 drops = 0; 1955 if (vi->flags & VI_INIT_DONE) { 1956 int i; 1957 struct sge_txq *txq; 1958 1959 for_each_txq(vi, i, txq) 1960 drops += counter_u64_fetch(txq->r->drops); 1961 } 1962 1963 return (drops); 1964 1965 } 1966 1967 default: 1968 return (if_get_counter_default(ifp, c)); 1969 } 1970 } 1971 1972 uint64_t 1973 cxgbe_get_counter(struct ifnet *ifp, ift_counter c) 1974 { 1975 struct vi_info *vi = ifp->if_softc; 1976 struct port_info *pi = vi->pi; 1977 struct adapter *sc = pi->adapter; 1978 struct port_stats *s = &pi->stats; 1979 1980 if (pi->nvi > 1 || sc->flags & IS_VF) 1981 return (vi_get_counter(ifp, c)); 1982 1983 cxgbe_refresh_stats(sc, pi); 1984 1985 switch (c) { 1986 case IFCOUNTER_IPACKETS: 1987 return (s->rx_frames); 1988 1989 case IFCOUNTER_IERRORS: 1990 return (s->rx_jabber + s->rx_runt + s->rx_too_long + 1991 s->rx_fcs_err + s->rx_len_err); 1992 1993 case IFCOUNTER_OPACKETS: 1994 return (s->tx_frames); 1995 1996 case IFCOUNTER_OERRORS: 1997 return (s->tx_error_frames); 1998 1999 case IFCOUNTER_IBYTES: 2000 return (s->rx_octets); 2001 2002 case IFCOUNTER_OBYTES: 2003 return (s->tx_octets); 2004 2005 case IFCOUNTER_IMCASTS: 2006 return (s->rx_mcast_frames); 2007 2008 case IFCOUNTER_OMCASTS: 2009 return (s->tx_mcast_frames); 2010 2011 case IFCOUNTER_IQDROPS: 2012 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 2013 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + 2014 s->rx_trunc3 + pi->tnl_cong_drops); 2015 2016 case IFCOUNTER_OQDROPS: { 2017 uint64_t drops; 2018 2019 drops = s->tx_drop; 2020 if (vi->flags & VI_INIT_DONE) { 2021 int i; 2022 struct 
sge_txq *txq; 2023 2024 for_each_txq(vi, i, txq) 2025 drops += counter_u64_fetch(txq->r->drops); 2026 } 2027 2028 return (drops); 2029 2030 } 2031 2032 default: 2033 return (if_get_counter_default(ifp, c)); 2034 } 2035 } 2036 2037 static int 2038 cxgbe_media_change(struct ifnet *ifp) 2039 { 2040 struct vi_info *vi = ifp->if_softc; 2041 2042 device_printf(vi->dev, "%s unimplemented.\n", __func__); 2043 2044 return (EOPNOTSUPP); 2045 } 2046 2047 static void 2048 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2049 { 2050 struct vi_info *vi = ifp->if_softc; 2051 struct port_info *pi = vi->pi; 2052 struct ifmedia_entry *cur; 2053 struct link_config *lc = &pi->link_cfg; 2054 2055 /* 2056 * If all the interfaces are administratively down the firmware does not 2057 * report transceiver changes. Refresh port info here so that ifconfig 2058 * displays accurate information at all times. 2059 */ 2060 if (begin_synchronized_op(pi->adapter, NULL, SLEEP_OK | INTR_OK, 2061 "t4med") == 0) { 2062 PORT_LOCK(pi); 2063 if (pi->up_vis == 0) { 2064 t4_update_port_info(pi); 2065 build_medialist(pi, &vi->media); 2066 } 2067 PORT_UNLOCK(pi); 2068 end_synchronized_op(pi->adapter, 0); 2069 } 2070 2071 cur = vi->media.ifm_cur; 2072 2073 ifmr->ifm_status = IFM_AVALID; 2074 if (lc->link_ok == 0) 2075 return; 2076 2077 ifmr->ifm_status |= IFM_ACTIVE; 2078 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); 2079 if (lc->fc & PAUSE_RX) 2080 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2081 if (lc->fc & PAUSE_TX) 2082 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2083 2084 /* active and current will differ iff current media is autoselect. */ 2085 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 2086 return; 2087 2088 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 2089 if (lc->fc & PAUSE_RX) 2090 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2091 if (lc->fc & PAUSE_TX) 2092 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2093 switch (lc->speed) { 2094 case 10000: 2095 ifmr->ifm_active |= IFM_10G_T; 2096 break; 2097 case 1000: 2098 ifmr->ifm_active |= IFM_1000_T; 2099 break; 2100 case 100: 2101 ifmr->ifm_active |= IFM_100_TX; 2102 break; 2103 case 10: 2104 ifmr->ifm_active |= IFM_10_T; 2105 break; 2106 default: 2107 device_printf(vi->dev, "link up but speed unknown (%u)\n", 2108 lc->speed); 2109 } 2110 } 2111 2112 static int 2113 vcxgbe_probe(device_t dev) 2114 { 2115 char buf[128]; 2116 struct vi_info *vi = device_get_softc(dev); 2117 2118 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, 2119 vi - vi->pi->vi); 2120 device_set_desc_copy(dev, buf); 2121 2122 return (BUS_PROBE_DEFAULT); 2123 } 2124 2125 static int 2126 vcxgbe_attach(device_t dev) 2127 { 2128 struct vi_info *vi; 2129 struct port_info *pi; 2130 struct adapter *sc; 2131 int func, index, rc; 2132 u32 param, val; 2133 2134 vi = device_get_softc(dev); 2135 pi = vi->pi; 2136 sc = pi->adapter; 2137 2138 index = vi - pi->vi; 2139 KASSERT(index < nitems(vi_mac_funcs), 2140 ("%s: VI %s doesn't have a MAC func", __func__, 2141 device_get_nameunit(dev))); 2142 func = vi_mac_funcs[index]; 2143 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, 2144 vi->hw_addr, &vi->rss_size, func, 0); 2145 if (rc < 0) { 2146 device_printf(dev, "Failed to allocate virtual interface " 2147 "for port %d: %d\n", pi->port_id, -rc); 2148 return (-rc); 2149 } 2150 vi->viid = rc; 2151 if (chip_id(sc) <= CHELSIO_T5) 2152 vi->smt_idx = (rc & 0x7f) << 1; 2153 else 2154 vi->smt_idx = (rc & 0x7f); 2155 2156 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2157 
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 2158 V_FW_PARAMS_PARAM_YZ(vi->viid); 2159 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2160 if (rc) 2161 vi->rss_base = 0xffff; 2162 else { 2163 /* MPASS((val >> 16) == rss_size); */ 2164 vi->rss_base = val & 0xffff; 2165 } 2166 2167 rc = cxgbe_vi_attach(dev, vi); 2168 if (rc) { 2169 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2170 return (rc); 2171 } 2172 return (0); 2173 } 2174 2175 static int 2176 vcxgbe_detach(device_t dev) 2177 { 2178 struct vi_info *vi; 2179 struct adapter *sc; 2180 2181 vi = device_get_softc(dev); 2182 sc = vi->pi->adapter; 2183 2184 doom_vi(sc, vi); 2185 2186 cxgbe_vi_detach(vi); 2187 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2188 2189 end_synchronized_op(sc, 0); 2190 2191 return (0); 2192 } 2193 2194 void 2195 t4_fatal_err(struct adapter *sc) 2196 { 2197 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 2198 t4_intr_disable(sc); 2199 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 2200 device_get_nameunit(sc->dev)); 2201 } 2202 2203 void 2204 t4_add_adapter(struct adapter *sc) 2205 { 2206 sx_xlock(&t4_list_lock); 2207 SLIST_INSERT_HEAD(&t4_list, sc, link); 2208 sx_xunlock(&t4_list_lock); 2209 } 2210 2211 int 2212 t4_map_bars_0_and_4(struct adapter *sc) 2213 { 2214 sc->regs_rid = PCIR_BAR(0); 2215 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2216 &sc->regs_rid, RF_ACTIVE); 2217 if (sc->regs_res == NULL) { 2218 device_printf(sc->dev, "cannot map registers.\n"); 2219 return (ENXIO); 2220 } 2221 sc->bt = rman_get_bustag(sc->regs_res); 2222 sc->bh = rman_get_bushandle(sc->regs_res); 2223 sc->mmio_len = rman_get_size(sc->regs_res); 2224 setbit(&sc->doorbells, DOORBELL_KDB); 2225 2226 sc->msix_rid = PCIR_BAR(4); 2227 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2228 &sc->msix_rid, RF_ACTIVE); 2229 if (sc->msix_res == NULL) { 2230 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 2231 return (ENXIO); 2232 } 2233 2234 return (0); 2235 } 2236 2237 int 2238 t4_map_bar_2(struct adapter *sc) 2239 { 2240 2241 /* 2242 * T4: only iWARP driver uses the userspace doorbells. There is no need 2243 * to map it if RDMA is disabled. 2244 */ 2245 if (is_t4(sc) && sc->rdmacaps == 0) 2246 return (0); 2247 2248 sc->udbs_rid = PCIR_BAR(2); 2249 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2250 &sc->udbs_rid, RF_ACTIVE); 2251 if (sc->udbs_res == NULL) { 2252 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 2253 return (ENXIO); 2254 } 2255 sc->udbs_base = rman_get_virtual(sc->udbs_res); 2256 2257 if (chip_id(sc) >= CHELSIO_T5) { 2258 setbit(&sc->doorbells, DOORBELL_UDB); 2259 #if defined(__i386__) || defined(__amd64__) 2260 if (t5_write_combine) { 2261 int rc, mode; 2262 2263 /* 2264 * Enable write combining on BAR2. This is the 2265 * userspace doorbell BAR and is split into 128B 2266 * (UDBS_SEG_SIZE) doorbell regions, each associated 2267 * with an egress queue. The first 64B has the doorbell 2268 * and the second 64B can be used to submit a tx work 2269 * request with an implicit doorbell. 2270 */ 2271 2272 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 2273 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 2274 if (rc == 0) { 2275 clrbit(&sc->doorbells, DOORBELL_UDB); 2276 setbit(&sc->doorbells, DOORBELL_WCWR); 2277 setbit(&sc->doorbells, DOORBELL_UDBWC); 2278 } else { 2279 device_printf(sc->dev, 2280 "couldn't enable write combining: %d\n", 2281 rc); 2282 } 2283 2284 mode = is_t5(sc) ?
V_STATMODE(0) : V_T6_STATMODE(0); 2285 t4_write_reg(sc, A_SGE_STAT_CFG, 2286 V_STATSOURCE_T5(7) | mode); 2287 } 2288 #endif 2289 } 2290 2291 return (0); 2292 } 2293 2294 struct memwin_init { 2295 uint32_t base; 2296 uint32_t aperture; 2297 }; 2298 2299 static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2300 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2301 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2302 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2303 }; 2304 2305 static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2306 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2307 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2308 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2309 }; 2310 2311 static void 2312 setup_memwin(struct adapter *sc) 2313 { 2314 const struct memwin_init *mw_init; 2315 struct memwin *mw; 2316 int i; 2317 uint32_t bar0; 2318 2319 if (is_t4(sc)) { 2320 /* 2321 * Read low 32b of bar0 indirectly via the hardware backdoor 2322 * mechanism. Works from within PCI passthrough environments 2323 * too, where rman_get_start() can return a different value. We 2324 * need to program the T4 memory window decoders with the actual 2325 * addresses that will be coming across the PCIe link. 2326 */ 2327 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2328 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2329 2330 mw_init = &t4_memwin[0]; 2331 } else { 2332 /* T5+ use the relative offset inside the PCIe BAR */ 2333 bar0 = 0; 2334 2335 mw_init = &t5_memwin[0]; 2336 } 2337 2338 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2339 rw_init(&mw->mw_lock, "memory window access"); 2340 mw->mw_base = mw_init->base; 2341 mw->mw_aperture = mw_init->aperture; 2342 mw->mw_curpos = 0; 2343 t4_write_reg(sc, 2344 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2345 (mw->mw_base + bar0) | V_BIR(0) | 2346 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2347 rw_wlock(&mw->mw_lock); 2348 position_memwin(sc, i, 0); 2349 rw_wunlock(&mw->mw_lock); 2350 } 2351 2352 /* flush */ 2353 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2354 } 2355 2356 /* 2357 * Positions the memory window at the given address in the card's address space. 2358 * There are some alignment requirements and the actual position may be at an 2359 * address prior to the requested address. mw->mw_curpos always has the actual 2360 * position of the window. 
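The caller must hold mw_lock as a writer. As a purely illustrative example: a request for address 0x12345 positions the window at 0x12340 on T4 (16B alignment) and at 0x12300 on T5+ (128B alignment), which is why readers compute register offsets as mw_base + addr - mw_curpos.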
2361 */ 2362 static void 2363 position_memwin(struct adapter *sc, int idx, uint32_t addr) 2364 { 2365 struct memwin *mw; 2366 uint32_t pf; 2367 uint32_t reg; 2368 2369 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2370 mw = &sc->memwin[idx]; 2371 rw_assert(&mw->mw_lock, RA_WLOCKED); 2372 2373 if (is_t4(sc)) { 2374 pf = 0; 2375 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2376 } else { 2377 pf = V_PFNUM(sc->pf); 2378 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2379 } 2380 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2381 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2382 t4_read_reg(sc, reg); /* flush */ 2383 } 2384 2385 static int 2386 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2387 int len, int rw) 2388 { 2389 struct memwin *mw; 2390 uint32_t mw_end, v; 2391 2392 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2393 2394 /* Memory can only be accessed in naturally aligned 4 byte units */ 2395 if (addr & 3 || len & 3 || len <= 0) 2396 return (EINVAL); 2397 2398 mw = &sc->memwin[idx]; 2399 while (len > 0) { 2400 rw_rlock(&mw->mw_lock); 2401 mw_end = mw->mw_curpos + mw->mw_aperture; 2402 if (addr >= mw_end || addr < mw->mw_curpos) { 2403 /* Will need to reposition the window */ 2404 if (!rw_try_upgrade(&mw->mw_lock)) { 2405 rw_runlock(&mw->mw_lock); 2406 rw_wlock(&mw->mw_lock); 2407 } 2408 rw_assert(&mw->mw_lock, RA_WLOCKED); 2409 position_memwin(sc, idx, addr); 2410 rw_downgrade(&mw->mw_lock); 2411 mw_end = mw->mw_curpos + mw->mw_aperture; 2412 } 2413 rw_assert(&mw->mw_lock, RA_RLOCKED); 2414 while (addr < mw_end && len > 0) { 2415 if (rw == 0) { 2416 v = t4_read_reg(sc, mw->mw_base + addr - 2417 mw->mw_curpos); 2418 *val++ = le32toh(v); 2419 } else { 2420 v = *val++; 2421 t4_write_reg(sc, mw->mw_base + addr - 2422 mw->mw_curpos, htole32(v)); 2423 } 2424 addr += 4; 2425 len -= 4; 2426 } 2427 rw_runlock(&mw->mw_lock); 2428 } 2429 2430 return (0); 2431 } 2432 2433 static inline int 2434 read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2435 int len) 2436 { 2437 2438 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2439 } 2440 2441 static inline int 2442 write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2443 const uint32_t *val, int len) 2444 { 2445 2446 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2447 } 2448 2449 static int 2450 t4_range_cmp(const void *a, const void *b) 2451 { 2452 return ((const struct t4_range *)a)->start - 2453 ((const struct t4_range *)b)->start; 2454 } 2455 2456 /* 2457 * Verify that the memory range specified by the addr/len pair is valid within 2458 * the card's address space. 
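Each enabled memory (EDC0, EDC1, MC0, MC1) is checked on its own first; if that fails, overlapping or adjacent ranges are sorted and merged and the check is repeated against the merged ranges.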
2459 */ 2460 static int 2461 validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2462 { 2463 struct t4_range mem_ranges[4], *r, *next; 2464 uint32_t em, addr_len; 2465 int i, n, remaining; 2466 2467 /* Memory can only be accessed in naturally aligned 4 byte units */ 2468 if (addr & 3 || len & 3 || len <= 0) 2469 return (EINVAL); 2470 2471 /* Enabled memories */ 2472 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2473 2474 r = &mem_ranges[0]; 2475 n = 0; 2476 bzero(r, sizeof(mem_ranges)); 2477 if (em & F_EDRAM0_ENABLE) { 2478 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2479 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2480 if (r->size > 0) { 2481 r->start = G_EDRAM0_BASE(addr_len) << 20; 2482 if (addr >= r->start && 2483 addr + len <= r->start + r->size) 2484 return (0); 2485 r++; 2486 n++; 2487 } 2488 } 2489 if (em & F_EDRAM1_ENABLE) { 2490 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2491 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2492 if (r->size > 0) { 2493 r->start = G_EDRAM1_BASE(addr_len) << 20; 2494 if (addr >= r->start && 2495 addr + len <= r->start + r->size) 2496 return (0); 2497 r++; 2498 n++; 2499 } 2500 } 2501 if (em & F_EXT_MEM_ENABLE) { 2502 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2503 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2504 if (r->size > 0) { 2505 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2506 if (addr >= r->start && 2507 addr + len <= r->start + r->size) 2508 return (0); 2509 r++; 2510 n++; 2511 } 2512 } 2513 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2514 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2515 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2516 if (r->size > 0) { 2517 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2518 if (addr >= r->start && 2519 addr + len <= r->start + r->size) 2520 return (0); 2521 r++; 2522 n++; 2523 } 2524 } 2525 MPASS(n <= nitems(mem_ranges)); 2526 2527 if (n > 1) { 2528 /* Sort and merge the ranges. */ 2529 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2530 2531 /* Start from index 0 and examine the next n - 1 entries. */ 2532 r = &mem_ranges[0]; 2533 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2534 2535 MPASS(r->size > 0); /* r is a valid entry. */ 2536 next = r + 1; 2537 MPASS(next->size > 0); /* and so is the next one. */ 2538 2539 while (r->start + r->size >= next->start) { 2540 /* Merge the next one into the current entry. */ 2541 r->size = max(r->start + r->size, 2542 next->start + next->size) - r->start; 2543 n--; /* One fewer entry in total. */ 2544 if (--remaining == 0) 2545 goto done; /* short circuit */ 2546 next++; 2547 } 2548 if (next != r + 1) { 2549 /* 2550 * Some entries were merged into r and next 2551 * points to the first valid entry that couldn't 2552 * be merged. 2553 */ 2554 MPASS(next->size > 0); /* must be valid */ 2555 memcpy(r + 1, next, remaining * sizeof(*r)); 2556 #ifdef INVARIANTS 2557 /* 2558 * This so that the foo->size assertion in the 2559 * next iteration of the loop do the right 2560 * thing for entries that were pulled up and are 2561 * no longer valid. 2562 */ 2563 MPASS(n < nitems(mem_ranges)); 2564 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2565 sizeof(struct t4_range)); 2566 #endif 2567 } 2568 } 2569 done: 2570 /* Done merging the ranges. 
*/ 2571 MPASS(n > 0); 2572 r = &mem_ranges[0]; 2573 for (i = 0; i < n; i++, r++) { 2574 if (addr >= r->start && 2575 addr + len <= r->start + r->size) 2576 return (0); 2577 } 2578 } 2579 2580 return (EFAULT); 2581 } 2582 2583 static int 2584 fwmtype_to_hwmtype(int mtype) 2585 { 2586 2587 switch (mtype) { 2588 case FW_MEMTYPE_EDC0: 2589 return (MEM_EDC0); 2590 case FW_MEMTYPE_EDC1: 2591 return (MEM_EDC1); 2592 case FW_MEMTYPE_EXTMEM: 2593 return (MEM_MC0); 2594 case FW_MEMTYPE_EXTMEM1: 2595 return (MEM_MC1); 2596 default: 2597 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2598 } 2599 } 2600 2601 /* 2602 * Verify that the memory range specified by the memtype/offset/len pair is 2603 * valid and lies entirely within the memtype specified. The global address of 2604 * the start of the range is returned in addr. 2605 */ 2606 static int 2607 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2608 uint32_t *addr) 2609 { 2610 uint32_t em, addr_len, maddr; 2611 2612 /* Memory can only be accessed in naturally aligned 4 byte units */ 2613 if (off & 3 || len & 3 || len == 0) 2614 return (EINVAL); 2615 2616 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2617 switch (fwmtype_to_hwmtype(mtype)) { 2618 case MEM_EDC0: 2619 if (!(em & F_EDRAM0_ENABLE)) 2620 return (EINVAL); 2621 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2622 maddr = G_EDRAM0_BASE(addr_len) << 20; 2623 break; 2624 case MEM_EDC1: 2625 if (!(em & F_EDRAM1_ENABLE)) 2626 return (EINVAL); 2627 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2628 maddr = G_EDRAM1_BASE(addr_len) << 20; 2629 break; 2630 case MEM_MC: 2631 if (!(em & F_EXT_MEM_ENABLE)) 2632 return (EINVAL); 2633 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2634 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2635 break; 2636 case MEM_MC1: 2637 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2638 return (EINVAL); 2639 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2640 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2641 break; 2642 default: 2643 return (EINVAL); 2644 } 2645 2646 *addr = maddr + off; /* global address */ 2647 return (validate_mem_range(sc, *addr, len)); 2648 } 2649 2650 static int 2651 fixup_devlog_params(struct adapter *sc) 2652 { 2653 struct devlog_params *dparams = &sc->params.devlog; 2654 int rc; 2655 2656 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2657 dparams->size, &dparams->addr); 2658 2659 return (rc); 2660 } 2661 2662 static int 2663 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2664 struct intrs_and_queues *iaq) 2665 { 2666 int rc, itype, navail, nrxq10g, nrxq1g, n; 2667 int nofldrxq10g = 0, nofldrxq1g = 0; 2668 2669 bzero(iaq, sizeof(*iaq)); 2670 2671 iaq->ntxq10g = t4_ntxq10g; 2672 iaq->ntxq1g = t4_ntxq1g; 2673 iaq->ntxq_vi = t4_ntxq_vi; 2674 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2675 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2676 iaq->nrxq_vi = t4_nrxq_vi; 2677 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2678 #ifdef TCP_OFFLOAD 2679 if (is_offload(sc)) { 2680 iaq->nofldtxq10g = t4_nofldtxq10g; 2681 iaq->nofldtxq1g = t4_nofldtxq1g; 2682 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2683 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2684 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2685 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2686 } 2687 #endif 2688 #ifdef DEV_NETMAP 2689 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2690 iaq->nnmrxq_vi = t4_nnmrxq_vi; 2691 #endif 2692 2693 for (itype = INTR_MSIX; itype; itype >>= 1) { 2694 2695 if ((itype & t4_intr_types) == 0) 2696 continue; /* not allowed */ 2697 2698 if (itype == 
INTR_MSIX) 2699 navail = pci_msix_count(sc->dev); 2700 else if (itype == INTR_MSI) 2701 navail = pci_msi_count(sc->dev); 2702 else 2703 navail = 1; 2704 restart: 2705 if (navail == 0) 2706 continue; 2707 2708 iaq->intr_type = itype; 2709 iaq->intr_flags_10g = 0; 2710 iaq->intr_flags_1g = 0; 2711 2712 /* 2713 * Best option: an interrupt vector for errors, one for the 2714 * firmware event queue, and one for every rxq (NIC and TOE) of 2715 * every VI. The VIs that support netmap use the same 2716 * interrupts for the NIC rx queues and the netmap rx queues 2717 * because only one set of queues is active at a time. 2718 */ 2719 iaq->nirq = T4_EXTRA_INTR; 2720 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2721 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2722 iaq->nirq += (n10g + n1g) * (num_vis - 1) * 2723 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */ 2724 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi; 2725 if (iaq->nirq <= navail && 2726 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2727 iaq->intr_flags_10g = INTR_ALL; 2728 iaq->intr_flags_1g = INTR_ALL; 2729 goto allocate; 2730 } 2731 2732 /* Disable the VIs (and netmap) if there aren't enough intrs */ 2733 if (num_vis > 1) { 2734 device_printf(sc->dev, "virtual interfaces disabled " 2735 "because num_vis=%u with current settings " 2736 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, " 2737 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2738 "nnmrxq_vi=%u) would need %u interrupts but " 2739 "only %u are available.\n", num_vis, nrxq10g, 2740 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi, 2741 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq, 2742 navail); 2743 num_vis = 1; 2744 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2745 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2746 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2747 goto restart; 2748 } 2749 2750 /* 2751 * Second best option: a vector for errors, one for the firmware 2752 * event queue, and vectors for either all the NIC rx queues or 2753 * all the TOE rx queues. The queues that don't get vectors 2754 * will forward their interrupts to those that do. 2755 */ 2756 iaq->nirq = T4_EXTRA_INTR; 2757 if (nrxq10g >= nofldrxq10g) { 2758 iaq->intr_flags_10g = INTR_RXQ; 2759 iaq->nirq += n10g * nrxq10g; 2760 } else { 2761 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2762 iaq->nirq += n10g * nofldrxq10g; 2763 } 2764 if (nrxq1g >= nofldrxq1g) { 2765 iaq->intr_flags_1g = INTR_RXQ; 2766 iaq->nirq += n1g * nrxq1g; 2767 } else { 2768 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2769 iaq->nirq += n1g * nofldrxq1g; 2770 } 2771 if (iaq->nirq <= navail && 2772 (itype != INTR_MSI || powerof2(iaq->nirq))) 2773 goto allocate; 2774 2775 /* 2776 * Next best option: an interrupt vector for errors, one for the 2777 * firmware event queue, and at least one per main-VI. At this 2778 * point we know we'll have to downsize nrxq and/or nofldrxq to 2779 * fit what's available to us. 2780 */ 2781 iaq->nirq = T4_EXTRA_INTR; 2782 iaq->nirq += n10g + n1g; 2783 if (iaq->nirq <= navail) { 2784 int leftover = navail - iaq->nirq; 2785 2786 if (n10g > 0) { 2787 int target = max(nrxq10g, nofldrxq10g); 2788 2789 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2790 INTR_RXQ : INTR_OFLD_RXQ; 2791 2792 n = 1; 2793 while (n < target && leftover >= n10g) { 2794 leftover -= n10g; 2795 iaq->nirq += n10g; 2796 n++; 2797 } 2798 iaq->nrxq10g = min(n, nrxq10g); 2799 #ifdef TCP_OFFLOAD 2800 iaq->nofldrxq10g = min(n, nofldrxq10g); 2801 #endif 2802 } 2803 2804 if (n1g > 0) { 2805 int target = max(nrxq1g, nofldrxq1g); 2806 2807 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2808 INTR_RXQ : INTR_OFLD_RXQ; 2809 2810 n = 1; 2811 while (n < target && leftover >= n1g) { 2812 leftover -= n1g; 2813 iaq->nirq += n1g; 2814 n++; 2815 } 2816 iaq->nrxq1g = min(n, nrxq1g); 2817 #ifdef TCP_OFFLOAD 2818 iaq->nofldrxq1g = min(n, nofldrxq1g); 2819 #endif 2820 } 2821 2822 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2823 goto allocate; 2824 } 2825 2826 /* 2827 * Least desirable option: one interrupt vector for everything. 2828 */ 2829 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2830 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2831 #ifdef TCP_OFFLOAD 2832 if (is_offload(sc)) 2833 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2834 #endif 2835 allocate: 2836 navail = iaq->nirq; 2837 rc = 0; 2838 if (itype == INTR_MSIX) 2839 rc = pci_alloc_msix(sc->dev, &navail); 2840 else if (itype == INTR_MSI) 2841 rc = pci_alloc_msi(sc->dev, &navail); 2842 2843 if (rc == 0) { 2844 if (navail == iaq->nirq) 2845 return (0); 2846 2847 /* 2848 * Didn't get the number requested. Use whatever number 2849 * the kernel is willing to allocate (it's in navail). 2850 */ 2851 device_printf(sc->dev, "fewer vectors than requested, " 2852 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2853 itype, iaq->nirq, navail); 2854 pci_release_msi(sc->dev); 2855 goto restart; 2856 } 2857 2858 device_printf(sc->dev, 2859 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2860 itype, rc, iaq->nirq, navail); 2861 } 2862 2863 device_printf(sc->dev, 2864 "failed to find a usable interrupt type. " 2865 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2866 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2867 2868 return (ENXIO); 2869 } 2870 2871 #define FW_VERSION(chip) ( \ 2872 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2873 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2874 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2875 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2876 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2877 2878 struct fw_info { 2879 uint8_t chip; 2880 char *kld_name; 2881 char *fw_mod_name; 2882 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2883 } fw_info[] = { 2884 { 2885 .chip = CHELSIO_T4, 2886 .kld_name = "t4fw_cfg", 2887 .fw_mod_name = "t4fw", 2888 .fw_hdr = { 2889 .chip = FW_HDR_CHIP_T4, 2890 .fw_ver = htobe32_const(FW_VERSION(T4)), 2891 .intfver_nic = FW_INTFVER(T4, NIC), 2892 .intfver_vnic = FW_INTFVER(T4, VNIC), 2893 .intfver_ofld = FW_INTFVER(T4, OFLD), 2894 .intfver_ri = FW_INTFVER(T4, RI), 2895 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2896 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2897 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2898 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2899 }, 2900 }, { 2901 .chip = CHELSIO_T5, 2902 .kld_name = "t5fw_cfg", 2903 .fw_mod_name = "t5fw", 2904 .fw_hdr = { 2905 .chip = FW_HDR_CHIP_T5, 2906 .fw_ver = htobe32_const(FW_VERSION(T5)), 2907 .intfver_nic = FW_INTFVER(T5, NIC), 2908 .intfver_vnic = FW_INTFVER(T5, VNIC), 2909 .intfver_ofld = FW_INTFVER(T5, OFLD), 2910 .intfver_ri = FW_INTFVER(T5, RI), 2911 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2912 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2913 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2914 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2915 }, 2916 }, { 2917 .chip = CHELSIO_T6, 2918 .kld_name = "t6fw_cfg", 2919 .fw_mod_name = "t6fw", 2920 .fw_hdr = { 2921 .chip = FW_HDR_CHIP_T6, 2922 .fw_ver = htobe32_const(FW_VERSION(T6)), 2923 .intfver_nic = FW_INTFVER(T6, NIC), 2924 .intfver_vnic = FW_INTFVER(T6, VNIC), 2925 .intfver_ofld = FW_INTFVER(T6, OFLD), 
2926 .intfver_ri = FW_INTFVER(T6, RI), 2927 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 2928 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2929 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 2930 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2931 }, 2932 } 2933 }; 2934 2935 static struct fw_info * 2936 find_fw_info(int chip) 2937 { 2938 int i; 2939 2940 for (i = 0; i < nitems(fw_info); i++) { 2941 if (fw_info[i].chip == chip) 2942 return (&fw_info[i]); 2943 } 2944 return (NULL); 2945 } 2946 2947 /* 2948 * Is the given firmware API compatible with the one the driver was compiled 2949 * with? 2950 */ 2951 static int 2952 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2953 { 2954 2955 /* short circuit if it's the exact same firmware version */ 2956 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2957 return (1); 2958 2959 /* 2960 * XXX: Is this too conservative? Perhaps I should limit this to the 2961 * features that are supported in the driver. 2962 */ 2963 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2964 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2965 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2966 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2967 return (1); 2968 #undef SAME_INTF 2969 2970 return (0); 2971 } 2972 2973 /* 2974 * The firmware in the KLD is usable, but should it be installed? This routine 2975 * explains itself in detail if it indicates the KLD firmware should be 2976 * installed. 2977 */ 2978 static int 2979 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2980 { 2981 const char *reason; 2982 2983 if (!card_fw_usable) { 2984 reason = "incompatible or unusable"; 2985 goto install; 2986 } 2987 2988 if (k > c) { 2989 reason = "older than the version bundled with this driver"; 2990 goto install; 2991 } 2992 2993 if (t4_fw_install == 2 && k != c) { 2994 reason = "different than the version bundled with this driver"; 2995 goto install; 2996 } 2997 2998 return (0); 2999 3000 install: 3001 if (t4_fw_install == 0) { 3002 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 3003 "but the driver is prohibited from installing a different " 3004 "firmware on the card.\n", 3005 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3006 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 3007 3008 return (0); 3009 } 3010 3011 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 3012 "installing firmware %u.%u.%u.%u on card.\n", 3013 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3014 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 3015 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 3016 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 3017 3018 return (1); 3019 } 3020 3021 /* 3022 * Establish contact with the firmware and determine if we are the master driver 3023 * or not, and whether we are responsible for chip initialization. 
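The firmware bundled in the KLD is written to the card only when the card's firmware is missing or unusable, older than the bundled version, or (with the t4_fw_install knob set to 2) merely different, and only while the chip is still uninitialized.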
3024 */ 3025 static int 3026 prep_firmware(struct adapter *sc) 3027 { 3028 const struct firmware *fw = NULL, *default_cfg; 3029 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 3030 enum dev_state state; 3031 struct fw_info *fw_info; 3032 struct fw_hdr *card_fw; /* fw on the card */ 3033 const struct fw_hdr *kld_fw; /* fw in the KLD */ 3034 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 3035 against */ 3036 3037 /* This is the firmware whose headers the driver was compiled against */ 3038 fw_info = find_fw_info(chip_id(sc)); 3039 if (fw_info == NULL) { 3040 device_printf(sc->dev, 3041 "unable to look up firmware information for chip %d.\n", 3042 chip_id(sc)); 3043 return (EINVAL); 3044 } 3045 drv_fw = &fw_info->fw_hdr; 3046 3047 /* 3048 * The firmware KLD contains many modules. The KLD name is also the 3049 * name of the module that contains the default config file. 3050 */ 3051 default_cfg = firmware_get(fw_info->kld_name); 3052 3053 /* This is the firmware in the KLD */ 3054 fw = firmware_get(fw_info->fw_mod_name); 3055 if (fw != NULL) { 3056 kld_fw = (const void *)fw->data; 3057 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 3058 } else { 3059 kld_fw = NULL; 3060 kld_fw_usable = 0; 3061 } 3062 3063 /* Read the header of the firmware on the card */ 3064 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 3065 rc = -t4_read_flash(sc, FLASH_FW_START, 3066 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 3067 if (rc == 0) { 3068 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 3069 if (card_fw->fw_ver == be32toh(0xffffffff)) { 3070 uint32_t d = be32toh(kld_fw->fw_ver); 3071 3072 if (!kld_fw_usable) { 3073 device_printf(sc->dev, 3074 "no firmware on the card and no usable " 3075 "firmware bundled with the driver.\n"); 3076 rc = EIO; 3077 goto done; 3078 } else if (t4_fw_install == 0) { 3079 device_printf(sc->dev, 3080 "no firmware on the card and the driver " 3081 "is prohibited from installing new " 3082 "firmware.\n"); 3083 rc = EIO; 3084 goto done; 3085 } 3086 3087 device_printf(sc->dev, "no firmware on the card, " 3088 "installing firmware %d.%d.%d.%d\n", 3089 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3090 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); 3091 rc = t4_fw_forceinstall(sc, fw->data, fw->datasize); 3092 if (rc < 0) { 3093 rc = -rc; 3094 device_printf(sc->dev, 3095 "firmware install failed: %d.\n", rc); 3096 goto done; 3097 } 3098 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 3099 card_fw_usable = 1; 3100 need_fw_reset = 0; 3101 } 3102 } else { 3103 device_printf(sc->dev, 3104 "Unable to read card's firmware header: %d\n", rc); 3105 card_fw_usable = 0; 3106 } 3107 3108 /* Contact firmware. */ 3109 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 3110 if (rc < 0 || state == DEV_STATE_ERR) { 3111 rc = -rc; 3112 device_printf(sc->dev, 3113 "failed to connect to the firmware: %d, %d.\n", rc, state); 3114 goto done; 3115 } 3116 pf = rc; 3117 if (pf == sc->mbox) 3118 sc->flags |= MASTER_PF; 3119 else if (state == DEV_STATE_UNINIT) { 3120 /* 3121 * We didn't get to be the master so we definitely won't be 3122 * configuring the chip. It's a bug if someone else hasn't 3123 * configured it already. 
3124 */ 3125 device_printf(sc->dev, "couldn't be master(%d), " 3126 "device not already initialized either(%d).\n", rc, state); 3127 rc = EPROTO; 3128 goto done; 3129 } 3130 3131 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 3132 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 3133 /* 3134 * Common case: the firmware on the card is an exact match and 3135 * the KLD is an exact match too, or the KLD is 3136 * absent/incompatible. Note that t4_fw_install = 2 is ignored 3137 * here -- use cxgbetool loadfw if you want to reinstall the 3138 * same firmware as the one on the card. 3139 */ 3140 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 3141 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 3142 be32toh(card_fw->fw_ver))) { 3143 3144 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 3145 if (rc != 0) { 3146 device_printf(sc->dev, 3147 "failed to install firmware: %d\n", rc); 3148 goto done; 3149 } 3150 3151 /* Installed successfully, update the cached header too. */ 3152 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 3153 card_fw_usable = 1; 3154 need_fw_reset = 0; /* already reset as part of load_fw */ 3155 } 3156 3157 if (!card_fw_usable) { 3158 uint32_t d, c, k; 3159 3160 d = ntohl(drv_fw->fw_ver); 3161 c = ntohl(card_fw->fw_ver); 3162 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 3163 3164 device_printf(sc->dev, "Cannot find a usable firmware: " 3165 "fw_install %d, chip state %d, " 3166 "driver compiled with %d.%d.%d.%d, " 3167 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 3168 t4_fw_install, state, 3169 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3170 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 3171 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3172 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 3173 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 3174 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 3175 rc = EINVAL; 3176 goto done; 3177 } 3178 3179 /* Reset device */ 3180 if (need_fw_reset && 3181 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 3182 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 3183 if (rc != ETIMEDOUT && rc != EIO) 3184 t4_fw_bye(sc, sc->mbox); 3185 goto done; 3186 } 3187 sc->flags |= FW_OK; 3188 3189 rc = get_params__pre_init(sc); 3190 if (rc != 0) 3191 goto done; /* error message displayed already */ 3192 3193 /* Partition adapter resources as specified in the config file. 
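This is done only when this instance is the master PF and the chip is still uninitialized; otherwise the config file name is simply recorded as pf<N> and the checksum left at 0.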
*/ 3194 if (state == DEV_STATE_UNINIT) { 3195 3196 KASSERT(sc->flags & MASTER_PF, 3197 ("%s: trying to change chip settings when not master.", 3198 __func__)); 3199 3200 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 3201 if (rc != 0) 3202 goto done; /* error message displayed already */ 3203 3204 t4_tweak_chip_settings(sc); 3205 3206 /* get basic stuff going */ 3207 rc = -t4_fw_initialize(sc, sc->mbox); 3208 if (rc != 0) { 3209 device_printf(sc->dev, "fw init failed: %d.\n", rc); 3210 goto done; 3211 } 3212 } else { 3213 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 3214 sc->cfcsum = 0; 3215 } 3216 3217 done: 3218 free(card_fw, M_CXGBE); 3219 if (fw != NULL) 3220 firmware_put(fw, FIRMWARE_UNLOAD); 3221 if (default_cfg != NULL) 3222 firmware_put(default_cfg, FIRMWARE_UNLOAD); 3223 3224 return (rc); 3225 } 3226 3227 #define FW_PARAM_DEV(param) \ 3228 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3229 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3230 #define FW_PARAM_PFVF(param) \ 3231 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3232 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 3233 3234 /* 3235 * Partition chip resources for use between various PFs, VFs, etc. 3236 */ 3237 static int 3238 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 3239 const char *name_prefix) 3240 { 3241 const struct firmware *cfg = NULL; 3242 int rc = 0; 3243 struct fw_caps_config_cmd caps; 3244 uint32_t mtype, moff, finicsum, cfcsum; 3245 3246 /* 3247 * Figure out what configuration file to use. Pick the default config 3248 * file for the card if the user hasn't specified one explicitly. 3249 */ 3250 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 3251 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 3252 /* Card specific overrides go here. */ 3253 if (pci_get_device(sc->dev) == 0x440a) 3254 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 3255 if (is_fpga(sc)) 3256 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 3257 } 3258 3259 /* 3260 * We need to load another module if the profile is anything except 3261 * "default" or "flash". 
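For example, a hypothetical profile named "foo" on a T4 card maps to a module called "t4fw_cfg_foo" via the "%s_%s" format below, with fallback to the default or flash config if that module cannot be loaded.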
*/ 3263 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 3264 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3265 char s[32]; 3266 3267 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 3268 cfg = firmware_get(s); 3269 if (cfg == NULL) { 3270 if (default_cfg != NULL) { 3271 device_printf(sc->dev, 3272 "unable to load module \"%s\" for " 3273 "configuration profile \"%s\", will use " 3274 "the default config file instead.\n", 3275 s, sc->cfg_file); 3276 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3277 "%s", DEFAULT_CF); 3278 } else { 3279 device_printf(sc->dev, 3280 "unable to load module \"%s\" for " 3281 "configuration profile \"%s\", will use " 3282 "the config file on the card's flash " 3283 "instead.\n", s, sc->cfg_file); 3284 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3285 "%s", FLASH_CF); 3286 } 3287 } 3288 } 3289 3290 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 3291 default_cfg == NULL) { 3292 device_printf(sc->dev, 3293 "default config file not available, will use the config " 3294 "file on the card's flash instead.\n"); 3295 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 3296 } 3297 3298 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3299 u_int cflen; 3300 const uint32_t *cfdata; 3301 uint32_t param, val, addr; 3302 3303 KASSERT(cfg != NULL || default_cfg != NULL, 3304 ("%s: no config to upload", __func__)); 3305 3306 /* 3307 * Ask the firmware where it wants us to upload the config file. 3308 */ 3309 param = FW_PARAM_DEV(CF); 3310 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3311 if (rc != 0) { 3312 /* No support for config file? Shouldn't happen. */ 3313 device_printf(sc->dev, 3314 "failed to query config file location: %d.\n", rc); 3315 goto done; 3316 } 3317 mtype = G_FW_PARAMS_PARAM_Y(val); 3318 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3319 3320 /* 3321 * XXX: sheer laziness. We deliberately added 4 bytes of 3322 * useless stuffing/comments at the end of the config file so 3323 * it's ok to simply throw away the last remaining bytes when 3324 * the config file is not an exact multiple of 4. This also 3325 * helps with the validate_mt_off_len check. 3326 */ 3327 if (cfg != NULL) { 3328 cflen = cfg->datasize & ~3; 3329 cfdata = cfg->data; 3330 } else { 3331 cflen = default_cfg->datasize & ~3; 3332 cfdata = default_cfg->data; 3333 } 3334 3335 if (cflen > FLASH_CFG_MAX_SIZE) { 3336 device_printf(sc->dev, 3337 "config file too long (%d, max allowed is %d). " 3338 "Will try to use the config on the card, if any.\n", 3339 cflen, FLASH_CFG_MAX_SIZE); 3340 goto use_config_on_flash; 3341 } 3342 3343 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3344 if (rc != 0) { 3345 device_printf(sc->dev, 3346 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 3347 "Will try to use the config on the card, if any.\n", 3348 __func__, mtype, moff, cflen, rc); 3349 goto use_config_on_flash; 3350 } 3351 write_via_memwin(sc, 2, addr, cfdata, cflen); 3352 } else { 3353 use_config_on_flash: 3354 mtype = FW_MEMTYPE_FLASH; 3355 moff = t4_flash_cfg_addr(sc); 3356 } 3357 3358 bzero(&caps, sizeof(caps)); 3359 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3360 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3361 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3362 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3363 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3364 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3365 if (rc != 0) { 3366 device_printf(sc->dev, 3367 "failed to pre-process config file: %d " 3368 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3369 goto done; 3370 } 3371 3372 finicsum = be32toh(caps.finicsum); 3373 cfcsum = be32toh(caps.cfcsum); 3374 if (finicsum != cfcsum) { 3375 device_printf(sc->dev, 3376 "WARNING: config file checksum mismatch: %08x %08x\n", 3377 finicsum, cfcsum); 3378 } 3379 sc->cfcsum = cfcsum; 3380 3381 #define LIMIT_CAPS(x) do { \ 3382 caps.x &= htobe16(t4_##x##_allowed); \ 3383 } while (0) 3384 3385 /* 3386 * Let the firmware know what features will (not) be used so it can tune 3387 * things accordingly. 3388 */ 3389 LIMIT_CAPS(nbmcaps); 3390 LIMIT_CAPS(linkcaps); 3391 LIMIT_CAPS(switchcaps); 3392 LIMIT_CAPS(niccaps); 3393 LIMIT_CAPS(toecaps); 3394 LIMIT_CAPS(rdmacaps); 3395 LIMIT_CAPS(cryptocaps); 3396 LIMIT_CAPS(iscsicaps); 3397 LIMIT_CAPS(fcoecaps); 3398 #undef LIMIT_CAPS 3399 3400 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3401 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3402 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3403 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3404 if (rc != 0) { 3405 device_printf(sc->dev, 3406 "failed to process config file: %d.\n", rc); 3407 } 3408 done: 3409 if (cfg != NULL) 3410 firmware_put(cfg, FIRMWARE_UNLOAD); 3411 return (rc); 3412 } 3413 3414 /* 3415 * Retrieve parameters that are needed (or nice to have) very early. 
3416 */ 3417 static int 3418 get_params__pre_init(struct adapter *sc) 3419 { 3420 int rc; 3421 uint32_t param[2], val[2]; 3422 3423 t4_get_version_info(sc); 3424 3425 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 3426 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 3427 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 3428 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 3429 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 3430 3431 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 3432 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 3433 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 3434 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 3435 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 3436 3437 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 3438 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 3439 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 3440 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 3441 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 3442 3443 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 3444 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 3445 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 3446 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 3447 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 3448 3449 param[0] = FW_PARAM_DEV(PORTVEC); 3450 param[1] = FW_PARAM_DEV(CCLK); 3451 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3452 if (rc != 0) { 3453 device_printf(sc->dev, 3454 "failed to query parameters (pre_init): %d.\n", rc); 3455 return (rc); 3456 } 3457 3458 sc->params.portvec = val[0]; 3459 sc->params.nports = bitcount32(val[0]); 3460 sc->params.vpd.cclk = val[1]; 3461 3462 /* Read device log parameters. */ 3463 rc = -t4_init_devlog_params(sc, 1); 3464 if (rc == 0) 3465 fixup_devlog_params(sc); 3466 else { 3467 device_printf(sc->dev, 3468 "failed to get devlog parameters: %d.\n", rc); 3469 rc = 0; /* devlog isn't critical for device operation */ 3470 } 3471 3472 return (rc); 3473 } 3474 3475 /* 3476 * Retrieve various parameters that are of interest to the driver. The device 3477 * has been initialized by the firmware at this point. 
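The capability words negotiated with the firmware are cached in the softc here and gate the ETHOFLD, TOE, RDMA, and iSCSI resource-range queries that follow.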
3478 */ 3479 static int 3480 get_params__post_init(struct adapter *sc) 3481 { 3482 int rc; 3483 uint32_t param[7], val[7]; 3484 struct fw_caps_config_cmd caps; 3485 3486 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3487 param[1] = FW_PARAM_PFVF(EQ_START); 3488 param[2] = FW_PARAM_PFVF(FILTER_START); 3489 param[3] = FW_PARAM_PFVF(FILTER_END); 3490 param[4] = FW_PARAM_PFVF(L2T_START); 3491 param[5] = FW_PARAM_PFVF(L2T_END); 3492 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3493 if (rc != 0) { 3494 device_printf(sc->dev, 3495 "failed to query parameters (post_init): %d.\n", rc); 3496 return (rc); 3497 } 3498 3499 sc->sge.iq_start = val[0]; 3500 sc->sge.eq_start = val[1]; 3501 sc->tids.ftid_base = val[2]; 3502 sc->tids.nftids = val[3] - val[2] + 1; 3503 sc->params.ftid_min = val[2]; 3504 sc->params.ftid_max = val[3]; 3505 sc->vres.l2t.start = val[4]; 3506 sc->vres.l2t.size = val[5] - val[4] + 1; 3507 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3508 ("%s: L2 table size (%u) larger than expected (%u)", 3509 __func__, sc->vres.l2t.size, L2T_SIZE)); 3510 3511 /* get capabilites */ 3512 bzero(&caps, sizeof(caps)); 3513 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3514 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3515 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3516 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3517 if (rc != 0) { 3518 device_printf(sc->dev, 3519 "failed to get card capabilities: %d.\n", rc); 3520 return (rc); 3521 } 3522 3523 #define READ_CAPS(x) do { \ 3524 sc->x = htobe16(caps.x); \ 3525 } while (0) 3526 READ_CAPS(nbmcaps); 3527 READ_CAPS(linkcaps); 3528 READ_CAPS(switchcaps); 3529 READ_CAPS(niccaps); 3530 READ_CAPS(toecaps); 3531 READ_CAPS(rdmacaps); 3532 READ_CAPS(cryptocaps); 3533 READ_CAPS(iscsicaps); 3534 READ_CAPS(fcoecaps); 3535 3536 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3537 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3538 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3539 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3540 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3541 if (rc != 0) { 3542 device_printf(sc->dev, 3543 "failed to query NIC parameters: %d.\n", rc); 3544 return (rc); 3545 } 3546 sc->tids.etid_base = val[0]; 3547 sc->params.etid_min = val[0]; 3548 sc->tids.netids = val[1] - val[0] + 1; 3549 sc->params.netids = sc->tids.netids; 3550 sc->params.eo_wr_cred = val[2]; 3551 sc->params.ethoffload = 1; 3552 } 3553 3554 if (sc->toecaps) { 3555 /* query offload-related parameters */ 3556 param[0] = FW_PARAM_DEV(NTID); 3557 param[1] = FW_PARAM_PFVF(SERVER_START); 3558 param[2] = FW_PARAM_PFVF(SERVER_END); 3559 param[3] = FW_PARAM_PFVF(TDDP_START); 3560 param[4] = FW_PARAM_PFVF(TDDP_END); 3561 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3562 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3563 if (rc != 0) { 3564 device_printf(sc->dev, 3565 "failed to query TOE parameters: %d.\n", rc); 3566 return (rc); 3567 } 3568 sc->tids.ntids = val[0]; 3569 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3570 sc->tids.stid_base = val[1]; 3571 sc->tids.nstids = val[2] - val[1] + 1; 3572 sc->vres.ddp.start = val[3]; 3573 sc->vres.ddp.size = val[4] - val[3] + 1; 3574 sc->params.ofldq_wr_cred = val[5]; 3575 sc->params.offload = 1; 3576 } 3577 if (sc->rdmacaps) { 3578 param[0] = FW_PARAM_PFVF(STAG_START); 3579 param[1] = FW_PARAM_PFVF(STAG_END); 3580 param[2] = FW_PARAM_PFVF(RQ_START); 3581 param[3] = FW_PARAM_PFVF(RQ_END); 3582 param[4] = FW_PARAM_PFVF(PBL_START); 3583 param[5] = FW_PARAM_PFVF(PBL_END); 3584 rc = 
-t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3585 if (rc != 0) { 3586 device_printf(sc->dev, 3587 "failed to query RDMA parameters(1): %d.\n", rc); 3588 return (rc); 3589 } 3590 sc->vres.stag.start = val[0]; 3591 sc->vres.stag.size = val[1] - val[0] + 1; 3592 sc->vres.rq.start = val[2]; 3593 sc->vres.rq.size = val[3] - val[2] + 1; 3594 sc->vres.pbl.start = val[4]; 3595 sc->vres.pbl.size = val[5] - val[4] + 1; 3596 3597 param[0] = FW_PARAM_PFVF(SQRQ_START); 3598 param[1] = FW_PARAM_PFVF(SQRQ_END); 3599 param[2] = FW_PARAM_PFVF(CQ_START); 3600 param[3] = FW_PARAM_PFVF(CQ_END); 3601 param[4] = FW_PARAM_PFVF(OCQ_START); 3602 param[5] = FW_PARAM_PFVF(OCQ_END); 3603 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3604 if (rc != 0) { 3605 device_printf(sc->dev, 3606 "failed to query RDMA parameters(2): %d.\n", rc); 3607 return (rc); 3608 } 3609 sc->vres.qp.start = val[0]; 3610 sc->vres.qp.size = val[1] - val[0] + 1; 3611 sc->vres.cq.start = val[2]; 3612 sc->vres.cq.size = val[3] - val[2] + 1; 3613 sc->vres.ocq.start = val[4]; 3614 sc->vres.ocq.size = val[5] - val[4] + 1; 3615 3616 param[0] = FW_PARAM_PFVF(SRQ_START); 3617 param[1] = FW_PARAM_PFVF(SRQ_END); 3618 param[2] = FW_PARAM_DEV(MAXORDIRD_QP); 3619 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); 3620 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); 3621 if (rc != 0) { 3622 device_printf(sc->dev, 3623 "failed to query RDMA parameters(3): %d.\n", rc); 3624 return (rc); 3625 } 3626 sc->vres.srq.start = val[0]; 3627 sc->vres.srq.size = val[1] - val[0] + 1; 3628 sc->params.max_ordird_qp = val[2]; 3629 sc->params.max_ird_adapter = val[3]; 3630 } 3631 if (sc->iscsicaps) { 3632 param[0] = FW_PARAM_PFVF(ISCSI_START); 3633 param[1] = FW_PARAM_PFVF(ISCSI_END); 3634 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3635 if (rc != 0) { 3636 device_printf(sc->dev, 3637 "failed to query iSCSI parameters: %d.\n", rc); 3638 return (rc); 3639 } 3640 sc->vres.iscsi.start = val[0]; 3641 sc->vres.iscsi.size = val[1] - val[0] + 1; 3642 } 3643 3644 t4_init_sge_params(sc); 3645 3646 /* 3647 * We've got the params we wanted to query via the firmware. Now grab 3648 * some others directly from the chip. 3649 */ 3650 rc = t4_read_chip_settings(sc); 3651 3652 return (rc); 3653 } 3654 3655 static int 3656 set_params__post_init(struct adapter *sc) 3657 { 3658 uint32_t param, val; 3659 #ifdef TCP_OFFLOAD 3660 int i, v, shift; 3661 #endif 3662 3663 /* ask for encapsulated CPLs */ 3664 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3665 val = 1; 3666 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3667 3668 #ifdef TCP_OFFLOAD 3669 /* 3670 * Override the TOE timers with user provided tunables. This is not the 3671 * recommended way to change the timers (the firmware config file is) so 3672 * these tunables are not documented. 3673 * 3674 * All the timer tunables are in microseconds.
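For example, t4_toe_keepalive_idle is converted with us_to_tcp_ticks(), masked to M_KEEPALIVEIDLE, and written into A_TP_KEEP_IDLE below; the other timers follow the same pattern. (These are normally set as loader tunables; the exact knob names are not shown here.)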
3675 */ 3676 if (t4_toe_keepalive_idle != 0) { 3677 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle); 3678 v &= M_KEEPALIVEIDLE; 3679 t4_set_reg_field(sc, A_TP_KEEP_IDLE, 3680 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v)); 3681 } 3682 if (t4_toe_keepalive_interval != 0) { 3683 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval); 3684 v &= M_KEEPALIVEINTVL; 3685 t4_set_reg_field(sc, A_TP_KEEP_INTVL, 3686 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v)); 3687 } 3688 if (t4_toe_keepalive_count != 0) { 3689 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2; 3690 t4_set_reg_field(sc, A_TP_SHIFT_CNT, 3691 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) | 3692 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2), 3693 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v)); 3694 } 3695 if (t4_toe_rexmt_min != 0) { 3696 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min); 3697 v &= M_RXTMIN; 3698 t4_set_reg_field(sc, A_TP_RXT_MIN, 3699 V_RXTMIN(M_RXTMIN), V_RXTMIN(v)); 3700 } 3701 if (t4_toe_rexmt_max != 0) { 3702 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max); 3703 v &= M_RXTMAX; 3704 t4_set_reg_field(sc, A_TP_RXT_MAX, 3705 V_RXTMAX(M_RXTMAX), V_RXTMAX(v)); 3706 } 3707 if (t4_toe_rexmt_count != 0) { 3708 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2; 3709 t4_set_reg_field(sc, A_TP_SHIFT_CNT, 3710 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) | 3711 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2), 3712 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v)); 3713 } 3714 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) { 3715 if (t4_toe_rexmt_backoff[i] != -1) { 3716 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0; 3717 shift = (i & 3) << 3; 3718 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3), 3719 M_TIMERBACKOFFINDEX0 << shift, v << shift); 3720 } 3721 } 3722 #endif 3723 return (0); 3724 } 3725 3726 #undef FW_PARAM_PFVF 3727 #undef FW_PARAM_DEV 3728 3729 static void 3730 t4_set_desc(struct adapter *sc) 3731 { 3732 char buf[128]; 3733 struct adapter_params *p = &sc->params; 3734 3735 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); 3736 3737 device_set_desc_copy(sc->dev, buf); 3738 } 3739 3740 static void 3741 build_medialist(struct port_info *pi, struct ifmedia *media) 3742 { 3743 int m; 3744 3745 PORT_LOCK_ASSERT_OWNED(pi); 3746 3747 ifmedia_removeall(media); 3748 3749 /* 3750 * XXX: Would it be better to ifmedia_add all 4 combinations of pause 3751 * settings for every speed instead of just txpause|rxpause? ifconfig 3752 * media display looks much better if autoselect is the only case where 3753 * ifm_current is different from ifm_active. If the user picks anything 3754 * except txpause|rxpause the display is ugly. 
3755 */ 3756 m = IFM_ETHER | IFM_FDX | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3757 3758 switch(pi->port_type) { 3759 case FW_PORT_TYPE_BT_XFI: 3760 case FW_PORT_TYPE_BT_XAUI: 3761 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3762 /* fall through */ 3763 3764 case FW_PORT_TYPE_BT_SGMII: 3765 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3766 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3767 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3768 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3769 break; 3770 3771 case FW_PORT_TYPE_CX4: 3772 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3773 ifmedia_set(media, m | IFM_10G_CX4); 3774 break; 3775 3776 case FW_PORT_TYPE_QSFP_10G: 3777 case FW_PORT_TYPE_SFP: 3778 case FW_PORT_TYPE_FIBER_XFI: 3779 case FW_PORT_TYPE_FIBER_XAUI: 3780 switch (pi->mod_type) { 3781 3782 case FW_PORT_MOD_TYPE_LR: 3783 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3784 ifmedia_set(media, m | IFM_10G_LR); 3785 break; 3786 3787 case FW_PORT_MOD_TYPE_SR: 3788 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3789 ifmedia_set(media, m | IFM_10G_SR); 3790 break; 3791 3792 case FW_PORT_MOD_TYPE_LRM: 3793 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3794 ifmedia_set(media, m | IFM_10G_LRM); 3795 break; 3796 3797 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3798 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3799 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3800 ifmedia_set(media, m | IFM_10G_TWINAX); 3801 break; 3802 3803 case FW_PORT_MOD_TYPE_NONE: 3804 m &= ~IFM_FDX; 3805 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3806 ifmedia_set(media, m | IFM_NONE); 3807 break; 3808 3809 case FW_PORT_MOD_TYPE_NA: 3810 case FW_PORT_MOD_TYPE_ER: 3811 default: 3812 device_printf(pi->dev, 3813 "unknown port_type (%d), mod_type (%d)\n", 3814 pi->port_type, pi->mod_type); 3815 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3816 ifmedia_set(media, m | IFM_UNKNOWN); 3817 break; 3818 } 3819 break; 3820 3821 case FW_PORT_TYPE_CR_QSFP: 3822 case FW_PORT_TYPE_SFP28: 3823 case FW_PORT_TYPE_KR_SFP28: 3824 switch (pi->mod_type) { 3825 3826 case FW_PORT_MOD_TYPE_SR: 3827 ifmedia_add(media, m | IFM_25G_SR, 0, NULL); 3828 ifmedia_set(media, m | IFM_25G_SR); 3829 break; 3830 3831 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3832 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3833 ifmedia_add(media, m | IFM_25G_CR, 0, NULL); 3834 ifmedia_set(media, m | IFM_25G_CR); 3835 break; 3836 3837 case FW_PORT_MOD_TYPE_NONE: 3838 m &= ~IFM_FDX; 3839 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3840 ifmedia_set(media, m | IFM_NONE); 3841 break; 3842 3843 default: 3844 device_printf(pi->dev, 3845 "unknown port_type (%d), mod_type (%d)\n", 3846 pi->port_type, pi->mod_type); 3847 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3848 ifmedia_set(media, m | IFM_UNKNOWN); 3849 break; 3850 } 3851 break; 3852 3853 case FW_PORT_TYPE_QSFP: 3854 switch (pi->mod_type) { 3855 3856 case FW_PORT_MOD_TYPE_LR: 3857 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3858 ifmedia_set(media, m | IFM_40G_LR4); 3859 break; 3860 3861 case FW_PORT_MOD_TYPE_SR: 3862 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3863 ifmedia_set(media, m | IFM_40G_SR4); 3864 break; 3865 3866 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3867 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3868 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3869 ifmedia_set(media, m | IFM_40G_CR4); 3870 break; 3871 3872 case FW_PORT_MOD_TYPE_NONE: 3873 m &= ~IFM_FDX; 3874 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3875 ifmedia_set(media, m | IFM_NONE); 3876 break; 3877 3878 default: 3879 device_printf(pi->dev, 3880 "unknown port_type (%d), mod_type 
(%d)\n", 3881 pi->port_type, pi->mod_type); 3882 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3883 ifmedia_set(media, m | IFM_UNKNOWN); 3884 break; 3885 } 3886 break; 3887 3888 case FW_PORT_TYPE_KR4_100G: 3889 case FW_PORT_TYPE_CR4_QSFP: 3890 switch (pi->mod_type) { 3891 3892 case FW_PORT_MOD_TYPE_LR: 3893 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL); 3894 ifmedia_set(media, m | IFM_100G_LR4); 3895 break; 3896 3897 case FW_PORT_MOD_TYPE_SR: 3898 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL); 3899 ifmedia_set(media, m | IFM_100G_SR4); 3900 break; 3901 3902 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3903 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3904 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL); 3905 ifmedia_set(media, m | IFM_100G_CR4); 3906 break; 3907 3908 case FW_PORT_MOD_TYPE_NONE: 3909 m &= ~IFM_FDX; 3910 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3911 ifmedia_set(media, m | IFM_NONE); 3912 break; 3913 3914 default: 3915 device_printf(pi->dev, 3916 "unknown port_type (%d), mod_type (%d)\n", 3917 pi->port_type, pi->mod_type); 3918 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3919 ifmedia_set(media, m | IFM_UNKNOWN); 3920 break; 3921 } 3922 break; 3923 3924 default: 3925 device_printf(pi->dev, 3926 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3927 pi->mod_type); 3928 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3929 ifmedia_set(media, m | IFM_UNKNOWN); 3930 break; 3931 } 3932 } 3933 3934 /* 3935 * Update all the requested_* fields in the link config and then send a mailbox 3936 * command to apply the settings. 3937 */ 3938 static void 3939 init_l1cfg(struct port_info *pi) 3940 { 3941 struct adapter *sc = pi->adapter; 3942 struct link_config *lc = &pi->link_cfg; 3943 int rc; 3944 3945 ASSERT_SYNCHRONIZED_OP(sc); 3946 3947 if (t4_autoneg != 0 && lc->supported & FW_PORT_CAP_ANEG) { 3948 lc->requested_aneg = AUTONEG_ENABLE; 3949 lc->requested_speed = 0; 3950 } else { 3951 lc->requested_aneg = AUTONEG_DISABLE; 3952 lc->requested_speed = port_top_speed(pi); /* in Gbps */ 3953 } 3954 3955 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX); 3956 3957 if (t4_fec != -1) { 3958 lc->requested_fec = t4_fec & (FEC_RS | FEC_BASER_RS | 3959 FEC_RESERVED); 3960 } else { 3961 /* Use the suggested value provided by the firmware in acaps */ 3962 if (lc->advertising & FW_PORT_CAP_FEC_RS) 3963 lc->requested_fec = FEC_RS; 3964 else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS) 3965 lc->requested_fec = FEC_BASER_RS; 3966 else if (lc->advertising & FW_PORT_CAP_FEC_RESERVED) 3967 lc->requested_fec = FEC_RESERVED; 3968 else 3969 lc->requested_fec = 0; 3970 } 3971 3972 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 3973 if (rc != 0) { 3974 device_printf(pi->dev, "l1cfg failed: %d\n", rc); 3975 } else { 3976 lc->fc = lc->requested_fc; 3977 lc->fec = lc->requested_fec; 3978 } 3979 } 3980 3981 #define FW_MAC_EXACT_CHUNK 7 3982 3983 /* 3984 * Program the port's XGMAC based on parameters in ifnet. The caller also 3985 * indicates which parameters should be programmed (the rest are left alone). 
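 * 'flags' is a bitmask of XGMAC_* values; for example, a caller that only
 * changed the interface MTU would pass XGMAC_MTU, while
 * cxgbe_init_synchronized passes XGMAC_ALL to reprogram all of them.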
3986 */ 3987 int 3988 update_mac_settings(struct ifnet *ifp, int flags) 3989 { 3990 int rc = 0; 3991 struct vi_info *vi = ifp->if_softc; 3992 struct port_info *pi = vi->pi; 3993 struct adapter *sc = pi->adapter; 3994 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3995 3996 ASSERT_SYNCHRONIZED_OP(sc); 3997 KASSERT(flags, ("%s: not told what to update.", __func__)); 3998 3999 if (flags & XGMAC_MTU) 4000 mtu = ifp->if_mtu; 4001 4002 if (flags & XGMAC_PROMISC) 4003 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 4004 4005 if (flags & XGMAC_ALLMULTI) 4006 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 4007 4008 if (flags & XGMAC_VLANEX) 4009 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 4010 4011 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 4012 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 4013 allmulti, 1, vlanex, false); 4014 if (rc) { 4015 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 4016 rc); 4017 return (rc); 4018 } 4019 } 4020 4021 if (flags & XGMAC_UCADDR) { 4022 uint8_t ucaddr[ETHER_ADDR_LEN]; 4023 4024 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 4025 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 4026 ucaddr, true, true); 4027 if (rc < 0) { 4028 rc = -rc; 4029 if_printf(ifp, "change_mac failed: %d\n", rc); 4030 return (rc); 4031 } else { 4032 vi->xact_addr_filt = rc; 4033 rc = 0; 4034 } 4035 } 4036 4037 if (flags & XGMAC_MCADDRS) { 4038 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 4039 int del = 1; 4040 uint64_t hash = 0; 4041 struct ifmultiaddr *ifma; 4042 int i = 0, j; 4043 4044 if_maddr_rlock(ifp); 4045 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 4046 if (ifma->ifma_addr->sa_family != AF_LINK) 4047 continue; 4048 mcaddr[i] = 4049 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 4050 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 4051 i++; 4052 4053 if (i == FW_MAC_EXACT_CHUNK) { 4054 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 4055 del, i, mcaddr, NULL, &hash, 0); 4056 if (rc < 0) { 4057 rc = -rc; 4058 for (j = 0; j < i; j++) { 4059 if_printf(ifp, 4060 "failed to add mc address" 4061 " %02x:%02x:%02x:" 4062 "%02x:%02x:%02x rc=%d\n", 4063 mcaddr[j][0], mcaddr[j][1], 4064 mcaddr[j][2], mcaddr[j][3], 4065 mcaddr[j][4], mcaddr[j][5], 4066 rc); 4067 } 4068 goto mcfail; 4069 } 4070 del = 0; 4071 i = 0; 4072 } 4073 } 4074 if (i > 0) { 4075 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 4076 mcaddr, NULL, &hash, 0); 4077 if (rc < 0) { 4078 rc = -rc; 4079 for (j = 0; j < i; j++) { 4080 if_printf(ifp, 4081 "failed to add mc address" 4082 " %02x:%02x:%02x:" 4083 "%02x:%02x:%02x rc=%d\n", 4084 mcaddr[j][0], mcaddr[j][1], 4085 mcaddr[j][2], mcaddr[j][3], 4086 mcaddr[j][4], mcaddr[j][5], 4087 rc); 4088 } 4089 goto mcfail; 4090 } 4091 } 4092 4093 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 4094 if (rc != 0) 4095 if_printf(ifp, "failed to set mc address hash: %d", rc); 4096 mcfail: 4097 if_maddr_runlock(ifp); 4098 } 4099 4100 return (rc); 4101 } 4102 4103 /* 4104 * {begin|end}_synchronized_op must be called from the same thread. 4105 */ 4106 int 4107 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 4108 char *wmesg) 4109 { 4110 int rc, pri; 4111 4112 #ifdef WITNESS 4113 /* the caller thinks it's ok to sleep, but is it really? 
*/ 4114 if (flags & SLEEP_OK) 4115 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 4116 "begin_synchronized_op"); 4117 #endif 4118 4119 if (INTR_OK) 4120 pri = PCATCH; 4121 else 4122 pri = 0; 4123 4124 ADAPTER_LOCK(sc); 4125 for (;;) { 4126 4127 if (vi && IS_DOOMED(vi)) { 4128 rc = ENXIO; 4129 goto done; 4130 } 4131 4132 if (!IS_BUSY(sc)) { 4133 rc = 0; 4134 break; 4135 } 4136 4137 if (!(flags & SLEEP_OK)) { 4138 rc = EBUSY; 4139 goto done; 4140 } 4141 4142 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 4143 rc = EINTR; 4144 goto done; 4145 } 4146 } 4147 4148 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 4149 SET_BUSY(sc); 4150 #ifdef INVARIANTS 4151 sc->last_op = wmesg; 4152 sc->last_op_thr = curthread; 4153 sc->last_op_flags = flags; 4154 #endif 4155 4156 done: 4157 if (!(flags & HOLD_LOCK) || rc) 4158 ADAPTER_UNLOCK(sc); 4159 4160 return (rc); 4161 } 4162 4163 /* 4164 * Tell if_ioctl and if_init that the VI is going away. This is 4165 * special variant of begin_synchronized_op and must be paired with a 4166 * call to end_synchronized_op. 4167 */ 4168 void 4169 doom_vi(struct adapter *sc, struct vi_info *vi) 4170 { 4171 4172 ADAPTER_LOCK(sc); 4173 SET_DOOMED(vi); 4174 wakeup(&sc->flags); 4175 while (IS_BUSY(sc)) 4176 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 4177 SET_BUSY(sc); 4178 #ifdef INVARIANTS 4179 sc->last_op = "t4detach"; 4180 sc->last_op_thr = curthread; 4181 sc->last_op_flags = 0; 4182 #endif 4183 ADAPTER_UNLOCK(sc); 4184 } 4185 4186 /* 4187 * {begin|end}_synchronized_op must be called from the same thread. 4188 */ 4189 void 4190 end_synchronized_op(struct adapter *sc, int flags) 4191 { 4192 4193 if (flags & LOCK_HELD) 4194 ADAPTER_LOCK_ASSERT_OWNED(sc); 4195 else 4196 ADAPTER_LOCK(sc); 4197 4198 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 4199 CLR_BUSY(sc); 4200 wakeup(&sc->flags); 4201 ADAPTER_UNLOCK(sc); 4202 } 4203 4204 static int 4205 cxgbe_init_synchronized(struct vi_info *vi) 4206 { 4207 struct port_info *pi = vi->pi; 4208 struct adapter *sc = pi->adapter; 4209 struct ifnet *ifp = vi->ifp; 4210 int rc = 0, i; 4211 struct sge_txq *txq; 4212 4213 ASSERT_SYNCHRONIZED_OP(sc); 4214 4215 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4216 return (0); /* already running */ 4217 4218 if (!(sc->flags & FULL_INIT_DONE) && 4219 ((rc = adapter_full_init(sc)) != 0)) 4220 return (rc); /* error message displayed already */ 4221 4222 if (!(vi->flags & VI_INIT_DONE) && 4223 ((rc = vi_full_init(vi)) != 0)) 4224 return (rc); /* error message displayed already */ 4225 4226 rc = update_mac_settings(ifp, XGMAC_ALL); 4227 if (rc) 4228 goto done; /* error message displayed already */ 4229 4230 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 4231 if (rc != 0) { 4232 if_printf(ifp, "enable_vi failed: %d\n", rc); 4233 goto done; 4234 } 4235 4236 /* 4237 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 4238 * if this changes. 4239 */ 4240 4241 for_each_txq(vi, i, txq) { 4242 TXQ_LOCK(txq); 4243 txq->eq.flags |= EQ_ENABLED; 4244 TXQ_UNLOCK(txq); 4245 } 4246 4247 /* 4248 * The first iq of the first port to come up is used for tracing. 4249 */ 4250 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 4251 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 4252 t4_write_reg(sc, is_t4(sc) ? 
A_MPS_TRC_RSS_CONTROL : 4253 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 4254 V_QUEUENUMBER(sc->traceq)); 4255 pi->flags |= HAS_TRACEQ; 4256 } 4257 4258 /* all ok */ 4259 PORT_LOCK(pi); 4260 if (pi->up_vis++ == 0) { 4261 t4_update_port_info(pi); 4262 build_medialist(vi->pi, &vi->media); 4263 init_l1cfg(pi); 4264 } 4265 ifp->if_drv_flags |= IFF_DRV_RUNNING; 4266 4267 if (pi->nvi > 1 || sc->flags & IS_VF) 4268 callout_reset(&vi->tick, hz, vi_tick, vi); 4269 else 4270 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 4271 PORT_UNLOCK(pi); 4272 done: 4273 if (rc != 0) 4274 cxgbe_uninit_synchronized(vi); 4275 4276 return (rc); 4277 } 4278 4279 /* 4280 * Idempotent. 4281 */ 4282 static int 4283 cxgbe_uninit_synchronized(struct vi_info *vi) 4284 { 4285 struct port_info *pi = vi->pi; 4286 struct adapter *sc = pi->adapter; 4287 struct ifnet *ifp = vi->ifp; 4288 int rc, i; 4289 struct sge_txq *txq; 4290 4291 ASSERT_SYNCHRONIZED_OP(sc); 4292 4293 if (!(vi->flags & VI_INIT_DONE)) { 4294 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 4295 ("uninited VI is running")); 4296 return (0); 4297 } 4298 4299 /* 4300 * Disable the VI so that all its data in either direction is discarded 4301 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 4302 * tick) intact as the TP can deliver negative advice or data that it's 4303 * holding in its RAM (for an offloaded connection) even after the VI is 4304 * disabled. 4305 */ 4306 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 4307 if (rc) { 4308 if_printf(ifp, "disable_vi failed: %d\n", rc); 4309 return (rc); 4310 } 4311 4312 for_each_txq(vi, i, txq) { 4313 TXQ_LOCK(txq); 4314 txq->eq.flags &= ~EQ_ENABLED; 4315 TXQ_UNLOCK(txq); 4316 } 4317 4318 PORT_LOCK(pi); 4319 if (pi->nvi > 1 || sc->flags & IS_VF) 4320 callout_stop(&vi->tick); 4321 else 4322 callout_stop(&pi->tick); 4323 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4324 PORT_UNLOCK(pi); 4325 return (0); 4326 } 4327 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4328 pi->up_vis--; 4329 if (pi->up_vis > 0) { 4330 PORT_UNLOCK(pi); 4331 return (0); 4332 } 4333 PORT_UNLOCK(pi); 4334 4335 pi->link_cfg.link_ok = 0; 4336 pi->link_cfg.speed = 0; 4337 pi->link_cfg.link_down_rc = 255; 4338 t4_os_link_changed(pi, NULL); 4339 4340 return (0); 4341 } 4342 4343 /* 4344 * It is ok for this function to fail midway and return right away. t4_detach 4345 * will walk the entire sc->irq list and clean up whatever is valid. 4346 */ 4347 int 4348 t4_setup_intr_handlers(struct adapter *sc) 4349 { 4350 int rc, rid, p, q, v; 4351 char s[8]; 4352 struct irq *irq; 4353 struct port_info *pi; 4354 struct vi_info *vi; 4355 struct sge *sge = &sc->sge; 4356 struct sge_rxq *rxq; 4357 #ifdef TCP_OFFLOAD 4358 struct sge_ofld_rxq *ofld_rxq; 4359 #endif 4360 #ifdef DEV_NETMAP 4361 struct sge_nm_rxq *nm_rxq; 4362 #endif 4363 #ifdef RSS 4364 int nbuckets = rss_getnumbuckets(); 4365 #endif 4366 4367 /* 4368 * Setup interrupts. 4369 */ 4370 irq = &sc->irq[0]; 4371 rid = sc->intr_type == INTR_INTX ? 0 : 1; 4372 if (sc->intr_count == 1) 4373 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 4374 4375 /* Multiple interrupts. 
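 * Vector layout with multiple vectors: the first is the adapter-wide error
 * interrupt (PFs only), the next is the firmware event queue, and the
 * remaining vectors are handed out per-VI to the NIC rx queues (and, where
 * enabled, to the netmap and TOE rx queues) in the loop below.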
*/ 4376 if (sc->flags & IS_VF) 4377 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 4378 ("%s: too few intr.", __func__)); 4379 else 4380 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 4381 ("%s: too few intr.", __func__)); 4382 4383 /* The first one is always error intr on PFs */ 4384 if (!(sc->flags & IS_VF)) { 4385 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 4386 if (rc != 0) 4387 return (rc); 4388 irq++; 4389 rid++; 4390 } 4391 4392 /* The second one is always the firmware event queue (first on VFs) */ 4393 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 4394 if (rc != 0) 4395 return (rc); 4396 irq++; 4397 rid++; 4398 4399 for_each_port(sc, p) { 4400 pi = sc->port[p]; 4401 for_each_vi(pi, v, vi) { 4402 vi->first_intr = rid - 1; 4403 4404 if (vi->nnmrxq > 0) { 4405 int n = max(vi->nrxq, vi->nnmrxq); 4406 4407 MPASS(vi->flags & INTR_RXQ); 4408 4409 rxq = &sge->rxq[vi->first_rxq]; 4410 #ifdef DEV_NETMAP 4411 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 4412 #endif 4413 for (q = 0; q < n; q++) { 4414 snprintf(s, sizeof(s), "%x%c%x", p, 4415 'a' + v, q); 4416 if (q < vi->nrxq) 4417 irq->rxq = rxq++; 4418 #ifdef DEV_NETMAP 4419 if (q < vi->nnmrxq) 4420 irq->nm_rxq = nm_rxq++; 4421 #endif 4422 rc = t4_alloc_irq(sc, irq, rid, 4423 t4_vi_intr, irq, s); 4424 if (rc != 0) 4425 return (rc); 4426 irq++; 4427 rid++; 4428 vi->nintr++; 4429 } 4430 } else if (vi->flags & INTR_RXQ) { 4431 for_each_rxq(vi, q, rxq) { 4432 snprintf(s, sizeof(s), "%x%c%x", p, 4433 'a' + v, q); 4434 rc = t4_alloc_irq(sc, irq, rid, 4435 t4_intr, rxq, s); 4436 if (rc != 0) 4437 return (rc); 4438 #ifdef RSS 4439 bus_bind_intr(sc->dev, irq->res, 4440 rss_getcpu(q % nbuckets)); 4441 #endif 4442 irq++; 4443 rid++; 4444 vi->nintr++; 4445 } 4446 } 4447 #ifdef TCP_OFFLOAD 4448 if (vi->flags & INTR_OFLD_RXQ) { 4449 for_each_ofld_rxq(vi, q, ofld_rxq) { 4450 snprintf(s, sizeof(s), "%x%c%x", p, 4451 'A' + v, q); 4452 rc = t4_alloc_irq(sc, irq, rid, 4453 t4_intr, ofld_rxq, s); 4454 if (rc != 0) 4455 return (rc); 4456 irq++; 4457 rid++; 4458 vi->nintr++; 4459 } 4460 } 4461 #endif 4462 } 4463 } 4464 MPASS(irq == &sc->irq[sc->intr_count]); 4465 4466 return (0); 4467 } 4468 4469 int 4470 adapter_full_init(struct adapter *sc) 4471 { 4472 int rc, i; 4473 #ifdef RSS 4474 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4475 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4476 #endif 4477 4478 ASSERT_SYNCHRONIZED_OP(sc); 4479 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4480 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 4481 ("%s: FULL_INIT_DONE already", __func__)); 4482 4483 /* 4484 * queues that belong to the adapter (not any particular port). 
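 * (The firmware event queue that was given its own interrupt vector in
 * t4_setup_intr_handlers is one such queue.)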
4485 */ 4486 rc = t4_setup_adapter_queues(sc); 4487 if (rc != 0) 4488 goto done; 4489 4490 for (i = 0; i < nitems(sc->tq); i++) { 4491 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 4492 taskqueue_thread_enqueue, &sc->tq[i]); 4493 if (sc->tq[i] == NULL) { 4494 device_printf(sc->dev, 4495 "failed to allocate task queue %d\n", i); 4496 rc = ENOMEM; 4497 goto done; 4498 } 4499 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 4500 device_get_nameunit(sc->dev), i); 4501 } 4502 #ifdef RSS 4503 MPASS(RSS_KEYSIZE == 40); 4504 rss_getkey((void *)&raw_rss_key[0]); 4505 for (i = 0; i < nitems(rss_key); i++) { 4506 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 4507 } 4508 t4_write_rss_key(sc, &rss_key[0], -1, 1); 4509 #endif 4510 4511 if (!(sc->flags & IS_VF)) 4512 t4_intr_enable(sc); 4513 sc->flags |= FULL_INIT_DONE; 4514 done: 4515 if (rc != 0) 4516 adapter_full_uninit(sc); 4517 4518 return (rc); 4519 } 4520 4521 int 4522 adapter_full_uninit(struct adapter *sc) 4523 { 4524 int i; 4525 4526 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4527 4528 t4_teardown_adapter_queues(sc); 4529 4530 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 4531 taskqueue_free(sc->tq[i]); 4532 sc->tq[i] = NULL; 4533 } 4534 4535 sc->flags &= ~FULL_INIT_DONE; 4536 4537 return (0); 4538 } 4539 4540 #ifdef RSS 4541 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 4542 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 4543 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 4544 RSS_HASHTYPE_RSS_UDP_IPV6) 4545 4546 /* Translates kernel hash types to hardware. */ 4547 static int 4548 hashconfig_to_hashen(int hashconfig) 4549 { 4550 int hashen = 0; 4551 4552 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 4553 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 4554 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 4555 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 4556 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 4557 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4558 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4559 } 4560 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 4561 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4562 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4563 } 4564 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 4565 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4566 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 4567 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4568 4569 return (hashen); 4570 } 4571 4572 /* Translates hardware hash types to kernel. */ 4573 static int 4574 hashen_to_hashconfig(int hashen) 4575 { 4576 int hashconfig = 0; 4577 4578 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 4579 /* 4580 * If UDP hashing was enabled it must have been enabled for 4581 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 4582 * enabling any 4-tuple hash is nonsense configuration. 
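 * (hashconfig_to_hashen above only ever sets F_FW_RSS_VI_CONFIG_CMD_UDPEN
 * together with one of the 4-tuple enables, so UDPEN by itself should be
 * impossible here.)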
4583 */ 4584 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4585 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 4586 4587 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4588 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 4589 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4590 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 4591 } 4592 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4593 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 4594 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4595 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 4596 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 4597 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 4598 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 4599 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 4600 4601 return (hashconfig); 4602 } 4603 #endif 4604 4605 int 4606 vi_full_init(struct vi_info *vi) 4607 { 4608 struct adapter *sc = vi->pi->adapter; 4609 struct ifnet *ifp = vi->ifp; 4610 uint16_t *rss; 4611 struct sge_rxq *rxq; 4612 int rc, i, j, hashen; 4613 #ifdef RSS 4614 int nbuckets = rss_getnumbuckets(); 4615 int hashconfig = rss_gethashconfig(); 4616 int extra; 4617 #endif 4618 4619 ASSERT_SYNCHRONIZED_OP(sc); 4620 KASSERT((vi->flags & VI_INIT_DONE) == 0, 4621 ("%s: VI_INIT_DONE already", __func__)); 4622 4623 sysctl_ctx_init(&vi->ctx); 4624 vi->flags |= VI_SYSCTL_CTX; 4625 4626 /* 4627 * Allocate tx/rx/fl queues for this VI. 4628 */ 4629 rc = t4_setup_vi_queues(vi); 4630 if (rc != 0) 4631 goto done; /* error message displayed already */ 4632 4633 /* 4634 * Setup RSS for this VI. Save a copy of the RSS table for later use. 4635 */ 4636 if (vi->nrxq > vi->rss_size) { 4637 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 4638 "some queues will never receive traffic.\n", vi->nrxq, 4639 vi->rss_size); 4640 } else if (vi->rss_size % vi->nrxq) { 4641 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 4642 "expect uneven traffic distribution.\n", vi->nrxq, 4643 vi->rss_size); 4644 } 4645 #ifdef RSS 4646 if (vi->nrxq != nbuckets) { 4647 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 4648 "performance will be impacted.\n", vi->nrxq, nbuckets); 4649 } 4650 #endif 4651 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 4652 for (i = 0; i < vi->rss_size;) { 4653 #ifdef RSS 4654 j = rss_get_indirection_to_bucket(i); 4655 j %= vi->nrxq; 4656 rxq = &sc->sge.rxq[vi->first_rxq + j]; 4657 rss[i++] = rxq->iq.abs_id; 4658 #else 4659 for_each_rxq(vi, j, rxq) { 4660 rss[i++] = rxq->iq.abs_id; 4661 if (i == vi->rss_size) 4662 break; 4663 } 4664 #endif 4665 } 4666 4667 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 4668 vi->rss_size); 4669 if (rc != 0) { 4670 if_printf(ifp, "rss_config failed: %d\n", rc); 4671 goto done; 4672 } 4673 4674 #ifdef RSS 4675 hashen = hashconfig_to_hashen(hashconfig); 4676 4677 /* 4678 * We may have had to enable some hashes even though the global config 4679 * wants them disabled. This is a potential problem that must be 4680 * reported to the user. 4681 */ 4682 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4683 4684 /* 4685 * If we consider only the supported hash types, then the enabled hashes 4686 * are a superset of the requested hashes. In other words, there cannot 4687 * be any supported hash that was requested but not enabled, but there 4688 * can be hashes that were not requested but had to be enabled. 
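 * For example, a global config of just RSS_HASHTYPE_RSS_UDP_IPV4 turns on
 * F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN, which reads back as TCP/IPv4 hashing
 * too, so 'extra' ends up with RSS_HASHTYPE_RSS_TCP_IPV4 and the
 * "forced on" message below is printed for it.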
4689 */ 4690 extra &= SUPPORTED_RSS_HASHTYPES; 4691 MPASS((extra & hashconfig) == 0); 4692 4693 if (extra) { 4694 if_printf(ifp, 4695 "global RSS config (0x%x) cannot be accommodated.\n", 4696 hashconfig); 4697 } 4698 if (extra & RSS_HASHTYPE_RSS_IPV4) 4699 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4700 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4701 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4702 if (extra & RSS_HASHTYPE_RSS_IPV6) 4703 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4704 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4705 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4706 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4707 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4708 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4709 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4710 #else 4711 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4712 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4713 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4714 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4715 #endif 4716 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0); 4717 if (rc != 0) { 4718 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4719 goto done; 4720 } 4721 4722 vi->rss = rss; 4723 vi->flags |= VI_INIT_DONE; 4724 done: 4725 if (rc != 0) 4726 vi_full_uninit(vi); 4727 4728 return (rc); 4729 } 4730 4731 /* 4732 * Idempotent. 4733 */ 4734 int 4735 vi_full_uninit(struct vi_info *vi) 4736 { 4737 struct port_info *pi = vi->pi; 4738 struct adapter *sc = pi->adapter; 4739 int i; 4740 struct sge_rxq *rxq; 4741 struct sge_txq *txq; 4742 #ifdef TCP_OFFLOAD 4743 struct sge_ofld_rxq *ofld_rxq; 4744 struct sge_wrq *ofld_txq; 4745 #endif 4746 4747 if (vi->flags & VI_INIT_DONE) { 4748 4749 /* Need to quiesce queues. */ 4750 4751 /* XXX: Only for the first VI? */ 4752 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 4753 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4754 4755 for_each_txq(vi, i, txq) { 4756 quiesce_txq(sc, txq); 4757 } 4758 4759 #ifdef TCP_OFFLOAD 4760 for_each_ofld_txq(vi, i, ofld_txq) { 4761 quiesce_wrq(sc, ofld_txq); 4762 } 4763 #endif 4764 4765 for_each_rxq(vi, i, rxq) { 4766 quiesce_iq(sc, &rxq->iq); 4767 quiesce_fl(sc, &rxq->fl); 4768 } 4769 4770 #ifdef TCP_OFFLOAD 4771 for_each_ofld_rxq(vi, i, ofld_rxq) { 4772 quiesce_iq(sc, &ofld_rxq->iq); 4773 quiesce_fl(sc, &ofld_rxq->fl); 4774 } 4775 #endif 4776 free(vi->rss, M_CXGBE); 4777 free(vi->nm_rss, M_CXGBE); 4778 } 4779 4780 t4_teardown_vi_queues(vi); 4781 vi->flags &= ~VI_INIT_DONE; 4782 4783 return (0); 4784 } 4785 4786 static void 4787 quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4788 { 4789 struct sge_eq *eq = &txq->eq; 4790 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4791 4792 (void) sc; /* unused */ 4793 4794 #ifdef INVARIANTS 4795 TXQ_LOCK(txq); 4796 MPASS((eq->flags & EQ_ENABLED) == 0); 4797 TXQ_UNLOCK(txq); 4798 #endif 4799 4800 /* Wait for the mp_ring to empty. */ 4801 while (!mp_ring_is_idle(txq->r)) { 4802 mp_ring_check_drainage(txq->r, 0); 4803 pause("rquiesce", 1); 4804 } 4805 4806 /* Then wait for the hardware to finish. */ 4807 while (spg->cidx != htobe16(eq->pidx)) 4808 pause("equiesce", 1); 4809 4810 /* Finally, wait for the driver to reclaim all descriptors. 
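 * (eq->cidx catches up to eq->pidx only once every descriptor posted to
 * the hardware has been completed and reclaimed by the tx path.)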
*/ 4811 while (eq->cidx != eq->pidx) 4812 pause("dquiesce", 1); 4813 } 4814 4815 static void 4816 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4817 { 4818 4819 /* XXXTX */ 4820 } 4821 4822 static void 4823 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4824 { 4825 (void) sc; /* unused */ 4826 4827 /* Synchronize with the interrupt handler */ 4828 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4829 pause("iqfree", 1); 4830 } 4831 4832 static void 4833 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4834 { 4835 mtx_lock(&sc->sfl_lock); 4836 FL_LOCK(fl); 4837 fl->flags |= FL_DOOMED; 4838 FL_UNLOCK(fl); 4839 callout_stop(&sc->sfl_callout); 4840 mtx_unlock(&sc->sfl_lock); 4841 4842 KASSERT((fl->flags & FL_STARVING) == 0, 4843 ("%s: still starving", __func__)); 4844 } 4845 4846 static int 4847 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4848 driver_intr_t *handler, void *arg, char *name) 4849 { 4850 int rc; 4851 4852 irq->rid = rid; 4853 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4854 RF_SHAREABLE | RF_ACTIVE); 4855 if (irq->res == NULL) { 4856 device_printf(sc->dev, 4857 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4858 return (ENOMEM); 4859 } 4860 4861 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4862 NULL, handler, arg, &irq->tag); 4863 if (rc != 0) { 4864 device_printf(sc->dev, 4865 "failed to setup interrupt for rid %d, name %s: %d\n", 4866 rid, name, rc); 4867 } else if (name) 4868 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); 4869 4870 return (rc); 4871 } 4872 4873 static int 4874 t4_free_irq(struct adapter *sc, struct irq *irq) 4875 { 4876 if (irq->tag) 4877 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4878 if (irq->res) 4879 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4880 4881 bzero(irq, sizeof(*irq)); 4882 4883 return (0); 4884 } 4885 4886 static void 4887 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4888 { 4889 4890 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4891 t4_get_regs(sc, buf, regs->len); 4892 } 4893 4894 #define A_PL_INDIR_CMD 0x1f8 4895 4896 #define S_PL_AUTOINC 31 4897 #define M_PL_AUTOINC 0x1U 4898 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4899 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4900 4901 #define S_PL_VFID 20 4902 #define M_PL_VFID 0xffU 4903 #define V_PL_VFID(x) ((x) << S_PL_VFID) 4904 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4905 4906 #define S_PL_ADDR 0 4907 #define M_PL_ADDR 0xfffffU 4908 #define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4909 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4910 4911 #define A_PL_INDIR_DATA 0x1fc 4912 4913 static uint64_t 4914 read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4915 { 4916 u32 stats[2]; 4917 4918 mtx_assert(&sc->reg_lock, MA_OWNED); 4919 if (sc->flags & IS_VF) { 4920 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 4921 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 4922 } else { 4923 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4924 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4925 V_PL_ADDR(VF_MPS_REG(reg))); 4926 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4927 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4928 } 4929 return (((uint64_t)stats[1]) << 32 | stats[0]); 4930 } 4931 4932 static void 4933 t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4934 struct fw_vi_stats_vf *stats) 4935 { 4936 4937 #define GET_STAT(name) \ 4938 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4939 4940 
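/*
 * GET_STAT(X) pastes the stat name into the corresponding MPS VF register
 * offset, e.g. GET_STAT(TX_VF_BCAST_BYTES) expands to
 * read_vf_stat(sc, viid, A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L); read_vf_stat
 * then combines the _L/_H 32-bit halves into a single 64-bit counter.
 */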
stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4941 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4942 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4943 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4944 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4945 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4946 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4947 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4948 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4949 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4950 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4951 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4952 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4953 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4954 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4955 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4956 4957 #undef GET_STAT 4958 } 4959 4960 static void 4961 t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4962 { 4963 int reg; 4964 4965 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4966 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4967 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4968 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4969 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4970 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4971 } 4972 4973 static void 4974 vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4975 { 4976 struct timeval tv; 4977 const struct timeval interval = {0, 250000}; /* 250ms */ 4978 4979 if (!(vi->flags & VI_INIT_DONE)) 4980 return; 4981 4982 getmicrotime(&tv); 4983 timevalsub(&tv, &interval); 4984 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4985 return; 4986 4987 mtx_lock(&sc->reg_lock); 4988 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4989 getmicrotime(&vi->last_refreshed); 4990 mtx_unlock(&sc->reg_lock); 4991 } 4992 4993 static void 4994 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4995 { 4996 int i; 4997 u_int v, tnl_cong_drops; 4998 struct timeval tv; 4999 const struct timeval interval = {0, 250000}; /* 250ms */ 5000 5001 getmicrotime(&tv); 5002 timevalsub(&tv, &interval); 5003 if (timevalcmp(&tv, &pi->last_refreshed, <)) 5004 return; 5005 5006 tnl_cong_drops = 0; 5007 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 5008 for (i = 0; i < sc->chip_params->nchan; i++) { 5009 if (pi->rx_chan_map & (1 << i)) { 5010 mtx_lock(&sc->reg_lock); 5011 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 5012 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 5013 mtx_unlock(&sc->reg_lock); 5014 tnl_cong_drops += v; 5015 } 5016 } 5017 pi->tnl_cong_drops = tnl_cong_drops; 5018 getmicrotime(&pi->last_refreshed); 5019 } 5020 5021 static void 5022 cxgbe_tick(void *arg) 5023 { 5024 struct port_info *pi = arg; 5025 struct adapter *sc = pi->adapter; 5026 5027 PORT_LOCK_ASSERT_OWNED(pi); 5028 cxgbe_refresh_stats(sc, pi); 5029 5030 callout_schedule(&pi->tick, hz); 5031 } 5032 5033 void 5034 vi_tick(void *arg) 5035 { 5036 struct vi_info *vi = arg; 5037 struct adapter *sc = vi->pi->adapter; 5038 5039 vi_refresh_stats(sc, vi); 5040 5041 callout_schedule(&vi->tick, hz); 5042 } 5043 5044 static void 5045 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 5046 { 5047 struct ifnet *vlan; 5048 5049 if (arg != ifp || ifp->if_type != IFT_ETHER) 5050 return; 5051 5052 vlan = VLAN_DEVAT(ifp, vid); 5053 VLAN_SETCOOKIE(vlan, ifp); 5054 } 5055 5056 /* 5057 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 5058 */ 5059 static 
char *caps_decoder[] = { 5060 "\20\001IPMI\002NCSI", /* 0: NBM */ 5061 "\20\001PPP\002QFC\003DCBX", /* 1: link */ 5062 "\20\001INGRESS\002EGRESS", /* 2: switch */ 5063 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 5064 "\006HASHFILTER\007ETHOFLD", 5065 "\20\001TOE", /* 4: TOE */ 5066 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 5067 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 5068 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 5069 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 5070 "\007T10DIF" 5071 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 5072 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ 5073 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 5074 "\004PO_INITIATOR\005PO_TARGET", 5075 }; 5076 5077 void 5078 t4_sysctls(struct adapter *sc) 5079 { 5080 struct sysctl_ctx_list *ctx; 5081 struct sysctl_oid *oid; 5082 struct sysctl_oid_list *children, *c0; 5083 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 5084 5085 ctx = device_get_sysctl_ctx(sc->dev); 5086 5087 /* 5088 * dev.t4nex.X. 5089 */ 5090 oid = device_get_sysctl_tree(sc->dev); 5091 c0 = children = SYSCTL_CHILDREN(oid); 5092 5093 sc->sc_do_rxcopy = 1; 5094 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 5095 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 5096 5097 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 5098 sc->params.nports, "# of ports"); 5099 5100 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 5101 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 5102 sysctl_bitfield, "A", "available doorbells"); 5103 5104 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 5105 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 5106 5107 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 5108 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 5109 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 5110 "interrupt holdoff timer values (us)"); 5111 5112 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 5113 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 5114 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 5115 "interrupt holdoff packet counter values"); 5116 5117 t4_sge_sysctls(sc, ctx, children); 5118 5119 sc->lro_timeout = 100; 5120 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 5121 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 5122 5123 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 5124 &sc->debug_flags, 0, "flags to enable runtime debugging"); 5125 5126 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 5127 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 5128 5129 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 5130 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 5131 5132 if (sc->flags & IS_VF) 5133 return; 5134 5135 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 5136 NULL, chip_rev(sc), "chip hardware revision"); 5137 5138 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 5139 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 5140 5141 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 5142 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 5143 5144 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 5145 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 5146 5147 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 5148 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); 5149 5150 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 5151 sc->er_version, 0, 
"expansion ROM version"); 5152 5153 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, 5154 sc->bs_version, 0, "bootstrap firmware version"); 5155 5156 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 5157 NULL, sc->params.scfg_vers, "serial config version"); 5158 5159 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 5160 NULL, sc->params.vpd_vers, "VPD version"); 5161 5162 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 5163 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 5164 5165 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 5166 sc->cfcsum, "config file checksum"); 5167 5168 #define SYSCTL_CAP(name, n, text) \ 5169 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 5170 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 5171 sysctl_bitfield, "A", "available " text " capabilities") 5172 5173 SYSCTL_CAP(nbmcaps, 0, "NBM"); 5174 SYSCTL_CAP(linkcaps, 1, "link"); 5175 SYSCTL_CAP(switchcaps, 2, "switch"); 5176 SYSCTL_CAP(niccaps, 3, "NIC"); 5177 SYSCTL_CAP(toecaps, 4, "TCP offload"); 5178 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 5179 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 5180 SYSCTL_CAP(cryptocaps, 7, "crypto"); 5181 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 5182 #undef SYSCTL_CAP 5183 5184 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 5185 NULL, sc->tids.nftids, "number of filters"); 5186 5187 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 5188 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 5189 "chip temperature (in Celsius)"); 5190 5191 #ifdef SBUF_DRAIN 5192 /* 5193 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 5194 */ 5195 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 5196 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 5197 "logs and miscellaneous information"); 5198 children = SYSCTL_CHILDREN(oid); 5199 5200 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 5201 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5202 sysctl_cctrl, "A", "congestion control"); 5203 5204 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 5205 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5206 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 5207 5208 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 5209 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 5210 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 5211 5212 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 5213 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 5214 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 5215 5216 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 5217 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 5218 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 5219 5220 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 5221 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 5222 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 5223 5224 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 5225 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 5226 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 5227 5228 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 5229 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5230 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 5231 "A", "CIM logic analyzer"); 5232 5233 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 5234 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5235 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 5236 5237 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 5238 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 5239 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 5240 5241 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 5242 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 5243 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 5244 5245 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 5246 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 5247 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 5248 5249 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 5250 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 5251 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 5252 5253 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 5254 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 5255 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 5256 5257 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 5258 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 5259 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 5260 5261 if (chip_id(sc) > CHELSIO_T4) { 5262 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 5263 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 5264 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 5265 5266 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 5267 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 5268 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 5269 } 5270 5271 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 5272 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5273 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 5274 5275 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 5276 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5277 sysctl_cim_qcfg, "A", "CIM queue configuration"); 5278 5279 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 5280 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5281 sysctl_cpl_stats, "A", "CPL statistics"); 5282 5283 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 5284 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5285 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 5286 5287 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 5288 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5289 sysctl_devlog, "A", "firmware's device log"); 5290 5291 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 5292 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5293 sysctl_fcoe_stats, "A", "FCoE statistics"); 5294 5295 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 5296 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5297 sysctl_hw_sched, "A", "hardware scheduler "); 5298 5299 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 5300 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5301 sysctl_l2t, "A", "hardware L2 table"); 5302 5303 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 5304 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5305 sysctl_lb_stats, "A", "loopback statistics"); 5306 5307 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 5308 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5309 sysctl_meminfo, "A", "memory regions"); 5310 5311 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 5312 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5313 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 5314 "A", "MPS TCAM entries"); 5315 5316 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 5317 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5318 sysctl_path_mtus, "A", "path MTUs"); 5319 5320 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 5321 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5322 sysctl_pm_stats, "A", "PM statistics"); 5323 5324 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 5325 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5326 sysctl_rdma_stats, "A", "RDMA statistics"); 5327 5328 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 5329 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5330 sysctl_tcp_stats, "A", "TCP statistics"); 5331 5332 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 5333 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5334 sysctl_tids, "A", "TID information"); 5335 5336 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 5337 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5338 sysctl_tp_err_stats, "A", "TP error statistics"); 5339 5340 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 5341 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 5342 "TP logic analyzer event capture mask"); 5343 5344 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 5345 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5346 sysctl_tp_la, "A", "TP logic analyzer"); 5347 5348 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 5349 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5350 sysctl_tx_rate, "A", "Tx rate"); 5351 5352 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 5353 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5354 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 5355 5356 if (chip_id(sc) >= CHELSIO_T5) { 5357 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 5358 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5359 sysctl_wcwr_stats, "A", "write combined work requests"); 5360 } 5361 #endif 5362 5363 #ifdef TCP_OFFLOAD 5364 if (is_offload(sc)) { 5365 int i; 5366 char s[4]; 5367 5368 /* 5369 * dev.t4nex.X.toe. 
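 * The TOE tunables below appear under dev.t4nex.<unit>.toe; e.g.
 * dev.t4nex.0.toe.rx_coalesce is the read-write receive-coalescing knob
 * set up a few lines down.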
5370 */ 5371 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 5372 NULL, "TOE parameters"); 5373 children = SYSCTL_CHILDREN(oid); 5374 5375 sc->tt.sndbuf = 256 * 1024; 5376 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 5377 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 5378 5379 sc->tt.ddp = 0; 5380 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 5381 &sc->tt.ddp, 0, "DDP allowed"); 5382 5383 sc->tt.rx_coalesce = 1; 5384 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 5385 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 5386 5387 sc->tt.tx_align = 1; 5388 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 5389 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 5390 5391 sc->tt.tx_zcopy = 0; 5392 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", 5393 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, 5394 "Enable zero-copy aio_write(2)"); 5395 5396 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 5397 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 5398 "TP timer tick (us)"); 5399 5400 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 5401 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 5402 "TCP timestamp tick (us)"); 5403 5404 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 5405 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 5406 "DACK tick (us)"); 5407 5408 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 5409 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 5410 "IU", "DACK timer (us)"); 5411 5412 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 5413 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 5414 sysctl_tp_timer, "LU", "Minimum retransmit interval (us)"); 5415 5416 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 5417 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 5418 sysctl_tp_timer, "LU", "Maximum retransmit interval (us)"); 5419 5420 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 5421 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 5422 sysctl_tp_timer, "LU", "Persist timer min (us)"); 5423 5424 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 5425 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 5426 sysctl_tp_timer, "LU", "Persist timer max (us)"); 5427 5428 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 5429 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 5430 sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); 5431 5432 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval", 5433 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 5434 sysctl_tp_timer, "LU", "Keepalive interval timer (us)"); 5435 5436 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 5437 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 5438 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 5439 5440 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 5441 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 5442 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 5443 5444 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count", 5445 CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX, 5446 sysctl_tp_shift_cnt, "IU", 5447 "Number of SYN retransmissions before abort"); 5448 5449 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count", 5450 CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2, 5451 sysctl_tp_shift_cnt, "IU", 5452 "Number of retransmissions before abort"); 5453 5454 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count", 5455 CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2, 5456 sysctl_tp_shift_cnt, "IU", 5457 "Number of keepalive probes 
before abort"); 5458 5459 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff", 5460 CTLFLAG_RD, NULL, "TOE retransmit backoffs"); 5461 children = SYSCTL_CHILDREN(oid); 5462 for (i = 0; i < 16; i++) { 5463 snprintf(s, sizeof(s), "%u", i); 5464 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s, 5465 CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff, 5466 "IU", "TOE retransmit backoff"); 5467 } 5468 } 5469 #endif 5470 } 5471 5472 void 5473 vi_sysctls(struct vi_info *vi) 5474 { 5475 struct sysctl_ctx_list *ctx; 5476 struct sysctl_oid *oid; 5477 struct sysctl_oid_list *children; 5478 5479 ctx = device_get_sysctl_ctx(vi->dev); 5480 5481 /* 5482 * dev.v?(cxgbe|cxl).X. 5483 */ 5484 oid = device_get_sysctl_tree(vi->dev); 5485 children = SYSCTL_CHILDREN(oid); 5486 5487 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 5488 vi->viid, "VI identifer"); 5489 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 5490 &vi->nrxq, 0, "# of rx queues"); 5491 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 5492 &vi->ntxq, 0, "# of tx queues"); 5493 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 5494 &vi->first_rxq, 0, "index of first rx queue"); 5495 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 5496 &vi->first_txq, 0, "index of first tx queue"); 5497 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, 5498 vi->rss_size, "size of RSS indirection table"); 5499 5500 if (IS_MAIN_VI(vi)) { 5501 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 5502 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 5503 "Reserve queue 0 for non-flowid packets"); 5504 } 5505 5506 #ifdef TCP_OFFLOAD 5507 if (vi->nofldrxq != 0) { 5508 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 5509 &vi->nofldrxq, 0, 5510 "# of rx queues for offloaded TCP connections"); 5511 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 5512 &vi->nofldtxq, 0, 5513 "# of tx queues for offloaded TCP connections"); 5514 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 5515 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 5516 "index of first TOE rx queue"); 5517 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 5518 CTLFLAG_RD, &vi->first_ofld_txq, 0, 5519 "index of first TOE tx queue"); 5520 } 5521 #endif 5522 #ifdef DEV_NETMAP 5523 if (vi->nnmrxq != 0) { 5524 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 5525 &vi->nnmrxq, 0, "# of netmap rx queues"); 5526 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 5527 &vi->nnmtxq, 0, "# of netmap tx queues"); 5528 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 5529 CTLFLAG_RD, &vi->first_nm_rxq, 0, 5530 "index of first netmap rx queue"); 5531 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 5532 CTLFLAG_RD, &vi->first_nm_txq, 0, 5533 "index of first netmap tx queue"); 5534 } 5535 #endif 5536 5537 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 5538 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 5539 "holdoff timer index"); 5540 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 5541 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 5542 "holdoff packet counter index"); 5543 5544 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 5545 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 5546 "rx queue size"); 5547 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 5548 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 5549 "tx queue size"); 5550 } 5551 5552 static void 
void
5553 cxgbe_sysctls(struct port_info *pi) 5554 { 5555 struct sysctl_ctx_list *ctx; 5556 struct sysctl_oid *oid; 5557 struct sysctl_oid_list *children, *children2; 5558 struct adapter *sc = pi->adapter; 5559 int i; 5560 char name[16]; 5561 5562 ctx = device_get_sysctl_ctx(pi->dev); 5563 5564 /* 5565 * dev.cxgbe.X. 5566 */ 5567 oid = device_get_sysctl_tree(pi->dev); 5568 children = SYSCTL_CHILDREN(oid); 5569 5570 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 5571 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 5572 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 5573 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 5574 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 5575 "PHY temperature (in Celsius)"); 5576 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 5577 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 5578 "PHY firmware version"); 5579 } 5580 5581 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 5582 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A", 5583 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 5584 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", 5585 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A", 5586 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); 5587 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", 5588 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I", 5589 "autonegotiation (-1 = not supported)"); 5590 5591 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 5592 port_top_speed(pi), "max speed (in Gbps)"); 5593 5594 if (sc->flags & IS_VF) 5595 return; 5596 5597 /* 5598 * dev.(cxgbe|cxl).X.tc. 5599 */ 5600 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 5601 "Tx scheduler traffic classes (cl_rl)"); 5602 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 5603 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; 5604 5605 snprintf(name, sizeof(name), "%d", i); 5606 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 5607 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 5608 "traffic class")); 5609 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD, 5610 &tc->flags, 0, "flags"); 5611 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 5612 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 5613 #ifdef SBUF_DRAIN 5614 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 5615 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 5616 sysctl_tc_params, "A", "traffic class parameters"); 5617 #endif 5618 } 5619 5620 /* 5621 * dev.cxgbe.X.stats. 
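 * Each SYSCTL_ADD_T4_REG64() entry below reads a 64-bit MPS port-statistics
 * register through sysctl_handle_t4_reg64, so those counters come straight
 * from the hardware on every read (unlike the port_stats-derived values at
 * the end, which the comment there notes may be up to 1s stale).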
5622 */ 5623 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 5624 NULL, "port statistics"); 5625 children = SYSCTL_CHILDREN(oid); 5626 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 5627 &pi->tx_parse_error, 0, 5628 "# of tx packets with invalid length or # of segments"); 5629 5630 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 5631 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 5632 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 5633 sysctl_handle_t4_reg64, "QU", desc) 5634 5635 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 5636 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 5637 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 5638 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 5639 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 5640 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 5641 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 5642 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 5643 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 5644 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 5645 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 5646 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 5647 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 5648 "# of tx frames in this range", 5649 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 5650 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 5651 "# of tx frames in this range", 5652 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 5653 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 5654 "# of tx frames in this range", 5655 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5656 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5657 "# of tx frames in this range", 5658 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5659 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5660 "# of tx frames in this range", 5661 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5662 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5663 "# of tx frames in this range", 5664 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5665 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 5666 "# of tx frames in this range", 5667 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5668 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5669 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5670 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5671 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5672 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5673 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5674 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5675 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5676 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5677 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5678 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5679 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5680 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5681 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5682 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5683 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5684 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames 
transmitted", 5685 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5686 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5687 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5688 5689 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5690 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5691 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5692 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5693 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5694 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5695 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5696 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5697 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5698 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5699 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5700 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5701 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5702 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5703 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5704 "# of frames received with bad FCS", 5705 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5706 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5707 "# of frames received with length error", 5708 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5709 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5710 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5711 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5712 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5713 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5714 "# of rx frames in this range", 5715 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5716 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5717 "# of rx frames in this range", 5718 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5719 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5720 "# of rx frames in this range", 5721 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5722 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5723 "# of rx frames in this range", 5724 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5725 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5726 "# of rx frames in this range", 5727 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5728 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5729 "# of rx frames in this range", 5730 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5731 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5732 "# of rx frames in this range", 5733 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5734 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5735 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5736 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5737 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5738 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5739 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5740 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5741 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5742 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5743 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5744 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5745 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5746 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5747 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5748 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5749 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5750 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5751 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5752 5753 #undef SYSCTL_ADD_T4_REG64 5754 5755 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5756 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5757 &pi->stats.name, desc) 5758 5759 /* We get these from port_stats and they may be stale by up to 1s */ 5760 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5761 "# drops due to buffer-group 0 overflows"); 5762 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5763 "# drops due to buffer-group 1 overflows"); 5764 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5765 "# drops due to buffer-group 2 overflows"); 5766 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5767 "# drops due to buffer-group 3 overflows"); 5768 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5769 "# of buffer-group 0 truncated packets"); 5770 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5771 "# of buffer-group 1 truncated packets"); 5772 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5773 "# of buffer-group 2 truncated packets"); 5774 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5775 "# of buffer-group 3 truncated packets"); 5776 5777 #undef SYSCTL_ADD_T4_PORTSTAT 5778 } 5779 5780 static int 5781 sysctl_int_array(SYSCTL_HANDLER_ARGS) 5782 { 5783 int rc, *i, space = 0; 5784 struct sbuf sb; 5785 5786 sbuf_new_for_sysctl(&sb, NULL, 64, req); 5787 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5788 if (space) 5789 sbuf_printf(&sb, " "); 5790 sbuf_printf(&sb, "%d", *i); 5791 space = 1; 5792 } 5793 rc = sbuf_finish(&sb); 5794 sbuf_delete(&sb); 5795 return (rc); 5796 } 5797 5798 static int 5799 sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5800 { 5801 int rc; 5802 struct sbuf *sb; 5803 5804 rc = sysctl_wire_old_buffer(req, 0); 5805 if (rc != 0) 5806 return(rc); 5807 5808 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5809 if (sb == NULL) 5810 return (ENOMEM); 5811 5812 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5813 rc = sbuf_finish(sb); 5814 sbuf_delete(sb); 5815 5816 return (rc); 5817 } 5818 5819 static int 5820 sysctl_btphy(SYSCTL_HANDLER_ARGS) 5821 { 5822 struct port_info *pi = arg1; 5823 int op = arg2; 5824 struct adapter *sc = pi->adapter; 5825 u_int v; 5826 int rc; 5827 5828 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5829 if (rc) 5830 return (rc); 5831 /* XXX: magic numbers */ 5832 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
0x20 : 0xc820, 5833 &v); 5834 end_synchronized_op(sc, 0); 5835 if (rc) 5836 return (rc); 5837 if (op == 0) 5838 v /= 256; 5839 5840 rc = sysctl_handle_int(oidp, &v, 0, req); 5841 return (rc); 5842 } 5843 5844 static int 5845 sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5846 { 5847 struct vi_info *vi = arg1; 5848 int rc, val; 5849 5850 val = vi->rsrv_noflowq; 5851 rc = sysctl_handle_int(oidp, &val, 0, req); 5852 if (rc != 0 || req->newptr == NULL) 5853 return (rc); 5854 5855 if ((val >= 1) && (vi->ntxq > 1)) 5856 vi->rsrv_noflowq = 1; 5857 else 5858 vi->rsrv_noflowq = 0; 5859 5860 return (rc); 5861 } 5862 5863 static int 5864 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5865 { 5866 struct vi_info *vi = arg1; 5867 struct adapter *sc = vi->pi->adapter; 5868 int idx, rc, i; 5869 struct sge_rxq *rxq; 5870 #ifdef TCP_OFFLOAD 5871 struct sge_ofld_rxq *ofld_rxq; 5872 #endif 5873 uint8_t v; 5874 5875 idx = vi->tmr_idx; 5876 5877 rc = sysctl_handle_int(oidp, &idx, 0, req); 5878 if (rc != 0 || req->newptr == NULL) 5879 return (rc); 5880 5881 if (idx < 0 || idx >= SGE_NTIMERS) 5882 return (EINVAL); 5883 5884 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5885 "t4tmr"); 5886 if (rc) 5887 return (rc); 5888 5889 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5890 for_each_rxq(vi, i, rxq) { 5891 #ifdef atomic_store_rel_8 5892 atomic_store_rel_8(&rxq->iq.intr_params, v); 5893 #else 5894 rxq->iq.intr_params = v; 5895 #endif 5896 } 5897 #ifdef TCP_OFFLOAD 5898 for_each_ofld_rxq(vi, i, ofld_rxq) { 5899 #ifdef atomic_store_rel_8 5900 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5901 #else 5902 ofld_rxq->iq.intr_params = v; 5903 #endif 5904 } 5905 #endif 5906 vi->tmr_idx = idx; 5907 5908 end_synchronized_op(sc, LOCK_HELD); 5909 return (0); 5910 } 5911 5912 static int 5913 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5914 { 5915 struct vi_info *vi = arg1; 5916 struct adapter *sc = vi->pi->adapter; 5917 int idx, rc; 5918 5919 idx = vi->pktc_idx; 5920 5921 rc = sysctl_handle_int(oidp, &idx, 0, req); 5922 if (rc != 0 || req->newptr == NULL) 5923 return (rc); 5924 5925 if (idx < -1 || idx >= SGE_NCOUNTERS) 5926 return (EINVAL); 5927 5928 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5929 "t4pktc"); 5930 if (rc) 5931 return (rc); 5932 5933 if (vi->flags & VI_INIT_DONE) 5934 rc = EBUSY; /* cannot be changed once the queues are created */ 5935 else 5936 vi->pktc_idx = idx; 5937 5938 end_synchronized_op(sc, LOCK_HELD); 5939 return (rc); 5940 } 5941 5942 static int 5943 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5944 { 5945 struct vi_info *vi = arg1; 5946 struct adapter *sc = vi->pi->adapter; 5947 int qsize, rc; 5948 5949 qsize = vi->qsize_rxq; 5950 5951 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5952 if (rc != 0 || req->newptr == NULL) 5953 return (rc); 5954 5955 if (qsize < 128 || (qsize & 7)) 5956 return (EINVAL); 5957 5958 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5959 "t4rxqs"); 5960 if (rc) 5961 return (rc); 5962 5963 if (vi->flags & VI_INIT_DONE) 5964 rc = EBUSY; /* cannot be changed once the queues are created */ 5965 else 5966 vi->qsize_rxq = qsize; 5967 5968 end_synchronized_op(sc, LOCK_HELD); 5969 return (rc); 5970 } 5971 5972 static int 5973 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5974 { 5975 struct vi_info *vi = arg1; 5976 struct adapter *sc = vi->pi->adapter; 5977 int qsize, rc; 5978 5979 qsize = vi->qsize_txq; 5980 5981 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5982 if (rc != 0 || req->newptr == NULL) 5983 return (rc); 5984 5985 
if (qsize < 128 || qsize > 65536) 5986 return (EINVAL); 5987 5988 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5989 "t4txqs"); 5990 if (rc) 5991 return (rc); 5992 5993 if (vi->flags & VI_INIT_DONE) 5994 rc = EBUSY; /* cannot be changed once the queues are created */ 5995 else 5996 vi->qsize_txq = qsize; 5997 5998 end_synchronized_op(sc, LOCK_HELD); 5999 return (rc); 6000 } 6001 6002 static int 6003 sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 6004 { 6005 struct port_info *pi = arg1; 6006 struct adapter *sc = pi->adapter; 6007 struct link_config *lc = &pi->link_cfg; 6008 int rc; 6009 6010 if (req->newptr == NULL) { 6011 struct sbuf *sb; 6012 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 6013 6014 rc = sysctl_wire_old_buffer(req, 0); 6015 if (rc != 0) 6016 return(rc); 6017 6018 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6019 if (sb == NULL) 6020 return (ENOMEM); 6021 6022 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 6023 rc = sbuf_finish(sb); 6024 sbuf_delete(sb); 6025 } else { 6026 char s[2]; 6027 int n; 6028 6029 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 6030 s[1] = 0; 6031 6032 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 6033 if (rc != 0) 6034 return(rc); 6035 6036 if (s[1] != 0) 6037 return (EINVAL); 6038 if (s[0] < '0' || s[0] > '9') 6039 return (EINVAL); /* not a number */ 6040 n = s[0] - '0'; 6041 if (n & ~(PAUSE_TX | PAUSE_RX)) 6042 return (EINVAL); /* some other bit is set too */ 6043 6044 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 6045 "t4PAUSE"); 6046 if (rc) 6047 return (rc); 6048 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 6049 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 6050 lc->requested_fc |= n; 6051 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 6052 if (rc == 0) { 6053 lc->fc = lc->requested_fc; 6054 } 6055 } 6056 end_synchronized_op(sc, 0); 6057 } 6058 6059 return (rc); 6060 } 6061 6062 static int 6063 sysctl_fec(SYSCTL_HANDLER_ARGS) 6064 { 6065 struct port_info *pi = arg1; 6066 struct adapter *sc = pi->adapter; 6067 struct link_config *lc = &pi->link_cfg; 6068 int rc; 6069 6070 if (req->newptr == NULL) { 6071 struct sbuf *sb; 6072 static char *bits = "\20\1RS\2BASER_RS\3RESERVED"; 6073 6074 rc = sysctl_wire_old_buffer(req, 0); 6075 if (rc != 0) 6076 return(rc); 6077 6078 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6079 if (sb == NULL) 6080 return (ENOMEM); 6081 6082 sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits); 6083 rc = sbuf_finish(sb); 6084 sbuf_delete(sb); 6085 } else { 6086 char s[2]; 6087 int n; 6088 6089 s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC); 6090 s[1] = 0; 6091 6092 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 6093 if (rc != 0) 6094 return(rc); 6095 6096 if (s[1] != 0) 6097 return (EINVAL); 6098 if (s[0] < '0' || s[0] > '9') 6099 return (EINVAL); /* not a number */ 6100 n = s[0] - '0'; 6101 if (n & ~M_FW_PORT_CAP_FEC) 6102 return (EINVAL); /* some other bit is set too */ 6103 6104 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 6105 "t4fec"); 6106 if (rc) 6107 return (rc); 6108 if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) { 6109 lc->requested_fec = n & 6110 G_FW_PORT_CAP_FEC(lc->supported); 6111 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 6112 if (rc == 0) { 6113 lc->fec = lc->requested_fec; 6114 } 6115 } 6116 end_synchronized_op(sc, 0); 6117 } 6118 6119 return (rc); 6120 } 6121 6122 static int 6123 sysctl_autoneg(SYSCTL_HANDLER_ARGS) 6124 { 6125 struct port_info *pi = arg1; 6126 struct adapter *sc = 
pi->adapter; 6127 struct link_config *lc = &pi->link_cfg; 6128 int rc, val, old; 6129 6130 if (lc->supported & FW_PORT_CAP_ANEG) 6131 val = lc->requested_aneg == AUTONEG_ENABLE ? 1 : 0; 6132 else 6133 val = -1; 6134 rc = sysctl_handle_int(oidp, &val, 0, req); 6135 if (rc != 0 || req->newptr == NULL) 6136 return (rc); 6137 if ((lc->supported & FW_PORT_CAP_ANEG) == 0) 6138 return (ENOTSUP); 6139 6140 if (val == 0) 6141 val = AUTONEG_DISABLE; 6142 else if (val == 1) 6143 val = AUTONEG_ENABLE; 6144 else 6145 return (EINVAL); 6146 if (lc->requested_aneg == val) 6147 return (0); /* no change */ 6148 6149 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 6150 "t4aneg"); 6151 if (rc) 6152 return (rc); 6153 old = lc->requested_aneg; 6154 lc->requested_aneg = val; 6155 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 6156 if (rc != 0) 6157 lc->requested_aneg = old; 6158 end_synchronized_op(sc, 0); 6159 return (rc); 6160 } 6161 6162 static int 6163 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 6164 { 6165 struct adapter *sc = arg1; 6166 int reg = arg2; 6167 uint64_t val; 6168 6169 val = t4_read_reg64(sc, reg); 6170 6171 return (sysctl_handle_64(oidp, &val, 0, req)); 6172 } 6173 6174 static int 6175 sysctl_temperature(SYSCTL_HANDLER_ARGS) 6176 { 6177 struct adapter *sc = arg1; 6178 int rc, t; 6179 uint32_t param, val; 6180 6181 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 6182 if (rc) 6183 return (rc); 6184 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 6185 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 6186 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 6187 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 6188 end_synchronized_op(sc, 0); 6189 if (rc) 6190 return (rc); 6191 6192 /* unknown is returned as 0 but we display -1 in that case */ 6193 t = val == 0 ? 
-1 : val; 6194 6195 rc = sysctl_handle_int(oidp, &t, 0, req); 6196 return (rc); 6197 } 6198 6199 #ifdef SBUF_DRAIN 6200 static int 6201 sysctl_cctrl(SYSCTL_HANDLER_ARGS) 6202 { 6203 struct adapter *sc = arg1; 6204 struct sbuf *sb; 6205 int rc, i; 6206 uint16_t incr[NMTUS][NCCTRL_WIN]; 6207 static const char *dec_fac[] = { 6208 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 6209 "0.9375" 6210 }; 6211 6212 rc = sysctl_wire_old_buffer(req, 0); 6213 if (rc != 0) 6214 return (rc); 6215 6216 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6217 if (sb == NULL) 6218 return (ENOMEM); 6219 6220 t4_read_cong_tbl(sc, incr); 6221 6222 for (i = 0; i < NCCTRL_WIN; ++i) { 6223 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 6224 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 6225 incr[5][i], incr[6][i], incr[7][i]); 6226 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 6227 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 6228 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 6229 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 6230 } 6231 6232 rc = sbuf_finish(sb); 6233 sbuf_delete(sb); 6234 6235 return (rc); 6236 } 6237 6238 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 6239 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 6240 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 6241 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 6242 }; 6243 6244 static int 6245 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 6246 { 6247 struct adapter *sc = arg1; 6248 struct sbuf *sb; 6249 int rc, i, n, qid = arg2; 6250 uint32_t *buf, *p; 6251 char *qtype; 6252 u_int cim_num_obq = sc->chip_params->cim_num_obq; 6253 6254 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 6255 ("%s: bad qid %d\n", __func__, qid)); 6256 6257 if (qid < CIM_NUM_IBQ) { 6258 /* inbound queue */ 6259 qtype = "IBQ"; 6260 n = 4 * CIM_IBQ_SIZE; 6261 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 6262 rc = t4_read_cim_ibq(sc, qid, buf, n); 6263 } else { 6264 /* outbound queue */ 6265 qtype = "OBQ"; 6266 qid -= CIM_NUM_IBQ; 6267 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 6268 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 6269 rc = t4_read_cim_obq(sc, qid, buf, n); 6270 } 6271 6272 if (rc < 0) { 6273 rc = -rc; 6274 goto done; 6275 } 6276 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 6277 6278 rc = sysctl_wire_old_buffer(req, 0); 6279 if (rc != 0) 6280 goto done; 6281 6282 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6283 if (sb == NULL) { 6284 rc = ENOMEM; 6285 goto done; 6286 } 6287 6288 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 6289 for (i = 0, p = buf; i < n; i += 16, p += 4) 6290 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 6291 p[2], p[3]); 6292 6293 rc = sbuf_finish(sb); 6294 sbuf_delete(sb); 6295 done: 6296 free(buf, M_CXGBE); 6297 return (rc); 6298 } 6299 6300 static int 6301 sysctl_cim_la(SYSCTL_HANDLER_ARGS) 6302 { 6303 struct adapter *sc = arg1; 6304 u_int cfg; 6305 struct sbuf *sb; 6306 uint32_t *buf, *p; 6307 int rc; 6308 6309 MPASS(chip_id(sc) <= CHELSIO_T5); 6310 6311 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 6312 if (rc != 0) 6313 return (rc); 6314 6315 rc = sysctl_wire_old_buffer(req, 0); 6316 if (rc != 0) 6317 return (rc); 6318 6319 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6320 if (sb == NULL) 6321 return (ENOMEM); 6322 6323 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 6324 M_ZERO | M_WAITOK); 6325 6326 rc = 
-t4_cim_read_la(sc, buf, NULL); 6327 if (rc != 0) 6328 goto done; 6329 6330 sbuf_printf(sb, "Status Data PC%s", 6331 cfg & F_UPDBGLACAPTPCONLY ? "" : 6332 " LS0Stat LS0Addr LS0Data"); 6333 6334 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 6335 if (cfg & F_UPDBGLACAPTPCONLY) { 6336 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 6337 p[6], p[7]); 6338 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 6339 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 6340 p[4] & 0xff, p[5] >> 8); 6341 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 6342 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 6343 p[1] & 0xf, p[2] >> 4); 6344 } else { 6345 sbuf_printf(sb, 6346 "\n %02x %x%07x %x%07x %08x %08x " 6347 "%08x%08x%08x%08x", 6348 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 6349 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 6350 p[6], p[7]); 6351 } 6352 } 6353 6354 rc = sbuf_finish(sb); 6355 sbuf_delete(sb); 6356 done: 6357 free(buf, M_CXGBE); 6358 return (rc); 6359 } 6360 6361 static int 6362 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 6363 { 6364 struct adapter *sc = arg1; 6365 u_int cfg; 6366 struct sbuf *sb; 6367 uint32_t *buf, *p; 6368 int rc; 6369 6370 MPASS(chip_id(sc) > CHELSIO_T5); 6371 6372 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 6373 if (rc != 0) 6374 return (rc); 6375 6376 rc = sysctl_wire_old_buffer(req, 0); 6377 if (rc != 0) 6378 return (rc); 6379 6380 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6381 if (sb == NULL) 6382 return (ENOMEM); 6383 6384 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 6385 M_ZERO | M_WAITOK); 6386 6387 rc = -t4_cim_read_la(sc, buf, NULL); 6388 if (rc != 0) 6389 goto done; 6390 6391 sbuf_printf(sb, "Status Inst Data PC%s", 6392 cfg & F_UPDBGLACAPTPCONLY ? "" : 6393 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 6394 6395 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 6396 if (cfg & F_UPDBGLACAPTPCONLY) { 6397 sbuf_printf(sb, "\n %02x %08x %08x %08x", 6398 p[3] & 0xff, p[2], p[1], p[0]); 6399 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 6400 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 6401 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 6402 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 6403 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 6404 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 6405 p[6] >> 16); 6406 } else { 6407 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 6408 "%08x %08x %08x %08x %08x %08x", 6409 (p[9] >> 16) & 0xff, 6410 p[9] & 0xffff, p[8] >> 16, 6411 p[8] & 0xffff, p[7] >> 16, 6412 p[7] & 0xffff, p[6] >> 16, 6413 p[2], p[1], p[0], p[5], p[4], p[3]); 6414 } 6415 } 6416 6417 rc = sbuf_finish(sb); 6418 sbuf_delete(sb); 6419 done: 6420 free(buf, M_CXGBE); 6421 return (rc); 6422 } 6423 6424 static int 6425 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 6426 { 6427 struct adapter *sc = arg1; 6428 u_int i; 6429 struct sbuf *sb; 6430 uint32_t *buf, *p; 6431 int rc; 6432 6433 rc = sysctl_wire_old_buffer(req, 0); 6434 if (rc != 0) 6435 return (rc); 6436 6437 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6438 if (sb == NULL) 6439 return (ENOMEM); 6440 6441 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 6442 M_ZERO | M_WAITOK); 6443 6444 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 6445 p = buf; 6446 6447 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6448 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 6449 p[1], p[0]); 6450 } 6451 6452 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 6453 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6454 
sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 6455 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 6456 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 6457 (p[1] >> 2) | ((p[2] & 3) << 30), 6458 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 6459 p[0] & 1); 6460 } 6461 6462 rc = sbuf_finish(sb); 6463 sbuf_delete(sb); 6464 free(buf, M_CXGBE); 6465 return (rc); 6466 } 6467 6468 static int 6469 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 6470 { 6471 struct adapter *sc = arg1; 6472 u_int i; 6473 struct sbuf *sb; 6474 uint32_t *buf, *p; 6475 int rc; 6476 6477 rc = sysctl_wire_old_buffer(req, 0); 6478 if (rc != 0) 6479 return (rc); 6480 6481 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6482 if (sb == NULL) 6483 return (ENOMEM); 6484 6485 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 6486 M_ZERO | M_WAITOK); 6487 6488 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 6489 p = buf; 6490 6491 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 6492 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6493 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 6494 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 6495 p[4], p[3], p[2], p[1], p[0]); 6496 } 6497 6498 sbuf_printf(sb, "\n\nCntl ID Data"); 6499 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6500 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 6501 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 6502 } 6503 6504 rc = sbuf_finish(sb); 6505 sbuf_delete(sb); 6506 free(buf, M_CXGBE); 6507 return (rc); 6508 } 6509 6510 static int 6511 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 6512 { 6513 struct adapter *sc = arg1; 6514 struct sbuf *sb; 6515 int rc, i; 6516 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6517 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6518 uint16_t thres[CIM_NUM_IBQ]; 6519 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 6520 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 6521 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 6522 6523 cim_num_obq = sc->chip_params->cim_num_obq; 6524 if (is_t4(sc)) { 6525 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 6526 obq_rdaddr = A_UP_OBQ_0_REALADDR; 6527 } else { 6528 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 6529 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 6530 } 6531 nq = CIM_NUM_IBQ + cim_num_obq; 6532 6533 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 6534 if (rc == 0) 6535 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 6536 if (rc != 0) 6537 return (rc); 6538 6539 t4_read_cimq_cfg(sc, base, size, thres); 6540 6541 rc = sysctl_wire_old_buffer(req, 0); 6542 if (rc != 0) 6543 return (rc); 6544 6545 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6546 if (sb == NULL) 6547 return (ENOMEM); 6548 6549 sbuf_printf(sb, 6550 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 6551 6552 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 6553 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 6554 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 6555 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6556 G_QUEREMFLITS(p[2]) * 16); 6557 for ( ; i < nq; i++, p += 4, wr += 2) 6558 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 6559 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 6560 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6561 G_QUEREMFLITS(p[2]) * 16); 6562 6563 rc = sbuf_finish(sb); 6564 sbuf_delete(sb); 6565 6566 return (rc); 6567 } 6568 6569 static int 6570 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 6571 { 6572 struct adapter *sc = arg1; 6573 struct sbuf *sb; 6574 int rc; 6575 struct tp_cpl_stats stats; 6576 6577 
rc = sysctl_wire_old_buffer(req, 0); 6578 if (rc != 0) 6579 return (rc); 6580 6581 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6582 if (sb == NULL) 6583 return (ENOMEM); 6584 6585 mtx_lock(&sc->reg_lock); 6586 t4_tp_get_cpl_stats(sc, &stats, 0); 6587 mtx_unlock(&sc->reg_lock); 6588 6589 if (sc->chip_params->nchan > 2) { 6590 sbuf_printf(sb, " channel 0 channel 1" 6591 " channel 2 channel 3"); 6592 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 6593 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 6594 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 6595 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 6596 } else { 6597 sbuf_printf(sb, " channel 0 channel 1"); 6598 sbuf_printf(sb, "\nCPL requests: %10u %10u", 6599 stats.req[0], stats.req[1]); 6600 sbuf_printf(sb, "\nCPL responses: %10u %10u", 6601 stats.rsp[0], stats.rsp[1]); 6602 } 6603 6604 rc = sbuf_finish(sb); 6605 sbuf_delete(sb); 6606 6607 return (rc); 6608 } 6609 6610 static int 6611 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 6612 { 6613 struct adapter *sc = arg1; 6614 struct sbuf *sb; 6615 int rc; 6616 struct tp_usm_stats stats; 6617 6618 rc = sysctl_wire_old_buffer(req, 0); 6619 if (rc != 0) 6620 return(rc); 6621 6622 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6623 if (sb == NULL) 6624 return (ENOMEM); 6625 6626 t4_get_usm_stats(sc, &stats, 1); 6627 6628 sbuf_printf(sb, "Frames: %u\n", stats.frames); 6629 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 6630 sbuf_printf(sb, "Drops: %u", stats.drops); 6631 6632 rc = sbuf_finish(sb); 6633 sbuf_delete(sb); 6634 6635 return (rc); 6636 } 6637 6638 static const char * const devlog_level_strings[] = { 6639 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 6640 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 6641 [FW_DEVLOG_LEVEL_ERR] = "ERR", 6642 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 6643 [FW_DEVLOG_LEVEL_INFO] = "INFO", 6644 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 6645 }; 6646 6647 static const char * const devlog_facility_strings[] = { 6648 [FW_DEVLOG_FACILITY_CORE] = "CORE", 6649 [FW_DEVLOG_FACILITY_CF] = "CF", 6650 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 6651 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 6652 [FW_DEVLOG_FACILITY_RES] = "RES", 6653 [FW_DEVLOG_FACILITY_HW] = "HW", 6654 [FW_DEVLOG_FACILITY_FLR] = "FLR", 6655 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 6656 [FW_DEVLOG_FACILITY_PHY] = "PHY", 6657 [FW_DEVLOG_FACILITY_MAC] = "MAC", 6658 [FW_DEVLOG_FACILITY_PORT] = "PORT", 6659 [FW_DEVLOG_FACILITY_VI] = "VI", 6660 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 6661 [FW_DEVLOG_FACILITY_ACL] = "ACL", 6662 [FW_DEVLOG_FACILITY_TM] = "TM", 6663 [FW_DEVLOG_FACILITY_QFC] = "QFC", 6664 [FW_DEVLOG_FACILITY_DCB] = "DCB", 6665 [FW_DEVLOG_FACILITY_ETH] = "ETH", 6666 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 6667 [FW_DEVLOG_FACILITY_RI] = "RI", 6668 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 6669 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 6670 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 6671 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 6672 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 6673 }; 6674 6675 static int 6676 sysctl_devlog(SYSCTL_HANDLER_ARGS) 6677 { 6678 struct adapter *sc = arg1; 6679 struct devlog_params *dparams = &sc->params.devlog; 6680 struct fw_devlog_e *buf, *e; 6681 int i, j, rc, nentries, first = 0; 6682 struct sbuf *sb; 6683 uint64_t ftstamp = UINT64_MAX; 6684 6685 if (dparams->addr == 0) 6686 return (ENXIO); 6687 6688 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 6689 if (buf == NULL) 6690 return (ENOMEM); 6691 6692 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 6693 if (rc != 0) 6694 goto done; 
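/*
 * The firmware devlog read into 'buf' above is a circular buffer of
 * fixed-size entries.  The loop below byte-swaps each entry and remembers
 * the one with the smallest timestamp ('first'); the dump further down
 * starts there and wraps around so the messages come out in chronological
 * order.
 */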
6695 6696 nentries = dparams->size / sizeof(struct fw_devlog_e); 6697 for (i = 0; i < nentries; i++) { 6698 e = &buf[i]; 6699 6700 if (e->timestamp == 0) 6701 break; /* end */ 6702 6703 e->timestamp = be64toh(e->timestamp); 6704 e->seqno = be32toh(e->seqno); 6705 for (j = 0; j < 8; j++) 6706 e->params[j] = be32toh(e->params[j]); 6707 6708 if (e->timestamp < ftstamp) { 6709 ftstamp = e->timestamp; 6710 first = i; 6711 } 6712 } 6713 6714 if (buf[first].timestamp == 0) 6715 goto done; /* nothing in the log */ 6716 6717 rc = sysctl_wire_old_buffer(req, 0); 6718 if (rc != 0) 6719 goto done; 6720 6721 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6722 if (sb == NULL) { 6723 rc = ENOMEM; 6724 goto done; 6725 } 6726 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 6727 "Seq#", "Tstamp", "Level", "Facility", "Message"); 6728 6729 i = first; 6730 do { 6731 e = &buf[i]; 6732 if (e->timestamp == 0) 6733 break; /* end */ 6734 6735 sbuf_printf(sb, "%10d %15ju %8s %8s ", 6736 e->seqno, e->timestamp, 6737 (e->level < nitems(devlog_level_strings) ? 6738 devlog_level_strings[e->level] : "UNKNOWN"), 6739 (e->facility < nitems(devlog_facility_strings) ? 6740 devlog_facility_strings[e->facility] : "UNKNOWN")); 6741 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 6742 e->params[2], e->params[3], e->params[4], 6743 e->params[5], e->params[6], e->params[7]); 6744 6745 if (++i == nentries) 6746 i = 0; 6747 } while (i != first); 6748 6749 rc = sbuf_finish(sb); 6750 sbuf_delete(sb); 6751 done: 6752 free(buf, M_CXGBE); 6753 return (rc); 6754 } 6755 6756 static int 6757 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6758 { 6759 struct adapter *sc = arg1; 6760 struct sbuf *sb; 6761 int rc; 6762 struct tp_fcoe_stats stats[MAX_NCHAN]; 6763 int i, nchan = sc->chip_params->nchan; 6764 6765 rc = sysctl_wire_old_buffer(req, 0); 6766 if (rc != 0) 6767 return (rc); 6768 6769 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6770 if (sb == NULL) 6771 return (ENOMEM); 6772 6773 for (i = 0; i < nchan; i++) 6774 t4_get_fcoe_stats(sc, i, &stats[i], 1); 6775 6776 if (nchan > 2) { 6777 sbuf_printf(sb, " channel 0 channel 1" 6778 " channel 2 channel 3"); 6779 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6780 stats[0].octets_ddp, stats[1].octets_ddp, 6781 stats[2].octets_ddp, stats[3].octets_ddp); 6782 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6783 stats[0].frames_ddp, stats[1].frames_ddp, 6784 stats[2].frames_ddp, stats[3].frames_ddp); 6785 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6786 stats[0].frames_drop, stats[1].frames_drop, 6787 stats[2].frames_drop, stats[3].frames_drop); 6788 } else { 6789 sbuf_printf(sb, " channel 0 channel 1"); 6790 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6791 stats[0].octets_ddp, stats[1].octets_ddp); 6792 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6793 stats[0].frames_ddp, stats[1].frames_ddp); 6794 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6795 stats[0].frames_drop, stats[1].frames_drop); 6796 } 6797 6798 rc = sbuf_finish(sb); 6799 sbuf_delete(sb); 6800 6801 return (rc); 6802 } 6803 6804 static int 6805 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6806 { 6807 struct adapter *sc = arg1; 6808 struct sbuf *sb; 6809 int rc, i; 6810 unsigned int map, kbps, ipg, mode; 6811 unsigned int pace_tab[NTX_SCHED]; 6812 6813 rc = sysctl_wire_old_buffer(req, 0); 6814 if (rc != 0) 6815 return (rc); 6816 6817 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6818 if (sb == NULL) 6819 return (ENOMEM); 6820 6821 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6822 mode = G_TIMERMODE(t4_read_reg(sc, 
A_TP_MOD_CONFIG)); 6823 t4_read_pace_tbl(sc, pace_tab); 6824 6825 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6826 "Class IPG (0.1 ns) Flow IPG (us)"); 6827 6828 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6829 t4_get_tx_sched(sc, i, &kbps, &ipg, 1); 6830 sbuf_printf(sb, "\n %u %-5s %u ", i, 6831 (mode & (1 << i)) ? "flow" : "class", map & 3); 6832 if (kbps) 6833 sbuf_printf(sb, "%9u ", kbps); 6834 else 6835 sbuf_printf(sb, " disabled "); 6836 6837 if (ipg) 6838 sbuf_printf(sb, "%13u ", ipg); 6839 else 6840 sbuf_printf(sb, " disabled "); 6841 6842 if (pace_tab[i]) 6843 sbuf_printf(sb, "%10u", pace_tab[i]); 6844 else 6845 sbuf_printf(sb, " disabled"); 6846 } 6847 6848 rc = sbuf_finish(sb); 6849 sbuf_delete(sb); 6850 6851 return (rc); 6852 } 6853 6854 static int 6855 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6856 { 6857 struct adapter *sc = arg1; 6858 struct sbuf *sb; 6859 int rc, i, j; 6860 uint64_t *p0, *p1; 6861 struct lb_port_stats s[2]; 6862 static const char *stat_name[] = { 6863 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6864 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6865 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6866 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6867 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6868 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6869 "BG2FramesTrunc:", "BG3FramesTrunc:" 6870 }; 6871 6872 rc = sysctl_wire_old_buffer(req, 0); 6873 if (rc != 0) 6874 return (rc); 6875 6876 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6877 if (sb == NULL) 6878 return (ENOMEM); 6879 6880 memset(s, 0, sizeof(s)); 6881 6882 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6883 t4_get_lb_stats(sc, i, &s[0]); 6884 t4_get_lb_stats(sc, i + 1, &s[1]); 6885 6886 p0 = &s[0].octets; 6887 p1 = &s[1].octets; 6888 sbuf_printf(sb, "%s Loopback %u" 6889 " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); 6890 6891 for (j = 0; j < nitems(stat_name); j++) 6892 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6893 *p0++, *p1++); 6894 } 6895 6896 rc = sbuf_finish(sb); 6897 sbuf_delete(sb); 6898 6899 return (rc); 6900 } 6901 6902 static int 6903 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6904 { 6905 int rc = 0; 6906 struct port_info *pi = arg1; 6907 struct link_config *lc = &pi->link_cfg; 6908 struct sbuf *sb; 6909 6910 rc = sysctl_wire_old_buffer(req, 0); 6911 if (rc != 0) 6912 return(rc); 6913 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6914 if (sb == NULL) 6915 return (ENOMEM); 6916 6917 if (lc->link_ok || lc->link_down_rc == 255) 6918 sbuf_printf(sb, "n/a"); 6919 else 6920 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); 6921 6922 rc = sbuf_finish(sb); 6923 sbuf_delete(sb); 6924 6925 return (rc); 6926 } 6927 6928 struct mem_desc { 6929 unsigned int base; 6930 unsigned int limit; 6931 unsigned int idx; 6932 }; 6933 6934 static int 6935 mem_desc_cmp(const void *a, const void *b) 6936 { 6937 return ((const struct mem_desc *)a)->base - 6938 ((const struct mem_desc *)b)->base; 6939 } 6940 6941 static void 6942 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6943 unsigned int to) 6944 { 6945 unsigned int size; 6946 6947 if (from == to) 6948 return; 6949 6950 size = to - from + 1; 6951 if (size == 0) 6952 return; 6953 6954 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6955 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6956 } 6957 6958 static int 6959 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6960 { 6961 struct adapter *sc = arg1; 6962 struct sbuf *sb; 6963 int rc, i, n; 6964 uint32_t lo, hi, used, alloc; 6965 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6966 static const char *region[] = { 6967 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6968 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6969 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6970 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6971 "RQUDP region:", "PBL region:", "TXPBL region:", 6972 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6973 "On-chip queues:" 6974 }; 6975 struct mem_desc avail[4]; 6976 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6977 struct mem_desc *md = mem; 6978 6979 rc = sysctl_wire_old_buffer(req, 0); 6980 if (rc != 0) 6981 return (rc); 6982 6983 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6984 if (sb == NULL) 6985 return (ENOMEM); 6986 6987 for (i = 0; i < nitems(mem); i++) { 6988 mem[i].limit = 0; 6989 mem[i].idx = i; 6990 } 6991 6992 /* Find and sort the populated memory ranges */ 6993 i = 0; 6994 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6995 if (lo & F_EDRAM0_ENABLE) { 6996 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6997 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6998 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6999 avail[i].idx = 0; 7000 i++; 7001 } 7002 if (lo & F_EDRAM1_ENABLE) { 7003 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 7004 avail[i].base = G_EDRAM1_BASE(hi) << 20; 7005 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 7006 avail[i].idx = 1; 7007 i++; 7008 } 7009 if (lo & F_EXT_MEM_ENABLE) { 7010 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 7011 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 7012 avail[i].limit = avail[i].base + 7013 (G_EXT_MEM_SIZE(hi) << 20); 7014 avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ 7015 i++; 7016 } 7017 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 7018 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 7019 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 7020 avail[i].limit = avail[i].base + 7021 (G_EXT_MEM1_SIZE(hi) << 20); 7022 avail[i].idx = 4; 7023 i++; 7024 } 7025 if (!i) /* no memory available */ 7026 return 0; 7027 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 7028 7029 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 7030 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 7031 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 7032 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 7033 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 7034 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 7035 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 7036 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 7037 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 7038 7039 /* the next few have explicit upper bounds */ 7040 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 7041 md->limit = md->base - 1 + 7042 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 7043 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 7044 md++; 7045 7046 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 7047 md->limit = md->base - 1 + 7048 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 7049 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 7050 md++; 7051 7052 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 7053 if (chip_id(sc) <= CHELSIO_T5) 7054 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 7055 else 7056 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 7057 md->limit = 0; 7058 } else { 7059 md->base = 0; 7060 md->idx = nitems(region); /* hide it */ 7061 } 7062 md++; 7063 7064 #define ulp_region(reg) \ 7065 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 7066 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 7067 7068 ulp_region(RX_ISCSI); 7069 ulp_region(RX_TDDP); 7070 ulp_region(TX_TPT); 7071 ulp_region(RX_STAG); 7072 ulp_region(RX_RQ); 7073 ulp_region(RX_RQUDP); 7074 ulp_region(RX_PBL); 7075 ulp_region(TX_PBL); 7076 #undef ulp_region 7077 7078 md->base = 0; 7079 md->idx = nitems(region); 7080 if (!is_t4(sc)) { 7081 uint32_t size = 0; 7082 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 7083 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 7084 7085 if (is_t5(sc)) { 7086 if (sge_ctrl & F_VFIFO_ENABLE) 7087 size = G_DBVFIFO_SIZE(fifo_size); 7088 } else 7089 size = G_T6_DBVFIFO_SIZE(fifo_size); 7090 7091 if (size) { 7092 md->base = G_BASEADDR(t4_read_reg(sc, 7093 A_SGE_DBVFIFO_BADDR)); 7094 md->limit = md->base + (size << 2) - 1; 7095 } 7096 } 7097 md++; 7098 7099 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 7100 md->limit = 0; 7101 md++; 7102 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 7103 md->limit = 0; 7104 md++; 7105 7106 md->base = sc->vres.ocq.start; 7107 if (sc->vres.ocq.size) 7108 md->limit = md->base + sc->vres.ocq.size - 1; 7109 else 7110 md->idx = nitems(region); /* hide it */ 7111 md++; 7112 7113 /* add any address-space holes, there can be up to 3 */ 7114 for (n = 0; n < i - 1; n++) 7115 if (avail[n].limit < avail[n + 1].base) 7116 (md++)->base = avail[n].limit; 7117 if (avail[n].limit) 7118 (md++)->base = avail[n].limit; 7119 7120 n = md - mem; 7121 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 7122 7123 for (lo = 0; lo < i; lo++) 7124 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 7125 avail[lo].limit - 1); 7126 7127 sbuf_printf(sb, "\n"); 7128 for (i 
= 0; i < n; i++) { 7129 if (mem[i].idx >= nitems(region)) 7130 continue; /* skip holes */ 7131 if (!mem[i].limit) 7132 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 7133 mem_region_show(sb, region[mem[i].idx], mem[i].base, 7134 mem[i].limit); 7135 } 7136 7137 sbuf_printf(sb, "\n"); 7138 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 7139 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 7140 mem_region_show(sb, "uP RAM:", lo, hi); 7141 7142 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 7143 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 7144 mem_region_show(sb, "uP Extmem2:", lo, hi); 7145 7146 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 7147 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 7148 G_PMRXMAXPAGE(lo), 7149 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 7150 (lo & F_PMRXNUMCHN) ? 2 : 1); 7151 7152 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 7153 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 7154 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 7155 G_PMTXMAXPAGE(lo), 7156 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 7157 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 7158 sbuf_printf(sb, "%u p-structs\n", 7159 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 7160 7161 for (i = 0; i < 4; i++) { 7162 if (chip_id(sc) > CHELSIO_T5) 7163 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 7164 else 7165 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 7166 if (is_t5(sc)) { 7167 used = G_T5_USED(lo); 7168 alloc = G_T5_ALLOC(lo); 7169 } else { 7170 used = G_USED(lo); 7171 alloc = G_ALLOC(lo); 7172 } 7173 /* For T6 these are MAC buffer groups */ 7174 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 7175 i, used, alloc); 7176 } 7177 for (i = 0; i < sc->chip_params->nchan; i++) { 7178 if (chip_id(sc) > CHELSIO_T5) 7179 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 7180 else 7181 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 7182 if (is_t5(sc)) { 7183 used = G_T5_USED(lo); 7184 alloc = G_T5_ALLOC(lo); 7185 } else { 7186 used = G_USED(lo); 7187 alloc = G_ALLOC(lo); 7188 } 7189 /* For T6 these are MAC buffer groups */ 7190 sbuf_printf(sb, 7191 "\nLoopback %d using %u pages out of %u allocated", 7192 i, used, alloc); 7193 } 7194 7195 rc = sbuf_finish(sb); 7196 sbuf_delete(sb); 7197 7198 return (rc); 7199 } 7200 7201 static inline void 7202 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 7203 { 7204 *mask = x | y; 7205 y = htobe64(y); 7206 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 7207 } 7208 7209 static int 7210 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 7211 { 7212 struct adapter *sc = arg1; 7213 struct sbuf *sb; 7214 int rc, i; 7215 7216 MPASS(chip_id(sc) <= CHELSIO_T5); 7217 7218 rc = sysctl_wire_old_buffer(req, 0); 7219 if (rc != 0) 7220 return (rc); 7221 7222 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7223 if (sb == NULL) 7224 return (ENOMEM); 7225 7226 sbuf_printf(sb, 7227 "Idx Ethernet address Mask Vld Ports PF" 7228 " VF Replication P0 P1 P2 P3 ML"); 7229 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 7230 uint64_t tcamx, tcamy, mask; 7231 uint32_t cls_lo, cls_hi; 7232 uint8_t addr[ETHER_ADDR_LEN]; 7233 7234 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 7235 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 7236 if (tcamx & tcamy) 7237 continue; 7238 tcamxy2valmask(tcamx, tcamy, addr, &mask); 7239 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 7240 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 7241 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 7242 " %c %#x%4u%4d", 
i, addr[0], addr[1], addr[2], 7243 addr[3], addr[4], addr[5], (uintmax_t)mask, 7244 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 7245 G_PORTMAP(cls_hi), G_PF(cls_lo), 7246 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); 7247 7248 if (cls_lo & F_REPLICATE) { 7249 struct fw_ldst_cmd ldst_cmd; 7250 7251 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 7252 ldst_cmd.op_to_addrspace = 7253 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 7254 F_FW_CMD_REQUEST | F_FW_CMD_READ | 7255 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 7256 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 7257 ldst_cmd.u.mps.rplc.fid_idx = 7258 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 7259 V_FW_LDST_CMD_IDX(i)); 7260 7261 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 7262 "t4mps"); 7263 if (rc) 7264 break; 7265 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 7266 sizeof(ldst_cmd), &ldst_cmd); 7267 end_synchronized_op(sc, 0); 7268 7269 if (rc != 0) { 7270 sbuf_printf(sb, "%36d", rc); 7271 rc = 0; 7272 } else { 7273 sbuf_printf(sb, " %08x %08x %08x %08x", 7274 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 7275 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 7276 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 7277 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 7278 } 7279 } else 7280 sbuf_printf(sb, "%36s", ""); 7281 7282 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 7283 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 7284 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 7285 } 7286 7287 if (rc) 7288 (void) sbuf_finish(sb); 7289 else 7290 rc = sbuf_finish(sb); 7291 sbuf_delete(sb); 7292 7293 return (rc); 7294 } 7295 7296 static int 7297 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 7298 { 7299 struct adapter *sc = arg1; 7300 struct sbuf *sb; 7301 int rc, i; 7302 7303 MPASS(chip_id(sc) > CHELSIO_T5); 7304 7305 rc = sysctl_wire_old_buffer(req, 0); 7306 if (rc != 0) 7307 return (rc); 7308 7309 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7310 if (sb == NULL) 7311 return (ENOMEM); 7312 7313 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 7314 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 7315 " Replication" 7316 " P0 P1 P2 P3 ML\n"); 7317 7318 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 7319 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 7320 uint16_t ivlan; 7321 uint64_t tcamx, tcamy, val, mask; 7322 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 7323 uint8_t addr[ETHER_ADDR_LEN]; 7324 7325 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 7326 if (i < 256) 7327 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 7328 else 7329 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 7330 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 7331 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 7332 tcamy = G_DMACH(val) << 32; 7333 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 7334 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 7335 lookup_type = G_DATALKPTYPE(data2); 7336 port_num = G_DATAPORTNUM(data2); 7337 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7338 /* Inner header VNI */ 7339 vniy = ((data2 & F_DATAVIDH2) << 23) | 7340 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 7341 dip_hit = data2 & F_DATADIPHIT; 7342 vlan_vld = 0; 7343 } else { 7344 vniy = 0; 7345 dip_hit = 0; 7346 vlan_vld = data2 & F_DATAVIDH2; 7347 ivlan = G_VIDL(val); 7348 } 7349 7350 ctl |= V_CTLXYBITSEL(1); 7351 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 7352 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 7353 tcamx = G_DMACH(val) << 32; 7354 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 7355 data2 = t4_read_reg(sc, 
A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 7356 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7357 /* Inner header VNI mask */ 7358 vnix = ((data2 & F_DATAVIDH2) << 23) | 7359 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 7360 } else 7361 vnix = 0; 7362 7363 if (tcamx & tcamy) 7364 continue; 7365 tcamxy2valmask(tcamx, tcamy, addr, &mask); 7366 7367 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 7368 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 7369 7370 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7371 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 7372 "%012jx %06x %06x - - %3c" 7373 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 7374 addr[1], addr[2], addr[3], addr[4], addr[5], 7375 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 7376 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 7377 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 7378 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 7379 } else { 7380 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 7381 "%012jx - - ", i, addr[0], addr[1], 7382 addr[2], addr[3], addr[4], addr[5], 7383 (uintmax_t)mask); 7384 7385 if (vlan_vld) 7386 sbuf_printf(sb, "%4u Y ", ivlan); 7387 else 7388 sbuf_printf(sb, " - N "); 7389 7390 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 7391 lookup_type ? 'I' : 'O', port_num, 7392 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 7393 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 7394 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 7395 } 7396 7397 7398 if (cls_lo & F_T6_REPLICATE) { 7399 struct fw_ldst_cmd ldst_cmd; 7400 7401 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 7402 ldst_cmd.op_to_addrspace = 7403 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 7404 F_FW_CMD_REQUEST | F_FW_CMD_READ | 7405 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 7406 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 7407 ldst_cmd.u.mps.rplc.fid_idx = 7408 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 7409 V_FW_LDST_CMD_IDX(i)); 7410 7411 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 7412 "t6mps"); 7413 if (rc) 7414 break; 7415 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 7416 sizeof(ldst_cmd), &ldst_cmd); 7417 end_synchronized_op(sc, 0); 7418 7419 if (rc != 0) { 7420 sbuf_printf(sb, "%72d", rc); 7421 rc = 0; 7422 } else { 7423 sbuf_printf(sb, " %08x %08x %08x %08x" 7424 " %08x %08x %08x %08x", 7425 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 7426 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 7427 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 7428 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 7429 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 7430 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 7431 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 7432 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 7433 } 7434 } else 7435 sbuf_printf(sb, "%72s", ""); 7436 7437 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 7438 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 7439 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 7440 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 7441 } 7442 7443 if (rc) 7444 (void) sbuf_finish(sb); 7445 else 7446 rc = sbuf_finish(sb); 7447 sbuf_delete(sb); 7448 7449 return (rc); 7450 } 7451 7452 static int 7453 sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 7454 { 7455 struct adapter *sc = arg1; 7456 struct sbuf *sb; 7457 int rc; 7458 uint16_t mtus[NMTUS]; 7459 7460 rc = sysctl_wire_old_buffer(req, 0); 7461 if (rc != 0) 7462 return (rc); 7463 7464 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7465 if (sb == NULL) 7466 return (ENOMEM); 7467 7468 t4_read_mtu_tbl(sc, mtus, NULL); 7469 7470 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 7471 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 
7472 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 7473 mtus[14], mtus[15]); 7474 7475 rc = sbuf_finish(sb); 7476 sbuf_delete(sb); 7477 7478 return (rc); 7479 } 7480 7481 static int 7482 sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 7483 { 7484 struct adapter *sc = arg1; 7485 struct sbuf *sb; 7486 int rc, i; 7487 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 7488 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 7489 static const char *tx_stats[MAX_PM_NSTATS] = { 7490 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 7491 "Tx FIFO wait", NULL, "Tx latency" 7492 }; 7493 static const char *rx_stats[MAX_PM_NSTATS] = { 7494 "Read:", "Write bypass:", "Write mem:", "Flush:", 7495 "Rx FIFO wait", NULL, "Rx latency" 7496 }; 7497 7498 rc = sysctl_wire_old_buffer(req, 0); 7499 if (rc != 0) 7500 return (rc); 7501 7502 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7503 if (sb == NULL) 7504 return (ENOMEM); 7505 7506 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 7507 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 7508 7509 sbuf_printf(sb, " Tx pcmds Tx bytes"); 7510 for (i = 0; i < 4; i++) { 7511 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7512 tx_cyc[i]); 7513 } 7514 7515 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 7516 for (i = 0; i < 4; i++) { 7517 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7518 rx_cyc[i]); 7519 } 7520 7521 if (chip_id(sc) > CHELSIO_T5) { 7522 sbuf_printf(sb, 7523 "\n Total wait Total occupancy"); 7524 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7525 tx_cyc[i]); 7526 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7527 rx_cyc[i]); 7528 7529 i += 2; 7530 MPASS(i < nitems(tx_stats)); 7531 7532 sbuf_printf(sb, 7533 "\n Reads Total wait"); 7534 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7535 tx_cyc[i]); 7536 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7537 rx_cyc[i]); 7538 } 7539 7540 rc = sbuf_finish(sb); 7541 sbuf_delete(sb); 7542 7543 return (rc); 7544 } 7545 7546 static int 7547 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 7548 { 7549 struct adapter *sc = arg1; 7550 struct sbuf *sb; 7551 int rc; 7552 struct tp_rdma_stats stats; 7553 7554 rc = sysctl_wire_old_buffer(req, 0); 7555 if (rc != 0) 7556 return (rc); 7557 7558 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7559 if (sb == NULL) 7560 return (ENOMEM); 7561 7562 mtx_lock(&sc->reg_lock); 7563 t4_tp_get_rdma_stats(sc, &stats, 0); 7564 mtx_unlock(&sc->reg_lock); 7565 7566 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); 7567 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); 7568 7569 rc = sbuf_finish(sb); 7570 sbuf_delete(sb); 7571 7572 return (rc); 7573 } 7574 7575 static int 7576 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 7577 { 7578 struct adapter *sc = arg1; 7579 struct sbuf *sb; 7580 int rc; 7581 struct tp_tcp_stats v4, v6; 7582 7583 rc = sysctl_wire_old_buffer(req, 0); 7584 if (rc != 0) 7585 return (rc); 7586 7587 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7588 if (sb == NULL) 7589 return (ENOMEM); 7590 7591 mtx_lock(&sc->reg_lock); 7592 t4_tp_get_tcp_stats(sc, &v4, &v6, 0); 7593 mtx_unlock(&sc->reg_lock); 7594 7595 sbuf_printf(sb, 7596 " IP IPv6\n"); 7597 sbuf_printf(sb, "OutRsts: %20u %20u\n", 7598 v4.tcp_out_rsts, v6.tcp_out_rsts); 7599 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 7600 v4.tcp_in_segs, v6.tcp_in_segs); 7601 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 7602 v4.tcp_out_segs, v6.tcp_out_segs); 7603 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 7604 v4.tcp_retrans_segs, 
v6.tcp_retrans_segs); 7605 7606 rc = sbuf_finish(sb); 7607 sbuf_delete(sb); 7608 7609 return (rc); 7610 } 7611 7612 static int 7613 sysctl_tids(SYSCTL_HANDLER_ARGS) 7614 { 7615 struct adapter *sc = arg1; 7616 struct sbuf *sb; 7617 int rc; 7618 struct tid_info *t = &sc->tids; 7619 7620 rc = sysctl_wire_old_buffer(req, 0); 7621 if (rc != 0) 7622 return (rc); 7623 7624 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7625 if (sb == NULL) 7626 return (ENOMEM); 7627 7628 if (t->natids) { 7629 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 7630 t->atids_in_use); 7631 } 7632 7633 if (t->ntids) { 7634 sbuf_printf(sb, "TID range: "); 7635 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 7636 uint32_t b, hb; 7637 7638 if (chip_id(sc) <= CHELSIO_T5) { 7639 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 7640 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 7641 } else { 7642 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); 7643 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); 7644 } 7645 7646 if (b) 7647 sbuf_printf(sb, "0-%u, ", b - 1); 7648 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1); 7649 } else 7650 sbuf_printf(sb, "0-%u", t->ntids - 1); 7651 sbuf_printf(sb, ", in use: %u\n", 7652 atomic_load_acq_int(&t->tids_in_use)); 7653 } 7654 7655 if (t->nstids) { 7656 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 7657 t->stid_base + t->nstids - 1, t->stids_in_use); 7658 } 7659 7660 if (t->nftids) { 7661 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 7662 t->ftid_base + t->nftids - 1); 7663 } 7664 7665 if (t->netids) { 7666 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 7667 t->etid_base + t->netids - 1); 7668 } 7669 7670 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 7671 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 7672 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 7673 7674 rc = sbuf_finish(sb); 7675 sbuf_delete(sb); 7676 7677 return (rc); 7678 } 7679 7680 static int 7681 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 7682 { 7683 struct adapter *sc = arg1; 7684 struct sbuf *sb; 7685 int rc; 7686 struct tp_err_stats stats; 7687 7688 rc = sysctl_wire_old_buffer(req, 0); 7689 if (rc != 0) 7690 return (rc); 7691 7692 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7693 if (sb == NULL) 7694 return (ENOMEM); 7695 7696 mtx_lock(&sc->reg_lock); 7697 t4_tp_get_err_stats(sc, &stats, 0); 7698 mtx_unlock(&sc->reg_lock); 7699 7700 if (sc->chip_params->nchan > 2) { 7701 sbuf_printf(sb, " channel 0 channel 1" 7702 " channel 2 channel 3\n"); 7703 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 7704 stats.mac_in_errs[0], stats.mac_in_errs[1], 7705 stats.mac_in_errs[2], stats.mac_in_errs[3]); 7706 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 7707 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 7708 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 7709 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 7710 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 7711 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 7712 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 7713 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 7714 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 7715 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 7716 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 7717 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 7718 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 7719 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 7720 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 7721 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 7722 stats.ofld_vlan_drops[0], 
stats.ofld_vlan_drops[1], 7723 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 7724 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 7725 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 7726 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 7727 } else { 7728 sbuf_printf(sb, " channel 0 channel 1\n"); 7729 sbuf_printf(sb, "macInErrs: %10u %10u\n", 7730 stats.mac_in_errs[0], stats.mac_in_errs[1]); 7731 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 7732 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 7733 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 7734 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 7735 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 7736 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 7737 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 7738 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 7739 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 7740 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 7741 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 7742 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 7743 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 7744 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 7745 } 7746 7747 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 7748 stats.ofld_no_neigh, stats.ofld_cong_defer); 7749 7750 rc = sbuf_finish(sb); 7751 sbuf_delete(sb); 7752 7753 return (rc); 7754 } 7755 7756 static int 7757 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 7758 { 7759 struct adapter *sc = arg1; 7760 struct tp_params *tpp = &sc->params.tp; 7761 u_int mask; 7762 int rc; 7763 7764 mask = tpp->la_mask >> 16; 7765 rc = sysctl_handle_int(oidp, &mask, 0, req); 7766 if (rc != 0 || req->newptr == NULL) 7767 return (rc); 7768 if (mask > 0xffff) 7769 return (EINVAL); 7770 tpp->la_mask = mask << 16; 7771 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7772 7773 return (0); 7774 } 7775 7776 struct field_desc { 7777 const char *name; 7778 u_int start; 7779 u_int width; 7780 }; 7781 7782 static void 7783 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7784 { 7785 char buf[32]; 7786 int line_size = 0; 7787 7788 while (f->name) { 7789 uint64_t mask = (1ULL << f->width) - 1; 7790 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7791 ((uintmax_t)v >> f->start) & mask); 7792 7793 if (line_size + len >= 79) { 7794 line_size = 8; 7795 sbuf_printf(sb, "\n "); 7796 } 7797 sbuf_printf(sb, "%s ", buf); 7798 line_size += len + 1; 7799 f++; 7800 } 7801 sbuf_printf(sb, "\n"); 7802 } 7803 7804 static const struct field_desc tp_la0[] = { 7805 { "RcfOpCodeOut", 60, 4 }, 7806 { "State", 56, 4 }, 7807 { "WcfState", 52, 4 }, 7808 { "RcfOpcSrcOut", 50, 2 }, 7809 { "CRxError", 49, 1 }, 7810 { "ERxError", 48, 1 }, 7811 { "SanityFailed", 47, 1 }, 7812 { "SpuriousMsg", 46, 1 }, 7813 { "FlushInputMsg", 45, 1 }, 7814 { "FlushInputCpl", 44, 1 }, 7815 { "RssUpBit", 43, 1 }, 7816 { "RssFilterHit", 42, 1 }, 7817 { "Tid", 32, 10 }, 7818 { "InitTcb", 31, 1 }, 7819 { "LineNumber", 24, 7 }, 7820 { "Emsg", 23, 1 }, 7821 { "EdataOut", 22, 1 }, 7822 { "Cmsg", 21, 1 }, 7823 { "CdataOut", 20, 1 }, 7824 { "EreadPdu", 19, 1 }, 7825 { "CreadPdu", 18, 1 }, 7826 { "TunnelPkt", 17, 1 }, 7827 { "RcfPeerFin", 16, 1 }, 7828 { "RcfReasonOut", 12, 4 }, 7829 { "TxCchannel", 10, 2 }, 7830 { "RcfTxChannel", 8, 2 }, 7831 { "RxEchannel", 6, 2 }, 7832 { "RcfRxChannel", 5, 1 }, 7833 { "RcfDataOutSrdy", 4, 1 }, 7834 { "RxDvld", 3, 1 }, 7835 { "RxOoDvld", 2, 1 }, 7836 { "RxCongestion", 1, 1 }, 7837 { "TxCongestion", 0, 1 }, 7838 { NULL } 7839 }; 7840 7841 static const struct 
field_desc tp_la1[] = { 7842 { "CplCmdIn", 56, 8 }, 7843 { "CplCmdOut", 48, 8 }, 7844 { "ESynOut", 47, 1 }, 7845 { "EAckOut", 46, 1 }, 7846 { "EFinOut", 45, 1 }, 7847 { "ERstOut", 44, 1 }, 7848 { "SynIn", 43, 1 }, 7849 { "AckIn", 42, 1 }, 7850 { "FinIn", 41, 1 }, 7851 { "RstIn", 40, 1 }, 7852 { "DataIn", 39, 1 }, 7853 { "DataInVld", 38, 1 }, 7854 { "PadIn", 37, 1 }, 7855 { "RxBufEmpty", 36, 1 }, 7856 { "RxDdp", 35, 1 }, 7857 { "RxFbCongestion", 34, 1 }, 7858 { "TxFbCongestion", 33, 1 }, 7859 { "TxPktSumSrdy", 32, 1 }, 7860 { "RcfUlpType", 28, 4 }, 7861 { "Eread", 27, 1 }, 7862 { "Ebypass", 26, 1 }, 7863 { "Esave", 25, 1 }, 7864 { "Static0", 24, 1 }, 7865 { "Cread", 23, 1 }, 7866 { "Cbypass", 22, 1 }, 7867 { "Csave", 21, 1 }, 7868 { "CPktOut", 20, 1 }, 7869 { "RxPagePoolFull", 18, 2 }, 7870 { "RxLpbkPkt", 17, 1 }, 7871 { "TxLpbkPkt", 16, 1 }, 7872 { "RxVfValid", 15, 1 }, 7873 { "SynLearned", 14, 1 }, 7874 { "SetDelEntry", 13, 1 }, 7875 { "SetInvEntry", 12, 1 }, 7876 { "CpcmdDvld", 11, 1 }, 7877 { "CpcmdSave", 10, 1 }, 7878 { "RxPstructsFull", 8, 2 }, 7879 { "EpcmdDvld", 7, 1 }, 7880 { "EpcmdFlush", 6, 1 }, 7881 { "EpcmdTrimPrefix", 5, 1 }, 7882 { "EpcmdTrimPostfix", 4, 1 }, 7883 { "ERssIp4Pkt", 3, 1 }, 7884 { "ERssIp6Pkt", 2, 1 }, 7885 { "ERssTcpUdpPkt", 1, 1 }, 7886 { "ERssFceFipPkt", 0, 1 }, 7887 { NULL } 7888 }; 7889 7890 static const struct field_desc tp_la2[] = { 7891 { "CplCmdIn", 56, 8 }, 7892 { "MpsVfVld", 55, 1 }, 7893 { "MpsPf", 52, 3 }, 7894 { "MpsVf", 44, 8 }, 7895 { "SynIn", 43, 1 }, 7896 { "AckIn", 42, 1 }, 7897 { "FinIn", 41, 1 }, 7898 { "RstIn", 40, 1 }, 7899 { "DataIn", 39, 1 }, 7900 { "DataInVld", 38, 1 }, 7901 { "PadIn", 37, 1 }, 7902 { "RxBufEmpty", 36, 1 }, 7903 { "RxDdp", 35, 1 }, 7904 { "RxFbCongestion", 34, 1 }, 7905 { "TxFbCongestion", 33, 1 }, 7906 { "TxPktSumSrdy", 32, 1 }, 7907 { "RcfUlpType", 28, 4 }, 7908 { "Eread", 27, 1 }, 7909 { "Ebypass", 26, 1 }, 7910 { "Esave", 25, 1 }, 7911 { "Static0", 24, 1 }, 7912 { "Cread", 23, 1 }, 7913 { "Cbypass", 22, 1 }, 7914 { "Csave", 21, 1 }, 7915 { "CPktOut", 20, 1 }, 7916 { "RxPagePoolFull", 18, 2 }, 7917 { "RxLpbkPkt", 17, 1 }, 7918 { "TxLpbkPkt", 16, 1 }, 7919 { "RxVfValid", 15, 1 }, 7920 { "SynLearned", 14, 1 }, 7921 { "SetDelEntry", 13, 1 }, 7922 { "SetInvEntry", 12, 1 }, 7923 { "CpcmdDvld", 11, 1 }, 7924 { "CpcmdSave", 10, 1 }, 7925 { "RxPstructsFull", 8, 2 }, 7926 { "EpcmdDvld", 7, 1 }, 7927 { "EpcmdFlush", 6, 1 }, 7928 { "EpcmdTrimPrefix", 5, 1 }, 7929 { "EpcmdTrimPostfix", 4, 1 }, 7930 { "ERssIp4Pkt", 3, 1 }, 7931 { "ERssIp6Pkt", 2, 1 }, 7932 { "ERssTcpUdpPkt", 1, 1 }, 7933 { "ERssFceFipPkt", 0, 1 }, 7934 { NULL } 7935 }; 7936 7937 static void 7938 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7939 { 7940 7941 field_desc_show(sb, *p, tp_la0); 7942 } 7943 7944 static void 7945 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7946 { 7947 7948 if (idx) 7949 sbuf_printf(sb, "\n"); 7950 field_desc_show(sb, p[0], tp_la0); 7951 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7952 field_desc_show(sb, p[1], tp_la0); 7953 } 7954 7955 static void 7956 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7957 { 7958 7959 if (idx) 7960 sbuf_printf(sb, "\n"); 7961 field_desc_show(sb, p[0], tp_la0); 7962 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7963 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7964 } 7965 7966 static int 7967 sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7968 { 7969 struct adapter *sc = arg1; 7970 struct sbuf *sb; 7971 uint64_t *buf, *p; 7972 int rc; 7973 u_int i, inc; 7974 void (*show_func)(struct sbuf *, uint64_t *, int); 7975 7976 rc = sysctl_wire_old_buffer(req, 0); 7977 if (rc != 0) 7978 return (rc); 7979 7980 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7981 if (sb == NULL) 7982 return (ENOMEM); 7983 7984 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7985 7986 t4_tp_read_la(sc, buf, NULL); 7987 p = buf; 7988 7989 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7990 case 2: 7991 inc = 2; 7992 show_func = tp_la_show2; 7993 break; 7994 case 3: 7995 inc = 2; 7996 show_func = tp_la_show3; 7997 break; 7998 default: 7999 inc = 1; 8000 show_func = tp_la_show; 8001 } 8002 8003 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 8004 (*show_func)(sb, p, i); 8005 8006 rc = sbuf_finish(sb); 8007 sbuf_delete(sb); 8008 free(buf, M_CXGBE); 8009 return (rc); 8010 } 8011 8012 static int 8013 sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 8014 { 8015 struct adapter *sc = arg1; 8016 struct sbuf *sb; 8017 int rc; 8018 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 8019 8020 rc = sysctl_wire_old_buffer(req, 0); 8021 if (rc != 0) 8022 return (rc); 8023 8024 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 8025 if (sb == NULL) 8026 return (ENOMEM); 8027 8028 t4_get_chan_txrate(sc, nrate, orate); 8029 8030 if (sc->chip_params->nchan > 2) { 8031 sbuf_printf(sb, " channel 0 channel 1" 8032 " channel 2 channel 3\n"); 8033 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 8034 nrate[0], nrate[1], nrate[2], nrate[3]); 8035 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 8036 orate[0], orate[1], orate[2], orate[3]); 8037 } else { 8038 sbuf_printf(sb, " channel 0 channel 1\n"); 8039 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 8040 nrate[0], nrate[1]); 8041 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 8042 orate[0], orate[1]); 8043 } 8044 8045 rc = sbuf_finish(sb); 8046 sbuf_delete(sb); 8047 8048 return (rc); 8049 } 8050 8051 static int 8052 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 8053 { 8054 struct adapter *sc = arg1; 8055 struct sbuf *sb; 8056 uint32_t *buf, *p; 8057 int rc, i; 8058 8059 rc = sysctl_wire_old_buffer(req, 0); 8060 if (rc != 0) 8061 return (rc); 8062 8063 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8064 if (sb == NULL) 8065 return (ENOMEM); 8066 8067 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 8068 M_ZERO | M_WAITOK); 8069 8070 t4_ulprx_read_la(sc, buf); 8071 p = buf; 8072 8073 sbuf_printf(sb, " Pcmd Type Message" 8074 " Data"); 8075 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 8076 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 8077 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 8078 } 8079 8080 rc = sbuf_finish(sb); 8081 sbuf_delete(sb); 8082 free(buf, M_CXGBE); 8083 return (rc); 8084 } 8085 8086 static int 8087 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 8088 { 8089 struct adapter *sc = arg1; 8090 struct sbuf *sb; 8091 int rc, v; 8092 8093 MPASS(chip_id(sc) >= CHELSIO_T5); 8094 8095 rc = sysctl_wire_old_buffer(req, 0); 8096 if (rc != 0) 8097 return (rc); 8098 8099 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8100 if (sb == NULL) 8101 return (ENOMEM); 8102 8103 v = t4_read_reg(sc, A_SGE_STAT_CFG); 8104 if (G_STATSOURCE_T5(v) == 7) { 8105 int mode; 8106 8107 mode = is_t5(sc) ? 
G_STATMODE(v) : G_T6_STATMODE(v); 8108 if (mode == 0) { 8109 sbuf_printf(sb, "total %d, incomplete %d", 8110 t4_read_reg(sc, A_SGE_STAT_TOTAL), 8111 t4_read_reg(sc, A_SGE_STAT_MATCH)); 8112 } else if (mode == 1) { 8113 sbuf_printf(sb, "total %d, data overflow %d", 8114 t4_read_reg(sc, A_SGE_STAT_TOTAL), 8115 t4_read_reg(sc, A_SGE_STAT_MATCH)); 8116 } else { 8117 sbuf_printf(sb, "unknown mode %d", mode); 8118 } 8119 } 8120 rc = sbuf_finish(sb); 8121 sbuf_delete(sb); 8122 8123 return (rc); 8124 } 8125 8126 static int 8127 sysctl_tc_params(SYSCTL_HANDLER_ARGS) 8128 { 8129 struct adapter *sc = arg1; 8130 struct tx_cl_rl_params tc; 8131 struct sbuf *sb; 8132 int i, rc, port_id, mbps, gbps; 8133 8134 rc = sysctl_wire_old_buffer(req, 0); 8135 if (rc != 0) 8136 return (rc); 8137 8138 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8139 if (sb == NULL) 8140 return (ENOMEM); 8141 8142 port_id = arg2 >> 16; 8143 MPASS(port_id < sc->params.nports); 8144 MPASS(sc->port[port_id] != NULL); 8145 i = arg2 & 0xffff; 8146 MPASS(i < sc->chip_params->nsched_cls); 8147 8148 mtx_lock(&sc->tc_lock); 8149 tc = sc->port[port_id]->sched_params->cl_rl[i]; 8150 mtx_unlock(&sc->tc_lock); 8151 8152 if (tc.flags & TX_CLRL_ERROR) { 8153 sbuf_printf(sb, "error"); 8154 goto done; 8155 } 8156 8157 if (tc.ratemode == SCHED_CLASS_RATEMODE_REL) { 8158 /* XXX: top speed or actual link speed? */ 8159 gbps = port_top_speed(sc->port[port_id]); 8160 sbuf_printf(sb, " %u%% of %uGbps", tc.maxrate, gbps); 8161 } else if (tc.ratemode == SCHED_CLASS_RATEMODE_ABS) { 8162 switch (tc.rateunit) { 8163 case SCHED_CLASS_RATEUNIT_BITS: 8164 mbps = tc.maxrate / 1000; 8165 gbps = tc.maxrate / 1000000; 8166 if (tc.maxrate == gbps * 1000000) 8167 sbuf_printf(sb, " %uGbps", gbps); 8168 else if (tc.maxrate == mbps * 1000) 8169 sbuf_printf(sb, " %uMbps", mbps); 8170 else 8171 sbuf_printf(sb, " %uKbps", tc.maxrate); 8172 break; 8173 case SCHED_CLASS_RATEUNIT_PKTS: 8174 sbuf_printf(sb, " %upps", tc.maxrate); 8175 break; 8176 default: 8177 rc = ENXIO; 8178 goto done; 8179 } 8180 } 8181 8182 switch (tc.mode) { 8183 case SCHED_CLASS_MODE_CLASS: 8184 sbuf_printf(sb, " aggregate"); 8185 break; 8186 case SCHED_CLASS_MODE_FLOW: 8187 sbuf_printf(sb, " per-flow"); 8188 break; 8189 default: 8190 rc = ENXIO; 8191 goto done; 8192 } 8193 8194 done: 8195 if (rc == 0) 8196 rc = sbuf_finish(sb); 8197 sbuf_delete(sb); 8198 8199 return (rc); 8200 } 8201 #endif 8202 8203 #ifdef TCP_OFFLOAD 8204 static void 8205 unit_conv(char *buf, size_t len, u_int val, u_int factor) 8206 { 8207 u_int rem = val % factor; 8208 8209 if (rem == 0) 8210 snprintf(buf, len, "%u", val / factor); 8211 else { 8212 while (rem % 10 == 0) 8213 rem /= 10; 8214 snprintf(buf, len, "%u.%u", val / factor, rem); 8215 } 8216 } 8217 8218 static int 8219 sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 8220 { 8221 struct adapter *sc = arg1; 8222 char buf[16]; 8223 u_int res, re; 8224 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8225 8226 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 8227 switch (arg2) { 8228 case 0: 8229 /* timer_tick */ 8230 re = G_TIMERRESOLUTION(res); 8231 break; 8232 case 1: 8233 /* TCP timestamp tick */ 8234 re = G_TIMESTAMPRESOLUTION(res); 8235 break; 8236 case 2: 8237 /* DACK tick */ 8238 re = G_DELAYEDACKRESOLUTION(res); 8239 break; 8240 default: 8241 return (EDOOFUS); 8242 } 8243 8244 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); 8245 8246 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 8247 } 8248 8249 static int 8250 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 8251 { 
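	/*
	 * Report the delayed-ACK timer in microseconds: the core clock period
	 * in picoseconds (cclk_ps), scaled by 2^(delayed-ACK resolution),
	 * converted to microseconds and multiplied by the A_TP_DACK_TIMER
	 * register value.
	 */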
8252 struct adapter *sc = arg1; 8253 u_int res, dack_re, v; 8254 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8255 8256 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 8257 dack_re = G_DELAYEDACKRESOLUTION(res); 8258 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 8259 8260 return (sysctl_handle_int(oidp, &v, 0, req)); 8261 } 8262 8263 static int 8264 sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 8265 { 8266 struct adapter *sc = arg1; 8267 int reg = arg2; 8268 u_int tre; 8269 u_long tp_tick_us, v; 8270 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8271 8272 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 8273 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 8274 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || 8275 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); 8276 8277 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 8278 tp_tick_us = (cclk_ps << tre) / 1000000; 8279 8280 if (reg == A_TP_INIT_SRTT) 8281 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 8282 else 8283 v = tp_tick_us * t4_read_reg(sc, reg); 8284 8285 return (sysctl_handle_long(oidp, &v, 0, req)); 8286 } 8287 8288 /* 8289 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is 8290 * passed to this function. 8291 */ 8292 static int 8293 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS) 8294 { 8295 struct adapter *sc = arg1; 8296 int idx = arg2; 8297 u_int v; 8298 8299 MPASS(idx >= 0 && idx <= 24); 8300 8301 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf; 8302 8303 return (sysctl_handle_int(oidp, &v, 0, req)); 8304 } 8305 8306 static int 8307 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS) 8308 { 8309 struct adapter *sc = arg1; 8310 int idx = arg2; 8311 u_int shift, v, r; 8312 8313 MPASS(idx >= 0 && idx < 16); 8314 8315 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3); 8316 shift = (idx & 3) << 3; 8317 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0; 8318 8319 return (sysctl_handle_int(oidp, &v, 0, req)); 8320 } 8321 #endif 8322 8323 static uint32_t 8324 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 8325 { 8326 uint32_t mode; 8327 8328 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 8329 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 8330 8331 if (fconf & F_FRAGMENTATION) 8332 mode |= T4_FILTER_IP_FRAGMENT; 8333 8334 if (fconf & F_MPSHITTYPE) 8335 mode |= T4_FILTER_MPS_HIT_TYPE; 8336 8337 if (fconf & F_MACMATCH) 8338 mode |= T4_FILTER_MAC_IDX; 8339 8340 if (fconf & F_ETHERTYPE) 8341 mode |= T4_FILTER_ETH_TYPE; 8342 8343 if (fconf & F_PROTOCOL) 8344 mode |= T4_FILTER_IP_PROTO; 8345 8346 if (fconf & F_TOS) 8347 mode |= T4_FILTER_IP_TOS; 8348 8349 if (fconf & F_VLAN) 8350 mode |= T4_FILTER_VLAN; 8351 8352 if (fconf & F_VNIC_ID) { 8353 mode |= T4_FILTER_VNIC; 8354 if (iconf & F_VNIC) 8355 mode |= T4_FILTER_IC_VNIC; 8356 } 8357 8358 if (fconf & F_PORT) 8359 mode |= T4_FILTER_PORT; 8360 8361 if (fconf & F_FCOE) 8362 mode |= T4_FILTER_FCoE; 8363 8364 return (mode); 8365 } 8366 8367 static uint32_t 8368 mode_to_fconf(uint32_t mode) 8369 { 8370 uint32_t fconf = 0; 8371 8372 if (mode & T4_FILTER_IP_FRAGMENT) 8373 fconf |= F_FRAGMENTATION; 8374 8375 if (mode & T4_FILTER_MPS_HIT_TYPE) 8376 fconf |= F_MPSHITTYPE; 8377 8378 if (mode & T4_FILTER_MAC_IDX) 8379 fconf |= F_MACMATCH; 8380 8381 if (mode & T4_FILTER_ETH_TYPE) 8382 fconf |= F_ETHERTYPE; 8383 8384 if (mode & T4_FILTER_IP_PROTO) 8385 fconf |= F_PROTOCOL; 8386 8387 if (mode & T4_FILTER_IP_TOS) 8388 fconf |= F_TOS; 8389 8390 if (mode & T4_FILTER_VLAN) 8391 fconf |= F_VLAN; 8392 8393 if (mode 
& T4_FILTER_VNIC) 8394 fconf |= F_VNIC_ID; 8395 8396 if (mode & T4_FILTER_PORT) 8397 fconf |= F_PORT; 8398 8399 if (mode & T4_FILTER_FCoE) 8400 fconf |= F_FCOE; 8401 8402 return (fconf); 8403 } 8404 8405 static uint32_t 8406 mode_to_iconf(uint32_t mode) 8407 { 8408 8409 if (mode & T4_FILTER_IC_VNIC) 8410 return (F_VNIC); 8411 return (0); 8412 } 8413 8414 static int check_fspec_against_fconf_iconf(struct adapter *sc, 8415 struct t4_filter_specification *fs) 8416 { 8417 struct tp_params *tpp = &sc->params.tp; 8418 uint32_t fconf = 0; 8419 8420 if (fs->val.frag || fs->mask.frag) 8421 fconf |= F_FRAGMENTATION; 8422 8423 if (fs->val.matchtype || fs->mask.matchtype) 8424 fconf |= F_MPSHITTYPE; 8425 8426 if (fs->val.macidx || fs->mask.macidx) 8427 fconf |= F_MACMATCH; 8428 8429 if (fs->val.ethtype || fs->mask.ethtype) 8430 fconf |= F_ETHERTYPE; 8431 8432 if (fs->val.proto || fs->mask.proto) 8433 fconf |= F_PROTOCOL; 8434 8435 if (fs->val.tos || fs->mask.tos) 8436 fconf |= F_TOS; 8437 8438 if (fs->val.vlan_vld || fs->mask.vlan_vld) 8439 fconf |= F_VLAN; 8440 8441 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 8442 fconf |= F_VNIC_ID; 8443 if (tpp->ingress_config & F_VNIC) 8444 return (EINVAL); 8445 } 8446 8447 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 8448 fconf |= F_VNIC_ID; 8449 if ((tpp->ingress_config & F_VNIC) == 0) 8450 return (EINVAL); 8451 } 8452 8453 if (fs->val.iport || fs->mask.iport) 8454 fconf |= F_PORT; 8455 8456 if (fs->val.fcoe || fs->mask.fcoe) 8457 fconf |= F_FCOE; 8458 8459 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 8460 return (E2BIG); 8461 8462 return (0); 8463 } 8464 8465 static int 8466 get_filter_mode(struct adapter *sc, uint32_t *mode) 8467 { 8468 struct tp_params *tpp = &sc->params.tp; 8469 8470 /* 8471 * We trust the cached values of the relevant TP registers. This means 8472 * things work reliably only if writes to those registers are always via 8473 * t4_set_filter_mode. 8474 */ 8475 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 8476 8477 return (0); 8478 } 8479 8480 static int 8481 set_filter_mode(struct adapter *sc, uint32_t mode) 8482 { 8483 struct tp_params *tpp = &sc->params.tp; 8484 uint32_t fconf, iconf; 8485 int rc; 8486 8487 iconf = mode_to_iconf(mode); 8488 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 8489 /* 8490 * For now we just complain if A_TP_INGRESS_CONFIG is not 8491 * already set to the correct value for the requested filter 8492 * mode. It's not clear if it's safe to write to this register 8493 * on the fly. (And we trust the cached value of the register). 
8494 */ 8495 return (EBUSY); 8496 } 8497 8498 fconf = mode_to_fconf(mode); 8499 8500 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8501 "t4setfm"); 8502 if (rc) 8503 return (rc); 8504 8505 if (sc->tids.ftids_in_use > 0) { 8506 rc = EBUSY; 8507 goto done; 8508 } 8509 8510 #ifdef TCP_OFFLOAD 8511 if (uld_active(sc, ULD_TOM)) { 8512 rc = EBUSY; 8513 goto done; 8514 } 8515 #endif 8516 8517 rc = -t4_set_filter_mode(sc, fconf, true); 8518 done: 8519 end_synchronized_op(sc, LOCK_HELD); 8520 return (rc); 8521 } 8522 8523 static inline uint64_t 8524 get_filter_hits(struct adapter *sc, uint32_t fid) 8525 { 8526 uint32_t tcb_addr; 8527 8528 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 8529 (fid + sc->tids.ftid_base) * TCB_SIZE; 8530 8531 if (is_t4(sc)) { 8532 uint64_t hits; 8533 8534 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 8535 return (be64toh(hits)); 8536 } else { 8537 uint32_t hits; 8538 8539 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 8540 return (be32toh(hits)); 8541 } 8542 } 8543 8544 static int 8545 get_filter(struct adapter *sc, struct t4_filter *t) 8546 { 8547 int i, rc, nfilters = sc->tids.nftids; 8548 struct filter_entry *f; 8549 8550 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8551 "t4getf"); 8552 if (rc) 8553 return (rc); 8554 8555 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 8556 t->idx >= nfilters) { 8557 t->idx = 0xffffffff; 8558 goto done; 8559 } 8560 8561 f = &sc->tids.ftid_tab[t->idx]; 8562 for (i = t->idx; i < nfilters; i++, f++) { 8563 if (f->valid) { 8564 t->idx = i; 8565 t->l2tidx = f->l2t ? f->l2t->idx : 0; 8566 t->smtidx = f->smtidx; 8567 if (f->fs.hitcnts) 8568 t->hits = get_filter_hits(sc, t->idx); 8569 else 8570 t->hits = UINT64_MAX; 8571 t->fs = f->fs; 8572 8573 goto done; 8574 } 8575 } 8576 8577 t->idx = 0xffffffff; 8578 done: 8579 end_synchronized_op(sc, LOCK_HELD); 8580 return (0); 8581 } 8582 8583 static int 8584 set_filter(struct adapter *sc, struct t4_filter *t) 8585 { 8586 unsigned int nfilters, nports; 8587 struct filter_entry *f; 8588 int i, rc; 8589 8590 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 8591 if (rc) 8592 return (rc); 8593 8594 nfilters = sc->tids.nftids; 8595 nports = sc->params.nports; 8596 8597 if (nfilters == 0) { 8598 rc = ENOTSUP; 8599 goto done; 8600 } 8601 8602 if (t->idx >= nfilters) { 8603 rc = EINVAL; 8604 goto done; 8605 } 8606 8607 /* Validate against the global filter mode and ingress config */ 8608 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 8609 if (rc != 0) 8610 goto done; 8611 8612 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 8613 rc = EINVAL; 8614 goto done; 8615 } 8616 8617 if (t->fs.val.iport >= nports) { 8618 rc = EINVAL; 8619 goto done; 8620 } 8621 8622 /* Can't specify an iq if not steering to it */ 8623 if (!t->fs.dirsteer && t->fs.iq) { 8624 rc = EINVAL; 8625 goto done; 8626 } 8627 8628 /* IPv6 filter idx must be 4 aligned */ 8629 if (t->fs.type == 1 && 8630 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 8631 rc = EINVAL; 8632 goto done; 8633 } 8634 8635 if (!(sc->flags & FULL_INIT_DONE) && 8636 ((rc = adapter_full_init(sc)) != 0)) 8637 goto done; 8638 8639 if (sc->tids.ftid_tab == NULL) { 8640 KASSERT(sc->tids.ftids_in_use == 0, 8641 ("%s: no memory allocated but filters_in_use > 0", 8642 __func__)); 8643 8644 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 8645 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 8646 if (sc->tids.ftid_tab == NULL) { 8647 rc = ENOMEM; 8648 goto done; 8649 } 
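	/*
	 * First filter being set up: the table was just allocated, so
	 * initialize the lock that protects it.
	 */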
8650 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 8651 } 8652 8653 for (i = 0; i < 4; i++) { 8654 f = &sc->tids.ftid_tab[t->idx + i]; 8655 8656 if (f->pending || f->valid) { 8657 rc = EBUSY; 8658 goto done; 8659 } 8660 if (f->locked) { 8661 rc = EPERM; 8662 goto done; 8663 } 8664 8665 if (t->fs.type == 0) 8666 break; 8667 } 8668 8669 f = &sc->tids.ftid_tab[t->idx]; 8670 f->fs = t->fs; 8671 8672 rc = set_filter_wr(sc, t->idx); 8673 done: 8674 end_synchronized_op(sc, 0); 8675 8676 if (rc == 0) { 8677 mtx_lock(&sc->tids.ftid_lock); 8678 for (;;) { 8679 if (f->pending == 0) { 8680 rc = f->valid ? 0 : EIO; 8681 break; 8682 } 8683 8684 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8685 PCATCH, "t4setfw", 0)) { 8686 rc = EINPROGRESS; 8687 break; 8688 } 8689 } 8690 mtx_unlock(&sc->tids.ftid_lock); 8691 } 8692 return (rc); 8693 } 8694 8695 static int 8696 del_filter(struct adapter *sc, struct t4_filter *t) 8697 { 8698 unsigned int nfilters; 8699 struct filter_entry *f; 8700 int rc; 8701 8702 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 8703 if (rc) 8704 return (rc); 8705 8706 nfilters = sc->tids.nftids; 8707 8708 if (nfilters == 0) { 8709 rc = ENOTSUP; 8710 goto done; 8711 } 8712 8713 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 8714 t->idx >= nfilters) { 8715 rc = EINVAL; 8716 goto done; 8717 } 8718 8719 if (!(sc->flags & FULL_INIT_DONE)) { 8720 rc = EAGAIN; 8721 goto done; 8722 } 8723 8724 f = &sc->tids.ftid_tab[t->idx]; 8725 8726 if (f->pending) { 8727 rc = EBUSY; 8728 goto done; 8729 } 8730 if (f->locked) { 8731 rc = EPERM; 8732 goto done; 8733 } 8734 8735 if (f->valid) { 8736 t->fs = f->fs; /* extra info for the caller */ 8737 rc = del_filter_wr(sc, t->idx); 8738 } 8739 8740 done: 8741 end_synchronized_op(sc, 0); 8742 8743 if (rc == 0) { 8744 mtx_lock(&sc->tids.ftid_lock); 8745 for (;;) { 8746 if (f->pending == 0) { 8747 rc = f->valid ? EIO : 0; 8748 break; 8749 } 8750 8751 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8752 PCATCH, "t4delfw", 0)) { 8753 rc = EINPROGRESS; 8754 break; 8755 } 8756 } 8757 mtx_unlock(&sc->tids.ftid_lock); 8758 } 8759 8760 return (rc); 8761 } 8762 8763 static void 8764 clear_filter(struct filter_entry *f) 8765 { 8766 if (f->l2t) 8767 t4_l2t_release(f->l2t); 8768 8769 bzero(f, sizeof (*f)); 8770 } 8771 8772 static int 8773 set_filter_wr(struct adapter *sc, int fidx) 8774 { 8775 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8776 struct fw_filter_wr *fwr; 8777 unsigned int ftid, vnic_vld, vnic_vld_mask; 8778 struct wrq_cookie cookie; 8779 8780 ASSERT_SYNCHRONIZED_OP(sc); 8781 8782 if (f->fs.newdmac || f->fs.newvlan) { 8783 /* This filter needs an L2T entry; allocate one. 
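The switching entry is programmed below with the filter's VLAN, egress port, and new destination MAC.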
*/ 8784 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8785 if (f->l2t == NULL) 8786 return (EAGAIN); 8787 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8788 f->fs.dmac)) { 8789 t4_l2t_release(f->l2t); 8790 f->l2t = NULL; 8791 return (ENOMEM); 8792 } 8793 } 8794 8795 /* Already validated against fconf, iconf */ 8796 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 8797 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 8798 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 8799 vnic_vld = 1; 8800 else 8801 vnic_vld = 0; 8802 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 8803 vnic_vld_mask = 1; 8804 else 8805 vnic_vld_mask = 0; 8806 8807 ftid = sc->tids.ftid_base + fidx; 8808 8809 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8810 if (fwr == NULL) 8811 return (ENOMEM); 8812 bzero(fwr, sizeof(*fwr)); 8813 8814 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 8815 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 8816 fwr->tid_to_iq = 8817 htobe32(V_FW_FILTER_WR_TID(ftid) | 8818 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 8819 V_FW_FILTER_WR_NOREPLY(0) | 8820 V_FW_FILTER_WR_IQ(f->fs.iq)); 8821 fwr->del_filter_to_l2tix = 8822 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 8823 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 8824 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 8825 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 8826 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 8827 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 8828 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 8829 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 8830 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 8831 f->fs.newvlan == VLAN_REWRITE) | 8832 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 8833 f->fs.newvlan == VLAN_REWRITE) | 8834 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 8835 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 8836 V_FW_FILTER_WR_PRIO(f->fs.prio) | 8837 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 8838 fwr->ethtype = htobe16(f->fs.val.ethtype); 8839 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 8840 fwr->frag_to_ovlan_vldm = 8841 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 8842 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 8843 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 8844 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 8845 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 8846 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 8847 fwr->smac_sel = 0; 8848 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 8849 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 8850 fwr->maci_to_matchtypem = 8851 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 8852 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 8853 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 8854 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 8855 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 8856 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 8857 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 8858 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 8859 fwr->ptcl = f->fs.val.proto; 8860 fwr->ptclm = f->fs.mask.proto; 8861 fwr->ttyp = f->fs.val.tos; 8862 fwr->ttypm = f->fs.mask.tos; 8863 fwr->ivlan = htobe16(f->fs.val.vlan); 8864 fwr->ivlanm = htobe16(f->fs.mask.vlan); 8865 fwr->ovlan = htobe16(f->fs.val.vnic); 8866 fwr->ovlanm = htobe16(f->fs.mask.vnic); 8867 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 8868 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 8869 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 8870 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 8871 fwr->lp = htobe16(f->fs.val.dport); 8872 fwr->lpm = htobe16(f->fs.mask.dport); 8873 fwr->fp = htobe16(f->fs.val.sport); 8874 fwr->fpm = htobe16(f->fs.mask.sport); 8875 if (f->fs.newsmac) 8876 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 8877 8878 f->pending = 1; 8879 sc->tids.ftids_in_use++; 8880 8881 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8882 return (0); 8883 } 8884 8885 static int 8886 del_filter_wr(struct adapter *sc, int fidx) 8887 { 8888 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8889 struct fw_filter_wr *fwr; 8890 unsigned int ftid; 8891 struct wrq_cookie cookie; 8892 8893 ftid = sc->tids.ftid_base + fidx; 8894 8895 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8896 if (fwr == NULL) 8897 return (ENOMEM); 8898 bzero(fwr, sizeof (*fwr)); 8899 8900 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 8901 8902 f->pending = 1; 8903 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8904 return (0); 8905 } 8906 8907 int 8908 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8909 { 8910 struct adapter *sc = iq->adapter; 8911 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 8912 unsigned int idx = GET_TID(rpl); 8913 unsigned int rc; 8914 struct filter_entry *f; 8915 8916 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 8917 rss->opcode)); 8918 MPASS(iq == &sc->sge.fwq); 8919 MPASS(is_ftid(sc, idx)); 8920 8921 idx -= sc->tids.ftid_base; 8922 f = &sc->tids.ftid_tab[idx]; 8923 rc = G_COOKIE(rpl->cookie); 8924 8925 mtx_lock(&sc->tids.ftid_lock); 8926 if (rc == FW_FILTER_WR_FLT_ADDED) { 8927 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 8928 __func__, idx)); 8929 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 8930 f->pending = 0; /* asynchronous setup completed */ 8931 f->valid = 1; 8932 } else { 8933 if (rc != FW_FILTER_WR_FLT_DELETED) { 8934 /* Add or delete failed, display an error */ 8935 log(LOG_ERR, 8936 "filter %u setup failed with error %u\n", 8937 idx, rc); 8938 } 8939 8940 
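		/*
		 * Either the delete completed or the add failed; release the
		 * L2T entry (if any) and return the slot to the pool.
		 */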
clear_filter(f); 8941 sc->tids.ftids_in_use--; 8942 } 8943 wakeup(&sc->tids.ftid_tab); 8944 mtx_unlock(&sc->tids.ftid_lock); 8945 8946 return (0); 8947 } 8948 8949 static int 8950 set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8951 { 8952 8953 MPASS(iq->set_tcb_rpl != NULL); 8954 return (iq->set_tcb_rpl(iq, rss, m)); 8955 } 8956 8957 static int 8958 l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8959 { 8960 8961 MPASS(iq->l2t_write_rpl != NULL); 8962 return (iq->l2t_write_rpl(iq, rss, m)); 8963 } 8964 8965 static int 8966 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 8967 { 8968 int rc; 8969 8970 if (cntxt->cid > M_CTXTQID) 8971 return (EINVAL); 8972 8973 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 8974 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 8975 return (EINVAL); 8976 8977 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 8978 if (rc) 8979 return (rc); 8980 8981 if (sc->flags & FW_OK) { 8982 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 8983 &cntxt->data[0]); 8984 if (rc == 0) 8985 goto done; 8986 } 8987 8988 /* 8989 * Read via firmware failed or wasn't even attempted. Read directly via 8990 * the backdoor. 8991 */ 8992 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 8993 done: 8994 end_synchronized_op(sc, 0); 8995 return (rc); 8996 } 8997 8998 static int 8999 load_fw(struct adapter *sc, struct t4_data *fw) 9000 { 9001 int rc; 9002 uint8_t *fw_data; 9003 9004 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 9005 if (rc) 9006 return (rc); 9007 9008 if (sc->flags & FULL_INIT_DONE) { 9009 rc = EBUSY; 9010 goto done; 9011 } 9012 9013 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 9014 if (fw_data == NULL) { 9015 rc = ENOMEM; 9016 goto done; 9017 } 9018 9019 rc = copyin(fw->data, fw_data, fw->len); 9020 if (rc == 0) 9021 rc = -t4_load_fw(sc, fw_data, fw->len); 9022 9023 free(fw_data, M_CXGBE); 9024 done: 9025 end_synchronized_op(sc, 0); 9026 return (rc); 9027 } 9028 9029 static int 9030 load_cfg(struct adapter *sc, struct t4_data *cfg) 9031 { 9032 int rc; 9033 uint8_t *cfg_data = NULL; 9034 9035 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 9036 if (rc) 9037 return (rc); 9038 9039 if (cfg->len == 0) { 9040 /* clear */ 9041 rc = -t4_load_cfg(sc, NULL, 0); 9042 goto done; 9043 } 9044 9045 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); 9046 if (cfg_data == NULL) { 9047 rc = ENOMEM; 9048 goto done; 9049 } 9050 9051 rc = copyin(cfg->data, cfg_data, cfg->len); 9052 if (rc == 0) 9053 rc = -t4_load_cfg(sc, cfg_data, cfg->len); 9054 9055 free(cfg_data, M_CXGBE); 9056 done: 9057 end_synchronized_op(sc, 0); 9058 return (rc); 9059 } 9060 9061 static int 9062 load_boot(struct adapter *sc, struct t4_bootrom *br) 9063 { 9064 int rc; 9065 uint8_t *br_data = NULL; 9066 u_int offset; 9067 9068 if (br->len > 1024 * 1024) 9069 return (EFBIG); 9070 9071 if (br->pf_offset == 0) { 9072 /* pfidx */ 9073 if (br->pfidx_addr > 7) 9074 return (EINVAL); 9075 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, 9076 A_PCIE_PF_EXPROM_OFST))); 9077 } else if (br->pf_offset == 1) { 9078 /* offset */ 9079 offset = G_OFFSET(br->pfidx_addr); 9080 } else { 9081 return (EINVAL); 9082 } 9083 9084 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); 9085 if (rc) 9086 return (rc); 9087 9088 if (br->len == 0) { 9089 /* clear */ 9090 rc = -t4_load_boot(sc, NULL, offset, 0); 9091 goto done; 9092 } 
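	/* Copy the boot image in from userspace and write it to the card. */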
9093 9094 br_data = malloc(br->len, M_CXGBE, M_WAITOK); 9095 if (br_data == NULL) { 9096 rc = ENOMEM; 9097 goto done; 9098 } 9099 9100 rc = copyin(br->data, br_data, br->len); 9101 if (rc == 0) 9102 rc = -t4_load_boot(sc, br_data, offset, br->len); 9103 9104 free(br_data, M_CXGBE); 9105 done: 9106 end_synchronized_op(sc, 0); 9107 return (rc); 9108 } 9109 9110 static int 9111 load_bootcfg(struct adapter *sc, struct t4_data *bc) 9112 { 9113 int rc; 9114 uint8_t *bc_data = NULL; 9115 9116 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 9117 if (rc) 9118 return (rc); 9119 9120 if (bc->len == 0) { 9121 /* clear */ 9122 rc = -t4_load_bootcfg(sc, NULL, 0); 9123 goto done; 9124 } 9125 9126 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); 9127 if (bc_data == NULL) { 9128 rc = ENOMEM; 9129 goto done; 9130 } 9131 9132 rc = copyin(bc->data, bc_data, bc->len); 9133 if (rc == 0) 9134 rc = -t4_load_bootcfg(sc, bc_data, bc->len); 9135 9136 free(bc_data, M_CXGBE); 9137 done: 9138 end_synchronized_op(sc, 0); 9139 return (rc); 9140 } 9141 9142 static int 9143 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump) 9144 { 9145 int rc; 9146 struct cudbg_init *cudbg; 9147 void *handle, *buf; 9148 9149 /* buf is large, don't block if no memory is available */ 9150 buf = malloc(dump->len, M_CXGBE, M_NOWAIT); 9151 if (buf == NULL) 9152 return (ENOMEM); 9153 9154 handle = cudbg_alloc_handle(); 9155 if (handle == NULL) { 9156 rc = ENOMEM; 9157 goto done; 9158 } 9159 9160 cudbg = cudbg_get_init(handle); 9161 cudbg->adap = sc; 9162 cudbg->print = (cudbg_print_cb)printf; 9163 9164 #ifndef notyet 9165 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", 9166 __func__, dump->wr_flash, dump->len, dump->data); 9167 #endif 9168 9169 if (dump->wr_flash) 9170 cudbg->use_flash = 1; 9171 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); 9172 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); 9173 9174 rc = cudbg_collect(handle, buf, &dump->len); 9175 if (rc != 0) 9176 goto done; 9177 9178 rc = copyout(buf, dump->data, dump->len); 9179 done: 9180 cudbg_free_handle(handle); 9181 free(buf, M_CXGBE); 9182 return (rc); 9183 } 9184 9185 #define MAX_READ_BUF_SIZE (128 * 1024) 9186 static int 9187 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 9188 { 9189 uint32_t addr, remaining, n; 9190 uint32_t *buf; 9191 int rc; 9192 uint8_t *dst; 9193 9194 rc = validate_mem_range(sc, mr->addr, mr->len); 9195 if (rc != 0) 9196 return (rc); 9197 9198 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 9199 addr = mr->addr; 9200 remaining = mr->len; 9201 dst = (void *)mr->data; 9202 9203 while (remaining) { 9204 n = min(remaining, MAX_READ_BUF_SIZE); 9205 read_via_memwin(sc, 2, addr, buf, n); 9206 9207 rc = copyout(buf, dst, n); 9208 if (rc != 0) 9209 break; 9210 9211 dst += n; 9212 remaining -= n; 9213 addr += n; 9214 } 9215 9216 free(buf, M_CXGBE); 9217 return (rc); 9218 } 9219 #undef MAX_READ_BUF_SIZE 9220 9221 static int 9222 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 9223 { 9224 int rc; 9225 9226 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 9227 return (EINVAL); 9228 9229 if (i2cd->len > sizeof(i2cd->data)) 9230 return (EFBIG); 9231 9232 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 9233 if (rc) 9234 return (rc); 9235 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 9236 i2cd->offset, i2cd->len, &i2cd->data[0]); 9237 end_synchronized_op(sc, 0); 9238 9239 return (rc); 9240 } 9241 9242 int 9243 
t4_os_find_pci_capability(struct adapter *sc, int cap) 9244 { 9245 int i; 9246 9247 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); 9248 } 9249 9250 int 9251 t4_os_pci_save_state(struct adapter *sc) 9252 { 9253 device_t dev; 9254 struct pci_devinfo *dinfo; 9255 9256 dev = sc->dev; 9257 dinfo = device_get_ivars(dev); 9258 9259 pci_cfg_save(dev, dinfo, 0); 9260 return (0); 9261 } 9262 9263 int 9264 t4_os_pci_restore_state(struct adapter *sc) 9265 { 9266 device_t dev; 9267 struct pci_devinfo *dinfo; 9268 9269 dev = sc->dev; 9270 dinfo = device_get_ivars(dev); 9271 9272 pci_cfg_restore(dev, dinfo); 9273 return (0); 9274 } 9275 9276 void 9277 t4_os_portmod_changed(struct port_info *pi, int old_ptype, int old_mtype, 9278 struct link_config *old_lc) 9279 { 9280 struct vi_info *vi; 9281 struct ifnet *ifp; 9282 int v; 9283 static const char *mod_str[] = { 9284 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 9285 }; 9286 9287 PORT_LOCK(pi); 9288 for_each_vi(pi, v, vi) { 9289 build_medialist(pi, &vi->media); 9290 } 9291 PORT_UNLOCK(pi); 9292 vi = &pi->vi[0]; 9293 if (begin_synchronized_op(pi->adapter, vi, HOLD_LOCK, "t4mod") == 0) { 9294 init_l1cfg(pi); 9295 end_synchronized_op(pi->adapter, LOCK_HELD); 9296 } 9297 9298 ifp = vi->ifp; 9299 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 9300 if_printf(ifp, "transceiver unplugged.\n"); 9301 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 9302 if_printf(ifp, "unknown transceiver inserted.\n"); 9303 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 9304 if_printf(ifp, "unsupported transceiver inserted.\n"); 9305 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 9306 if_printf(ifp, "%dGbps %s transceiver inserted.\n", 9307 port_top_speed(pi), mod_str[pi->mod_type]); 9308 } else { 9309 if_printf(ifp, "transceiver (type %d) inserted.\n", 9310 pi->mod_type); 9311 } 9312 } 9313 9314 void 9315 t4_os_link_changed(struct port_info *pi, struct link_config *old_lc) 9316 { 9317 struct vi_info *vi; 9318 struct ifnet *ifp; 9319 struct link_config *lc; 9320 int v; 9321 9322 for_each_vi(pi, v, vi) { 9323 ifp = vi->ifp; 9324 if (ifp == NULL) 9325 continue; 9326 9327 lc = &pi->link_cfg; 9328 if (lc->link_ok) { 9329 ifp->if_baudrate = IF_Mbps(lc->speed); 9330 if_link_state_change(ifp, LINK_STATE_UP); 9331 } else { 9332 if_link_state_change(ifp, LINK_STATE_DOWN); 9333 } 9334 } 9335 } 9336 9337 void 9338 t4_iterate(void (*func)(struct adapter *, void *), void *arg) 9339 { 9340 struct adapter *sc; 9341 9342 sx_slock(&t4_list_lock); 9343 SLIST_FOREACH(sc, &t4_list, link) { 9344 /* 9345 * func should not make any assumptions about what state sc is 9346 * in - the only guarantee is that sc->sc_lock is a valid lock. 
9347 */ 9348 func(sc, arg); 9349 } 9350 sx_sunlock(&t4_list_lock); 9351 } 9352 9353 static int 9354 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 9355 struct thread *td) 9356 { 9357 int rc; 9358 struct adapter *sc = dev->si_drv1; 9359 9360 rc = priv_check(td, PRIV_DRIVER); 9361 if (rc != 0) 9362 return (rc); 9363 9364 switch (cmd) { 9365 case CHELSIO_T4_GETREG: { 9366 struct t4_reg *edata = (struct t4_reg *)data; 9367 9368 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9369 return (EFAULT); 9370 9371 if (edata->size == 4) 9372 edata->val = t4_read_reg(sc, edata->addr); 9373 else if (edata->size == 8) 9374 edata->val = t4_read_reg64(sc, edata->addr); 9375 else 9376 return (EINVAL); 9377 9378 break; 9379 } 9380 case CHELSIO_T4_SETREG: { 9381 struct t4_reg *edata = (struct t4_reg *)data; 9382 9383 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9384 return (EFAULT); 9385 9386 if (edata->size == 4) { 9387 if (edata->val & 0xffffffff00000000) 9388 return (EINVAL); 9389 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 9390 } else if (edata->size == 8) 9391 t4_write_reg64(sc, edata->addr, edata->val); 9392 else 9393 return (EINVAL); 9394 break; 9395 } 9396 case CHELSIO_T4_REGDUMP: { 9397 struct t4_regdump *regs = (struct t4_regdump *)data; 9398 int reglen = t4_get_regs_len(sc); 9399 uint8_t *buf; 9400 9401 if (regs->len < reglen) { 9402 regs->len = reglen; /* hint to the caller */ 9403 return (ENOBUFS); 9404 } 9405 9406 regs->len = reglen; 9407 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 9408 get_regs(sc, regs, buf); 9409 rc = copyout(buf, regs->data, reglen); 9410 free(buf, M_CXGBE); 9411 break; 9412 } 9413 case CHELSIO_T4_GET_FILTER_MODE: 9414 rc = get_filter_mode(sc, (uint32_t *)data); 9415 break; 9416 case CHELSIO_T4_SET_FILTER_MODE: 9417 rc = set_filter_mode(sc, *(uint32_t *)data); 9418 break; 9419 case CHELSIO_T4_GET_FILTER: 9420 rc = get_filter(sc, (struct t4_filter *)data); 9421 break; 9422 case CHELSIO_T4_SET_FILTER: 9423 rc = set_filter(sc, (struct t4_filter *)data); 9424 break; 9425 case CHELSIO_T4_DEL_FILTER: 9426 rc = del_filter(sc, (struct t4_filter *)data); 9427 break; 9428 case CHELSIO_T4_GET_SGE_CONTEXT: 9429 rc = get_sge_context(sc, (struct t4_sge_context *)data); 9430 break; 9431 case CHELSIO_T4_LOAD_FW: 9432 rc = load_fw(sc, (struct t4_data *)data); 9433 break; 9434 case CHELSIO_T4_GET_MEM: 9435 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 9436 break; 9437 case CHELSIO_T4_GET_I2C: 9438 rc = read_i2c(sc, (struct t4_i2c_data *)data); 9439 break; 9440 case CHELSIO_T4_CLEAR_STATS: { 9441 int i, v; 9442 u_int port_id = *(uint32_t *)data; 9443 struct port_info *pi; 9444 struct vi_info *vi; 9445 9446 if (port_id >= sc->params.nports) 9447 return (EINVAL); 9448 pi = sc->port[port_id]; 9449 if (pi == NULL) 9450 return (EIO); 9451 9452 /* MAC stats */ 9453 t4_clr_port_stats(sc, pi->tx_chan); 9454 pi->tx_parse_error = 0; 9455 mtx_lock(&sc->reg_lock); 9456 for_each_vi(pi, v, vi) { 9457 if (vi->flags & VI_INIT_DONE) 9458 t4_clr_vi_stats(sc, vi->viid); 9459 } 9460 mtx_unlock(&sc->reg_lock); 9461 9462 /* 9463 * Since this command accepts a port, clear stats for 9464 * all VIs on this port. 
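		 * The per-queue counters cleared below are software counters
		 * kept in the driver's queue structures, so they are simply
		 * zeroed in place.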
9465 */ 9466 for_each_vi(pi, v, vi) { 9467 if (vi->flags & VI_INIT_DONE) { 9468 struct sge_rxq *rxq; 9469 struct sge_txq *txq; 9470 struct sge_wrq *wrq; 9471 9472 for_each_rxq(vi, i, rxq) { 9473 #if defined(INET) || defined(INET6) 9474 rxq->lro.lro_queued = 0; 9475 rxq->lro.lro_flushed = 0; 9476 #endif 9477 rxq->rxcsum = 0; 9478 rxq->vlan_extraction = 0; 9479 } 9480 9481 for_each_txq(vi, i, txq) { 9482 txq->txcsum = 0; 9483 txq->tso_wrs = 0; 9484 txq->vlan_insertion = 0; 9485 txq->imm_wrs = 0; 9486 txq->sgl_wrs = 0; 9487 txq->txpkt_wrs = 0; 9488 txq->txpkts0_wrs = 0; 9489 txq->txpkts1_wrs = 0; 9490 txq->txpkts0_pkts = 0; 9491 txq->txpkts1_pkts = 0; 9492 mp_ring_reset_stats(txq->r); 9493 } 9494 9495 #ifdef TCP_OFFLOAD 9496 /* nothing to clear for each ofld_rxq */ 9497 9498 for_each_ofld_txq(vi, i, wrq) { 9499 wrq->tx_wrs_direct = 0; 9500 wrq->tx_wrs_copied = 0; 9501 } 9502 #endif 9503 9504 if (IS_MAIN_VI(vi)) { 9505 wrq = &sc->sge.ctrlq[pi->port_id]; 9506 wrq->tx_wrs_direct = 0; 9507 wrq->tx_wrs_copied = 0; 9508 } 9509 } 9510 } 9511 break; 9512 } 9513 case CHELSIO_T4_SCHED_CLASS: 9514 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 9515 break; 9516 case CHELSIO_T4_SCHED_QUEUE: 9517 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 9518 break; 9519 case CHELSIO_T4_GET_TRACER: 9520 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 9521 break; 9522 case CHELSIO_T4_SET_TRACER: 9523 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 9524 break; 9525 case CHELSIO_T4_LOAD_CFG: 9526 rc = load_cfg(sc, (struct t4_data *)data); 9527 break; 9528 case CHELSIO_T4_LOAD_BOOT: 9529 rc = load_boot(sc, (struct t4_bootrom *)data); 9530 break; 9531 case CHELSIO_T4_LOAD_BOOTCFG: 9532 rc = load_bootcfg(sc, (struct t4_data *)data); 9533 break; 9534 case CHELSIO_T4_CUDBG_DUMP: 9535 rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data); 9536 break; 9537 default: 9538 rc = ENOTTY; 9539 } 9540 9541 return (rc); 9542 } 9543 9544 void 9545 t4_db_full(struct adapter *sc) 9546 { 9547 9548 CXGBE_UNIMPLEMENTED(__func__); 9549 } 9550 9551 void 9552 t4_db_dropped(struct adapter *sc) 9553 { 9554 9555 CXGBE_UNIMPLEMENTED(__func__); 9556 } 9557 9558 #ifdef TCP_OFFLOAD 9559 static int 9560 toe_capability(struct vi_info *vi, int enable) 9561 { 9562 int rc; 9563 struct port_info *pi = vi->pi; 9564 struct adapter *sc = pi->adapter; 9565 9566 ASSERT_SYNCHRONIZED_OP(sc); 9567 9568 if (!is_offload(sc)) 9569 return (ENODEV); 9570 9571 if (enable) { 9572 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 9573 /* TOE is already enabled. */ 9574 return (0); 9575 } 9576 9577 /* 9578 * We need the port's queues around so that we're able to send 9579 * and receive CPLs to/from the TOE even if the ifnet for this 9580 * port has never been UP'd administratively. 9581 */ 9582 if (!(vi->flags & VI_INIT_DONE)) { 9583 rc = vi_full_init(vi); 9584 if (rc) 9585 return (rc); 9586 } 9587 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 9588 rc = vi_full_init(&pi->vi[0]); 9589 if (rc) 9590 return (rc); 9591 } 9592 9593 if (isset(&sc->offload_map, pi->port_id)) { 9594 /* TOE is enabled on another VI of this port. 
*/ 9595 pi->uld_vis++; 9596 return (0); 9597 } 9598 9599 if (!uld_active(sc, ULD_TOM)) { 9600 rc = t4_activate_uld(sc, ULD_TOM); 9601 if (rc == EAGAIN) { 9602 log(LOG_WARNING, 9603 "You must kldload t4_tom.ko before trying " 9604 "to enable TOE on a cxgbe interface.\n"); 9605 } 9606 if (rc != 0) 9607 return (rc); 9608 KASSERT(sc->tom_softc != NULL, 9609 ("%s: TOM activated but softc NULL", __func__)); 9610 KASSERT(uld_active(sc, ULD_TOM), 9611 ("%s: TOM activated but flag not set", __func__)); 9612 } 9613 9614 /* Activate iWARP and iSCSI too, if the modules are loaded. */ 9615 if (!uld_active(sc, ULD_IWARP)) 9616 (void) t4_activate_uld(sc, ULD_IWARP); 9617 if (!uld_active(sc, ULD_ISCSI)) 9618 (void) t4_activate_uld(sc, ULD_ISCSI); 9619 9620 pi->uld_vis++; 9621 setbit(&sc->offload_map, pi->port_id); 9622 } else { 9623 pi->uld_vis--; 9624 9625 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) 9626 return (0); 9627 9628 KASSERT(uld_active(sc, ULD_TOM), 9629 ("%s: TOM never initialized?", __func__)); 9630 clrbit(&sc->offload_map, pi->port_id); 9631 } 9632 9633 return (0); 9634 } 9635 9636 /* 9637 * Add an upper layer driver to the global list. 9638 */ 9639 int 9640 t4_register_uld(struct uld_info *ui) 9641 { 9642 int rc = 0; 9643 struct uld_info *u; 9644 9645 sx_xlock(&t4_uld_list_lock); 9646 SLIST_FOREACH(u, &t4_uld_list, link) { 9647 if (u->uld_id == ui->uld_id) { 9648 rc = EEXIST; 9649 goto done; 9650 } 9651 } 9652 9653 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 9654 ui->refcount = 0; 9655 done: 9656 sx_xunlock(&t4_uld_list_lock); 9657 return (rc); 9658 } 9659 9660 int 9661 t4_unregister_uld(struct uld_info *ui) 9662 { 9663 int rc = EINVAL; 9664 struct uld_info *u; 9665 9666 sx_xlock(&t4_uld_list_lock); 9667 9668 SLIST_FOREACH(u, &t4_uld_list, link) { 9669 if (u == ui) { 9670 if (ui->refcount > 0) { 9671 rc = EBUSY; 9672 goto done; 9673 } 9674 9675 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 9676 rc = 0; 9677 goto done; 9678 } 9679 } 9680 done: 9681 sx_xunlock(&t4_uld_list_lock); 9682 return (rc); 9683 } 9684 9685 int 9686 t4_activate_uld(struct adapter *sc, int id) 9687 { 9688 int rc; 9689 struct uld_info *ui; 9690 9691 ASSERT_SYNCHRONIZED_OP(sc); 9692 9693 if (id < 0 || id > ULD_MAX) 9694 return (EINVAL); 9695 rc = EAGAIN; /* kldoad the module with this ULD and try again. 
*/ 9696 9697 sx_slock(&t4_uld_list_lock); 9698 9699 SLIST_FOREACH(ui, &t4_uld_list, link) { 9700 if (ui->uld_id == id) { 9701 if (!(sc->flags & FULL_INIT_DONE)) { 9702 rc = adapter_full_init(sc); 9703 if (rc != 0) 9704 break; 9705 } 9706 9707 rc = ui->activate(sc); 9708 if (rc == 0) { 9709 setbit(&sc->active_ulds, id); 9710 ui->refcount++; 9711 } 9712 break; 9713 } 9714 } 9715 9716 sx_sunlock(&t4_uld_list_lock); 9717 9718 return (rc); 9719 } 9720 9721 int 9722 t4_deactivate_uld(struct adapter *sc, int id) 9723 { 9724 int rc; 9725 struct uld_info *ui; 9726 9727 ASSERT_SYNCHRONIZED_OP(sc); 9728 9729 if (id < 0 || id > ULD_MAX) 9730 return (EINVAL); 9731 rc = ENXIO; 9732 9733 sx_slock(&t4_uld_list_lock); 9734 9735 SLIST_FOREACH(ui, &t4_uld_list, link) { 9736 if (ui->uld_id == id) { 9737 rc = ui->deactivate(sc); 9738 if (rc == 0) { 9739 clrbit(&sc->active_ulds, id); 9740 ui->refcount--; 9741 } 9742 break; 9743 } 9744 } 9745 9746 sx_sunlock(&t4_uld_list_lock); 9747 9748 return (rc); 9749 } 9750 9751 int 9752 uld_active(struct adapter *sc, int uld_id) 9753 { 9754 9755 MPASS(uld_id >= 0 && uld_id <= ULD_MAX); 9756 9757 return (isset(&sc->active_ulds, uld_id)); 9758 } 9759 #endif 9760 9761 /* 9762 * t = ptr to tunable. 9763 * nc = number of CPUs. 9764 * c = compiled in default for that tunable. 9765 */ 9766 static void 9767 calculate_nqueues(int *t, int nc, const int c) 9768 { 9769 int nq; 9770 9771 if (*t > 0) 9772 return; 9773 nq = *t < 0 ? -*t : c; 9774 *t = min(nc, nq); 9775 } 9776 9777 /* 9778 * Come up with reasonable defaults for some of the tunables, provided they're 9779 * not set by the user (in which case we'll use the values as is). 9780 */ 9781 static void 9782 tweak_tunables(void) 9783 { 9784 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 9785 9786 if (t4_ntxq10g < 1) { 9787 #ifdef RSS 9788 t4_ntxq10g = rss_getnumbuckets(); 9789 #else 9790 calculate_nqueues(&t4_ntxq10g, nc, NTXQ_10G); 9791 #endif 9792 } 9793 9794 if (t4_ntxq1g < 1) { 9795 #ifdef RSS 9796 /* XXX: way too many for 1GbE? */ 9797 t4_ntxq1g = rss_getnumbuckets(); 9798 #else 9799 calculate_nqueues(&t4_ntxq1g, nc, NTXQ_1G); 9800 #endif 9801 } 9802 9803 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); 9804 9805 if (t4_nrxq10g < 1) { 9806 #ifdef RSS 9807 t4_nrxq10g = rss_getnumbuckets(); 9808 #else 9809 calculate_nqueues(&t4_nrxq10g, nc, NRXQ_10G); 9810 #endif 9811 } 9812 9813 if (t4_nrxq1g < 1) { 9814 #ifdef RSS 9815 /* XXX: way too many for 1GbE? 
*/ 9816 t4_nrxq1g = rss_getnumbuckets(); 9817 #else 9818 calculate_nqueues(&t4_nrxq1g, nc, NRXQ_1G); 9819 #endif 9820 } 9821 9822 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); 9823 9824 #ifdef TCP_OFFLOAD 9825 calculate_nqueues(&t4_nofldtxq10g, nc, NOFLDTXQ_10G); 9826 calculate_nqueues(&t4_nofldtxq1g, nc, NOFLDTXQ_1G); 9827 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); 9828 calculate_nqueues(&t4_nofldrxq10g, nc, NOFLDRXQ_10G); 9829 calculate_nqueues(&t4_nofldrxq1g, nc, NOFLDRXQ_1G); 9830 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); 9831 9832 if (t4_toecaps_allowed == -1) 9833 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 9834 9835 if (t4_rdmacaps_allowed == -1) { 9836 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 9837 FW_CAPS_CONFIG_RDMA_RDMAC; 9838 } 9839 9840 if (t4_iscsicaps_allowed == -1) { 9841 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 9842 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 9843 FW_CAPS_CONFIG_ISCSI_T10DIF; 9844 } 9845 #else 9846 if (t4_toecaps_allowed == -1) 9847 t4_toecaps_allowed = 0; 9848 9849 if (t4_rdmacaps_allowed == -1) 9850 t4_rdmacaps_allowed = 0; 9851 9852 if (t4_iscsicaps_allowed == -1) 9853 t4_iscsicaps_allowed = 0; 9854 #endif 9855 9856 #ifdef DEV_NETMAP 9857 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); 9858 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); 9859 #endif 9860 9861 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS) 9862 t4_tmr_idx_10g = TMR_IDX_10G; 9863 9864 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS) 9865 t4_pktc_idx_10g = PKTC_IDX_10G; 9866 9867 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS) 9868 t4_tmr_idx_1g = TMR_IDX_1G; 9869 9870 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS) 9871 t4_pktc_idx_1g = PKTC_IDX_1G; 9872 9873 if (t4_qsize_txq < 128) 9874 t4_qsize_txq = 128; 9875 9876 if (t4_qsize_rxq < 128) 9877 t4_qsize_rxq = 128; 9878 while (t4_qsize_rxq & 7) 9879 t4_qsize_rxq++; 9880 9881 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 9882 } 9883 9884 #ifdef DDB 9885 static void 9886 t4_dump_tcb(struct adapter *sc, int tid) 9887 { 9888 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 9889 9890 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 9891 save = t4_read_reg(sc, reg); 9892 base = sc->memwin[2].mw_base; 9893 9894 /* Dump TCB for the tid */ 9895 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 9896 tcb_addr += tid * TCB_SIZE; 9897 9898 if (is_t4(sc)) { 9899 pf = 0; 9900 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 9901 } else { 9902 pf = V_PFNUM(sc->pf); 9903 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 9904 } 9905 t4_write_reg(sc, reg, win_pos | pf); 9906 t4_read_reg(sc, reg); 9907 9908 off = tcb_addr - win_pos; 9909 for (i = 0; i < 4; i++) { 9910 uint32_t buf[8]; 9911 for (j = 0; j < 8; j++, off += 4) 9912 buf[j] = htonl(t4_read_reg(sc, base + off)); 9913 9914 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 9915 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 9916 buf[7]); 9917 } 9918 9919 t4_write_reg(sc, reg, save); 9920 t4_read_reg(sc, reg); 9921 } 9922 9923 static void 9924 t4_dump_devlog(struct adapter *sc) 9925 { 9926 struct devlog_params *dparams = &sc->params.devlog; 9927 struct fw_devlog_e e; 9928 int i, first, j, m, nentries, rc; 9929 uint64_t ftstamp = UINT64_MAX; 9930 9931 if (dparams->start == 0) { 9932 db_printf("devlog params not valid\n"); 9933 return; 9934 } 9935 9936 nentries = dparams->size / sizeof(struct fw_devlog_e); 9937 m = fwmtype_to_hwmtype(dparams->memtype); 9938 9939 /* Find the 
first entry. */ 9940 first = -1; 9941 for (i = 0; i < nentries && !db_pager_quit; i++) { 9942 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9943 sizeof(e), (void *)&e); 9944 if (rc != 0) 9945 break; 9946 9947 if (e.timestamp == 0) 9948 break; 9949 9950 e.timestamp = be64toh(e.timestamp); 9951 if (e.timestamp < ftstamp) { 9952 ftstamp = e.timestamp; 9953 first = i; 9954 } 9955 } 9956 9957 if (first == -1) 9958 return; 9959 9960 i = first; 9961 do { 9962 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9963 sizeof(e), (void *)&e); 9964 if (rc != 0) 9965 return; 9966 9967 if (e.timestamp == 0) 9968 return; 9969 9970 e.timestamp = be64toh(e.timestamp); 9971 e.seqno = be32toh(e.seqno); 9972 for (j = 0; j < 8; j++) 9973 e.params[j] = be32toh(e.params[j]); 9974 9975 db_printf("%10d %15ju %8s %8s ", 9976 e.seqno, e.timestamp, 9977 (e.level < nitems(devlog_level_strings) ? 9978 devlog_level_strings[e.level] : "UNKNOWN"), 9979 (e.facility < nitems(devlog_facility_strings) ? 9980 devlog_facility_strings[e.facility] : "UNKNOWN")); 9981 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], 9982 e.params[3], e.params[4], e.params[5], e.params[6], 9983 e.params[7]); 9984 9985 if (++i == nentries) 9986 i = 0; 9987 } while (i != first && !db_pager_quit); 9988 } 9989 9990 static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); 9991 _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); 9992 9993 DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) 9994 { 9995 device_t dev; 9996 int t; 9997 bool valid; 9998 9999 valid = false; 10000 t = db_read_token(); 10001 if (t == tIDENT) { 10002 dev = device_lookup_by_name(db_tok_string); 10003 valid = true; 10004 } 10005 db_skip_to_eol(); 10006 if (!valid) { 10007 db_printf("usage: show t4 devlog <nexus>\n"); 10008 return; 10009 } 10010 10011 if (dev == NULL) { 10012 db_printf("device not found\n"); 10013 return; 10014 } 10015 10016 t4_dump_devlog(device_get_softc(dev)); 10017 } 10018 10019 DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) 10020 { 10021 device_t dev; 10022 int radix, tid, t; 10023 bool valid; 10024 10025 valid = false; 10026 radix = db_radix; 10027 db_radix = 10; 10028 t = db_read_token(); 10029 if (t == tIDENT) { 10030 dev = device_lookup_by_name(db_tok_string); 10031 t = db_read_token(); 10032 if (t == tNUMBER) { 10033 tid = db_tok_number; 10034 valid = true; 10035 } 10036 } 10037 db_radix = radix; 10038 db_skip_to_eol(); 10039 if (!valid) { 10040 db_printf("usage: show t4 tcb <nexus> <tid>\n"); 10041 return; 10042 } 10043 10044 if (dev == NULL) { 10045 db_printf("device not found\n"); 10046 return; 10047 } 10048 if (tid < 0) { 10049 db_printf("invalid tid\n"); 10050 return; 10051 } 10052 10053 t4_dump_tcb(device_get_softc(dev), tid); 10054 } 10055 #endif 10056 10057 static struct sx mlu; /* mod load unload */ 10058 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); 10059 10060 static int 10061 mod_event(module_t mod, int cmd, void *arg) 10062 { 10063 int rc = 0; 10064 static int loaded = 0; 10065 10066 switch (cmd) { 10067 case MOD_LOAD: 10068 sx_xlock(&mlu); 10069 if (loaded++ == 0) { 10070 t4_sge_modload(); 10071 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl); 10072 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl); 10073 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); 10074 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); 10075 sx_init(&t4_list_lock, "T4/T5 adapters"); 10076 SLIST_INIT(&t4_list); 10077 #ifdef TCP_OFFLOAD 10078 sx_init(&t4_uld_list_lock, 
"T4/T5 ULDs"); 10079 SLIST_INIT(&t4_uld_list); 10080 #endif 10081 t4_tracer_modload(); 10082 tweak_tunables(); 10083 } 10084 sx_xunlock(&mlu); 10085 break; 10086 10087 case MOD_UNLOAD: 10088 sx_xlock(&mlu); 10089 if (--loaded == 0) { 10090 int tries; 10091 10092 sx_slock(&t4_list_lock); 10093 if (!SLIST_EMPTY(&t4_list)) { 10094 rc = EBUSY; 10095 sx_sunlock(&t4_list_lock); 10096 goto done_unload; 10097 } 10098 #ifdef TCP_OFFLOAD 10099 sx_slock(&t4_uld_list_lock); 10100 if (!SLIST_EMPTY(&t4_uld_list)) { 10101 rc = EBUSY; 10102 sx_sunlock(&t4_uld_list_lock); 10103 sx_sunlock(&t4_list_lock); 10104 goto done_unload; 10105 } 10106 #endif 10107 tries = 0; 10108 while (tries++ < 5 && t4_sge_extfree_refs() != 0) { 10109 uprintf("%ju clusters with custom free routine " 10110 "still is use.\n", t4_sge_extfree_refs()); 10111 pause("t4unload", 2 * hz); 10112 } 10113 #ifdef TCP_OFFLOAD 10114 sx_sunlock(&t4_uld_list_lock); 10115 #endif 10116 sx_sunlock(&t4_list_lock); 10117 10118 if (t4_sge_extfree_refs() == 0) { 10119 t4_tracer_modunload(); 10120 #ifdef TCP_OFFLOAD 10121 sx_destroy(&t4_uld_list_lock); 10122 #endif 10123 sx_destroy(&t4_list_lock); 10124 t4_sge_modunload(); 10125 loaded = 0; 10126 } else { 10127 rc = EBUSY; 10128 loaded++; /* undo earlier decrement */ 10129 } 10130 } 10131 done_unload: 10132 sx_xunlock(&mlu); 10133 break; 10134 } 10135 10136 return (rc); 10137 } 10138 10139 static devclass_t t4_devclass, t5_devclass, t6_devclass; 10140 static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; 10141 static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; 10142 10143 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 10144 MODULE_VERSION(t4nex, 1); 10145 MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 10146 #ifdef DEV_NETMAP 10147 MODULE_DEPEND(t4nex, netmap, 1, 1, 1); 10148 #endif /* DEV_NETMAP */ 10149 10150 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 10151 MODULE_VERSION(t5nex, 1); 10152 MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 10153 #ifdef DEV_NETMAP 10154 MODULE_DEPEND(t5nex, netmap, 1, 1, 1); 10155 #endif /* DEV_NETMAP */ 10156 10157 DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); 10158 MODULE_VERSION(t6nex, 1); 10159 MODULE_DEPEND(t6nex, firmware, 1, 1, 1); 10160 #ifdef DEV_NETMAP 10161 MODULE_DEPEND(t6nex, netmap, 1, 1, 1); 10162 #endif /* DEV_NETMAP */ 10163 10164 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 10165 MODULE_VERSION(cxgbe, 1); 10166 10167 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 10168 MODULE_VERSION(cxl, 1); 10169 10170 DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); 10171 MODULE_VERSION(cc, 1); 10172 10173 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 10174 MODULE_VERSION(vcxgbe, 1); 10175 10176 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 10177 MODULE_VERSION(vcxl, 1); 10178 10179 DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); 10180 MODULE_VERSION(vcc, 1); 10181