/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#include <crypto/rijndael/rijndael.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
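/*
 * Note: the driver_t definitions in this file are registered with the newbus
 * framework elsewhere (typically via DRIVER_MODULE(9)).  The sizeof() in the
 * third field of each one is the softc size that device_get_softc(9) returns
 * for the corresponding device.
 */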
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe,		vcxgbe_probe),
	DEVMETHOD(device_attach,	vcxgbe_attach),
	DEVMETHOD(device_detach,	vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe,		t6_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};


/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it
 * should provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific
 * to T5 are under hw.cxl.
 */
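/*
 * Tunables are usually set at boot time via loader.conf(5).  For example
 * (illustrative values only, not recommendations):
 *
 *	hw.cxgbe.ntxq="8"		# NIC tx queues per port
 *	hw.cxgbe.holdoff_timer_idx="2"	# holdoff timer index
 *	hw.cxgbe.config_file="uwire"	# select the uwire config file
 */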
/*
 * Number of queues for tx and rx, NIC and offload.
 */
#define NTXQ 16
int t4_ntxq = -NTXQ;
TUNABLE_INT("hw.cxgbe.ntxq", &t4_ntxq);
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);	/* Old name, undocumented */

#define NRXQ 8
int t4_nrxq = -NRXQ;
TUNABLE_INT("hw.cxgbe.nrxq", &t4_nrxq);
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);	/* Old name, undocumented */

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ 8
static int t4_nofldtxq = -NOFLDTXQ;
TUNABLE_INT("hw.cxgbe.nofldtxq", &t4_nofldtxq);

#define NOFLDRXQ 2
static int t4_nofldrxq = -NOFLDRXQ;
TUNABLE_INT("hw.cxgbe.nofldrxq", &t4_nofldrxq);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_ofld", &t4_tmr_idx_ofld);

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_ofld", &t4_pktc_idx_ofld);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_idle", &t4_toe_keepalive_idle);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
TUNABLE_ULONG("hw.cxgbe.toe.keepalive_interval", &t4_toe_keepalive_interval);

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
TUNABLE_INT("hw.cxgbe.toe.keepalive_count", &t4_toe_keepalive_count);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_min", &t4_toe_rexmt_min);

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
TUNABLE_ULONG("hw.cxgbe.toe.rexmt_max", &t4_toe_rexmt_max);

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
TUNABLE_INT("hw.cxgbe.toe.rexmt_count", &t4_toe_rexmt_count);

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.0", &t4_toe_rexmt_backoff[0]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.1", &t4_toe_rexmt_backoff[1]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.2", &t4_toe_rexmt_backoff[2]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.3", &t4_toe_rexmt_backoff[3]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.4", &t4_toe_rexmt_backoff[4]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.5", &t4_toe_rexmt_backoff[5]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.6", &t4_toe_rexmt_backoff[6]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.7", &t4_toe_rexmt_backoff[7]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.8", &t4_toe_rexmt_backoff[8]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.9", &t4_toe_rexmt_backoff[9]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.10", &t4_toe_rexmt_backoff[10]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.11", &t4_toe_rexmt_backoff[11]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.12", &t4_toe_rexmt_backoff[12]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.13", &t4_toe_rexmt_backoff[13]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.14", &t4_toe_rexmt_backoff[14]);
TUNABLE_INT("hw.cxgbe.toe.rexmt_backoff.15", &t4_toe_rexmt_backoff[15]);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for ports.
 */
#define TMR_IDX 1
int t4_tmr_idx = TMR_IDX;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx", &t4_tmr_idx);
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);	/* Old name */

#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx", &t4_pktc_idx);
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);	/* Old name */

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
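/*
 * E.g. hw.cxgbe.interrupt_types="2" restricts the driver to MSI, and "6"
 * (MSI-X | MSI) allows anything but INTx; the driver then uses the best
 * interrupt type that is both allowed here and available on the system.
 * (Illustrative values.)
 */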
/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
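/*
 * E.g. the default above is PAUSE_RX | PAUSE_TX = 3 (heed and emit PAUSE
 * frames); hw.cxgbe.pause_settings="1" would heed incoming PAUSE frames but
 * never emit any.  (Illustrative value.)
 */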
/*
 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
 * FEC_RESERVED respectively).
 * -1 to run with the firmware default.
 *  0 to disable FEC.
 */
static int t4_fec = -1;
TUNABLE_INT("hw.cxgbe.fec", &t4_fec);

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 *  0 to disable.
 *  1 to enable.
 */
static int t4_autoneg = -1;
TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited,
 * allowed, encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that
 * the chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 1;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/*
 * PCIe Relaxed Ordering.
 * -1: driver should figure out a good value.
 *  0: disable RO.
 *  1: enable RO.
 *  2: leave RO alone.
 */
static int pcie_relaxed_ordering = -1;
TUNABLE_INT("hw.cxgbe.pcie_relaxed_ordering", &pcie_relaxed_ordering);


/* Functions used by VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_ETH,
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t num_vis;	/* number of VIs for each port */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t ntxq;		/* # of NIC txq's for each port */
	uint16_t nrxq;		/* # of NIC rxq's for each port */
	uint16_t nofldtxq;	/* # of TOE txq's for each port */
	uint16_t nofldrxq;	/* # of TOE rxq's for each port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static void init_l1cfg(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
	{0x6480, "Chelsio T6225 80"},
	{0x6481, "Chelsio T62100 81"},
	{0x6484, "Chelsio T62100 84"},
};
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root Port.  As a
	 * workaround, find the parent Root Port and disable No Snoop and
	 * Relaxed Ordering.  Note that this affects all devices under this
	 * root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, rqidx, tqidx, nports;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint32_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
		if (pcie_relaxed_ordering == 0 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
			v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		} else if (pcie_relaxed_ordering == 1 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
			v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		}
	}
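	/*
	 * Worked example of the MPS computation above: the max payload size
	 * field encodes 128 << n bytes, so a field value of 2 yields
	 * sc->params.pci.mps = 512.  (Illustrative value.)
	 */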
	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within
	 * PCI passthrough environments too, where pci_get_function() could
	 * return a different PF# depending on the passthrough configuration.
	 * We need to use the real PF# in all our communication with the
	 * firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before
	 * the character device.  The userland tool's register i/o and mem
	 * read will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation
		 * until pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		/* All VIs on this port share this media. */
		ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
		    cxgbe_media_status);

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	nports = sc->params.nports;
	rc = cfg_itype_and_nqueues(sc, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	num_vis = iaq.num_vis;
	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = nports * iaq.nrxq;
	s->ntxq = nports * iaq.ntxq;
	if (num_vis > 1) {
		s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += nports + 1;		/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = nports * iaq.nofldrxq;
		s->nofldtxq = nports * iaq.nofldtxq;
		if (num_vis > 1) {
			s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
			s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = nports * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = nports * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_tx_sched(sc);
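	/*
	 * Worked example of the queue accounting above (hypothetical 2-port
	 * adapter, num_vis = 1, nrxq = 8, ntxq = 16, no TOE or netmap
	 * queues): s->nrxq = 16, s->ntxq = 32, s->neq = 32 + 16 + 2 + 1 = 51
	 * (one free-list eq per rxq, one ctrl queue per port, one mgmt
	 * queue), and s->niq = 16 + 1 = 17 (the extra iq is the firmware
	 * event queue).
	 */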
	/*
	 * Second pass over the ports.  This time we know the number of rx
	 * and tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			vi->tmr_idx = t4_tmr_idx;
			vi->pktc_idx = t4_pktc_idx;
			vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
			vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;

			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#ifdef TCP_OFFLOAD
			vi->ofld_tmr_idx = t4_tmr_idx_ofld;
			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
			vi->first_ofld_rxq = ofld_rqidx;
			vi->first_ofld_txq = ofld_tqidx;
			vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;
			vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;

			ofld_rqidx += vi->nofldrxq;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
			if (j > 0) {
				vi->first_nm_rxq = nm_rqidx;
				vi->first_nm_txq = nm_tqidx;
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
				nm_rqidx += vi->nnmrxq;
				nm_tqidx += vi->nnmtxq;
			}
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	/*
	 * Ensure thread-safe mailbox access (in debug builds).
	 *
	 * So far this was the only thread accessing the mailbox but various
	 * ifnets and sysctls are about to be created and their handlers/ioctls
	 * will access the mailbox from different threads.
	 */
	sc->flags |= CHK_MBOX_ACCESS;

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	sc->flags &= ~CHK_MBOX_ACCESS;
	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	device_delete_children(dev);

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	free(sc->tt.tls_rx_ports, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		ifp->if_capabilities |= IFCAP_NETMAP;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
		    vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);
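	/*
	 * The sbuf assembled above becomes a one-line attach message, e.g.
	 * (hypothetical queue counts): "cxgbe0: 16 txq, 8 rxq (NIC); 8 txq,
	 * 2 rxq (TOE)".
	 */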
	vi_sysctls(vi);

	return (0);
}

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	if (vi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	if_free(vi->ifp);
	vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);
	ifmedia_removeall(&pi->media);

	end_synchronized_op(sc, 0);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > MAX_MTU)
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
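		/*
		 * Explanatory note: the synchronized op is begun without
		 * sleeping (HOLD_LOCK) first.  If it turns out that the
		 * interface must be brought up or down, which may sleep, the
		 * op is ended and redone with can_sleep = 1; conversely the
		 * MAC settings update is redone in the non-sleeping mode.
		 */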
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, vi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account
		 * before sending a TSO request our way, so it's sufficient to
		 * toggle IFCAP_TSOx only.
		 */
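		/*
		 * E.g. "ifconfig cxgbe0 -txcsum" clears IFCAP_TXCSUM (and, per
		 * the check above, IFCAP_TSO4 too); TSO4 can then only be
		 * turned back on after txcsum is re-enabled, as the checks
		 * below insist ("enable txcsum first").  (Illustrative
		 * interface name.)
		 */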
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(sc, &m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);
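	/*
	 * Worked example (hypothetical numbers): with ntxq = 8 and
	 * rsrv_noflowq = 1, a packet with flowid 13 maps to txq index
	 * first_txq + (13 % 7) + 1 = first_txq + 7, while packets without a
	 * flowid always use txq[first_txq].
	 */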
*/ 1858 txq = &sc->sge.txq[vi->first_txq]; 1859 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 1860 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + 1861 vi->rsrv_noflowq); 1862 1863 items[0] = m; 1864 rc = mp_ring_enqueue(txq->r, items, 1, 4096); 1865 if (__predict_false(rc != 0)) 1866 m_freem(m); 1867 1868 return (rc); 1869 } 1870 1871 static void 1872 cxgbe_qflush(struct ifnet *ifp) 1873 { 1874 struct vi_info *vi = ifp->if_softc; 1875 struct sge_txq *txq; 1876 int i; 1877 1878 /* queues do not exist if !VI_INIT_DONE. */ 1879 if (vi->flags & VI_INIT_DONE) { 1880 for_each_txq(vi, i, txq) { 1881 TXQ_LOCK(txq); 1882 txq->eq.flags |= EQ_QFLUSH; 1883 TXQ_UNLOCK(txq); 1884 while (!mp_ring_is_idle(txq->r)) { 1885 mp_ring_check_drainage(txq->r, 0); 1886 pause("qflush", 1); 1887 } 1888 TXQ_LOCK(txq); 1889 txq->eq.flags &= ~EQ_QFLUSH; 1890 TXQ_UNLOCK(txq); 1891 } 1892 } 1893 if_qflush(ifp); 1894 } 1895 1896 static uint64_t 1897 vi_get_counter(struct ifnet *ifp, ift_counter c) 1898 { 1899 struct vi_info *vi = ifp->if_softc; 1900 struct fw_vi_stats_vf *s = &vi->stats; 1901 1902 vi_refresh_stats(vi->pi->adapter, vi); 1903 1904 switch (c) { 1905 case IFCOUNTER_IPACKETS: 1906 return (s->rx_bcast_frames + s->rx_mcast_frames + 1907 s->rx_ucast_frames); 1908 case IFCOUNTER_IERRORS: 1909 return (s->rx_err_frames); 1910 case IFCOUNTER_OPACKETS: 1911 return (s->tx_bcast_frames + s->tx_mcast_frames + 1912 s->tx_ucast_frames + s->tx_offload_frames); 1913 case IFCOUNTER_OERRORS: 1914 return (s->tx_drop_frames); 1915 case IFCOUNTER_IBYTES: 1916 return (s->rx_bcast_bytes + s->rx_mcast_bytes + 1917 s->rx_ucast_bytes); 1918 case IFCOUNTER_OBYTES: 1919 return (s->tx_bcast_bytes + s->tx_mcast_bytes + 1920 s->tx_ucast_bytes + s->tx_offload_bytes); 1921 case IFCOUNTER_IMCASTS: 1922 return (s->rx_mcast_frames); 1923 case IFCOUNTER_OMCASTS: 1924 return (s->tx_mcast_frames); 1925 case IFCOUNTER_OQDROPS: { 1926 uint64_t drops; 1927 1928 drops = 0; 1929 if (vi->flags & VI_INIT_DONE) { 1930 int i; 1931 struct sge_txq *txq; 1932 1933 for_each_txq(vi, i, txq) 1934 drops += counter_u64_fetch(txq->r->drops); 1935 } 1936 1937 return (drops); 1938 1939 } 1940 1941 default: 1942 return (if_get_counter_default(ifp, c)); 1943 } 1944 } 1945 1946 uint64_t 1947 cxgbe_get_counter(struct ifnet *ifp, ift_counter c) 1948 { 1949 struct vi_info *vi = ifp->if_softc; 1950 struct port_info *pi = vi->pi; 1951 struct adapter *sc = pi->adapter; 1952 struct port_stats *s = &pi->stats; 1953 1954 if (pi->nvi > 1 || sc->flags & IS_VF) 1955 return (vi_get_counter(ifp, c)); 1956 1957 cxgbe_refresh_stats(sc, pi); 1958 1959 switch (c) { 1960 case IFCOUNTER_IPACKETS: 1961 return (s->rx_frames); 1962 1963 case IFCOUNTER_IERRORS: 1964 return (s->rx_jabber + s->rx_runt + s->rx_too_long + 1965 s->rx_fcs_err + s->rx_len_err); 1966 1967 case IFCOUNTER_OPACKETS: 1968 return (s->tx_frames); 1969 1970 case IFCOUNTER_OERRORS: 1971 return (s->tx_error_frames); 1972 1973 case IFCOUNTER_IBYTES: 1974 return (s->rx_octets); 1975 1976 case IFCOUNTER_OBYTES: 1977 return (s->tx_octets); 1978 1979 case IFCOUNTER_IMCASTS: 1980 return (s->rx_mcast_frames); 1981 1982 case IFCOUNTER_OMCASTS: 1983 return (s->tx_mcast_frames); 1984 1985 case IFCOUNTER_IQDROPS: 1986 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 1987 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + 1988 s->rx_trunc3 + pi->tnl_cong_drops); 1989 1990 case IFCOUNTER_OQDROPS: { 1991 uint64_t drops; 1992 1993 drops = s->tx_drop; 1994 if (vi->flags & VI_INIT_DONE) { 1995 int i; 1996 struct 
sge_txq *txq; 1997 1998 for_each_txq(vi, i, txq) 1999 drops += counter_u64_fetch(txq->r->drops); 2000 } 2001 2002 return (drops); 2003 2004 } 2005 2006 default: 2007 return (if_get_counter_default(ifp, c)); 2008 } 2009 } 2010 2011 static int 2012 cxgbe_media_change(struct ifnet *ifp) 2013 { 2014 struct vi_info *vi = ifp->if_softc; 2015 2016 device_printf(vi->dev, "%s unimplemented.\n", __func__); 2017 2018 return (EOPNOTSUPP); 2019 } 2020 2021 static void 2022 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2023 { 2024 struct vi_info *vi = ifp->if_softc; 2025 struct port_info *pi = vi->pi; 2026 struct ifmedia_entry *cur; 2027 struct link_config *lc = &pi->link_cfg; 2028 2029 /* 2030 * If all the interfaces are administratively down, the firmware does not 2031 * report transceiver changes. Refresh port info here so that ifconfig 2032 * displays accurate information at all times. 2033 */ 2034 if (begin_synchronized_op(pi->adapter, NULL, SLEEP_OK | INTR_OK, 2035 "t4med") == 0) { 2036 PORT_LOCK(pi); 2037 if (pi->up_vis == 0) { 2038 t4_update_port_info(pi); 2039 build_medialist(pi, &pi->media); 2040 } 2041 PORT_UNLOCK(pi); 2042 end_synchronized_op(pi->adapter, 0); 2043 } 2044 2045 ifmr->ifm_status = IFM_AVALID; 2046 if (lc->link_ok == 0) 2047 return; 2048 2049 ifmr->ifm_status |= IFM_ACTIVE; 2050 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); 2051 if (lc->fc & PAUSE_RX) 2052 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2053 if (lc->fc & PAUSE_TX) 2054 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2055 2056 /* active and current will differ iff current media is autoselect. */ 2057 cur = pi->media.ifm_cur; 2058 if (cur != NULL && IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 2059 return; 2060 2061 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 2062 if (lc->fc & PAUSE_RX) 2063 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2064 if (lc->fc & PAUSE_TX) 2065 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2066 switch (lc->speed) { 2067 case 10000: 2068 ifmr->ifm_active |= IFM_10G_T; 2069 break; 2070 case 1000: 2071 ifmr->ifm_active |= IFM_1000_T; 2072 break; 2073 case 100: 2074 ifmr->ifm_active |= IFM_100_TX; 2075 break; 2076 case 10: 2077 ifmr->ifm_active |= IFM_10_T; 2078 break; 2079 default: 2080 device_printf(vi->dev, "link up but speed unknown (%u)\n", 2081 lc->speed); 2082 } 2083 } 2084 2085 static int 2086 vcxgbe_probe(device_t dev) 2087 { 2088 char buf[128]; 2089 struct vi_info *vi = device_get_softc(dev); 2090 2091 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, 2092 vi - vi->pi->vi); 2093 device_set_desc_copy(dev, buf); 2094 2095 return (BUS_PROBE_DEFAULT); 2096 } 2097 2098 static int 2099 alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi) 2100 { 2101 int func, index, rc; 2102 uint32_t param, val; 2103 2104 ASSERT_SYNCHRONIZED_OP(sc); 2105 2106 index = vi - pi->vi; 2107 MPASS(index > 0); /* This function deals with _extra_ VIs only */ 2108 KASSERT(index < nitems(vi_mac_funcs), 2109 ("%s: VI %s doesn't have a MAC func", __func__, 2110 device_get_nameunit(vi->dev))); 2111 func = vi_mac_funcs[index]; 2112 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, 2113 vi->hw_addr, &vi->rss_size, func, 0); 2114 if (rc < 0) { 2115 device_printf(vi->dev, "failed to allocate virtual interface %d " 2116 "for port %d: %d\n", index, pi->port_id, -rc); 2117 return (-rc); 2118 } 2119 vi->viid = rc; 2120 if (chip_id(sc) <= CHELSIO_T5) 2121 vi->smt_idx = (rc & 0x7f) << 1; 2122 else 2123 vi->smt_idx = (rc & 0x7f); 2124 2125 if (vi->rss_size == 1) { 2126 /* 2127 * This VI didn't get a
slice of the RSS table. Reduce the 2128 * number of VIs being created (hw.cxgbe.num_vis) or modify the 2129 * configuration file (nvi, rssnvi for this PF) if this is a 2130 * problem. 2131 */ 2132 device_printf(vi->dev, "RSS table not available.\n"); 2133 vi->rss_base = 0xffff; 2134 2135 return (0); 2136 } 2137 2138 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2139 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 2140 V_FW_PARAMS_PARAM_YZ(vi->viid); 2141 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 2142 if (rc) 2143 vi->rss_base = 0xffff; 2144 else { 2145 MPASS((val >> 16) == vi->rss_size); 2146 vi->rss_base = val & 0xffff; 2147 } 2148 2149 return (0); 2150 } 2151 2152 static int 2153 vcxgbe_attach(device_t dev) 2154 { 2155 struct vi_info *vi; 2156 struct port_info *pi; 2157 struct adapter *sc; 2158 int rc; 2159 2160 vi = device_get_softc(dev); 2161 pi = vi->pi; 2162 sc = pi->adapter; 2163 2164 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via"); 2165 if (rc) 2166 return (rc); 2167 rc = alloc_extra_vi(sc, pi, vi); 2168 end_synchronized_op(sc, 0); 2169 if (rc) 2170 return (rc); 2171 2172 rc = cxgbe_vi_attach(dev, vi); 2173 if (rc) { 2174 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2175 return (rc); 2176 } 2177 return (0); 2178 } 2179 2180 static int 2181 vcxgbe_detach(device_t dev) 2182 { 2183 struct vi_info *vi; 2184 struct adapter *sc; 2185 2186 vi = device_get_softc(dev); 2187 sc = vi->pi->adapter; 2188 2189 doom_vi(sc, vi); 2190 2191 cxgbe_vi_detach(vi); 2192 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2193 2194 end_synchronized_op(sc, 0); 2195 2196 return (0); 2197 } 2198 2199 void 2200 t4_fatal_err(struct adapter *sc) 2201 { 2202 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 2203 t4_intr_disable(sc); 2204 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 2205 device_get_nameunit(sc->dev)); 2206 } 2207 2208 void 2209 t4_add_adapter(struct adapter *sc) 2210 { 2211 sx_xlock(&t4_list_lock); 2212 SLIST_INSERT_HEAD(&t4_list, sc, link); 2213 sx_xunlock(&t4_list_lock); 2214 } 2215 2216 int 2217 t4_map_bars_0_and_4(struct adapter *sc) 2218 { 2219 sc->regs_rid = PCIR_BAR(0); 2220 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2221 &sc->regs_rid, RF_ACTIVE); 2222 if (sc->regs_res == NULL) { 2223 device_printf(sc->dev, "cannot map registers.\n"); 2224 return (ENXIO); 2225 } 2226 sc->bt = rman_get_bustag(sc->regs_res); 2227 sc->bh = rman_get_bushandle(sc->regs_res); 2228 sc->mmio_len = rman_get_size(sc->regs_res); 2229 setbit(&sc->doorbells, DOORBELL_KDB); 2230 2231 sc->msix_rid = PCIR_BAR(4); 2232 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2233 &sc->msix_rid, RF_ACTIVE); 2234 if (sc->msix_res == NULL) { 2235 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 2236 return (ENXIO); 2237 } 2238 2239 return (0); 2240 } 2241 2242 int 2243 t4_map_bar_2(struct adapter *sc) 2244 { 2245 2246 /* 2247 * T4: only iWARP driver uses the userspace doorbells. There is no need 2248 * to map it if RDMA is disabled. 
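 *
 * T5 and later use this BAR for all doorbells, so it is mapped
 * unconditionally on those chips (see the chip_id() check below).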
2249 */ 2250 if (is_t4(sc) && sc->rdmacaps == 0) 2251 return (0); 2252 2253 sc->udbs_rid = PCIR_BAR(2); 2254 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2255 &sc->udbs_rid, RF_ACTIVE); 2256 if (sc->udbs_res == NULL) { 2257 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 2258 return (ENXIO); 2259 } 2260 sc->udbs_base = rman_get_virtual(sc->udbs_res); 2261 2262 if (chip_id(sc) >= CHELSIO_T5) { 2263 setbit(&sc->doorbells, DOORBELL_UDB); 2264 #if defined(__i386__) || defined(__amd64__) 2265 if (t5_write_combine) { 2266 int rc, mode; 2267 2268 /* 2269 * Enable write combining on BAR2. This is the 2270 * userspace doorbell BAR and is split into 128B 2271 * (UDBS_SEG_SIZE) doorbell regions, each associated 2272 * with an egress queue. The first 64B has the doorbell 2273 * and the second 64B can be used to submit a tx work 2274 * request with an implicit doorbell. 2275 */ 2276 2277 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 2278 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 2279 if (rc == 0) { 2280 clrbit(&sc->doorbells, DOORBELL_UDB); 2281 setbit(&sc->doorbells, DOORBELL_WCWR); 2282 setbit(&sc->doorbells, DOORBELL_UDBWC); 2283 } else { 2284 t5_write_combine = 0; 2285 device_printf(sc->dev, 2286 "couldn't enable write combining: %d\n", 2287 rc); 2288 } 2289 2290 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); 2291 t4_write_reg(sc, A_SGE_STAT_CFG, 2292 V_STATSOURCE_T5(7) | mode); 2293 } 2294 #else 2295 t5_write_combine = 0; 2296 #endif 2297 sc->iwt.wc_en = t5_write_combine; 2298 } 2299 2300 return (0); 2301 } 2302 2303 struct memwin_init { 2304 uint32_t base; 2305 uint32_t aperture; 2306 }; 2307 2308 static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2309 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2310 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2311 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2312 }; 2313 2314 static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2315 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2316 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2317 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2318 }; 2319 2320 static void 2321 setup_memwin(struct adapter *sc) 2322 { 2323 const struct memwin_init *mw_init; 2324 struct memwin *mw; 2325 int i; 2326 uint32_t bar0; 2327 2328 if (is_t4(sc)) { 2329 /* 2330 * Read low 32b of bar0 indirectly via the hardware backdoor 2331 * mechanism. Works from within PCI passthrough environments 2332 * too, where rman_get_start() can return a different value. We 2333 * need to program the T4 memory window decoders with the actual 2334 * addresses that will be coming across the PCIe link. 2335 */ 2336 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2337 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2338 2339 mw_init = &t4_memwin[0]; 2340 } else { 2341 /* T5+ use the relative offset inside the PCIe BAR */ 2342 bar0 = 0; 2343 2344 mw_init = &t5_memwin[0]; 2345 } 2346 2347 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2348 rw_init(&mw->mw_lock, "memory window access"); 2349 mw->mw_base = mw_init->base; 2350 mw->mw_aperture = mw_init->aperture; 2351 mw->mw_curpos = 0; 2352 t4_write_reg(sc, 2353 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2354 (mw->mw_base + bar0) | V_BIR(0) | 2355 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2356 rw_wlock(&mw->mw_lock); 2357 position_memwin(sc, i, 0); 2358 rw_wunlock(&mw->mw_lock); 2359 } 2360 2361 /* flush */ 2362 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2363 } 2364 2365 /* 2366 * Positions the memory window at the given address in the card's address space. 
2367 * There are some alignment requirements and the actual position may be at an 2368 * address prior to the requested address. mw->mw_curpos always has the actual 2369 * position of the window. 2370 */ 2371 static void 2372 position_memwin(struct adapter *sc, int idx, uint32_t addr) 2373 { 2374 struct memwin *mw; 2375 uint32_t pf; 2376 uint32_t reg; 2377 2378 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2379 mw = &sc->memwin[idx]; 2380 rw_assert(&mw->mw_lock, RA_WLOCKED); 2381 2382 if (is_t4(sc)) { 2383 pf = 0; 2384 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2385 } else { 2386 pf = V_PFNUM(sc->pf); 2387 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2388 } 2389 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2390 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2391 t4_read_reg(sc, reg); /* flush */ 2392 } 2393 2394 static int 2395 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2396 int len, int rw) 2397 { 2398 struct memwin *mw; 2399 uint32_t mw_end, v; 2400 2401 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2402 2403 /* Memory can only be accessed in naturally aligned 4 byte units */ 2404 if (addr & 3 || len & 3 || len <= 0) 2405 return (EINVAL); 2406 2407 mw = &sc->memwin[idx]; 2408 while (len > 0) { 2409 rw_rlock(&mw->mw_lock); 2410 mw_end = mw->mw_curpos + mw->mw_aperture; 2411 if (addr >= mw_end || addr < mw->mw_curpos) { 2412 /* Will need to reposition the window */ 2413 if (!rw_try_upgrade(&mw->mw_lock)) { 2414 rw_runlock(&mw->mw_lock); 2415 rw_wlock(&mw->mw_lock); 2416 } 2417 rw_assert(&mw->mw_lock, RA_WLOCKED); 2418 position_memwin(sc, idx, addr); 2419 rw_downgrade(&mw->mw_lock); 2420 mw_end = mw->mw_curpos + mw->mw_aperture; 2421 } 2422 rw_assert(&mw->mw_lock, RA_RLOCKED); 2423 while (addr < mw_end && len > 0) { 2424 if (rw == 0) { 2425 v = t4_read_reg(sc, mw->mw_base + addr - 2426 mw->mw_curpos); 2427 *val++ = le32toh(v); 2428 } else { 2429 v = *val++; 2430 t4_write_reg(sc, mw->mw_base + addr - 2431 mw->mw_curpos, htole32(v)); 2432 } 2433 addr += 4; 2434 len -= 4; 2435 } 2436 rw_runlock(&mw->mw_lock); 2437 } 2438 2439 return (0); 2440 } 2441 2442 static inline int 2443 read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2444 int len) 2445 { 2446 2447 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2448 } 2449 2450 static inline int 2451 write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2452 const uint32_t *val, int len) 2453 { 2454 2455 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2456 } 2457 2458 static int 2459 t4_range_cmp(const void *a, const void *b) 2460 { 2461 return ((const struct t4_range *)a)->start - 2462 ((const struct t4_range *)b)->start; 2463 } 2464 2465 /* 2466 * Verify that the memory range specified by the addr/len pair is valid within 2467 * the card's address space. 
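 *
 * The card's address space is the union of its enabled memories (EDC0,
 * EDC1, MC0, and MC1 on T5), each described by a BAR-style register
 * that encodes a base and size in MB.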
2468 */ 2469 static int 2470 validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2471 { 2472 struct t4_range mem_ranges[4], *r, *next; 2473 uint32_t em, addr_len; 2474 int i, n, remaining; 2475 2476 /* Memory can only be accessed in naturally aligned 4 byte units */ 2477 if (addr & 3 || len & 3 || len <= 0) 2478 return (EINVAL); 2479 2480 /* Enabled memories */ 2481 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2482 2483 r = &mem_ranges[0]; 2484 n = 0; 2485 bzero(r, sizeof(mem_ranges)); 2486 if (em & F_EDRAM0_ENABLE) { 2487 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2488 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2489 if (r->size > 0) { 2490 r->start = G_EDRAM0_BASE(addr_len) << 20; 2491 if (addr >= r->start && 2492 addr + len <= r->start + r->size) 2493 return (0); 2494 r++; 2495 n++; 2496 } 2497 } 2498 if (em & F_EDRAM1_ENABLE) { 2499 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2500 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2501 if (r->size > 0) { 2502 r->start = G_EDRAM1_BASE(addr_len) << 20; 2503 if (addr >= r->start && 2504 addr + len <= r->start + r->size) 2505 return (0); 2506 r++; 2507 n++; 2508 } 2509 } 2510 if (em & F_EXT_MEM_ENABLE) { 2511 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2512 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2513 if (r->size > 0) { 2514 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2515 if (addr >= r->start && 2516 addr + len <= r->start + r->size) 2517 return (0); 2518 r++; 2519 n++; 2520 } 2521 } 2522 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2523 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2524 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2525 if (r->size > 0) { 2526 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2527 if (addr >= r->start && 2528 addr + len <= r->start + r->size) 2529 return (0); 2530 r++; 2531 n++; 2532 } 2533 } 2534 MPASS(n <= nitems(mem_ranges)); 2535 2536 if (n > 1) { 2537 /* Sort and merge the ranges. */ 2538 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2539 2540 /* Start from index 0 and examine the next n - 1 entries. */ 2541 r = &mem_ranges[0]; 2542 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2543 2544 MPASS(r->size > 0); /* r is a valid entry. */ 2545 next = r + 1; 2546 MPASS(next->size > 0); /* and so is the next one. */ 2547 2548 while (r->start + r->size >= next->start) { 2549 /* Merge the next one into the current entry. */ 2550 r->size = max(r->start + r->size, 2551 next->start + next->size) - r->start; 2552 n--; /* One fewer entry in total. */ 2553 if (--remaining == 0) 2554 goto done; /* short circuit */ 2555 next++; 2556 } 2557 if (next != r + 1) { 2558 /* 2559 * Some entries were merged into r and next 2560 * points to the first valid entry that couldn't 2561 * be merged. 2562 */ 2563 MPASS(next->size > 0); /* must be valid */ 2564 memcpy(r + 1, next, remaining * sizeof(*r)); 2565 #ifdef INVARIANTS 2566 /* 2567 * This is so that the foo->size assertions in the 2568 * next iteration of the loop do the right 2569 * thing for entries that were pulled up and are 2570 * no longer valid. 2571 */ 2572 MPASS(n < nitems(mem_ranges)); 2573 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2574 sizeof(struct t4_range)); 2575 #endif 2576 } 2577 } 2578 done: 2579 /* Done merging the ranges.
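 *
 * For example (hypothetical sizes): [0, 256MB) and [256MB, 512MB)
 * merge into a single [0, 512MB) entry, so a request that straddles
 * the 256MB boundary still validates.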
*/ 2580 MPASS(n > 0); 2581 r = &mem_ranges[0]; 2582 for (i = 0; i < n; i++, r++) { 2583 if (addr >= r->start && 2584 addr + len <= r->start + r->size) 2585 return (0); 2586 } 2587 } 2588 2589 return (EFAULT); 2590 } 2591 2592 static int 2593 fwmtype_to_hwmtype(int mtype) 2594 { 2595 2596 switch (mtype) { 2597 case FW_MEMTYPE_EDC0: 2598 return (MEM_EDC0); 2599 case FW_MEMTYPE_EDC1: 2600 return (MEM_EDC1); 2601 case FW_MEMTYPE_EXTMEM: 2602 return (MEM_MC0); 2603 case FW_MEMTYPE_EXTMEM1: 2604 return (MEM_MC1); 2605 default: 2606 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2607 } 2608 } 2609 2610 /* 2611 * Verify that the memory range specified by the memtype/offset/len pair is 2612 * valid and lies entirely within the memtype specified. The global address of 2613 * the start of the range is returned in addr. 2614 */ 2615 static int 2616 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2617 uint32_t *addr) 2618 { 2619 uint32_t em, addr_len, maddr; 2620 2621 /* Memory can only be accessed in naturally aligned 4 byte units */ 2622 if (off & 3 || len & 3 || len == 0) 2623 return (EINVAL); 2624 2625 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2626 switch (fwmtype_to_hwmtype(mtype)) { 2627 case MEM_EDC0: 2628 if (!(em & F_EDRAM0_ENABLE)) 2629 return (EINVAL); 2630 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2631 maddr = G_EDRAM0_BASE(addr_len) << 20; 2632 break; 2633 case MEM_EDC1: 2634 if (!(em & F_EDRAM1_ENABLE)) 2635 return (EINVAL); 2636 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2637 maddr = G_EDRAM1_BASE(addr_len) << 20; 2638 break; 2639 case MEM_MC: 2640 if (!(em & F_EXT_MEM_ENABLE)) 2641 return (EINVAL); 2642 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2643 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2644 break; 2645 case MEM_MC1: 2646 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2647 return (EINVAL); 2648 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2649 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2650 break; 2651 default: 2652 return (EINVAL); 2653 } 2654 2655 *addr = maddr + off; /* global address */ 2656 return (validate_mem_range(sc, *addr, len)); 2657 } 2658 2659 static int 2660 fixup_devlog_params(struct adapter *sc) 2661 { 2662 struct devlog_params *dparams = &sc->params.devlog; 2663 int rc; 2664 2665 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2666 dparams->size, &dparams->addr); 2667 2668 return (rc); 2669 } 2670 2671 static void 2672 update_nirq(struct intrs_and_queues *iaq, int nports) 2673 { 2674 int extra = T4_EXTRA_INTR; 2675 2676 iaq->nirq = extra; 2677 iaq->nirq += nports * (iaq->nrxq + iaq->nofldrxq); 2678 iaq->nirq += nports * (iaq->num_vis - 1) * 2679 max(iaq->nrxq_vi, iaq->nnmrxq_vi); 2680 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi; 2681 } 2682 2683 /* 2684 * Adjust requirements to fit the number of interrupts available. 
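 *
 * The strategy, in order of preference: drop the extra VIs, then walk
 * the NIC rxq count down through powers of 2 (halving the TOE rxqs at
 * each step), and as a last resort run everything off a single vector.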
2685 */ 2686 static void 2687 calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype, 2688 int navail) 2689 { 2690 int old_nirq; 2691 const int nports = sc->params.nports; 2692 2693 MPASS(nports > 0); 2694 MPASS(navail > 0); 2695 2696 bzero(iaq, sizeof(*iaq)); 2697 iaq->intr_type = itype; 2698 iaq->num_vis = t4_num_vis; 2699 iaq->ntxq = t4_ntxq; 2700 iaq->ntxq_vi = t4_ntxq_vi; 2701 iaq->nrxq = t4_nrxq; 2702 iaq->nrxq_vi = t4_nrxq_vi; 2703 #ifdef TCP_OFFLOAD 2704 if (is_offload(sc)) { 2705 iaq->nofldtxq = t4_nofldtxq; 2706 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2707 iaq->nofldrxq = t4_nofldrxq; 2708 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2709 } 2710 #endif 2711 #ifdef DEV_NETMAP 2712 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2713 iaq->nnmrxq_vi = t4_nnmrxq_vi; 2714 #endif 2715 2716 update_nirq(iaq, nports); 2717 if (iaq->nirq <= navail && 2718 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2719 /* 2720 * This is the normal case -- there are enough interrupts for 2721 * everything. 2722 */ 2723 goto done; 2724 } 2725 2726 /* 2727 * If extra VIs have been configured try reducing their count and see if 2728 * that works. 2729 */ 2730 while (iaq->num_vis > 1) { 2731 iaq->num_vis--; 2732 update_nirq(iaq, nports); 2733 if (iaq->nirq <= navail && 2734 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2735 device_printf(sc->dev, "virtual interfaces per port " 2736 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, " 2737 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. " 2738 "itype %d, navail %u, nirq %d.\n", 2739 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq, 2740 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, 2741 itype, navail, iaq->nirq); 2742 goto done; 2743 } 2744 } 2745 2746 /* 2747 * Extra VIs will not be created. Log a message if they were requested. 2748 */ 2749 MPASS(iaq->num_vis == 1); 2750 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2751 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2752 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2753 if (iaq->num_vis != t4_num_vis) { 2754 device_printf(sc->dev, "extra virtual interfaces disabled. " 2755 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2756 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n", 2757 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, 2758 iaq->nnmrxq_vi, itype, navail, iaq->nirq); 2759 } 2760 2761 /* 2762 * Keep reducing the number of NIC rx queues to the next lower power of 2763 * 2 (for even RSS distribution) and halving the TOE rx queues and see 2764 * if that works. 2765 */ 2766 do { 2767 if (iaq->nrxq > 1) { 2768 do { 2769 iaq->nrxq--; 2770 } while (!powerof2(iaq->nrxq)); 2771 } 2772 if (iaq->nofldrxq > 1) 2773 iaq->nofldrxq >>= 1; 2774 2775 old_nirq = iaq->nirq; 2776 update_nirq(iaq, nports); 2777 if (iaq->nirq <= navail && 2778 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2779 device_printf(sc->dev, "running with reduced number of " 2780 "rx queues because of shortage of interrupts. " 2781 "nrxq=%u, nofldrxq=%u. " 2782 "itype %d, navail %u, nirq %d.\n", iaq->nrxq, 2783 iaq->nofldrxq, itype, navail, iaq->nirq); 2784 goto done; 2785 } 2786 } while (old_nirq != iaq->nirq); 2787 2788 /* One interrupt for everything. Ugh. */ 2789 device_printf(sc->dev, "running with minimal number of queues. 
" 2790 "itype %d, navail %u.\n", itype, navail); 2791 iaq->nirq = 1; 2792 MPASS(iaq->nrxq == 1); 2793 iaq->ntxq = 1; 2794 if (iaq->nofldrxq > 1) 2795 iaq->nofldtxq = 1; 2796 done: 2797 MPASS(iaq->num_vis > 0); 2798 if (iaq->num_vis > 1) { 2799 MPASS(iaq->nrxq_vi > 0); 2800 MPASS(iaq->ntxq_vi > 0); 2801 } 2802 MPASS(iaq->nirq > 0); 2803 MPASS(iaq->nrxq > 0); 2804 MPASS(iaq->ntxq > 0); 2805 if (itype == INTR_MSI) { 2806 MPASS(powerof2(iaq->nirq)); 2807 } 2808 } 2809 2810 static int 2811 cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq) 2812 { 2813 int rc, itype, navail, nalloc; 2814 2815 for (itype = INTR_MSIX; itype; itype >>= 1) { 2816 2817 if ((itype & t4_intr_types) == 0) 2818 continue; /* not allowed */ 2819 2820 if (itype == INTR_MSIX) 2821 navail = pci_msix_count(sc->dev); 2822 else if (itype == INTR_MSI) 2823 navail = pci_msi_count(sc->dev); 2824 else 2825 navail = 1; 2826 restart: 2827 if (navail == 0) 2828 continue; 2829 2830 calculate_iaq(sc, iaq, itype, navail); 2831 nalloc = iaq->nirq; 2832 rc = 0; 2833 if (itype == INTR_MSIX) 2834 rc = pci_alloc_msix(sc->dev, &nalloc); 2835 else if (itype == INTR_MSI) 2836 rc = pci_alloc_msi(sc->dev, &nalloc); 2837 2838 if (rc == 0 && nalloc > 0) { 2839 if (nalloc == iaq->nirq) 2840 return (0); 2841 2842 /* 2843 * Didn't get the number requested. Use whatever number 2844 * the kernel is willing to allocate. 2845 */ 2846 device_printf(sc->dev, "fewer vectors than requested, " 2847 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2848 itype, iaq->nirq, nalloc); 2849 pci_release_msi(sc->dev); 2850 navail = nalloc; 2851 goto restart; 2852 } 2853 2854 device_printf(sc->dev, 2855 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2856 itype, rc, iaq->nirq, nalloc); 2857 } 2858 2859 device_printf(sc->dev, 2860 "failed to find a usable interrupt type. 
" 2861 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2862 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2863 2864 return (ENXIO); 2865 } 2866 2867 #define FW_VERSION(chip) ( \ 2868 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2869 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2870 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2871 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2872 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2873 2874 struct fw_info { 2875 uint8_t chip; 2876 char *kld_name; 2877 char *fw_mod_name; 2878 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2879 } fw_info[] = { 2880 { 2881 .chip = CHELSIO_T4, 2882 .kld_name = "t4fw_cfg", 2883 .fw_mod_name = "t4fw", 2884 .fw_hdr = { 2885 .chip = FW_HDR_CHIP_T4, 2886 .fw_ver = htobe32_const(FW_VERSION(T4)), 2887 .intfver_nic = FW_INTFVER(T4, NIC), 2888 .intfver_vnic = FW_INTFVER(T4, VNIC), 2889 .intfver_ofld = FW_INTFVER(T4, OFLD), 2890 .intfver_ri = FW_INTFVER(T4, RI), 2891 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2892 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2893 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2894 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2895 }, 2896 }, { 2897 .chip = CHELSIO_T5, 2898 .kld_name = "t5fw_cfg", 2899 .fw_mod_name = "t5fw", 2900 .fw_hdr = { 2901 .chip = FW_HDR_CHIP_T5, 2902 .fw_ver = htobe32_const(FW_VERSION(T5)), 2903 .intfver_nic = FW_INTFVER(T5, NIC), 2904 .intfver_vnic = FW_INTFVER(T5, VNIC), 2905 .intfver_ofld = FW_INTFVER(T5, OFLD), 2906 .intfver_ri = FW_INTFVER(T5, RI), 2907 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2908 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2909 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2910 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2911 }, 2912 }, { 2913 .chip = CHELSIO_T6, 2914 .kld_name = "t6fw_cfg", 2915 .fw_mod_name = "t6fw", 2916 .fw_hdr = { 2917 .chip = FW_HDR_CHIP_T6, 2918 .fw_ver = htobe32_const(FW_VERSION(T6)), 2919 .intfver_nic = FW_INTFVER(T6, NIC), 2920 .intfver_vnic = FW_INTFVER(T6, VNIC), 2921 .intfver_ofld = FW_INTFVER(T6, OFLD), 2922 .intfver_ri = FW_INTFVER(T6, RI), 2923 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 2924 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2925 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 2926 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2927 }, 2928 } 2929 }; 2930 2931 static struct fw_info * 2932 find_fw_info(int chip) 2933 { 2934 int i; 2935 2936 for (i = 0; i < nitems(fw_info); i++) { 2937 if (fw_info[i].chip == chip) 2938 return (&fw_info[i]); 2939 } 2940 return (NULL); 2941 } 2942 2943 /* 2944 * Is the given firmware API compatible with the one the driver was compiled 2945 * with? 2946 */ 2947 static int 2948 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2949 { 2950 2951 /* short circuit if it's the exact same firmware version */ 2952 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2953 return (1); 2954 2955 /* 2956 * XXX: Is this too conservative? Perhaps I should limit this to the 2957 * features that are supported in the driver. 2958 */ 2959 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2960 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2961 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2962 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2963 return (1); 2964 #undef SAME_INTF 2965 2966 return (0); 2967 } 2968 2969 /* 2970 * The firmware in the KLD is usable, but should it be installed? 
This routine 2971 * explains itself in detail if it indicates the KLD firmware should be 2972 * installed. 2973 */ 2974 static int 2975 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2976 { 2977 const char *reason; 2978 2979 if (!card_fw_usable) { 2980 reason = "incompatible or unusable"; 2981 goto install; 2982 } 2983 2984 if (k > c) { 2985 reason = "older than the version bundled with this driver"; 2986 goto install; 2987 } 2988 2989 if (t4_fw_install == 2 && k != c) { 2990 reason = "different than the version bundled with this driver"; 2991 goto install; 2992 } 2993 2994 return (0); 2995 2996 install: 2997 if (t4_fw_install == 0) { 2998 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2999 "but the driver is prohibited from installing a different " 3000 "firmware on the card.\n", 3001 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3002 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 3003 3004 return (0); 3005 } 3006 3007 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 3008 "installing firmware %u.%u.%u.%u on card.\n", 3009 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3010 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 3011 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 3012 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 3013 3014 return (1); 3015 } 3016 3017 /* 3018 * Establish contact with the firmware and determine if we are the master driver 3019 * or not, and whether we are responsible for chip initialization. 3020 */ 3021 static int 3022 prep_firmware(struct adapter *sc) 3023 { 3024 const struct firmware *fw = NULL, *default_cfg; 3025 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 3026 enum dev_state state; 3027 struct fw_info *fw_info; 3028 struct fw_hdr *card_fw; /* fw on the card */ 3029 const struct fw_hdr *kld_fw; /* fw in the KLD */ 3030 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 3031 against */ 3032 3033 /* This is the firmware whose headers the driver was compiled against */ 3034 fw_info = find_fw_info(chip_id(sc)); 3035 if (fw_info == NULL) { 3036 device_printf(sc->dev, 3037 "unable to look up firmware information for chip %d.\n", 3038 chip_id(sc)); 3039 return (EINVAL); 3040 } 3041 drv_fw = &fw_info->fw_hdr; 3042 3043 /* 3044 * The firmware KLD contains many modules. The KLD name is also the 3045 * name of the module that contains the default config file. 
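 *
 * For example, the t4fw_cfg KLD provides both the "t4fw_cfg" default
 * config module and the "t4fw" firmware module; a non-default profile
 * "foo" would come from a module named "t4fw_cfg_foo" (see
 * partition_resources).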
3046 */ 3047 default_cfg = firmware_get(fw_info->kld_name); 3048 3049 /* This is the firmware in the KLD */ 3050 fw = firmware_get(fw_info->fw_mod_name); 3051 if (fw != NULL) { 3052 kld_fw = (const void *)fw->data; 3053 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 3054 } else { 3055 kld_fw = NULL; 3056 kld_fw_usable = 0; 3057 } 3058 3059 /* Read the header of the firmware on the card */ 3060 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 3061 rc = -t4_read_flash(sc, FLASH_FW_START, 3062 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 3063 if (rc == 0) { 3064 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 3065 if (card_fw->fw_ver == be32toh(0xffffffff)) { 3066 uint32_t d; 3067 3068 if (!kld_fw_usable) { 3069 device_printf(sc->dev, 3070 "no firmware on the card and no usable " 3071 "firmware bundled with the driver.\n"); 3072 rc = EIO; 3073 goto done; 3074 } else if (t4_fw_install == 0) { 3075 device_printf(sc->dev, 3076 "no firmware on the card and the driver " 3077 "is prohibited from installing new " 3078 "firmware.\n"); 3079 rc = EIO; 3080 goto done; 3081 } 3082 d = be32toh(kld_fw->fw_ver); /* kld_fw is known valid here */ 3083 device_printf(sc->dev, "no firmware on the card, " 3084 "installing firmware %d.%d.%d.%d\n", 3085 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3086 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); 3087 rc = t4_fw_forceinstall(sc, fw->data, fw->datasize); 3088 if (rc < 0) { 3089 rc = -rc; 3090 device_printf(sc->dev, 3091 "firmware install failed: %d.\n", rc); 3092 goto done; 3093 } 3094 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 3095 card_fw_usable = 1; 3096 need_fw_reset = 0; 3097 } 3098 } else { 3099 device_printf(sc->dev, 3100 "Unable to read card's firmware header: %d\n", rc); 3101 card_fw_usable = 0; 3102 } 3103 3104 /* Contact firmware. */ 3105 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 3106 if (rc < 0 || state == DEV_STATE_ERR) { 3107 rc = -rc; 3108 device_printf(sc->dev, 3109 "failed to connect to the firmware: %d, %d.\n", rc, state); 3110 goto done; 3111 } 3112 pf = rc; 3113 if (pf == sc->mbox) 3114 sc->flags |= MASTER_PF; 3115 else if (state == DEV_STATE_UNINIT) { 3116 /* 3117 * We didn't get to be the master so we definitely won't be 3118 * configuring the chip. It's a bug if someone else hasn't 3119 * configured it already. 3120 */ 3121 device_printf(sc->dev, "couldn't be master (%d), " 3122 "device not already initialized either (%d).\n", rc, state); 3123 rc = EPROTO; 3124 goto done; 3125 } 3126 3127 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 3128 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 3129 /* 3130 * Common case: the firmware on the card is an exact match and 3131 * the KLD is an exact match too, or the KLD is 3132 * absent/incompatible. Note that t4_fw_install = 2 is ignored 3133 * here -- use cxgbetool loadfw if you want to reinstall the 3134 * same firmware as the one on the card. 3135 */ 3136 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 3137 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 3138 be32toh(card_fw->fw_ver))) { 3139 3140 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 3141 if (rc != 0) { 3142 device_printf(sc->dev, 3143 "failed to install firmware: %d\n", rc); 3144 goto done; 3145 } 3146 3147 /* Installed successfully, update the cached header too.
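 *
 * card_fw now mirrors what was just written to the flash, so the
 * version checks below operate on the freshly installed firmware.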
*/ 3148 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 3149 card_fw_usable = 1; 3150 need_fw_reset = 0; /* already reset as part of load_fw */ 3151 } 3152 3153 if (!card_fw_usable) { 3154 uint32_t d, c, k; 3155 3156 d = ntohl(drv_fw->fw_ver); 3157 c = ntohl(card_fw->fw_ver); 3158 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 3159 3160 device_printf(sc->dev, "Cannot find a usable firmware: " 3161 "fw_install %d, chip state %d, " 3162 "driver compiled with %d.%d.%d.%d, " 3163 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 3164 t4_fw_install, state, 3165 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3166 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 3167 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3168 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 3169 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 3170 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 3171 rc = EINVAL; 3172 goto done; 3173 } 3174 3175 /* Reset device */ 3176 if (need_fw_reset && 3177 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 3178 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 3179 if (rc != ETIMEDOUT && rc != EIO) 3180 t4_fw_bye(sc, sc->mbox); 3181 goto done; 3182 } 3183 sc->flags |= FW_OK; 3184 3185 rc = get_params__pre_init(sc); 3186 if (rc != 0) 3187 goto done; /* error message displayed already */ 3188 3189 /* Partition adapter resources as specified in the config file. */ 3190 if (state == DEV_STATE_UNINIT) { 3191 3192 KASSERT(sc->flags & MASTER_PF, 3193 ("%s: trying to change chip settings when not master.", 3194 __func__)); 3195 3196 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 3197 if (rc != 0) 3198 goto done; /* error message displayed already */ 3199 3200 t4_tweak_chip_settings(sc); 3201 3202 /* get basic stuff going */ 3203 rc = -t4_fw_initialize(sc, sc->mbox); 3204 if (rc != 0) { 3205 device_printf(sc->dev, "fw init failed: %d.\n", rc); 3206 goto done; 3207 } 3208 } else { 3209 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 3210 sc->cfcsum = 0; 3211 } 3212 3213 done: 3214 free(card_fw, M_CXGBE); 3215 if (fw != NULL) 3216 firmware_put(fw, FIRMWARE_UNLOAD); 3217 if (default_cfg != NULL) 3218 firmware_put(default_cfg, FIRMWARE_UNLOAD); 3219 3220 return (rc); 3221 } 3222 3223 #define FW_PARAM_DEV(param) \ 3224 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3225 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3226 #define FW_PARAM_PFVF(param) \ 3227 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3228 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 3229 3230 /* 3231 * Partition chip resources for use between various PFs, VFs, etc. 3232 */ 3233 static int 3234 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 3235 const char *name_prefix) 3236 { 3237 const struct firmware *cfg = NULL; 3238 int rc = 0; 3239 struct fw_caps_config_cmd caps; 3240 uint32_t mtype, moff, finicsum, cfcsum; 3241 3242 /* 3243 * Figure out what configuration file to use. Pick the default config 3244 * file for the card if the user hasn't specified one explicitly. 3245 */ 3246 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 3247 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 3248 /* Card specific overrides go here. 
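 *
 * (Device id 0x440a below gets the uwire config and FPGAs get their
 * own config file; everything else keeps the default.)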
*/ 3249 if (pci_get_device(sc->dev) == 0x440a) 3250 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 3251 if (is_fpga(sc)) 3252 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 3253 } 3254 3255 /* 3256 * We need to load another module if the profile is anything except 3257 * "default" or "flash". 3258 */ 3259 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 3260 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3261 char s[32]; 3262 3263 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 3264 cfg = firmware_get(s); 3265 if (cfg == NULL) { 3266 if (default_cfg != NULL) { 3267 device_printf(sc->dev, 3268 "unable to load module \"%s\" for " 3269 "configuration profile \"%s\", will use " 3270 "the default config file instead.\n", 3271 s, sc->cfg_file); 3272 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3273 "%s", DEFAULT_CF); 3274 } else { 3275 device_printf(sc->dev, 3276 "unable to load module \"%s\" for " 3277 "configuration profile \"%s\", will use " 3278 "the config file on the card's flash " 3279 "instead.\n", s, sc->cfg_file); 3280 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3281 "%s", FLASH_CF); 3282 } 3283 } 3284 } 3285 3286 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 3287 default_cfg == NULL) { 3288 device_printf(sc->dev, 3289 "default config file not available, will use the config " 3290 "file on the card's flash instead.\n"); 3291 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 3292 } 3293 3294 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3295 u_int cflen; 3296 const uint32_t *cfdata; 3297 uint32_t param, val, addr; 3298 3299 KASSERT(cfg != NULL || default_cfg != NULL, 3300 ("%s: no config to upload", __func__)); 3301 3302 /* 3303 * Ask the firmware where it wants us to upload the config file. 3304 */ 3305 param = FW_PARAM_DEV(CF); 3306 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 3307 if (rc != 0) { 3308 /* No support for config file? Shouldn't happen. */ 3309 device_printf(sc->dev, 3310 "failed to query config file location: %d.\n", rc); 3311 goto done; 3312 } 3313 mtype = G_FW_PARAMS_PARAM_Y(val); 3314 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3315 3316 /* 3317 * XXX: sheer laziness. We deliberately added 4 bytes of 3318 * useless stuffing/comments at the end of the config file so 3319 * it's ok to simply throw away the last remaining bytes when 3320 * the config file is not an exact multiple of 4. This also 3321 * helps with the validate_mt_off_len check. 3322 */ 3323 if (cfg != NULL) { 3324 cflen = cfg->datasize & ~3; 3325 cfdata = cfg->data; 3326 } else { 3327 cflen = default_cfg->datasize & ~3; 3328 cfdata = default_cfg->data; 3329 } 3330 3331 if (cflen > FLASH_CFG_MAX_SIZE) { 3332 device_printf(sc->dev, 3333 "config file too long (%d, max allowed is %d). " 3334 "Will try to use the config on the card, if any.\n", 3335 cflen, FLASH_CFG_MAX_SIZE); 3336 goto use_config_on_flash; 3337 } 3338 3339 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3340 if (rc != 0) { 3341 device_printf(sc->dev, 3342 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 3343 "Will try to use the config on the card, if any.\n", 3344 __func__, mtype, moff, cflen, rc); 3345 goto use_config_on_flash; 3346 } 3347 write_via_memwin(sc, 2, addr, cfdata, cflen); 3348 } else { 3349 use_config_on_flash: 3350 mtype = FW_MEMTYPE_FLASH; 3351 moff = t4_flash_cfg_addr(sc); 3352 } 3353 3354 bzero(&caps, sizeof(caps)); 3355 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3356 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3357 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3358 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3359 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3360 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3361 if (rc != 0) { 3362 device_printf(sc->dev, 3363 "failed to pre-process config file: %d " 3364 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3365 goto done; 3366 } 3367 3368 finicsum = be32toh(caps.finicsum); 3369 cfcsum = be32toh(caps.cfcsum); 3370 if (finicsum != cfcsum) { 3371 device_printf(sc->dev, 3372 "WARNING: config file checksum mismatch: %08x %08x\n", 3373 finicsum, cfcsum); 3374 } 3375 sc->cfcsum = cfcsum; 3376 3377 #define LIMIT_CAPS(x) do { \ 3378 caps.x &= htobe16(t4_##x##_allowed); \ 3379 } while (0) 3380 3381 /* 3382 * Let the firmware know what features will (not) be used so it can tune 3383 * things accordingly. 3384 */ 3385 LIMIT_CAPS(nbmcaps); 3386 LIMIT_CAPS(linkcaps); 3387 LIMIT_CAPS(switchcaps); 3388 LIMIT_CAPS(niccaps); 3389 LIMIT_CAPS(toecaps); 3390 LIMIT_CAPS(rdmacaps); 3391 LIMIT_CAPS(cryptocaps); 3392 LIMIT_CAPS(iscsicaps); 3393 LIMIT_CAPS(fcoecaps); 3394 #undef LIMIT_CAPS 3395 3396 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3397 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3398 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3399 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3400 if (rc != 0) { 3401 device_printf(sc->dev, 3402 "failed to process config file: %d.\n", rc); 3403 } 3404 done: 3405 if (cfg != NULL) 3406 firmware_put(cfg, FIRMWARE_UNLOAD); 3407 return (rc); 3408 } 3409 3410 /* 3411 * Retrieve parameters that are needed (or nice to have) very early. 
3412 */ 3413 static int 3414 get_params__pre_init(struct adapter *sc) 3415 { 3416 int rc; 3417 uint32_t param[2], val[2]; 3418 3419 t4_get_version_info(sc); 3420 3421 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 3422 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 3423 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 3424 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 3425 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 3426 3427 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 3428 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 3429 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 3430 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 3431 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 3432 3433 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 3434 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 3435 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 3436 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 3437 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 3438 3439 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 3440 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 3441 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 3442 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 3443 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 3444 3445 param[0] = FW_PARAM_DEV(PORTVEC); 3446 param[1] = FW_PARAM_DEV(CCLK); 3447 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3448 if (rc != 0) { 3449 device_printf(sc->dev, 3450 "failed to query parameters (pre_init): %d.\n", rc); 3451 return (rc); 3452 } 3453 3454 sc->params.portvec = val[0]; 3455 sc->params.nports = bitcount32(val[0]); 3456 sc->params.vpd.cclk = val[1]; 3457 3458 /* Read device log parameters. */ 3459 rc = -t4_init_devlog_params(sc, 1); 3460 if (rc == 0) 3461 fixup_devlog_params(sc); 3462 else { 3463 device_printf(sc->dev, 3464 "failed to get devlog parameters: %d.\n", rc); 3465 rc = 0; /* devlog isn't critical for device operation */ 3466 } 3467 3468 return (rc); 3469 } 3470 3471 /* 3472 * Retrieve various parameters that are of interest to the driver. The device 3473 * has been initialized by the firmware at this point. 3474 */ 3475 static int 3476 get_params__post_init(struct adapter *sc) 3477 { 3478 int rc; 3479 uint32_t param[7], val[7]; 3480 struct fw_caps_config_cmd caps; 3481 3482 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3483 param[1] = FW_PARAM_PFVF(EQ_START); 3484 param[2] = FW_PARAM_PFVF(FILTER_START); 3485 param[3] = FW_PARAM_PFVF(FILTER_END); 3486 param[4] = FW_PARAM_PFVF(L2T_START); 3487 param[5] = FW_PARAM_PFVF(L2T_END); 3488 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3489 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 3490 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD); 3491 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val); 3492 if (rc != 0) { 3493 device_printf(sc->dev, 3494 "failed to query parameters (post_init): %d.\n", rc); 3495 return (rc); 3496 } 3497 3498 sc->sge.iq_start = val[0]; 3499 sc->sge.eq_start = val[1]; 3500 sc->tids.ftid_base = val[2]; 3501 sc->tids.nftids = val[3] - val[2] + 1; 3502 sc->params.ftid_min = val[2]; 3503 sc->params.ftid_max = val[3]; 3504 sc->vres.l2t.start = val[4]; 3505 sc->vres.l2t.size = val[5] - val[4] + 1; 3506 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3507 ("%s: L2 table size (%u) larger than expected (%u)", 3508 __func__, sc->vres.l2t.size, L2T_SIZE)); 3509 sc->params.core_vdd = val[6]; 3510 3511 /* 3512 * MPSBGMAP is queried separately because only recent firmwares support 3513 * it as a parameter and we don't want the compound query above to fail 3514 * on older firmwares. 
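 *
 * (A compound query is all-or-nothing: one unknown parameter fails the
 * whole thing, so optional parameters like this one are probed
 * individually and a failure is treated as "not supported".)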
3515 */ 3516 param[0] = FW_PARAM_DEV(MPSBGMAP); 3517 val[0] = 0; 3518 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); 3519 if (rc == 0) 3520 sc->params.mps_bg_map = val[0]; 3521 else 3522 sc->params.mps_bg_map = 0; 3523 3524 /* get capabilities */ 3525 bzero(&caps, sizeof(caps)); 3526 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3527 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3528 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3529 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3530 if (rc != 0) { 3531 device_printf(sc->dev, 3532 "failed to get card capabilities: %d.\n", rc); 3533 return (rc); 3534 } 3535 3536 #define READ_CAPS(x) do { \ 3537 sc->x = be16toh(caps.x); \ 3538 } while (0) 3539 READ_CAPS(nbmcaps); 3540 READ_CAPS(linkcaps); 3541 READ_CAPS(switchcaps); 3542 READ_CAPS(niccaps); 3543 READ_CAPS(toecaps); 3544 READ_CAPS(rdmacaps); 3545 READ_CAPS(cryptocaps); 3546 READ_CAPS(iscsicaps); 3547 READ_CAPS(fcoecaps); 3548 3549 /* 3550 * The firmware attempts memfree TOE configuration for -SO cards and 3551 * will report toecaps=0 if it runs out of resources (this depends on 3552 * the config file). It may not report 0 for other capabilities 3553 * dependent on the TOE in this case. Set them to 0 here so that the 3554 * driver doesn't bother tracking resources that will never be used. 3555 */ 3556 if (sc->toecaps == 0) { 3557 sc->iscsicaps = 0; 3558 sc->rdmacaps = 0; 3559 } 3560 3561 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3562 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3563 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3564 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3565 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3566 if (rc != 0) { 3567 device_printf(sc->dev, 3568 "failed to query NIC parameters: %d.\n", rc); 3569 return (rc); 3570 } 3571 sc->tids.etid_base = val[0]; 3572 sc->params.etid_min = val[0]; 3573 sc->tids.netids = val[1] - val[0] + 1; 3574 sc->params.netids = sc->tids.netids; 3575 sc->params.eo_wr_cred = val[2]; 3576 sc->params.ethoffload = 1; 3577 } 3578 3579 if (sc->toecaps) { 3580 /* query offload-related parameters */ 3581 param[0] = FW_PARAM_DEV(NTID); 3582 param[1] = FW_PARAM_PFVF(SERVER_START); 3583 param[2] = FW_PARAM_PFVF(SERVER_END); 3584 param[3] = FW_PARAM_PFVF(TDDP_START); 3585 param[4] = FW_PARAM_PFVF(TDDP_END); 3586 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3587 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3588 if (rc != 0) { 3589 device_printf(sc->dev, 3590 "failed to query TOE parameters: %d.\n", rc); 3591 return (rc); 3592 } 3593 sc->tids.ntids = val[0]; 3594 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3595 sc->tids.stid_base = val[1]; 3596 sc->tids.nstids = val[2] - val[1] + 1; 3597 sc->vres.ddp.start = val[3]; 3598 sc->vres.ddp.size = val[4] - val[3] + 1; 3599 sc->params.ofldq_wr_cred = val[5]; 3600 sc->params.offload = 1; 3601 } 3602 if (sc->rdmacaps) { 3603 param[0] = FW_PARAM_PFVF(STAG_START); 3604 param[1] = FW_PARAM_PFVF(STAG_END); 3605 param[2] = FW_PARAM_PFVF(RQ_START); 3606 param[3] = FW_PARAM_PFVF(RQ_END); 3607 param[4] = FW_PARAM_PFVF(PBL_START); 3608 param[5] = FW_PARAM_PFVF(PBL_END); 3609 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3610 if (rc != 0) { 3611 device_printf(sc->dev, 3612 "failed to query RDMA parameters(1): %d.\n", rc); 3613 return (rc); 3614 } 3615 sc->vres.stag.start = val[0]; 3616 sc->vres.stag.size = val[1] - val[0] + 1; 3617 sc->vres.rq.start = val[2]; 3618 sc->vres.rq.size = val[3] - val[2] + 1; 3619 sc->vres.pbl.start =
val[4]; 3620 sc->vres.pbl.size = val[5] - val[4] + 1; 3621 3622 param[0] = FW_PARAM_PFVF(SQRQ_START); 3623 param[1] = FW_PARAM_PFVF(SQRQ_END); 3624 param[2] = FW_PARAM_PFVF(CQ_START); 3625 param[3] = FW_PARAM_PFVF(CQ_END); 3626 param[4] = FW_PARAM_PFVF(OCQ_START); 3627 param[5] = FW_PARAM_PFVF(OCQ_END); 3628 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3629 if (rc != 0) { 3630 device_printf(sc->dev, 3631 "failed to query RDMA parameters(2): %d.\n", rc); 3632 return (rc); 3633 } 3634 sc->vres.qp.start = val[0]; 3635 sc->vres.qp.size = val[1] - val[0] + 1; 3636 sc->vres.cq.start = val[2]; 3637 sc->vres.cq.size = val[3] - val[2] + 1; 3638 sc->vres.ocq.start = val[4]; 3639 sc->vres.ocq.size = val[5] - val[4] + 1; 3640 3641 param[0] = FW_PARAM_PFVF(SRQ_START); 3642 param[1] = FW_PARAM_PFVF(SRQ_END); 3643 param[2] = FW_PARAM_DEV(MAXORDIRD_QP); 3644 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); 3645 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); 3646 if (rc != 0) { 3647 device_printf(sc->dev, 3648 "failed to query RDMA parameters(3): %d.\n", rc); 3649 return (rc); 3650 } 3651 sc->vres.srq.start = val[0]; 3652 sc->vres.srq.size = val[1] - val[0] + 1; 3653 sc->params.max_ordird_qp = val[2]; 3654 sc->params.max_ird_adapter = val[3]; 3655 } 3656 if (sc->iscsicaps) { 3657 param[0] = FW_PARAM_PFVF(ISCSI_START); 3658 param[1] = FW_PARAM_PFVF(ISCSI_END); 3659 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3660 if (rc != 0) { 3661 device_printf(sc->dev, 3662 "failed to query iSCSI parameters: %d.\n", rc); 3663 return (rc); 3664 } 3665 sc->vres.iscsi.start = val[0]; 3666 sc->vres.iscsi.size = val[1] - val[0] + 1; 3667 } 3668 if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { 3669 param[0] = FW_PARAM_PFVF(TLS_START); 3670 param[1] = FW_PARAM_PFVF(TLS_END); 3671 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3672 if (rc != 0) { 3673 device_printf(sc->dev, 3674 "failed to query TLS parameters: %d.\n", rc); 3675 return (rc); 3676 } 3677 sc->vres.key.start = val[0]; 3678 sc->vres.key.size = val[1] - val[0] + 1; 3679 } 3680 3681 t4_init_sge_params(sc); 3682 3683 /* 3684 * We've got the params we wanted to query via the firmware. Now grab 3685 * some others directly from the chip. 3686 */ 3687 rc = t4_read_chip_settings(sc); 3688 3689 return (rc); 3690 } 3691 3692 static int 3693 set_params__post_init(struct adapter *sc) 3694 { 3695 uint32_t param, val; 3696 #ifdef TCP_OFFLOAD 3697 int i, v, shift; 3698 #endif 3699 3700 /* ask for encapsulated CPLs */ 3701 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3702 val = 1; 3703 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 3704 3705 #ifdef TCP_OFFLOAD 3706 /* 3707 * Override the TOE timers with user provided tunables. This is not the 3708 * recommended way to change the timers (the firmware config file is) so 3709 * these tunables are not documented. 3710 * 3711 * All the timer tunables are in microseconds. 
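 *
 * Example (hypothetical value): t4_toe_keepalive_idle = 10000000 (10s)
 * is converted with us_to_tcp_ticks(), masked to the register field
 * width (M_KEEPALIVEIDLE), and written to A_TP_KEEP_IDLE below.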
3712 */ 3713 if (t4_toe_keepalive_idle != 0) { 3714 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle); 3715 v &= M_KEEPALIVEIDLE; 3716 t4_set_reg_field(sc, A_TP_KEEP_IDLE, 3717 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v)); 3718 } 3719 if (t4_toe_keepalive_interval != 0) { 3720 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval); 3721 v &= M_KEEPALIVEINTVL; 3722 t4_set_reg_field(sc, A_TP_KEEP_INTVL, 3723 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v)); 3724 } 3725 if (t4_toe_keepalive_count != 0) { 3726 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2; 3727 t4_set_reg_field(sc, A_TP_SHIFT_CNT, 3728 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) | 3729 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2), 3730 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v)); 3731 } 3732 if (t4_toe_rexmt_min != 0) { 3733 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min); 3734 v &= M_RXTMIN; 3735 t4_set_reg_field(sc, A_TP_RXT_MIN, 3736 V_RXTMIN(M_RXTMIN), V_RXTMIN(v)); 3737 } 3738 if (t4_toe_rexmt_max != 0) { 3739 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max); 3740 v &= M_RXTMAX; 3741 t4_set_reg_field(sc, A_TP_RXT_MAX, 3742 V_RXTMAX(M_RXTMAX), V_RXTMAX(v)); 3743 } 3744 if (t4_toe_rexmt_count != 0) { 3745 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2; 3746 t4_set_reg_field(sc, A_TP_SHIFT_CNT, 3747 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) | 3748 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2), 3749 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v)); 3750 } 3751 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) { 3752 if (t4_toe_rexmt_backoff[i] != -1) { 3753 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0; 3754 shift = (i & 3) << 3; 3755 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3), 3756 M_TIMERBACKOFFINDEX0 << shift, v << shift); 3757 } 3758 } 3759 #endif 3760 return (0); 3761 } 3762 3763 #undef FW_PARAM_PFVF 3764 #undef FW_PARAM_DEV 3765 3766 static void 3767 t4_set_desc(struct adapter *sc) 3768 { 3769 char buf[128]; 3770 struct adapter_params *p = &sc->params; 3771 3772 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); 3773 3774 device_set_desc_copy(sc->dev, buf); 3775 } 3776 3777 static void 3778 build_medialist(struct port_info *pi, struct ifmedia *media) 3779 { 3780 int m; 3781 3782 PORT_LOCK_ASSERT_OWNED(pi); 3783 3784 ifmedia_removeall(media); 3785 3786 /* 3787 * XXX: Would it be better to ifmedia_add all 4 combinations of pause 3788 * settings for every speed instead of just txpause|rxpause? ifconfig 3789 * media display looks much better if autoselect is the only case where 3790 * ifm_current is different from ifm_active. If the user picks anything 3791 * except txpause|rxpause the display is ugly. 
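 *
 * As it stands every fixed-speed entry is added with both pause flags,
 * e.g. for a 10G SR module:
 *
 *	ifmedia_add(media, IFM_ETHER | IFM_FDX | IFM_ETH_TXPAUSE |
 *	    IFM_ETH_RXPAUSE | IFM_10G_SR, 0, NULL);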
3792 */ 3793 m = IFM_ETHER | IFM_FDX | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3794 3795 switch(pi->port_type) { 3796 case FW_PORT_TYPE_BT_XFI: 3797 case FW_PORT_TYPE_BT_XAUI: 3798 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3799 /* fall through */ 3800 3801 case FW_PORT_TYPE_BT_SGMII: 3802 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3803 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3804 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3805 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3806 break; 3807 3808 case FW_PORT_TYPE_CX4: 3809 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3810 ifmedia_set(media, m | IFM_10G_CX4); 3811 break; 3812 3813 case FW_PORT_TYPE_QSFP_10G: 3814 case FW_PORT_TYPE_SFP: 3815 case FW_PORT_TYPE_FIBER_XFI: 3816 case FW_PORT_TYPE_FIBER_XAUI: 3817 switch (pi->mod_type) { 3818 3819 case FW_PORT_MOD_TYPE_LR: 3820 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3821 ifmedia_set(media, m | IFM_10G_LR); 3822 break; 3823 3824 case FW_PORT_MOD_TYPE_SR: 3825 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3826 ifmedia_set(media, m | IFM_10G_SR); 3827 break; 3828 3829 case FW_PORT_MOD_TYPE_LRM: 3830 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3831 ifmedia_set(media, m | IFM_10G_LRM); 3832 break; 3833 3834 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3835 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3836 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3837 ifmedia_set(media, m | IFM_10G_TWINAX); 3838 break; 3839 3840 case FW_PORT_MOD_TYPE_NONE: 3841 m &= ~IFM_FDX; 3842 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3843 ifmedia_set(media, m | IFM_NONE); 3844 break; 3845 3846 case FW_PORT_MOD_TYPE_NA: 3847 case FW_PORT_MOD_TYPE_ER: 3848 default: 3849 device_printf(pi->dev, 3850 "unknown port_type (%d), mod_type (%d)\n", 3851 pi->port_type, pi->mod_type); 3852 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3853 ifmedia_set(media, m | IFM_UNKNOWN); 3854 break; 3855 } 3856 break; 3857 3858 case FW_PORT_TYPE_CR_QSFP: 3859 case FW_PORT_TYPE_SFP28: 3860 case FW_PORT_TYPE_KR_SFP28: 3861 switch (pi->mod_type) { 3862 3863 case FW_PORT_MOD_TYPE_SR: 3864 ifmedia_add(media, m | IFM_25G_SR, 0, NULL); 3865 ifmedia_set(media, m | IFM_25G_SR); 3866 break; 3867 3868 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3869 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3870 ifmedia_add(media, m | IFM_25G_CR, 0, NULL); 3871 ifmedia_set(media, m | IFM_25G_CR); 3872 break; 3873 3874 case FW_PORT_MOD_TYPE_NONE: 3875 m &= ~IFM_FDX; 3876 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3877 ifmedia_set(media, m | IFM_NONE); 3878 break; 3879 3880 default: 3881 device_printf(pi->dev, 3882 "unknown port_type (%d), mod_type (%d)\n", 3883 pi->port_type, pi->mod_type); 3884 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3885 ifmedia_set(media, m | IFM_UNKNOWN); 3886 break; 3887 } 3888 break; 3889 3890 case FW_PORT_TYPE_QSFP: 3891 switch (pi->mod_type) { 3892 3893 case FW_PORT_MOD_TYPE_LR: 3894 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3895 ifmedia_set(media, m | IFM_40G_LR4); 3896 break; 3897 3898 case FW_PORT_MOD_TYPE_SR: 3899 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3900 ifmedia_set(media, m | IFM_40G_SR4); 3901 break; 3902 3903 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3904 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3905 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3906 ifmedia_set(media, m | IFM_40G_CR4); 3907 break; 3908 3909 case FW_PORT_MOD_TYPE_NONE: 3910 m &= ~IFM_FDX; 3911 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3912 ifmedia_set(media, m | IFM_NONE); 3913 break; 3914 3915 default: 3916 device_printf(pi->dev, 3917 "unknown port_type (%d), mod_type 
(%d)\n", 3918 pi->port_type, pi->mod_type); 3919 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3920 ifmedia_set(media, m | IFM_UNKNOWN); 3921 break; 3922 } 3923 break; 3924 3925 case FW_PORT_TYPE_KR4_100G: 3926 case FW_PORT_TYPE_CR4_QSFP: 3927 switch (pi->mod_type) { 3928 3929 case FW_PORT_MOD_TYPE_LR: 3930 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL); 3931 ifmedia_set(media, m | IFM_100G_LR4); 3932 break; 3933 3934 case FW_PORT_MOD_TYPE_SR: 3935 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL); 3936 ifmedia_set(media, m | IFM_100G_SR4); 3937 break; 3938 3939 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3940 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3941 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL); 3942 ifmedia_set(media, m | IFM_100G_CR4); 3943 break; 3944 3945 case FW_PORT_MOD_TYPE_NONE: 3946 m &= ~IFM_FDX; 3947 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3948 ifmedia_set(media, m | IFM_NONE); 3949 break; 3950 3951 default: 3952 device_printf(pi->dev, 3953 "unknown port_type (%d), mod_type (%d)\n", 3954 pi->port_type, pi->mod_type); 3955 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3956 ifmedia_set(media, m | IFM_UNKNOWN); 3957 break; 3958 } 3959 break; 3960 3961 default: 3962 device_printf(pi->dev, 3963 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3964 pi->mod_type); 3965 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3966 ifmedia_set(media, m | IFM_UNKNOWN); 3967 break; 3968 } 3969 } 3970 3971 /* 3972 * Update all the requested_* fields in the link config and then send a mailbox 3973 * command to apply the settings. 3974 */ 3975 static void 3976 init_l1cfg(struct port_info *pi) 3977 { 3978 struct adapter *sc = pi->adapter; 3979 struct link_config *lc = &pi->link_cfg; 3980 int rc; 3981 3982 ASSERT_SYNCHRONIZED_OP(sc); 3983 3984 lc->requested_speed = port_top_speed(pi); /* in Gbps */ 3985 if (t4_autoneg != 0 && lc->supported & FW_PORT_CAP_ANEG) { 3986 lc->requested_aneg = AUTONEG_ENABLE; 3987 } else { 3988 lc->requested_aneg = AUTONEG_DISABLE; 3989 } 3990 3991 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX); 3992 3993 if (t4_fec != -1) { 3994 lc->requested_fec = t4_fec & (FEC_RS | FEC_BASER_RS | 3995 FEC_RESERVED); 3996 } else { 3997 /* Use the suggested value provided by the firmware in acaps */ 3998 if (lc->advertising & FW_PORT_CAP_FEC_RS) 3999 lc->requested_fec = FEC_RS; 4000 else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS) 4001 lc->requested_fec = FEC_BASER_RS; 4002 else if (lc->advertising & FW_PORT_CAP_FEC_RESERVED) 4003 lc->requested_fec = FEC_RESERVED; 4004 else 4005 lc->requested_fec = 0; 4006 } 4007 4008 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 4009 if (rc != 0) { 4010 device_printf(pi->dev, "l1cfg failed: %d\n", rc); 4011 } else { 4012 lc->fc = lc->requested_fc; 4013 lc->fec = lc->requested_fec; 4014 } 4015 } 4016 4017 #define FW_MAC_EXACT_CHUNK 7 4018 4019 /* 4020 * Program the port's XGMAC based on parameters in ifnet. The caller also 4021 * indicates which parameters should be programmed (the rest are left alone). 4022 */ 4023 int 4024 update_mac_settings(struct ifnet *ifp, int flags) 4025 { 4026 int rc = 0; 4027 struct vi_info *vi = ifp->if_softc; 4028 struct port_info *pi = vi->pi; 4029 struct adapter *sc = pi->adapter; 4030 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 4031 4032 ASSERT_SYNCHRONIZED_OP(sc); 4033 KASSERT(flags, ("%s: not told what to update.", __func__)); 4034 4035 if (flags & XGMAC_MTU) 4036 mtu = ifp->if_mtu; 4037 4038 if (flags & XGMAC_PROMISC) 4039 promisc = ifp->if_flags & IFF_PROMISC ? 
1 : 0; 4040 4041 if (flags & XGMAC_ALLMULTI) 4042 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 4043 4044 if (flags & XGMAC_VLANEX) 4045 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 4046 4047 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 4048 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 4049 allmulti, 1, vlanex, false); 4050 if (rc) { 4051 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 4052 rc); 4053 return (rc); 4054 } 4055 } 4056 4057 if (flags & XGMAC_UCADDR) { 4058 uint8_t ucaddr[ETHER_ADDR_LEN]; 4059 4060 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 4061 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 4062 ucaddr, true, true); 4063 if (rc < 0) { 4064 rc = -rc; 4065 if_printf(ifp, "change_mac failed: %d\n", rc); 4066 return (rc); 4067 } else { 4068 vi->xact_addr_filt = rc; 4069 rc = 0; 4070 } 4071 } 4072 4073 if (flags & XGMAC_MCADDRS) { 4074 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 4075 int del = 1; 4076 uint64_t hash = 0; 4077 struct ifmultiaddr *ifma; 4078 int i = 0, j; 4079 4080 if_maddr_rlock(ifp); 4081 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 4082 if (ifma->ifma_addr->sa_family != AF_LINK) 4083 continue; 4084 mcaddr[i] = 4085 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 4086 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 4087 i++; 4088 4089 if (i == FW_MAC_EXACT_CHUNK) { 4090 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 4091 del, i, mcaddr, NULL, &hash, 0); 4092 if (rc < 0) { 4093 rc = -rc; 4094 for (j = 0; j < i; j++) { 4095 if_printf(ifp, 4096 "failed to add mc address" 4097 " %02x:%02x:%02x:" 4098 "%02x:%02x:%02x rc=%d\n", 4099 mcaddr[j][0], mcaddr[j][1], 4100 mcaddr[j][2], mcaddr[j][3], 4101 mcaddr[j][4], mcaddr[j][5], 4102 rc); 4103 } 4104 goto mcfail; 4105 } 4106 del = 0; 4107 i = 0; 4108 } 4109 } 4110 if (i > 0) { 4111 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 4112 mcaddr, NULL, &hash, 0); 4113 if (rc < 0) { 4114 rc = -rc; 4115 for (j = 0; j < i; j++) { 4116 if_printf(ifp, 4117 "failed to add mc address" 4118 " %02x:%02x:%02x:" 4119 "%02x:%02x:%02x rc=%d\n", 4120 mcaddr[j][0], mcaddr[j][1], 4121 mcaddr[j][2], mcaddr[j][3], 4122 mcaddr[j][4], mcaddr[j][5], 4123 rc); 4124 } 4125 goto mcfail; 4126 } 4127 } 4128 4129 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 4130 if (rc != 0) 4131 if_printf(ifp, "failed to set mc address hash: %d", rc); 4132 mcfail: 4133 if_maddr_runlock(ifp); 4134 } 4135 4136 return (rc); 4137 } 4138 4139 /* 4140 * {begin|end}_synchronized_op must be called from the same thread. 4141 */ 4142 int 4143 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 4144 char *wmesg) 4145 { 4146 int rc, pri; 4147 4148 #ifdef WITNESS 4149 /* the caller thinks it's ok to sleep, but is it really? 
*/ 4150 if (flags & SLEEP_OK) 4151 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 4152 "begin_synchronized_op"); 4153 #endif 4154 4155 if (flags & INTR_OK) 4156 pri = PCATCH; 4157 else 4158 pri = 0; 4159 4160 ADAPTER_LOCK(sc); 4161 for (;;) { 4162 4163 if (vi && IS_DOOMED(vi)) { 4164 rc = ENXIO; 4165 goto done; 4166 } 4167 4168 if (!IS_BUSY(sc)) { 4169 rc = 0; 4170 break; 4171 } 4172 4173 if (!(flags & SLEEP_OK)) { 4174 rc = EBUSY; 4175 goto done; 4176 } 4177 4178 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 4179 rc = EINTR; 4180 goto done; 4181 } 4182 } 4183 4184 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 4185 SET_BUSY(sc); 4186 #ifdef INVARIANTS 4187 sc->last_op = wmesg; 4188 sc->last_op_thr = curthread; 4189 sc->last_op_flags = flags; 4190 #endif 4191 4192 done: 4193 if (!(flags & HOLD_LOCK) || rc) 4194 ADAPTER_UNLOCK(sc); 4195 4196 return (rc); 4197 } 4198 4199 /* 4200 * Tell if_ioctl and if_init that the VI is going away. This is a 4201 * special variant of begin_synchronized_op and must be paired with a 4202 * call to end_synchronized_op. 4203 */ 4204 void 4205 doom_vi(struct adapter *sc, struct vi_info *vi) 4206 { 4207 4208 ADAPTER_LOCK(sc); 4209 SET_DOOMED(vi); 4210 wakeup(&sc->flags); 4211 while (IS_BUSY(sc)) 4212 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 4213 SET_BUSY(sc); 4214 #ifdef INVARIANTS 4215 sc->last_op = "t4detach"; 4216 sc->last_op_thr = curthread; 4217 sc->last_op_flags = 0; 4218 #endif 4219 ADAPTER_UNLOCK(sc); 4220 } 4221 4222 /* 4223 * {begin|end}_synchronized_op must be called from the same thread. 4224 */ 4225 void 4226 end_synchronized_op(struct adapter *sc, int flags) 4227 { 4228 4229 if (flags & LOCK_HELD) 4230 ADAPTER_LOCK_ASSERT_OWNED(sc); 4231 else 4232 ADAPTER_LOCK(sc); 4233 4234 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 4235 CLR_BUSY(sc); 4236 wakeup(&sc->flags); 4237 ADAPTER_UNLOCK(sc); 4238 } 4239 4240 static int 4241 cxgbe_init_synchronized(struct vi_info *vi) 4242 { 4243 struct port_info *pi = vi->pi; 4244 struct adapter *sc = pi->adapter; 4245 struct ifnet *ifp = vi->ifp; 4246 int rc = 0, i; 4247 struct sge_txq *txq; 4248 4249 ASSERT_SYNCHRONIZED_OP(sc); 4250 4251 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4252 return (0); /* already running */ 4253 4254 if (!(sc->flags & FULL_INIT_DONE) && 4255 ((rc = adapter_full_init(sc)) != 0)) 4256 return (rc); /* error message displayed already */ 4257 4258 if (!(vi->flags & VI_INIT_DONE) && 4259 ((rc = vi_full_init(vi)) != 0)) 4260 return (rc); /* error message displayed already */ 4261 4262 rc = update_mac_settings(ifp, XGMAC_ALL); 4263 if (rc) 4264 goto done; /* error message displayed already */ 4265 4266 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 4267 if (rc != 0) { 4268 if_printf(ifp, "enable_vi failed: %d\n", rc); 4269 goto done; 4270 } 4271 4272 /* 4273 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 4274 * if this changes. 4275 */ 4276 4277 for_each_txq(vi, i, txq) { 4278 TXQ_LOCK(txq); 4279 txq->eq.flags |= EQ_ENABLED; 4280 TXQ_UNLOCK(txq); 4281 } 4282 4283 /* 4284 * The first iq of the first port to come up is used for tracing. 4285 */ 4286 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 4287 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 4288 t4_write_reg(sc, is_t4(sc) ?
A_MPS_TRC_RSS_CONTROL : 4289 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 4290 V_QUEUENUMBER(sc->traceq)); 4291 pi->flags |= HAS_TRACEQ; 4292 } 4293 4294 /* all ok */ 4295 PORT_LOCK(pi); 4296 if (pi->up_vis++ == 0) { 4297 t4_update_port_info(pi); 4298 build_medialist(pi, &pi->media); 4299 init_l1cfg(pi); 4300 } 4301 ifp->if_drv_flags |= IFF_DRV_RUNNING; 4302 4303 if (pi->nvi > 1 || sc->flags & IS_VF) 4304 callout_reset(&vi->tick, hz, vi_tick, vi); 4305 else 4306 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 4307 PORT_UNLOCK(pi); 4308 done: 4309 if (rc != 0) 4310 cxgbe_uninit_synchronized(vi); 4311 4312 return (rc); 4313 } 4314 4315 /* 4316 * Idempotent. 4317 */ 4318 static int 4319 cxgbe_uninit_synchronized(struct vi_info *vi) 4320 { 4321 struct port_info *pi = vi->pi; 4322 struct adapter *sc = pi->adapter; 4323 struct ifnet *ifp = vi->ifp; 4324 int rc, i; 4325 struct sge_txq *txq; 4326 4327 ASSERT_SYNCHRONIZED_OP(sc); 4328 4329 if (!(vi->flags & VI_INIT_DONE)) { 4330 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 4331 ("uninited VI is running")); 4332 return (0); 4333 } 4334 4335 /* 4336 * Disable the VI so that all its data in either direction is discarded 4337 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 4338 * tick) intact as the TP can deliver negative advice or data that it's 4339 * holding in its RAM (for an offloaded connection) even after the VI is 4340 * disabled. 4341 */ 4342 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 4343 if (rc) { 4344 if_printf(ifp, "disable_vi failed: %d\n", rc); 4345 return (rc); 4346 } 4347 4348 for_each_txq(vi, i, txq) { 4349 TXQ_LOCK(txq); 4350 txq->eq.flags &= ~EQ_ENABLED; 4351 TXQ_UNLOCK(txq); 4352 } 4353 4354 PORT_LOCK(pi); 4355 if (pi->nvi > 1 || sc->flags & IS_VF) 4356 callout_stop(&vi->tick); 4357 else 4358 callout_stop(&pi->tick); 4359 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4360 PORT_UNLOCK(pi); 4361 return (0); 4362 } 4363 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4364 pi->up_vis--; 4365 if (pi->up_vis > 0) { 4366 PORT_UNLOCK(pi); 4367 return (0); 4368 } 4369 PORT_UNLOCK(pi); 4370 4371 pi->link_cfg.link_ok = 0; 4372 pi->link_cfg.speed = 0; 4373 pi->link_cfg.link_down_rc = 255; 4374 t4_os_link_changed(pi); 4375 pi->old_link_cfg = pi->link_cfg; 4376 4377 return (0); 4378 } 4379 4380 /* 4381 * It is ok for this function to fail midway and return right away. t4_detach 4382 * will walk the entire sc->irq list and clean up whatever is valid. 4383 */ 4384 int 4385 t4_setup_intr_handlers(struct adapter *sc) 4386 { 4387 int rc, rid, p, q, v; 4388 char s[8]; 4389 struct irq *irq; 4390 struct port_info *pi; 4391 struct vi_info *vi; 4392 struct sge *sge = &sc->sge; 4393 struct sge_rxq *rxq; 4394 #ifdef TCP_OFFLOAD 4395 struct sge_ofld_rxq *ofld_rxq; 4396 #endif 4397 #ifdef DEV_NETMAP 4398 struct sge_nm_rxq *nm_rxq; 4399 #endif 4400 #ifdef RSS 4401 int nbuckets = rss_getnumbuckets(); 4402 #endif 4403 4404 /* 4405 * Setup interrupts. 4406 */ 4407 irq = &sc->irq[0]; 4408 rid = sc->intr_type == INTR_INTX ? 0 : 1; 4409 if (forwarding_intr_to_fwq(sc)) 4410 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 4411 4412 /* Multiple interrupts. 
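 * Vector layout from here on: an error interrupt first (PFs only), then
 * the firmware event queue, then one vector per rx queue; when netmap is
 * in use a NIC rxq and an nm_rxq may share a vector, serviced by
 * t4_vi_intr below.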
*/ 4413 if (sc->flags & IS_VF) 4414 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 4415 ("%s: too few intr.", __func__)); 4416 else 4417 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 4418 ("%s: too few intr.", __func__)); 4419 4420 /* The first one is always error intr on PFs */ 4421 if (!(sc->flags & IS_VF)) { 4422 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 4423 if (rc != 0) 4424 return (rc); 4425 irq++; 4426 rid++; 4427 } 4428 4429 /* The second one is always the firmware event queue (first on VFs) */ 4430 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 4431 if (rc != 0) 4432 return (rc); 4433 irq++; 4434 rid++; 4435 4436 for_each_port(sc, p) { 4437 pi = sc->port[p]; 4438 for_each_vi(pi, v, vi) { 4439 vi->first_intr = rid - 1; 4440 4441 if (vi->nnmrxq > 0) { 4442 int n = max(vi->nrxq, vi->nnmrxq); 4443 4444 rxq = &sge->rxq[vi->first_rxq]; 4445 #ifdef DEV_NETMAP 4446 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 4447 #endif 4448 for (q = 0; q < n; q++) { 4449 snprintf(s, sizeof(s), "%x%c%x", p, 4450 'a' + v, q); 4451 if (q < vi->nrxq) 4452 irq->rxq = rxq++; 4453 #ifdef DEV_NETMAP 4454 if (q < vi->nnmrxq) 4455 irq->nm_rxq = nm_rxq++; 4456 #endif 4457 rc = t4_alloc_irq(sc, irq, rid, 4458 t4_vi_intr, irq, s); 4459 if (rc != 0) 4460 return (rc); 4461 #ifdef RSS 4462 if (q < vi->nrxq) { 4463 bus_bind_intr(sc->dev, irq->res, 4464 rss_getcpu(q % nbuckets)); 4465 } 4466 #endif 4467 irq++; 4468 rid++; 4469 vi->nintr++; 4470 } 4471 } else { 4472 for_each_rxq(vi, q, rxq) { 4473 snprintf(s, sizeof(s), "%x%c%x", p, 4474 'a' + v, q); 4475 rc = t4_alloc_irq(sc, irq, rid, 4476 t4_intr, rxq, s); 4477 if (rc != 0) 4478 return (rc); 4479 #ifdef RSS 4480 bus_bind_intr(sc->dev, irq->res, 4481 rss_getcpu(q % nbuckets)); 4482 #endif 4483 irq++; 4484 rid++; 4485 vi->nintr++; 4486 } 4487 } 4488 #ifdef TCP_OFFLOAD 4489 for_each_ofld_rxq(vi, q, ofld_rxq) { 4490 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q); 4491 rc = t4_alloc_irq(sc, irq, rid, t4_intr, 4492 ofld_rxq, s); 4493 if (rc != 0) 4494 return (rc); 4495 irq++; 4496 rid++; 4497 vi->nintr++; 4498 } 4499 #endif 4500 } 4501 } 4502 MPASS(irq == &sc->irq[sc->intr_count]); 4503 4504 return (0); 4505 } 4506 4507 int 4508 adapter_full_init(struct adapter *sc) 4509 { 4510 int rc, i; 4511 #ifdef RSS 4512 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4513 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4514 #endif 4515 4516 ASSERT_SYNCHRONIZED_OP(sc); 4517 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4518 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 4519 ("%s: FULL_INIT_DONE already", __func__)); 4520 4521 /* 4522 * queues that belong to the adapter (not any particular port). 
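 * (e.g. the firmware event queue; the per-VI rx/tx queues are created
 * later, in vi_full_init).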
4523 */ 4524 rc = t4_setup_adapter_queues(sc); 4525 if (rc != 0) 4526 goto done; 4527 4528 for (i = 0; i < nitems(sc->tq); i++) { 4529 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 4530 taskqueue_thread_enqueue, &sc->tq[i]); 4531 if (sc->tq[i] == NULL) { 4532 device_printf(sc->dev, 4533 "failed to allocate task queue %d\n", i); 4534 rc = ENOMEM; 4535 goto done; 4536 } 4537 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 4538 device_get_nameunit(sc->dev), i); 4539 } 4540 #ifdef RSS 4541 MPASS(RSS_KEYSIZE == 40); 4542 rss_getkey((void *)&raw_rss_key[0]); 4543 for (i = 0; i < nitems(rss_key); i++) { 4544 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 4545 } 4546 t4_write_rss_key(sc, &rss_key[0], -1, 1); 4547 #endif 4548 4549 if (!(sc->flags & IS_VF)) 4550 t4_intr_enable(sc); 4551 sc->flags |= FULL_INIT_DONE; 4552 done: 4553 if (rc != 0) 4554 adapter_full_uninit(sc); 4555 4556 return (rc); 4557 } 4558 4559 int 4560 adapter_full_uninit(struct adapter *sc) 4561 { 4562 int i; 4563 4564 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4565 4566 t4_teardown_adapter_queues(sc); 4567 4568 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 4569 taskqueue_free(sc->tq[i]); 4570 sc->tq[i] = NULL; 4571 } 4572 4573 sc->flags &= ~FULL_INIT_DONE; 4574 4575 return (0); 4576 } 4577 4578 #ifdef RSS 4579 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 4580 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 4581 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 4582 RSS_HASHTYPE_RSS_UDP_IPV6) 4583 4584 /* Translates kernel hash types to hardware. */ 4585 static int 4586 hashconfig_to_hashen(int hashconfig) 4587 { 4588 int hashen = 0; 4589 4590 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 4591 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 4592 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 4593 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 4594 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 4595 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4596 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4597 } 4598 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 4599 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4600 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4601 } 4602 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 4603 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4604 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 4605 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4606 4607 return (hashen); 4608 } 4609 4610 /* Translates hardware hash types to kernel. */ 4611 static int 4612 hashen_to_hashconfig(int hashen) 4613 { 4614 int hashconfig = 0; 4615 4616 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 4617 /* 4618 * If UDP hashing was enabled it must have been enabled for 4619 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 4620 * enabling any 4-tuple hash is nonsense configuration. 
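 * hashconfig_to_hashen() above always pairs F_FW_RSS_VI_CONFIG_CMD_UDPEN
 * with one of the FOURTUPEN bits, so the assertion below holds for any
 * value this driver generates.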
4621 */ 4622 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4623 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 4624 4625 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4626 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 4627 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4628 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 4629 } 4630 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4631 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 4632 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4633 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 4634 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 4635 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 4636 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 4637 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 4638 4639 return (hashconfig); 4640 } 4641 #endif 4642 4643 int 4644 vi_full_init(struct vi_info *vi) 4645 { 4646 struct adapter *sc = vi->pi->adapter; 4647 struct ifnet *ifp = vi->ifp; 4648 uint16_t *rss; 4649 struct sge_rxq *rxq; 4650 int rc, i, j, hashen; 4651 #ifdef RSS 4652 int nbuckets = rss_getnumbuckets(); 4653 int hashconfig = rss_gethashconfig(); 4654 int extra; 4655 #endif 4656 4657 ASSERT_SYNCHRONIZED_OP(sc); 4658 KASSERT((vi->flags & VI_INIT_DONE) == 0, 4659 ("%s: VI_INIT_DONE already", __func__)); 4660 4661 sysctl_ctx_init(&vi->ctx); 4662 vi->flags |= VI_SYSCTL_CTX; 4663 4664 /* 4665 * Allocate tx/rx/fl queues for this VI. 4666 */ 4667 rc = t4_setup_vi_queues(vi); 4668 if (rc != 0) 4669 goto done; /* error message displayed already */ 4670 4671 /* 4672 * Setup RSS for this VI. Save a copy of the RSS table for later use. 4673 */ 4674 if (vi->nrxq > vi->rss_size) { 4675 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 4676 "some queues will never receive traffic.\n", vi->nrxq, 4677 vi->rss_size); 4678 } else if (vi->rss_size % vi->nrxq) { 4679 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 4680 "expect uneven traffic distribution.\n", vi->nrxq, 4681 vi->rss_size); 4682 } 4683 #ifdef RSS 4684 if (vi->nrxq != nbuckets) { 4685 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 4686 "performance will be impacted.\n", vi->nrxq, nbuckets); 4687 } 4688 #endif 4689 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 4690 for (i = 0; i < vi->rss_size;) { 4691 #ifdef RSS 4692 j = rss_get_indirection_to_bucket(i); 4693 j %= vi->nrxq; 4694 rxq = &sc->sge.rxq[vi->first_rxq + j]; 4695 rss[i++] = rxq->iq.abs_id; 4696 #else 4697 for_each_rxq(vi, j, rxq) { 4698 rss[i++] = rxq->iq.abs_id; 4699 if (i == vi->rss_size) 4700 break; 4701 } 4702 #endif 4703 } 4704 4705 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 4706 vi->rss_size); 4707 if (rc != 0) { 4708 if_printf(ifp, "rss_config failed: %d\n", rc); 4709 goto done; 4710 } 4711 4712 #ifdef RSS 4713 hashen = hashconfig_to_hashen(hashconfig); 4714 4715 /* 4716 * We may have had to enable some hashes even though the global config 4717 * wants them disabled. This is a potential problem that must be 4718 * reported to the user. 4719 */ 4720 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4721 4722 /* 4723 * If we consider only the supported hash types, then the enabled hashes 4724 * are a superset of the requested hashes. In other words, there cannot 4725 * be any supported hash that was requested but not enabled, but there 4726 * can be hashes that were not requested but had to be enabled. 
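 * For example, if the global config requested UDP_IPV4 but not TCP_IPV4,
 * F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN is still set, and TCP/IPv4 hashing
 * is reported as forced on below.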
4727 */ 4728 extra &= SUPPORTED_RSS_HASHTYPES; 4729 MPASS((extra & hashconfig) == 0); 4730 4731 if (extra) { 4732 if_printf(ifp, 4733 "global RSS config (0x%x) cannot be accommodated.\n", 4734 hashconfig); 4735 } 4736 if (extra & RSS_HASHTYPE_RSS_IPV4) 4737 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4738 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4739 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4740 if (extra & RSS_HASHTYPE_RSS_IPV6) 4741 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4742 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4743 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4744 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4745 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4746 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4747 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4748 #else 4749 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4750 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4751 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4752 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4753 #endif 4754 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0); 4755 if (rc != 0) { 4756 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4757 goto done; 4758 } 4759 4760 vi->rss = rss; 4761 vi->flags |= VI_INIT_DONE; 4762 done: 4763 if (rc != 0) 4764 vi_full_uninit(vi); 4765 4766 return (rc); 4767 } 4768 4769 /* 4770 * Idempotent. 4771 */ 4772 int 4773 vi_full_uninit(struct vi_info *vi) 4774 { 4775 struct port_info *pi = vi->pi; 4776 struct adapter *sc = pi->adapter; 4777 int i; 4778 struct sge_rxq *rxq; 4779 struct sge_txq *txq; 4780 #ifdef TCP_OFFLOAD 4781 struct sge_ofld_rxq *ofld_rxq; 4782 struct sge_wrq *ofld_txq; 4783 #endif 4784 4785 if (vi->flags & VI_INIT_DONE) { 4786 4787 /* Need to quiesce queues. */ 4788 4789 /* XXX: Only for the first VI? */ 4790 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 4791 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4792 4793 for_each_txq(vi, i, txq) { 4794 quiesce_txq(sc, txq); 4795 } 4796 4797 #ifdef TCP_OFFLOAD 4798 for_each_ofld_txq(vi, i, ofld_txq) { 4799 quiesce_wrq(sc, ofld_txq); 4800 } 4801 #endif 4802 4803 for_each_rxq(vi, i, rxq) { 4804 quiesce_iq(sc, &rxq->iq); 4805 quiesce_fl(sc, &rxq->fl); 4806 } 4807 4808 #ifdef TCP_OFFLOAD 4809 for_each_ofld_rxq(vi, i, ofld_rxq) { 4810 quiesce_iq(sc, &ofld_rxq->iq); 4811 quiesce_fl(sc, &ofld_rxq->fl); 4812 } 4813 #endif 4814 free(vi->rss, M_CXGBE); 4815 free(vi->nm_rss, M_CXGBE); 4816 } 4817 4818 t4_teardown_vi_queues(vi); 4819 vi->flags &= ~VI_INIT_DONE; 4820 4821 return (0); 4822 } 4823 4824 static void 4825 quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4826 { 4827 struct sge_eq *eq = &txq->eq; 4828 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4829 4830 (void) sc; /* unused */ 4831 4832 #ifdef INVARIANTS 4833 TXQ_LOCK(txq); 4834 MPASS((eq->flags & EQ_ENABLED) == 0); 4835 TXQ_UNLOCK(txq); 4836 #endif 4837 4838 /* Wait for the mp_ring to empty. */ 4839 while (!mp_ring_is_idle(txq->r)) { 4840 mp_ring_check_drainage(txq->r, 0); 4841 pause("rquiesce", 1); 4842 } 4843 4844 /* Then wait for the hardware to finish. */ 4845 while (spg->cidx != htobe16(eq->pidx)) 4846 pause("equiesce", 1); 4847 4848 /* Finally, wait for the driver to reclaim all descriptors. 
*/ 4849 while (eq->cidx != eq->pidx) 4850 pause("dquiesce", 1); 4851 } 4852 4853 static void 4854 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4855 { 4856 4857 /* XXXTX */ 4858 } 4859 4860 static void 4861 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4862 { 4863 (void) sc; /* unused */ 4864 4865 /* Synchronize with the interrupt handler */ 4866 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4867 pause("iqfree", 1); 4868 } 4869 4870 static void 4871 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4872 { 4873 mtx_lock(&sc->sfl_lock); 4874 FL_LOCK(fl); 4875 fl->flags |= FL_DOOMED; 4876 FL_UNLOCK(fl); 4877 callout_stop(&sc->sfl_callout); 4878 mtx_unlock(&sc->sfl_lock); 4879 4880 KASSERT((fl->flags & FL_STARVING) == 0, 4881 ("%s: still starving", __func__)); 4882 } 4883 4884 static int 4885 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4886 driver_intr_t *handler, void *arg, char *name) 4887 { 4888 int rc; 4889 4890 irq->rid = rid; 4891 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4892 RF_SHAREABLE | RF_ACTIVE); 4893 if (irq->res == NULL) { 4894 device_printf(sc->dev, 4895 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4896 return (ENOMEM); 4897 } 4898 4899 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4900 NULL, handler, arg, &irq->tag); 4901 if (rc != 0) { 4902 device_printf(sc->dev, 4903 "failed to setup interrupt for rid %d, name %s: %d\n", 4904 rid, name, rc); 4905 } else if (name) 4906 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); 4907 4908 return (rc); 4909 } 4910 4911 static int 4912 t4_free_irq(struct adapter *sc, struct irq *irq) 4913 { 4914 if (irq->tag) 4915 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4916 if (irq->res) 4917 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4918 4919 bzero(irq, sizeof(*irq)); 4920 4921 return (0); 4922 } 4923 4924 static void 4925 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4926 { 4927 4928 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4929 t4_get_regs(sc, buf, regs->len); 4930 } 4931 4932 #define A_PL_INDIR_CMD 0x1f8 4933 4934 #define S_PL_AUTOINC 31 4935 #define M_PL_AUTOINC 0x1U 4936 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4937 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4938 4939 #define S_PL_VFID 20 4940 #define M_PL_VFID 0xffU 4941 #define V_PL_VFID(x) ((x) << S_PL_VFID) 4942 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4943 4944 #define S_PL_ADDR 0 4945 #define M_PL_ADDR 0xfffffU 4946 #define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4947 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4948 4949 #define A_PL_INDIR_DATA 0x1fc 4950 4951 static uint64_t 4952 read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4953 { 4954 u32 stats[2]; 4955 4956 mtx_assert(&sc->reg_lock, MA_OWNED); 4957 if (sc->flags & IS_VF) { 4958 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 4959 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 4960 } else { 4961 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4962 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4963 V_PL_ADDR(VF_MPS_REG(reg))); 4964 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4965 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4966 } 4967 return (((uint64_t)stats[1]) << 32 | stats[0]); 4968 } 4969 4970 static void 4971 t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4972 struct fw_vi_stats_vf *stats) 4973 { 4974 4975 #define GET_STAT(name) \ 4976 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4977 4978 
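	/*
	 * Each statistic is a 64-bit counter kept in an _L/_H register
	 * pair; read_vf_stat() above reads both halves and combines them.
	 */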
stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4979 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4980 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4981 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4982 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4983 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4984 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4985 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4986 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4987 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4988 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4989 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4990 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4991 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4992 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4993 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4994 4995 #undef GET_STAT 4996 } 4997 4998 static void 4999 t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 5000 { 5001 int reg; 5002 5003 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 5004 V_PL_VFID(G_FW_VIID_VIN(viid)) | 5005 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 5006 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 5007 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 5008 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 5009 } 5010 5011 static void 5012 vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 5013 { 5014 struct timeval tv; 5015 const struct timeval interval = {0, 250000}; /* 250ms */ 5016 5017 if (!(vi->flags & VI_INIT_DONE)) 5018 return; 5019 5020 getmicrotime(&tv); 5021 timevalsub(&tv, &interval); 5022 if (timevalcmp(&tv, &vi->last_refreshed, <)) 5023 return; 5024 5025 mtx_lock(&sc->reg_lock); 5026 t4_get_vi_stats(sc, vi->viid, &vi->stats); 5027 getmicrotime(&vi->last_refreshed); 5028 mtx_unlock(&sc->reg_lock); 5029 } 5030 5031 static void 5032 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 5033 { 5034 u_int i, v, tnl_cong_drops, bg_map; 5035 struct timeval tv; 5036 const struct timeval interval = {0, 250000}; /* 250ms */ 5037 5038 getmicrotime(&tv); 5039 timevalsub(&tv, &interval); 5040 if (timevalcmp(&tv, &pi->last_refreshed, <)) 5041 return; 5042 5043 tnl_cong_drops = 0; 5044 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 5045 bg_map = pi->mps_bg_map; 5046 while (bg_map) { 5047 i = ffs(bg_map) - 1; 5048 mtx_lock(&sc->reg_lock); 5049 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, 5050 A_TP_MIB_TNL_CNG_DROP_0 + i); 5051 mtx_unlock(&sc->reg_lock); 5052 tnl_cong_drops += v; 5053 bg_map &= ~(1 << i); 5054 } 5055 pi->tnl_cong_drops = tnl_cong_drops; 5056 getmicrotime(&pi->last_refreshed); 5057 } 5058 5059 static void 5060 cxgbe_tick(void *arg) 5061 { 5062 struct port_info *pi = arg; 5063 struct adapter *sc = pi->adapter; 5064 5065 PORT_LOCK_ASSERT_OWNED(pi); 5066 cxgbe_refresh_stats(sc, pi); 5067 5068 callout_schedule(&pi->tick, hz); 5069 } 5070 5071 void 5072 vi_tick(void *arg) 5073 { 5074 struct vi_info *vi = arg; 5075 struct adapter *sc = vi->pi->adapter; 5076 5077 vi_refresh_stats(sc, vi); 5078 5079 callout_schedule(&vi->tick, hz); 5080 } 5081 5082 static void 5083 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 5084 { 5085 struct ifnet *vlan; 5086 5087 if (arg != ifp || ifp->if_type != IFT_ETHER) 5088 return; 5089 5090 vlan = VLAN_DEVAT(ifp, vid); 5091 VLAN_SETCOOKIE(vlan, ifp); 5092 } 5093 5094 /* 5095 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 5096 */ 5097 
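/*
 * Each entry is a printf %b style decoder string for sysctl_bitfield:
 * the leading "\20" prints the raw value in hex, and each "\<bit>NAME"
 * that follows names one capability bit, with \001 denoting the least
 * significant bit.
 */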
static char *caps_decoder[] = { 5098 "\20\001IPMI\002NCSI", /* 0: NBM */ 5099 "\20\001PPP\002QFC\003DCBX", /* 1: link */ 5100 "\20\001INGRESS\002EGRESS", /* 2: switch */ 5101 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 5102 "\006HASHFILTER\007ETHOFLD", 5103 "\20\001TOE", /* 4: TOE */ 5104 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 5105 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 5106 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 5107 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 5108 "\007T10DIF" 5109 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 5110 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ 5111 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 5112 "\004PO_INITIATOR\005PO_TARGET", 5113 }; 5114 5115 void 5116 t4_sysctls(struct adapter *sc) 5117 { 5118 struct sysctl_ctx_list *ctx; 5119 struct sysctl_oid *oid; 5120 struct sysctl_oid_list *children, *c0; 5121 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 5122 5123 ctx = device_get_sysctl_ctx(sc->dev); 5124 5125 /* 5126 * dev.t4nex.X. 5127 */ 5128 oid = device_get_sysctl_tree(sc->dev); 5129 c0 = children = SYSCTL_CHILDREN(oid); 5130 5131 sc->sc_do_rxcopy = 1; 5132 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 5133 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 5134 5135 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 5136 sc->params.nports, "# of ports"); 5137 5138 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 5139 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 5140 sysctl_bitfield, "A", "available doorbells"); 5141 5142 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 5143 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 5144 5145 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 5146 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 5147 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 5148 "interrupt holdoff timer values (us)"); 5149 5150 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 5151 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 5152 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 5153 "interrupt holdoff packet counter values"); 5154 5155 t4_sge_sysctls(sc, ctx, children); 5156 5157 sc->lro_timeout = 100; 5158 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 5159 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 5160 5161 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 5162 &sc->debug_flags, 0, "flags to enable runtime debugging"); 5163 5164 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 5165 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 5166 5167 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 5168 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 5169 5170 if (sc->flags & IS_VF) 5171 return; 5172 5173 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 5174 NULL, chip_rev(sc), "chip hardware revision"); 5175 5176 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 5177 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 5178 5179 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 5180 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 5181 5182 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 5183 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 5184 5185 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version", 5186 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version"); 5187 5188 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 5189 CTLFLAG_RD, 
sc->params.vpd.na, 0, "network address"); 5190 5191 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 5192 sc->er_version, 0, "expansion ROM version"); 5193 5194 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, 5195 sc->bs_version, 0, "bootstrap firmware version"); 5196 5197 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 5198 NULL, sc->params.scfg_vers, "serial config version"); 5199 5200 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 5201 NULL, sc->params.vpd_vers, "VPD version"); 5202 5203 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 5204 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 5205 5206 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 5207 sc->cfcsum, "config file checksum"); 5208 5209 #define SYSCTL_CAP(name, n, text) \ 5210 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 5211 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 5212 sysctl_bitfield, "A", "available " text " capabilities") 5213 5214 SYSCTL_CAP(nbmcaps, 0, "NBM"); 5215 SYSCTL_CAP(linkcaps, 1, "link"); 5216 SYSCTL_CAP(switchcaps, 2, "switch"); 5217 SYSCTL_CAP(niccaps, 3, "NIC"); 5218 SYSCTL_CAP(toecaps, 4, "TCP offload"); 5219 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 5220 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 5221 SYSCTL_CAP(cryptocaps, 7, "crypto"); 5222 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 5223 #undef SYSCTL_CAP 5224 5225 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 5226 NULL, sc->tids.nftids, "number of filters"); 5227 5228 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 5229 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 5230 "chip temperature (in Celsius)"); 5231 5232 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_vdd", CTLFLAG_RD, 5233 &sc->params.core_vdd, 0, "core Vdd (in mV)"); 5234 5235 #ifdef SBUF_DRAIN 5236 /* 5237 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 5238 */ 5239 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 5240 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 5241 "logs and miscellaneous information"); 5242 children = SYSCTL_CHILDREN(oid); 5243 5244 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 5245 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5246 sysctl_cctrl, "A", "congestion control"); 5247 5248 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 5249 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5250 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 5251 5252 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 5253 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 5254 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 5255 5256 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 5257 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 5258 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 5259 5260 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 5261 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 5262 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 5263 5264 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 5265 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 5266 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 5267 5268 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 5269 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 5270 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 5271 5272 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 5273 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5274 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 5275 "A", "CIM logic analyzer"); 5276 5277 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 5278 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5279 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 5280 5281 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 5282 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 5283 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 5284 5285 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 5286 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 5287 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 5288 5289 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 5290 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 5291 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 5292 5293 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 5294 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 5295 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 5296 5297 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 5298 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 5299 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 5300 5301 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 5302 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 5303 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 5304 5305 if (chip_id(sc) > CHELSIO_T4) { 5306 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 5307 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 5308 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 5309 5310 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 5311 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 5312 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 5313 } 5314 5315 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 5316 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5317 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 5318 5319 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 5320 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5321 sysctl_cim_qcfg, "A", "CIM queue configuration"); 5322 5323 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 5324 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5325 sysctl_cpl_stats, "A", "CPL statistics"); 5326 5327 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 5328 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5329 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 5330 5331 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 5332 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5333 sysctl_devlog, "A", "firmware's device log"); 5334 5335 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 5336 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5337 sysctl_fcoe_stats, "A", "FCoE statistics"); 5338 5339 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 5340 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5341 sysctl_hw_sched, "A", "hardware scheduler "); 5342 5343 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 5344 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5345 sysctl_l2t, "A", "hardware L2 table"); 5346 5347 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 5348 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5349 sysctl_lb_stats, "A", "loopback statistics"); 5350 5351 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 5352 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5353 sysctl_meminfo, "A", "memory regions"); 5354 5355 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 5356 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5357 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 5358 "A", "MPS TCAM entries"); 5359 5360 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 5361 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5362 sysctl_path_mtus, "A", "path MTUs"); 5363 5364 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 5365 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5366 sysctl_pm_stats, "A", "PM statistics"); 5367 5368 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 5369 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5370 sysctl_rdma_stats, "A", "RDMA statistics"); 5371 5372 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 5373 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5374 sysctl_tcp_stats, "A", "TCP statistics"); 5375 5376 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 5377 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5378 sysctl_tids, "A", "TID information"); 5379 5380 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 5381 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5382 sysctl_tp_err_stats, "A", "TP error statistics"); 5383 5384 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 5385 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 5386 "TP logic analyzer event capture mask"); 5387 5388 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 5389 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5390 sysctl_tp_la, "A", "TP logic analyzer"); 5391 5392 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 5393 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5394 sysctl_tx_rate, "A", "Tx rate"); 5395 5396 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 5397 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5398 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 5399 5400 if (chip_id(sc) >= CHELSIO_T5) { 5401 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 5402 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5403 sysctl_wcwr_stats, "A", "write combined work requests"); 5404 } 5405 #endif 5406 5407 #ifdef TCP_OFFLOAD 5408 if (is_offload(sc)) { 5409 int i; 5410 char s[4]; 5411 5412 /* 5413 * dev.t4nex.X.toe. 
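	 * (These values are consulted by the TOE module as connections
	 * are offloaded.)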
5414 */ 5415 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 5416 NULL, "TOE parameters"); 5417 children = SYSCTL_CHILDREN(oid); 5418 5419 sc->tt.cong_algorithm = -1; 5420 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm", 5421 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control " 5422 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, " 5423 "3 = highspeed)"); 5424 5425 sc->tt.sndbuf = 256 * 1024; 5426 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 5427 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 5428 5429 sc->tt.ddp = 0; 5430 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 5431 &sc->tt.ddp, 0, "DDP allowed"); 5432 5433 sc->tt.rx_coalesce = 1; 5434 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 5435 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 5436 5437 sc->tt.tls = 0; 5438 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tls", CTLFLAG_RW, 5439 &sc->tt.tls, 0, "Inline TLS allowed"); 5440 5441 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports", 5442 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports, 5443 "I", "TCP ports that use inline TLS+TOE RX"); 5444 5445 sc->tt.tx_align = 1; 5446 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 5447 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 5448 5449 sc->tt.tx_zcopy = 0; 5450 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", 5451 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, 5452 "Enable zero-copy aio_write(2)"); 5453 5454 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 5455 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 5456 "TP timer tick (us)"); 5457 5458 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 5459 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 5460 "TCP timestamp tick (us)"); 5461 5462 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 5463 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 5464 "DACK tick (us)"); 5465 5466 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 5467 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 5468 "IU", "DACK timer (us)"); 5469 5470 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 5471 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 5472 sysctl_tp_timer, "LU", "Minimum retransmit interval (us)"); 5473 5474 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 5475 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 5476 sysctl_tp_timer, "LU", "Maximum retransmit interval (us)"); 5477 5478 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 5479 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 5480 sysctl_tp_timer, "LU", "Persist timer min (us)"); 5481 5482 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 5483 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 5484 sysctl_tp_timer, "LU", "Persist timer max (us)"); 5485 5486 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 5487 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 5488 sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); 5489 5490 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval", 5491 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 5492 sysctl_tp_timer, "LU", "Keepalive interval timer (us)"); 5493 5494 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 5495 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 5496 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 5497 5498 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 5499 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 5500 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 5501 5502 SYSCTL_ADD_PROC(ctx, 
children, OID_AUTO, "syn_rexmt_count", 5503 CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX, 5504 sysctl_tp_shift_cnt, "IU", 5505 "Number of SYN retransmissions before abort"); 5506 5507 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count", 5508 CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2, 5509 sysctl_tp_shift_cnt, "IU", 5510 "Number of retransmissions before abort"); 5511 5512 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count", 5513 CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2, 5514 sysctl_tp_shift_cnt, "IU", 5515 "Number of keepalive probes before abort"); 5516 5517 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff", 5518 CTLFLAG_RD, NULL, "TOE retransmit backoffs"); 5519 children = SYSCTL_CHILDREN(oid); 5520 for (i = 0; i < 16; i++) { 5521 snprintf(s, sizeof(s), "%u", i); 5522 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s, 5523 CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff, 5524 "IU", "TOE retransmit backoff"); 5525 } 5526 } 5527 #endif 5528 } 5529 5530 void 5531 vi_sysctls(struct vi_info *vi) 5532 { 5533 struct sysctl_ctx_list *ctx; 5534 struct sysctl_oid *oid; 5535 struct sysctl_oid_list *children; 5536 5537 ctx = device_get_sysctl_ctx(vi->dev); 5538 5539 /* 5540 * dev.v?(cxgbe|cxl).X. 5541 */ 5542 oid = device_get_sysctl_tree(vi->dev); 5543 children = SYSCTL_CHILDREN(oid); 5544 5545 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 5546 vi->viid, "VI identifer"); 5547 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 5548 &vi->nrxq, 0, "# of rx queues"); 5549 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 5550 &vi->ntxq, 0, "# of tx queues"); 5551 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 5552 &vi->first_rxq, 0, "index of first rx queue"); 5553 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 5554 &vi->first_txq, 0, "index of first tx queue"); 5555 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, 5556 vi->rss_size, "size of RSS indirection table"); 5557 5558 if (IS_MAIN_VI(vi)) { 5559 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 5560 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 5561 "Reserve queue 0 for non-flowid packets"); 5562 } 5563 5564 #ifdef TCP_OFFLOAD 5565 if (vi->nofldrxq != 0) { 5566 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 5567 &vi->nofldrxq, 0, 5568 "# of rx queues for offloaded TCP connections"); 5569 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 5570 &vi->nofldtxq, 0, 5571 "# of tx queues for offloaded TCP connections"); 5572 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 5573 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 5574 "index of first TOE rx queue"); 5575 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 5576 CTLFLAG_RD, &vi->first_ofld_txq, 0, 5577 "index of first TOE tx queue"); 5578 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld", 5579 CTLTYPE_INT | CTLFLAG_RW, vi, 0, 5580 sysctl_holdoff_tmr_idx_ofld, "I", 5581 "holdoff timer index for TOE queues"); 5582 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld", 5583 CTLTYPE_INT | CTLFLAG_RW, vi, 0, 5584 sysctl_holdoff_pktc_idx_ofld, "I", 5585 "holdoff packet counter index for TOE queues"); 5586 } 5587 #endif 5588 #ifdef DEV_NETMAP 5589 if (vi->nnmrxq != 0) { 5590 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 5591 &vi->nnmrxq, 0, "# of netmap rx queues"); 5592 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 5593 &vi->nnmtxq, 0, "# of netmap tx queues"); 5594 
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 5595 CTLFLAG_RD, &vi->first_nm_rxq, 0, 5596 "index of first netmap rx queue"); 5597 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 5598 CTLFLAG_RD, &vi->first_nm_txq, 0, 5599 "index of first netmap tx queue"); 5600 } 5601 #endif 5602 5603 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 5604 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 5605 "holdoff timer index"); 5606 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 5607 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 5608 "holdoff packet counter index"); 5609 5610 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 5611 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 5612 "rx queue size"); 5613 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 5614 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 5615 "tx queue size"); 5616 } 5617 5618 static void 5619 cxgbe_sysctls(struct port_info *pi) 5620 { 5621 struct sysctl_ctx_list *ctx; 5622 struct sysctl_oid *oid; 5623 struct sysctl_oid_list *children, *children2; 5624 struct adapter *sc = pi->adapter; 5625 int i; 5626 char name[16]; 5627 5628 ctx = device_get_sysctl_ctx(pi->dev); 5629 5630 /* 5631 * dev.cxgbe.X. 5632 */ 5633 oid = device_get_sysctl_tree(pi->dev); 5634 children = SYSCTL_CHILDREN(oid); 5635 5636 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 5637 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 5638 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 5639 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 5640 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 5641 "PHY temperature (in Celsius)"); 5642 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 5643 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 5644 "PHY firmware version"); 5645 } 5646 5647 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 5648 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A", 5649 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 5650 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", 5651 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A", 5652 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); 5653 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", 5654 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I", 5655 "autonegotiation (-1 = not supported)"); 5656 5657 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 5658 port_top_speed(pi), "max speed (in Gbps)"); 5659 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL, 5660 pi->mps_bg_map, "MPS buffer group map"); 5661 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD, 5662 NULL, pi->rx_e_chan_map, "TP rx e-channel map"); 5663 5664 if (sc->flags & IS_VF) 5665 return; 5666 5667 /* 5668 * dev.(cxgbe|cxl).X.tc. 
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
	    "Tx scheduler traffic classes (cl_rl)");
	for (i = 0; i < sc->chip_params->nsched_cls; i++) {
		struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];

		snprintf(name, sizeof(name), "%d", i);
		children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
		    SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
		    "traffic class"));
		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD,
		    &tc->flags, 0, "flags");
		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
		    CTLFLAG_RD, &tc->refcount, 0, "references to this class");
#ifdef SBUF_DRAIN
		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
		    sysctl_tc_params, "A", "traffic class parameters");
#endif
	}

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
	    &pi->tx_parse_error, 0,
	    "# of tx packets with invalid length or # of segments");

#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
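	/* Per-priority pause (PFC) frame counters, one per priority 0-7. */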
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_records",
	    CTLFLAG_RD, &pi->tx_tls_records,
	    "# of TLS records transmitted");
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_tls_octets",
	    CTLFLAG_RD, &pi->tx_tls_octets,
	    "# of payload octets in transmitted TLS records");
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_records",
	    CTLFLAG_RD, &pi->rx_tls_records,
	    "# of TLS records received");
	SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_tls_octets",
	    CTLFLAG_RD, &pi->rx_tls_octets,
	    "# of payload octets in received TLS records");
}

static int
sysctl_int_array(SYSCTL_HANDLER_ARGS)
{
	int rc, *i, space = 0;
	struct sbuf sb;

	sbuf_new_for_sysctl(&sb, NULL, 64, req);
	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
		if (space)
			sbuf_printf(&sb, " ");
		sbuf_printf(&sb, "%d", *i);
		space = 1;
	}
	rc = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (rc);
}

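/*
 * arg1 points to a kernel "%b" bit-description string and arg2 carries the
 * value; the handler expands the set bits by name into the result.
 */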
static int
sysctl_bitfield(SYSCTL_HANDLER_ARGS)
{
	int rc;
	struct sbuf *sb;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}

static int
sysctl_noflowq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	int rc, val;

	val = vi->rsrv_noflowq;
	rc = sysctl_handle_int(oidp, &val, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if ((val >= 1) && (vi->ntxq > 1))
		vi->rsrv_noflowq = 1;
	else
		vi->rsrv_noflowq = 0;

	return (rc);
}

static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
	uint8_t v;

	idx = vi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
	for_each_rxq(vi, i, rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		rxq->iq.intr_params = v;
#endif
	}
	vi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}

static int
sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int idx, rc;

	idx = vi->pktc_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (idx < -1 || idx >= SGE_NCOUNTERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4pktc");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY;	/* cannot be changed once the queues are created */
	else
		vi->pktc_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

static int
sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int qsize, rc;

	qsize = vi->qsize_rxq;

	rc = sysctl_handle_int(oidp, &qsize, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (qsize < 128 || (qsize & 7))
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4rxqs");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY;	/* cannot be changed once the queues are created */
	else
		vi->qsize_rxq = qsize;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

static int
sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
{
	struct vi_info *vi = arg1;
	struct adapter *sc = vi->pi->adapter;
	int qsize, rc;

	qsize = vi->qsize_txq;

	rc = sysctl_handle_int(oidp, &qsize, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (qsize < 128 || qsize > 65536)
		return (EINVAL);

	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4txqs");
	if (rc)
		return (rc);

	if (vi->flags & VI_INIT_DONE)
		rc = EBUSY;	/* cannot be changed once the queues are created */
	else
		vi->qsize_txq = qsize;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

static int
sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;

	if (req->newptr == NULL) {
		struct sbuf *sb;
		static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";

		rc = sysctl_wire_old_buffer(req, 0);
		if (rc != 0)
			return (rc);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
		rc = sbuf_finish(sb);
		sbuf_delete(sb);
	} else {
		char s[2];
		int n;

		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
		s[1] = 0;

		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
		if (rc != 0)
			return (rc);

		if (s[1] != 0)
			return (EINVAL);
		if (s[0] < '0' || s[0] > '9')
			return (EINVAL);	/* not a number */
		n = s[0] - '0';
		if (n & ~(PAUSE_TX | PAUSE_RX))
			return (EINVAL);	/* some other bit is set too */

		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
		    "t4PAUSE");
		if (rc)
			return (rc);
		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
			lc->requested_fc |= n;
			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
			if (rc == 0) {
				lc->fc = lc->requested_fc;
			}
		}
		end_synchronized_op(sc, 0);
	}

	return (rc);
}

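/*
 * FEC works like pause_settings above: reads render the active FEC flags
 * via "%b", writes take a single digit interpreted as a bitmask of modes.
 */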
static int
sysctl_fec(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;

	if (req->newptr == NULL) {
		struct sbuf *sb;
		static char *bits = "\20\1RS\2BASER_RS\3RESERVED";

		rc = sysctl_wire_old_buffer(req, 0);
		if (rc != 0)
			return (rc);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits);
		rc = sbuf_finish(sb);
		sbuf_delete(sb);
	} else {
		char s[2];
		int n;

		s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC);
		s[1] = 0;

		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
		if (rc != 0)
			return (rc);

		if (s[1] != 0)
			return (EINVAL);
		if (s[0] < '0' || s[0] > '9')
			return (EINVAL);	/* not a number */
		n = s[0] - '0';
		if (n & ~M_FW_PORT_CAP_FEC)
			return (EINVAL);	/* some other bit is set too */

		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
		    "t4fec");
		if (rc)
			return (rc);
		if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) {
			lc->requested_fec = n &
			    G_FW_PORT_CAP_FEC(lc->supported);
			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
			if (rc == 0) {
				lc->fec = lc->requested_fec;
			}
		}
		end_synchronized_op(sc, 0);
	}

	return (rc);
}

static int
sysctl_autoneg(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc, val, old;

	if (lc->supported & FW_PORT_CAP_ANEG)
		val = lc->requested_aneg == AUTONEG_ENABLE ? 1 : 0;
	else
		val = -1;
	rc = sysctl_handle_int(oidp, &val, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);
	if ((lc->supported & FW_PORT_CAP_ANEG) == 0)
		return (ENOTSUP);

	if (val == 0)
		val = AUTONEG_DISABLE;
	else if (val == 1)
		val = AUTONEG_ENABLE;
	else
		return (EINVAL);
	if (lc->requested_aneg == val)
		return (0);	/* no change */

	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
	    "t4aneg");
	if (rc)
		return (rc);
	old = lc->requested_aneg;
	lc->requested_aneg = val;
	rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
	if (rc != 0)
		lc->requested_aneg = old;
	end_synchronized_op(sc, 0);
	return (rc);
}

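/* Generic handler that exports the 64-bit adapter register at arg2, read-only. */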
static int
sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int reg = arg2;
	uint64_t val;

	val = t4_read_reg64(sc, reg);

	return (sysctl_handle_64(oidp, &val, 0, req));
}

static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, t;
	uint32_t param, val;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	rc = sysctl_handle_int(oidp, &t, 0, req);
	return (rc);
}

#ifdef SBUF_DRAIN
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};

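/*
 * arg2 selects the CIM queue: indices below CIM_NUM_IBQ are inbound queues,
 * the rest are outbound queues, in the order listed in qname[] above.
 */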
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = sc->chip_params->cim_num_obq;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int cfg;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	MPASS(chip_id(sc) <= CHELSIO_T5);

	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (rc != 0)
		return (rc);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	rc = -t4_cim_read_la(sc, buf, NULL);
	if (rc != 0)
		goto done;

	sbuf_printf(sb, "Status Data PC%s",
	    cfg & F_UPDBGLACAPTPCONLY ? "" :
	    " LS0Stat LS0Addr LS0Data");

	for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
		if (cfg & F_UPDBGLACAPTPCONLY) {
			sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
			    p[6], p[7]);
			sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
			    p[4] & 0xff, p[5] >> 8);
			sbuf_printf(sb, "\n %02x %x%07x %x%07x",
			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
			    p[1] & 0xf, p[2] >> 4);
		} else {
			sbuf_printf(sb,
			    "\n %02x %x%07x %x%07x %08x %08x "
			    "%08x%08x%08x%08x",
			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
			    p[6], p[7]);
		}
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int cfg;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	MPASS(chip_id(sc) > CHELSIO_T5);

	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (rc != 0)
		return (rc);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	rc = -t4_cim_read_la(sc, buf, NULL);
	if (rc != 0)
		goto done;

	sbuf_printf(sb, "Status Inst Data PC%s",
	    cfg & F_UPDBGLACAPTPCONLY ? "" :
	    " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data");

	for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
		if (cfg & F_UPDBGLACAPTPCONLY) {
			sbuf_printf(sb, "\n %02x %08x %08x %08x",
			    p[3] & 0xff, p[2], p[1], p[0]);
			sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x",
			    (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
			    p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
			sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x",
			    (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
			    p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
			    p[6] >> 16);
		} else {
			sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x "
			    "%08x %08x %08x %08x %08x %08x",
			    (p[9] >> 16) & 0xff,
			    p[9] & 0xffff, p[8] >> 16,
			    p[8] & 0xffff, p[7] >> 16,
			    p[7] & 0xffff, p[6] >> 16,
			    p[2], p[1], p[0], p[5], p[4], p[3]);
		}
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
	p = buf;

	sbuf_printf(sb, "Cntl ID DataBE Addr Data");
	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
		sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
		    p[4], p[3], p[2], p[1], p[0]);
	}

	sbuf_printf(sb, "\n\nCntl ID Data");
	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
		sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	cim_num_obq = sc->chip_params->cim_num_obq;
	if (is_t4(sc)) {
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");

	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

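/* Per-channel CPL request/response counters, snapshotted under reg_lock. */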
static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_cpl_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	mtx_lock(&sc->reg_lock);
	t4_tp_get_cpl_stats(sc, &stats, 0);
	mtx_unlock(&sc->reg_lock);

	if (sc->chip_params->nchan > 2) {
		sbuf_printf(sb, " channel 0 channel 1"
		    " channel 2 channel 3");
		sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u",
		    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
		sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u",
		    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
	} else {
		sbuf_printf(sb, " channel 0 channel 1");
		sbuf_printf(sb, "\nCPL requests: %10u %10u",
		    stats.req[0], stats.req[1]);
		sbuf_printf(sb, "\nCPL responses: %10u %10u",
		    stats.rsp[0], stats.rsp[1]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_usm_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_get_usm_stats(sc, &stats, 1);

	sbuf_printf(sb, "Frames: %u\n", stats.frames);
	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
	sbuf_printf(sb, "Drops: %u", stats.drops);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static const char * const devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};

static const char * const devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_CF]		= "CF",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE",
	[FW_DEVLOG_FACILITY_CHNET]	= "CHNET",
};

static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->addr == 0)
		return (ENXIO);

	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
	if (rc != 0)
		goto done;
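	/*
	 * The firmware's log is a circular buffer; find the oldest entry
	 * (smallest timestamp) so the dump below can start with it.
	 */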
	nentries = dparams->size / sizeof(struct fw_devlog_e);
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d %15ju %8s %8s ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_fcoe_stats stats[MAX_NCHAN];
	int i, nchan = sc->chip_params->nchan;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	for (i = 0; i < nchan; i++)
		t4_get_fcoe_stats(sc, i, &stats[i], 1);

	if (nchan > 2) {
		sbuf_printf(sb, " channel 0 channel 1"
		    " channel 2 channel 3");
		sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju",
		    stats[0].octets_ddp, stats[1].octets_ddp,
		    stats[2].octets_ddp, stats[3].octets_ddp);
		sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u",
		    stats[0].frames_ddp, stats[1].frames_ddp,
		    stats[2].frames_ddp, stats[3].frames_ddp);
		sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
		    stats[0].frames_drop, stats[1].frames_drop,
		    stats[2].frames_drop, stats[3].frames_drop);
	} else {
		sbuf_printf(sb, " channel 0 channel 1");
		sbuf_printf(sb, "\noctetsDDP: %16ju %16ju",
		    stats[0].octets_ddp, stats[1].octets_ddp);
		sbuf_printf(sb, "\nframesDDP: %16u %16u",
		    stats[0].frames_ddp, stats[1].frames_ddp);
		sbuf_printf(sb, "\nframesDrop: %16u %16u",
		    stats[0].frames_drop, stats[1].frames_drop);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

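/* Hardware Tx scheduler state: per-scheduler mode, channel, rate, and IPGs. */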
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
	    "Class IPG (0.1 ns) Flow IPG (us)");

	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg, 1);
		sbuf_printf(sb, "\n %u %-5s %u ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		if (kbps)
			sbuf_printf(sb, "%9u ", kbps);
		else
			sbuf_printf(sb, " disabled ");

		if (ipg)
			sbuf_printf(sb, "%13u ", ipg);
		else
			sbuf_printf(sb, " disabled ");

		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, " disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	for (i = 0; i < sc->chip_params->nchan; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s Loopback %u"
		    " Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
			    *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
{
	int rc = 0;
	struct port_info *pi = arg1;
	struct link_config *lc = &pi->link_cfg;
	struct sbuf *sb;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
	if (sb == NULL)
		return (ENOMEM);

	if (lc->link_ok || lc->link_down_rc == 255)
		sbuf_printf(sb, "n/a");
	else
		sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

struct mem_desc {
	unsigned int base;
	unsigned int limit;
	unsigned int idx;
};

static int
mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct mem_desc *)a)->base -
	    ((const struct mem_desc *)b)->base;
}

static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size;

	if (from == to)
		return;

	size = to - from + 1;
	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}

static int
sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;
	uint32_t lo, hi, used, alloc;
	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
	static const char *region[] = {
		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
		"RQUDP region:", "PBL region:", "TXPBL region:",
		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
		"On-chip queues:", "TLS keys:",
	};
	struct mem_desc avail[4];
	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
	struct mem_desc *md = mem;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	for (i = 0; i < nitems(mem); i++) {
		mem[i].limit = 0;
		mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		avail[i].base = G_EDRAM0_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
		avail[i].idx = 0;
		i++;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		avail[i].base = G_EDRAM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
		avail[i].idx = 1;
		i++;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
		avail[i].limit = avail[i].base +
		    (G_EXT_MEM_SIZE(hi) << 20);
		avail[i].idx = is_t5(sc) ? 3 : 2;	/* Call it MC0 for T5 */
		i++;
	}
	if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base +
		    (G_EXT_MEM1_SIZE(hi) << 20);
		avail[i].idx = 4;
		i++;
	}
	if (!i)		/* no memory available */
		return (0);
	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);

	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
	    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
	md++;

	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
	    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
	md++;

	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
		if (chip_id(sc) <= CHELSIO_T5)
			md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
		else
			md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
		md->limit = 0;
	} else {
		md->base = 0;
		md->idx = nitems(region);	/* hide it */
	}
	md++;

#define ulp_region(reg) \
	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region

	md->base = 0;
	md->idx = nitems(region);
	if (!is_t4(sc)) {
		uint32_t size = 0;
		uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
		uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);

		if (is_t5(sc)) {
			if (sge_ctrl & F_VFIFO_ENABLE)
				size = G_DBVFIFO_SIZE(fifo_size);
		} else
			size = G_T6_DBVFIFO_SIZE(fifo_size);

		if (size) {
			md->base = G_BASEADDR(t4_read_reg(sc,
			    A_SGE_DBVFIFO_BADDR));
			md->limit = md->base + (size << 2) - 1;
		}
	}
	md++;

	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
	md->limit = 0;
	md++;

	md->base = sc->vres.ocq.start;
	if (sc->vres.ocq.size)
		md->limit = md->base + sc->vres.ocq.size - 1;
	else
		md->idx = nitems(region);	/* hide it */
	md++;

	md->base = sc->vres.key.start;
	if (sc->vres.key.size)
		md->limit = md->base + sc->vres.key.size - 1;
	else
		md->idx = nitems(region);	/* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (avail[n].limit < avail[n + 1].base)
			(md++)->base = avail[n].limit;
	if (avail[n].limit)
		(md++)->base = avail[n].limit;

	n = md - mem;
	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
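
	/* Everything is sorted now; show the available memories, then regions. */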
	for (lo = 0; lo < i; lo++)
		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
		    avail[lo].limit - 1);

	sbuf_printf(sb, "\n");
	for (i = 0; i < n; i++) {
		if (mem[i].idx >= nitems(region))
			continue;	/* skip holes */
		if (!mem[i].limit)
			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
		mem_region_show(sb, region[mem[i].idx], mem[i].base,
		    mem[i].limit);
	}

	sbuf_printf(sb, "\n");
	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP RAM:", lo, hi);

	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP Extmem2:", lo, hi);

	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
	    G_PMRXMAXPAGE(lo),
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
	    (lo & F_PMRXNUMCHN) ? 2 : 1);

	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
	    G_PMTXMAXPAGE(lo),
	    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
	    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
	sbuf_printf(sb, "%u p-structs\n",
	    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));

	for (i = 0; i < 4; i++) {
		if (chip_id(sc) > CHELSIO_T5)
			lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
		else
			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
		if (is_t5(sc)) {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		} else {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		}
		/* For T6 these are MAC buffer groups */
		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
		    i, used, alloc);
	}
	for (i = 0; i < sc->chip_params->nchan; i++) {
		if (chip_id(sc) > CHELSIO_T5)
			lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
		else
			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
		if (is_t5(sc)) {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		} else {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		}
		/* For T6 these are MAC buffer groups */
		sbuf_printf(sb,
		    "\nLoopback %d using %u pages out of %u allocated",
		    i, used, alloc);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
	*mask = x | y;
	y = htobe64(y);
	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}

static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;

	MPASS(chip_id(sc) <= CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx Ethernet address Mask Vld Ports PF"
	    " VF Replication P0 P1 P2 P3 ML");
	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		if (tcamx & tcamy)
			continue;
		tcamxy2valmask(tcamx, tcamy, addr, &mask);
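		/* Entry is in use; fetch its classifier SRAM words. */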
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
		    " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
		    addr[3], addr[4], addr[5], (uintmax_t)mask,
		    (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
		    G_PORTMAP(cls_hi), G_PF(cls_lo),
		    (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.rplc.fid_idx =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				sbuf_printf(sb, "%36d", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;

	MPASS(chip_id(sc) > CHELSIO_T5);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
	    " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
	    " Replication"
	    " P0 P1 P2 P3 ML\n");

	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
		uint8_t dip_hit, vlan_vld, lookup_type, port_num;
		uint16_t ivlan;
		uint64_t tcamx, tcamy, val, mask;
		uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
		uint8_t addr[ETHER_ADDR_LEN];

		ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
		if (i < 256)
			ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
		else
			ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
		tcamy = G_DMACH(val) << 32;
		tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
		lookup_type = G_DATALKPTYPE(data2);
		port_num = G_DATAPORTNUM(data2);
		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner header VNI */
			vniy = ((data2 & F_DATAVIDH2) << 23) |
			    (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
			dip_hit = data2 & F_DATADIPHIT;
			vlan_vld = 0;
		} else {
			vniy = 0;
			dip_hit = 0;
			vlan_vld = data2 & F_DATAVIDH2;
			ivlan = G_VIDL(val);
		}

		ctl |= V_CTLXYBITSEL(1);
		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
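		/* Re-read with CTLXYBITSEL(1) selected to get the tcamx half. */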
		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
		tcamx = G_DMACH(val) << 32;
		tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			/* Inner header VNI mask */
			vnix = ((data2 & F_DATAVIDH2) << 23) |
			    (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
		} else
			vnix = 0;

		if (tcamx & tcamy)
			continue;
		tcamxy2valmask(tcamx, tcamy, addr, &mask);

		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		if (lookup_type && lookup_type != M_DATALKPTYPE) {
			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
			    "%012jx %06x %06x - - %3c"
			    " 'I' %4x %3c %#x%4u%4d", i, addr[0],
			    addr[1], addr[2], addr[3], addr[4], addr[5],
			    (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
			    port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
		} else {
			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
			    "%012jx - - ", i, addr[0], addr[1],
			    addr[2], addr[3], addr[4], addr[5],
			    (uintmax_t)mask);

			if (vlan_vld)
				sbuf_printf(sb, "%4u Y ", ivlan);
			else
				sbuf_printf(sb, " - N ");

			sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
			    lookup_type ? 'I' : 'O', port_num,
			    cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
		}

		if (cls_lo & F_T6_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.rplc.fid_idx =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_IDX(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t6mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				sbuf_printf(sb, "%72d", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x"
				    " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
				    be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
				    be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
				    be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%72s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#x",
		    G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
		    G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
		    (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
	}

	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

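/* The NMTUS (16) entries of TP's path MTU table. */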
7546 t4_read_mtu_tbl(sc, mtus, NULL); 7547 7548 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 7549 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 7550 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 7551 mtus[14], mtus[15]); 7552 7553 rc = sbuf_finish(sb); 7554 sbuf_delete(sb); 7555 7556 return (rc); 7557 } 7558 7559 static int 7560 sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 7561 { 7562 struct adapter *sc = arg1; 7563 struct sbuf *sb; 7564 int rc, i; 7565 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 7566 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 7567 static const char *tx_stats[MAX_PM_NSTATS] = { 7568 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 7569 "Tx FIFO wait", NULL, "Tx latency" 7570 }; 7571 static const char *rx_stats[MAX_PM_NSTATS] = { 7572 "Read:", "Write bypass:", "Write mem:", "Flush:", 7573 "Rx FIFO wait", NULL, "Rx latency" 7574 }; 7575 7576 rc = sysctl_wire_old_buffer(req, 0); 7577 if (rc != 0) 7578 return (rc); 7579 7580 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7581 if (sb == NULL) 7582 return (ENOMEM); 7583 7584 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 7585 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 7586 7587 sbuf_printf(sb, " Tx pcmds Tx bytes"); 7588 for (i = 0; i < 4; i++) { 7589 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7590 tx_cyc[i]); 7591 } 7592 7593 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 7594 for (i = 0; i < 4; i++) { 7595 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7596 rx_cyc[i]); 7597 } 7598 7599 if (chip_id(sc) > CHELSIO_T5) { 7600 sbuf_printf(sb, 7601 "\n Total wait Total occupancy"); 7602 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7603 tx_cyc[i]); 7604 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7605 rx_cyc[i]); 7606 7607 i += 2; 7608 MPASS(i < nitems(tx_stats)); 7609 7610 sbuf_printf(sb, 7611 "\n Reads Total wait"); 7612 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7613 tx_cyc[i]); 7614 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7615 rx_cyc[i]); 7616 } 7617 7618 rc = sbuf_finish(sb); 7619 sbuf_delete(sb); 7620 7621 return (rc); 7622 } 7623 7624 static int 7625 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 7626 { 7627 struct adapter *sc = arg1; 7628 struct sbuf *sb; 7629 int rc; 7630 struct tp_rdma_stats stats; 7631 7632 rc = sysctl_wire_old_buffer(req, 0); 7633 if (rc != 0) 7634 return (rc); 7635 7636 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7637 if (sb == NULL) 7638 return (ENOMEM); 7639 7640 mtx_lock(&sc->reg_lock); 7641 t4_tp_get_rdma_stats(sc, &stats, 0); 7642 mtx_unlock(&sc->reg_lock); 7643 7644 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); 7645 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); 7646 7647 rc = sbuf_finish(sb); 7648 sbuf_delete(sb); 7649 7650 return (rc); 7651 } 7652 7653 static int 7654 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 7655 { 7656 struct adapter *sc = arg1; 7657 struct sbuf *sb; 7658 int rc; 7659 struct tp_tcp_stats v4, v6; 7660 7661 rc = sysctl_wire_old_buffer(req, 0); 7662 if (rc != 0) 7663 return (rc); 7664 7665 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7666 if (sb == NULL) 7667 return (ENOMEM); 7668 7669 mtx_lock(&sc->reg_lock); 7670 t4_tp_get_tcp_stats(sc, &v4, &v6, 0); 7671 mtx_unlock(&sc->reg_lock); 7672 7673 sbuf_printf(sb, 7674 " IP IPv6\n"); 7675 sbuf_printf(sb, "OutRsts: %20u %20u\n", 7676 v4.tcp_out_rsts, v6.tcp_out_rsts); 7677 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 7678 v4.tcp_in_segs, 
v6.tcp_in_segs); 7679 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 7680 v4.tcp_out_segs, v6.tcp_out_segs); 7681 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 7682 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 7683 7684 rc = sbuf_finish(sb); 7685 sbuf_delete(sb); 7686 7687 return (rc); 7688 } 7689 7690 static int 7691 sysctl_tids(SYSCTL_HANDLER_ARGS) 7692 { 7693 struct adapter *sc = arg1; 7694 struct sbuf *sb; 7695 int rc; 7696 struct tid_info *t = &sc->tids; 7697 7698 rc = sysctl_wire_old_buffer(req, 0); 7699 if (rc != 0) 7700 return (rc); 7701 7702 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7703 if (sb == NULL) 7704 return (ENOMEM); 7705 7706 if (t->natids) { 7707 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 7708 t->atids_in_use); 7709 } 7710 7711 if (t->ntids) { 7712 sbuf_printf(sb, "TID range: "); 7713 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 7714 uint32_t b, hb; 7715 7716 if (chip_id(sc) <= CHELSIO_T5) { 7717 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 7718 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 7719 } else { 7720 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); 7721 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); 7722 } 7723 7724 if (b) 7725 sbuf_printf(sb, "0-%u, ", b - 1); 7726 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1); 7727 } else 7728 sbuf_printf(sb, "0-%u", t->ntids - 1); 7729 sbuf_printf(sb, ", in use: %u\n", 7730 atomic_load_acq_int(&t->tids_in_use)); 7731 } 7732 7733 if (t->nstids) { 7734 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 7735 t->stid_base + t->nstids - 1, t->stids_in_use); 7736 } 7737 7738 if (t->nftids) { 7739 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 7740 t->ftid_base + t->nftids - 1); 7741 } 7742 7743 if (t->netids) { 7744 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 7745 t->etid_base + t->netids - 1); 7746 } 7747 7748 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 7749 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 7750 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 7751 7752 rc = sbuf_finish(sb); 7753 sbuf_delete(sb); 7754 7755 return (rc); 7756 } 7757 7758 static int 7759 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 7760 { 7761 struct adapter *sc = arg1; 7762 struct sbuf *sb; 7763 int rc; 7764 struct tp_err_stats stats; 7765 7766 rc = sysctl_wire_old_buffer(req, 0); 7767 if (rc != 0) 7768 return (rc); 7769 7770 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7771 if (sb == NULL) 7772 return (ENOMEM); 7773 7774 mtx_lock(&sc->reg_lock); 7775 t4_tp_get_err_stats(sc, &stats, 0); 7776 mtx_unlock(&sc->reg_lock); 7777 7778 if (sc->chip_params->nchan > 2) { 7779 sbuf_printf(sb, " channel 0 channel 1" 7780 " channel 2 channel 3\n"); 7781 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 7782 stats.mac_in_errs[0], stats.mac_in_errs[1], 7783 stats.mac_in_errs[2], stats.mac_in_errs[3]); 7784 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 7785 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 7786 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 7787 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 7788 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 7789 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 7790 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 7791 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 7792 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 7793 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 7794 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 7795 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 7796 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 7797 
stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 7798 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 7799 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 7800 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 7801 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 7802 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 7803 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 7804 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 7805 } else { 7806 sbuf_printf(sb, " channel 0 channel 1\n"); 7807 sbuf_printf(sb, "macInErrs: %10u %10u\n", 7808 stats.mac_in_errs[0], stats.mac_in_errs[1]); 7809 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 7810 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 7811 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 7812 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 7813 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 7814 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 7815 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 7816 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 7817 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 7818 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 7819 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 7820 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 7821 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 7822 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 7823 } 7824 7825 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 7826 stats.ofld_no_neigh, stats.ofld_cong_defer); 7827 7828 rc = sbuf_finish(sb); 7829 sbuf_delete(sb); 7830 7831 return (rc); 7832 } 7833 7834 static int 7835 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 7836 { 7837 struct adapter *sc = arg1; 7838 struct tp_params *tpp = &sc->params.tp; 7839 u_int mask; 7840 int rc; 7841 7842 mask = tpp->la_mask >> 16; 7843 rc = sysctl_handle_int(oidp, &mask, 0, req); 7844 if (rc != 0 || req->newptr == NULL) 7845 return (rc); 7846 if (mask > 0xffff) 7847 return (EINVAL); 7848 tpp->la_mask = mask << 16; 7849 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7850 7851 return (0); 7852 } 7853 7854 struct field_desc { 7855 const char *name; 7856 u_int start; 7857 u_int width; 7858 }; 7859 7860 static void 7861 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7862 { 7863 char buf[32]; 7864 int line_size = 0; 7865 7866 while (f->name) { 7867 uint64_t mask = (1ULL << f->width) - 1; 7868 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7869 ((uintmax_t)v >> f->start) & mask); 7870 7871 if (line_size + len >= 79) { 7872 line_size = 8; 7873 sbuf_printf(sb, "\n "); 7874 } 7875 sbuf_printf(sb, "%s ", buf); 7876 line_size += len + 1; 7877 f++; 7878 } 7879 sbuf_printf(sb, "\n"); 7880 } 7881 7882 static const struct field_desc tp_la0[] = { 7883 { "RcfOpCodeOut", 60, 4 }, 7884 { "State", 56, 4 }, 7885 { "WcfState", 52, 4 }, 7886 { "RcfOpcSrcOut", 50, 2 }, 7887 { "CRxError", 49, 1 }, 7888 { "ERxError", 48, 1 }, 7889 { "SanityFailed", 47, 1 }, 7890 { "SpuriousMsg", 46, 1 }, 7891 { "FlushInputMsg", 45, 1 }, 7892 { "FlushInputCpl", 44, 1 }, 7893 { "RssUpBit", 43, 1 }, 7894 { "RssFilterHit", 42, 1 }, 7895 { "Tid", 32, 10 }, 7896 { "InitTcb", 31, 1 }, 7897 { "LineNumber", 24, 7 }, 7898 { "Emsg", 23, 1 }, 7899 { "EdataOut", 22, 1 }, 7900 { "Cmsg", 21, 1 }, 7901 { "CdataOut", 20, 1 }, 7902 { "EreadPdu", 19, 1 }, 7903 { "CreadPdu", 18, 1 }, 7904 { "TunnelPkt", 17, 1 }, 7905 { "RcfPeerFin", 16, 1 }, 7906 { "RcfReasonOut", 12, 4 }, 7907 { "TxCchannel", 10, 2 }, 7908 { "RcfTxChannel", 8, 2 }, 7909 { "RxEchannel", 6, 2 }, 7910 { "RcfRxChannel", 5, 1 }, 7911 { 
"RcfDataOutSrdy", 4, 1 }, 7912 { "RxDvld", 3, 1 }, 7913 { "RxOoDvld", 2, 1 }, 7914 { "RxCongestion", 1, 1 }, 7915 { "TxCongestion", 0, 1 }, 7916 { NULL } 7917 }; 7918 7919 static const struct field_desc tp_la1[] = { 7920 { "CplCmdIn", 56, 8 }, 7921 { "CplCmdOut", 48, 8 }, 7922 { "ESynOut", 47, 1 }, 7923 { "EAckOut", 46, 1 }, 7924 { "EFinOut", 45, 1 }, 7925 { "ERstOut", 44, 1 }, 7926 { "SynIn", 43, 1 }, 7927 { "AckIn", 42, 1 }, 7928 { "FinIn", 41, 1 }, 7929 { "RstIn", 40, 1 }, 7930 { "DataIn", 39, 1 }, 7931 { "DataInVld", 38, 1 }, 7932 { "PadIn", 37, 1 }, 7933 { "RxBufEmpty", 36, 1 }, 7934 { "RxDdp", 35, 1 }, 7935 { "RxFbCongestion", 34, 1 }, 7936 { "TxFbCongestion", 33, 1 }, 7937 { "TxPktSumSrdy", 32, 1 }, 7938 { "RcfUlpType", 28, 4 }, 7939 { "Eread", 27, 1 }, 7940 { "Ebypass", 26, 1 }, 7941 { "Esave", 25, 1 }, 7942 { "Static0", 24, 1 }, 7943 { "Cread", 23, 1 }, 7944 { "Cbypass", 22, 1 }, 7945 { "Csave", 21, 1 }, 7946 { "CPktOut", 20, 1 }, 7947 { "RxPagePoolFull", 18, 2 }, 7948 { "RxLpbkPkt", 17, 1 }, 7949 { "TxLpbkPkt", 16, 1 }, 7950 { "RxVfValid", 15, 1 }, 7951 { "SynLearned", 14, 1 }, 7952 { "SetDelEntry", 13, 1 }, 7953 { "SetInvEntry", 12, 1 }, 7954 { "CpcmdDvld", 11, 1 }, 7955 { "CpcmdSave", 10, 1 }, 7956 { "RxPstructsFull", 8, 2 }, 7957 { "EpcmdDvld", 7, 1 }, 7958 { "EpcmdFlush", 6, 1 }, 7959 { "EpcmdTrimPrefix", 5, 1 }, 7960 { "EpcmdTrimPostfix", 4, 1 }, 7961 { "ERssIp4Pkt", 3, 1 }, 7962 { "ERssIp6Pkt", 2, 1 }, 7963 { "ERssTcpUdpPkt", 1, 1 }, 7964 { "ERssFceFipPkt", 0, 1 }, 7965 { NULL } 7966 }; 7967 7968 static const struct field_desc tp_la2[] = { 7969 { "CplCmdIn", 56, 8 }, 7970 { "MpsVfVld", 55, 1 }, 7971 { "MpsPf", 52, 3 }, 7972 { "MpsVf", 44, 8 }, 7973 { "SynIn", 43, 1 }, 7974 { "AckIn", 42, 1 }, 7975 { "FinIn", 41, 1 }, 7976 { "RstIn", 40, 1 }, 7977 { "DataIn", 39, 1 }, 7978 { "DataInVld", 38, 1 }, 7979 { "PadIn", 37, 1 }, 7980 { "RxBufEmpty", 36, 1 }, 7981 { "RxDdp", 35, 1 }, 7982 { "RxFbCongestion", 34, 1 }, 7983 { "TxFbCongestion", 33, 1 }, 7984 { "TxPktSumSrdy", 32, 1 }, 7985 { "RcfUlpType", 28, 4 }, 7986 { "Eread", 27, 1 }, 7987 { "Ebypass", 26, 1 }, 7988 { "Esave", 25, 1 }, 7989 { "Static0", 24, 1 }, 7990 { "Cread", 23, 1 }, 7991 { "Cbypass", 22, 1 }, 7992 { "Csave", 21, 1 }, 7993 { "CPktOut", 20, 1 }, 7994 { "RxPagePoolFull", 18, 2 }, 7995 { "RxLpbkPkt", 17, 1 }, 7996 { "TxLpbkPkt", 16, 1 }, 7997 { "RxVfValid", 15, 1 }, 7998 { "SynLearned", 14, 1 }, 7999 { "SetDelEntry", 13, 1 }, 8000 { "SetInvEntry", 12, 1 }, 8001 { "CpcmdDvld", 11, 1 }, 8002 { "CpcmdSave", 10, 1 }, 8003 { "RxPstructsFull", 8, 2 }, 8004 { "EpcmdDvld", 7, 1 }, 8005 { "EpcmdFlush", 6, 1 }, 8006 { "EpcmdTrimPrefix", 5, 1 }, 8007 { "EpcmdTrimPostfix", 4, 1 }, 8008 { "ERssIp4Pkt", 3, 1 }, 8009 { "ERssIp6Pkt", 2, 1 }, 8010 { "ERssTcpUdpPkt", 1, 1 }, 8011 { "ERssFceFipPkt", 0, 1 }, 8012 { NULL } 8013 }; 8014 8015 static void 8016 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 8017 { 8018 8019 field_desc_show(sb, *p, tp_la0); 8020 } 8021 8022 static void 8023 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 8024 { 8025 8026 if (idx) 8027 sbuf_printf(sb, "\n"); 8028 field_desc_show(sb, p[0], tp_la0); 8029 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 8030 field_desc_show(sb, p[1], tp_la0); 8031 } 8032 8033 static void 8034 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 8035 { 8036 8037 if (idx) 8038 sbuf_printf(sb, "\n"); 8039 field_desc_show(sb, p[0], tp_la0); 8040 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 8041 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 8042 } 8043 8044 static int 8045 sysctl_tp_la(SYSCTL_HANDLER_ARGS) 8046 { 8047 struct adapter *sc = arg1; 8048 struct sbuf *sb; 8049 uint64_t *buf, *p; 8050 int rc; 8051 u_int i, inc; 8052 void (*show_func)(struct sbuf *, uint64_t *, int); 8053 8054 rc = sysctl_wire_old_buffer(req, 0); 8055 if (rc != 0) 8056 return (rc); 8057 8058 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8059 if (sb == NULL) 8060 return (ENOMEM); 8061 8062 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 8063 8064 t4_tp_read_la(sc, buf, NULL); 8065 p = buf; 8066 8067 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 8068 case 2: 8069 inc = 2; 8070 show_func = tp_la_show2; 8071 break; 8072 case 3: 8073 inc = 2; 8074 show_func = tp_la_show3; 8075 break; 8076 default: 8077 inc = 1; 8078 show_func = tp_la_show; 8079 } 8080 8081 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 8082 (*show_func)(sb, p, i); 8083 8084 rc = sbuf_finish(sb); 8085 sbuf_delete(sb); 8086 free(buf, M_CXGBE); 8087 return (rc); 8088 } 8089 8090 static int 8091 sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 8092 { 8093 struct adapter *sc = arg1; 8094 struct sbuf *sb; 8095 int rc; 8096 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 8097 8098 rc = sysctl_wire_old_buffer(req, 0); 8099 if (rc != 0) 8100 return (rc); 8101 8102 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 8103 if (sb == NULL) 8104 return (ENOMEM); 8105 8106 t4_get_chan_txrate(sc, nrate, orate); 8107 8108 if (sc->chip_params->nchan > 2) { 8109 sbuf_printf(sb, " channel 0 channel 1" 8110 " channel 2 channel 3\n"); 8111 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 8112 nrate[0], nrate[1], nrate[2], nrate[3]); 8113 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 8114 orate[0], orate[1], orate[2], orate[3]); 8115 } else { 8116 sbuf_printf(sb, " channel 0 channel 1\n"); 8117 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 8118 nrate[0], nrate[1]); 8119 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 8120 orate[0], orate[1]); 8121 } 8122 8123 rc = sbuf_finish(sb); 8124 sbuf_delete(sb); 8125 8126 return (rc); 8127 } 8128 8129 static int 8130 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 8131 { 8132 struct adapter *sc = arg1; 8133 struct sbuf *sb; 8134 uint32_t *buf, *p; 8135 int rc, i; 8136 8137 rc = sysctl_wire_old_buffer(req, 0); 8138 if (rc != 0) 8139 return (rc); 8140 8141 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8142 if (sb == NULL) 8143 return (ENOMEM); 8144 8145 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 8146 M_ZERO | M_WAITOK); 8147 8148 t4_ulprx_read_la(sc, buf); 8149 p = buf; 8150 8151 sbuf_printf(sb, " Pcmd Type Message" 8152 " Data"); 8153 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 8154 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 8155 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 8156 } 8157 8158 rc = sbuf_finish(sb); 8159 sbuf_delete(sb); 8160 free(buf, M_CXGBE); 8161 return (rc); 8162 } 8163 8164 static int 8165 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 8166 { 8167 struct adapter *sc = arg1; 8168 struct sbuf *sb; 8169 int rc, v; 8170 8171 MPASS(chip_id(sc) >= CHELSIO_T5); 8172 8173 rc = sysctl_wire_old_buffer(req, 0); 8174 if (rc != 0) 8175 return (rc); 8176 8177 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8178 if (sb == NULL) 8179 return (ENOMEM); 8180 8181 v = t4_read_reg(sc, A_SGE_STAT_CFG); 8182 if (G_STATSOURCE_T5(v) == 7) { 8183 int mode; 8184 8185 mode = is_t5(sc) ? 
G_STATMODE(v) : G_T6_STATMODE(v); 8186 if (mode == 0) { 8187 sbuf_printf(sb, "total %d, incomplete %d", 8188 t4_read_reg(sc, A_SGE_STAT_TOTAL), 8189 t4_read_reg(sc, A_SGE_STAT_MATCH)); 8190 } else if (mode == 1) { 8191 sbuf_printf(sb, "total %d, data overflow %d", 8192 t4_read_reg(sc, A_SGE_STAT_TOTAL), 8193 t4_read_reg(sc, A_SGE_STAT_MATCH)); 8194 } else { 8195 sbuf_printf(sb, "unknown mode %d", mode); 8196 } 8197 } 8198 rc = sbuf_finish(sb); 8199 sbuf_delete(sb); 8200 8201 return (rc); 8202 } 8203 8204 static int 8205 sysctl_tc_params(SYSCTL_HANDLER_ARGS) 8206 { 8207 struct adapter *sc = arg1; 8208 struct tx_cl_rl_params tc; 8209 struct sbuf *sb; 8210 int i, rc, port_id, mbps, gbps; 8211 8212 rc = sysctl_wire_old_buffer(req, 0); 8213 if (rc != 0) 8214 return (rc); 8215 8216 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8217 if (sb == NULL) 8218 return (ENOMEM); 8219 8220 port_id = arg2 >> 16; 8221 MPASS(port_id < sc->params.nports); 8222 MPASS(sc->port[port_id] != NULL); 8223 i = arg2 & 0xffff; 8224 MPASS(i < sc->chip_params->nsched_cls); 8225 8226 mtx_lock(&sc->tc_lock); 8227 tc = sc->port[port_id]->sched_params->cl_rl[i]; 8228 mtx_unlock(&sc->tc_lock); 8229 8230 if (tc.flags & TX_CLRL_ERROR) { 8231 sbuf_printf(sb, "error"); 8232 goto done; 8233 } 8234 8235 if (tc.ratemode == SCHED_CLASS_RATEMODE_REL) { 8236 /* XXX: top speed or actual link speed? */ 8237 gbps = port_top_speed(sc->port[port_id]); 8238 sbuf_printf(sb, " %u%% of %uGbps", tc.maxrate, gbps); 8239 } else if (tc.ratemode == SCHED_CLASS_RATEMODE_ABS) { 8240 switch (tc.rateunit) { 8241 case SCHED_CLASS_RATEUNIT_BITS: 8242 mbps = tc.maxrate / 1000; 8243 gbps = tc.maxrate / 1000000; 8244 if (tc.maxrate == gbps * 1000000) 8245 sbuf_printf(sb, " %uGbps", gbps); 8246 else if (tc.maxrate == mbps * 1000) 8247 sbuf_printf(sb, " %uMbps", mbps); 8248 else 8249 sbuf_printf(sb, " %uKbps", tc.maxrate); 8250 break; 8251 case SCHED_CLASS_RATEUNIT_PKTS: 8252 sbuf_printf(sb, " %upps", tc.maxrate); 8253 break; 8254 default: 8255 rc = ENXIO; 8256 goto done; 8257 } 8258 } 8259 8260 switch (tc.mode) { 8261 case SCHED_CLASS_MODE_CLASS: 8262 sbuf_printf(sb, " aggregate"); 8263 break; 8264 case SCHED_CLASS_MODE_FLOW: 8265 sbuf_printf(sb, " per-flow"); 8266 break; 8267 default: 8268 rc = ENXIO; 8269 goto done; 8270 } 8271 8272 done: 8273 if (rc == 0) 8274 rc = sbuf_finish(sb); 8275 sbuf_delete(sb); 8276 8277 return (rc); 8278 } 8279 #endif 8280 8281 #ifdef TCP_OFFLOAD 8282 static int 8283 sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS) 8284 { 8285 struct adapter *sc = arg1; 8286 int *old_ports, *new_ports; 8287 int i, new_count, rc; 8288 8289 if (req->newptr == NULL && req->oldptr == NULL) 8290 return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) * 8291 sizeof(sc->tt.tls_rx_ports[0]))); 8292 8293 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx"); 8294 if (rc) 8295 return (rc); 8296 8297 if (sc->tt.num_tls_rx_ports == 0) { 8298 i = -1; 8299 rc = SYSCTL_OUT(req, &i, sizeof(i)); 8300 } else 8301 rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports, 8302 sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0])); 8303 if (rc == 0 && req->newptr != NULL) { 8304 new_count = req->newlen / sizeof(new_ports[0]); 8305 new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE, 8306 M_WAITOK); 8307 rc = SYSCTL_IN(req, new_ports, new_count * 8308 sizeof(new_ports[0])); 8309 if (rc) 8310 goto err; 8311 8312 /* Allow setting to a single '-1' to clear the list. 
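		 * Illustration (the OID path is an assumption about where this
		 * handler is attached; adjust for the actual nexus instance):
		 *	sysctl dev.t4nex.0.toe.tls_rx_ports=443	# watch port 443
		 *	sysctl dev.t4nex.0.toe.tls_rx_ports=-1	# clear the list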
*/ 8313 if (new_count == 1 && new_ports[0] == -1) { 8314 ADAPTER_LOCK(sc); 8315 old_ports = sc->tt.tls_rx_ports; 8316 sc->tt.tls_rx_ports = NULL; 8317 sc->tt.num_tls_rx_ports = 0; 8318 ADAPTER_UNLOCK(sc); 8319 free(old_ports, M_CXGBE); 8320 } else { 8321 for (i = 0; i < new_count; i++) { 8322 if (new_ports[i] < 1 || 8323 new_ports[i] > IPPORT_MAX) { 8324 rc = EINVAL; 8325 goto err; 8326 } 8327 } 8328 8329 ADAPTER_LOCK(sc); 8330 old_ports = sc->tt.tls_rx_ports; 8331 sc->tt.tls_rx_ports = new_ports; 8332 sc->tt.num_tls_rx_ports = new_count; 8333 ADAPTER_UNLOCK(sc); 8334 free(old_ports, M_CXGBE); 8335 new_ports = NULL; 8336 } 8337 err: 8338 free(new_ports, M_CXGBE); 8339 } 8340 end_synchronized_op(sc, 0); 8341 return (rc); 8342 } 8343 8344 static void 8345 unit_conv(char *buf, size_t len, u_int val, u_int factor) 8346 { 8347 u_int rem = val % factor; 8348 8349 if (rem == 0) 8350 snprintf(buf, len, "%u", val / factor); 8351 else { 8352 while (rem % 10 == 0) 8353 rem /= 10; 8354 snprintf(buf, len, "%u.%u", val / factor, rem); 8355 } 8356 } 8357 8358 static int 8359 sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 8360 { 8361 struct adapter *sc = arg1; 8362 char buf[16]; 8363 u_int res, re; 8364 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8365 8366 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 8367 switch (arg2) { 8368 case 0: 8369 /* timer_tick */ 8370 re = G_TIMERRESOLUTION(res); 8371 break; 8372 case 1: 8373 /* TCP timestamp tick */ 8374 re = G_TIMESTAMPRESOLUTION(res); 8375 break; 8376 case 2: 8377 /* DACK tick */ 8378 re = G_DELAYEDACKRESOLUTION(res); 8379 break; 8380 default: 8381 return (EDOOFUS); 8382 } 8383 8384 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); 8385 8386 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 8387 } 8388 8389 static int 8390 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 8391 { 8392 struct adapter *sc = arg1; 8393 u_int res, dack_re, v; 8394 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8395 8396 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 8397 dack_re = G_DELAYEDACKRESOLUTION(res); 8398 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 8399 8400 return (sysctl_handle_int(oidp, &v, 0, req)); 8401 } 8402 8403 static int 8404 sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 8405 { 8406 struct adapter *sc = arg1; 8407 int reg = arg2; 8408 u_int tre; 8409 u_long tp_tick_us, v; 8410 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8411 8412 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 8413 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 8414 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || 8415 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); 8416 8417 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 8418 tp_tick_us = (cclk_ps << tre) / 1000000; 8419 8420 if (reg == A_TP_INIT_SRTT) 8421 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 8422 else 8423 v = tp_tick_us * t4_read_reg(sc, reg); 8424 8425 return (sysctl_handle_long(oidp, &v, 0, req)); 8426 } 8427 8428 /* 8429 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is 8430 * passed to this function. 
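 * For example, arg2 = 12 makes this handler report
 * (TP_SHIFT_CNT >> 12) & 0xf, i.e. bits 12..15 of the register.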
8431 */ 8432 static int 8433 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS) 8434 { 8435 struct adapter *sc = arg1; 8436 int idx = arg2; 8437 u_int v; 8438 8439 MPASS(idx >= 0 && idx <= 24); 8440 8441 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf; 8442 8443 return (sysctl_handle_int(oidp, &v, 0, req)); 8444 } 8445 8446 static int 8447 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS) 8448 { 8449 struct adapter *sc = arg1; 8450 int idx = arg2; 8451 u_int shift, v, r; 8452 8453 MPASS(idx >= 0 && idx < 16); 8454 8455 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3); 8456 shift = (idx & 3) << 3; 8457 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0; 8458 8459 return (sysctl_handle_int(oidp, &v, 0, req)); 8460 } 8461 8462 static int 8463 sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS) 8464 { 8465 struct vi_info *vi = arg1; 8466 struct adapter *sc = vi->pi->adapter; 8467 int idx, rc, i; 8468 struct sge_ofld_rxq *ofld_rxq; 8469 uint8_t v; 8470 8471 idx = vi->ofld_tmr_idx; 8472 8473 rc = sysctl_handle_int(oidp, &idx, 0, req); 8474 if (rc != 0 || req->newptr == NULL) 8475 return (rc); 8476 8477 if (idx < 0 || idx >= SGE_NTIMERS) 8478 return (EINVAL); 8479 8480 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 8481 "t4otmr"); 8482 if (rc) 8483 return (rc); 8484 8485 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1); 8486 for_each_ofld_rxq(vi, i, ofld_rxq) { 8487 #ifdef atomic_store_rel_8 8488 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 8489 #else 8490 ofld_rxq->iq.intr_params = v; 8491 #endif 8492 } 8493 vi->ofld_tmr_idx = idx; 8494 8495 end_synchronized_op(sc, LOCK_HELD); 8496 return (0); 8497 } 8498 8499 static int 8500 sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS) 8501 { 8502 struct vi_info *vi = arg1; 8503 struct adapter *sc = vi->pi->adapter; 8504 int idx, rc; 8505 8506 idx = vi->ofld_pktc_idx; 8507 8508 rc = sysctl_handle_int(oidp, &idx, 0, req); 8509 if (rc != 0 || req->newptr == NULL) 8510 return (rc); 8511 8512 if (idx < -1 || idx >= SGE_NCOUNTERS) 8513 return (EINVAL); 8514 8515 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 8516 "t4opktc"); 8517 if (rc) 8518 return (rc); 8519 8520 if (vi->flags & VI_INIT_DONE) 8521 rc = EBUSY; /* cannot be changed once the queues are created */ 8522 else 8523 vi->ofld_pktc_idx = idx; 8524 8525 end_synchronized_op(sc, LOCK_HELD); 8526 return (rc); 8527 } 8528 #endif 8529 8530 static uint32_t 8531 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 8532 { 8533 uint32_t mode; 8534 8535 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 8536 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 8537 8538 if (fconf & F_FRAGMENTATION) 8539 mode |= T4_FILTER_IP_FRAGMENT; 8540 8541 if (fconf & F_MPSHITTYPE) 8542 mode |= T4_FILTER_MPS_HIT_TYPE; 8543 8544 if (fconf & F_MACMATCH) 8545 mode |= T4_FILTER_MAC_IDX; 8546 8547 if (fconf & F_ETHERTYPE) 8548 mode |= T4_FILTER_ETH_TYPE; 8549 8550 if (fconf & F_PROTOCOL) 8551 mode |= T4_FILTER_IP_PROTO; 8552 8553 if (fconf & F_TOS) 8554 mode |= T4_FILTER_IP_TOS; 8555 8556 if (fconf & F_VLAN) 8557 mode |= T4_FILTER_VLAN; 8558 8559 if (fconf & F_VNIC_ID) { 8560 mode |= T4_FILTER_VNIC; 8561 if (iconf & F_VNIC) 8562 mode |= T4_FILTER_IC_VNIC; 8563 } 8564 8565 if (fconf & F_PORT) 8566 mode |= T4_FILTER_PORT; 8567 8568 if (fconf & F_FCOE) 8569 mode |= T4_FILTER_FCoE; 8570 8571 return (mode); 8572 } 8573 8574 static uint32_t 8575 mode_to_fconf(uint32_t mode) 8576 { 8577 uint32_t fconf = 0; 8578 8579 if (mode & T4_FILTER_IP_FRAGMENT) 8580 fconf |= F_FRAGMENTATION; 
8581 8582 if (mode & T4_FILTER_MPS_HIT_TYPE) 8583 fconf |= F_MPSHITTYPE; 8584 8585 if (mode & T4_FILTER_MAC_IDX) 8586 fconf |= F_MACMATCH; 8587 8588 if (mode & T4_FILTER_ETH_TYPE) 8589 fconf |= F_ETHERTYPE; 8590 8591 if (mode & T4_FILTER_IP_PROTO) 8592 fconf |= F_PROTOCOL; 8593 8594 if (mode & T4_FILTER_IP_TOS) 8595 fconf |= F_TOS; 8596 8597 if (mode & T4_FILTER_VLAN) 8598 fconf |= F_VLAN; 8599 8600 if (mode & T4_FILTER_VNIC) 8601 fconf |= F_VNIC_ID; 8602 8603 if (mode & T4_FILTER_PORT) 8604 fconf |= F_PORT; 8605 8606 if (mode & T4_FILTER_FCoE) 8607 fconf |= F_FCOE; 8608 8609 return (fconf); 8610 } 8611 8612 static uint32_t 8613 mode_to_iconf(uint32_t mode) 8614 { 8615 8616 if (mode & T4_FILTER_IC_VNIC) 8617 return (F_VNIC); 8618 return (0); 8619 } 8620 8621 static int check_fspec_against_fconf_iconf(struct adapter *sc, 8622 struct t4_filter_specification *fs) 8623 { 8624 struct tp_params *tpp = &sc->params.tp; 8625 uint32_t fconf = 0; 8626 8627 if (fs->val.frag || fs->mask.frag) 8628 fconf |= F_FRAGMENTATION; 8629 8630 if (fs->val.matchtype || fs->mask.matchtype) 8631 fconf |= F_MPSHITTYPE; 8632 8633 if (fs->val.macidx || fs->mask.macidx) 8634 fconf |= F_MACMATCH; 8635 8636 if (fs->val.ethtype || fs->mask.ethtype) 8637 fconf |= F_ETHERTYPE; 8638 8639 if (fs->val.proto || fs->mask.proto) 8640 fconf |= F_PROTOCOL; 8641 8642 if (fs->val.tos || fs->mask.tos) 8643 fconf |= F_TOS; 8644 8645 if (fs->val.vlan_vld || fs->mask.vlan_vld) 8646 fconf |= F_VLAN; 8647 8648 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 8649 fconf |= F_VNIC_ID; 8650 if (tpp->ingress_config & F_VNIC) 8651 return (EINVAL); 8652 } 8653 8654 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 8655 fconf |= F_VNIC_ID; 8656 if ((tpp->ingress_config & F_VNIC) == 0) 8657 return (EINVAL); 8658 } 8659 8660 if (fs->val.iport || fs->mask.iport) 8661 fconf |= F_PORT; 8662 8663 if (fs->val.fcoe || fs->mask.fcoe) 8664 fconf |= F_FCOE; 8665 8666 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 8667 return (E2BIG); 8668 8669 return (0); 8670 } 8671 8672 static int 8673 get_filter_mode(struct adapter *sc, uint32_t *mode) 8674 { 8675 struct tp_params *tpp = &sc->params.tp; 8676 8677 /* 8678 * We trust the cached values of the relevant TP registers. This means 8679 * things work reliably only if writes to those registers are always via 8680 * t4_set_filter_mode. 8681 */ 8682 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 8683 8684 return (0); 8685 } 8686 8687 static int 8688 set_filter_mode(struct adapter *sc, uint32_t mode) 8689 { 8690 struct tp_params *tpp = &sc->params.tp; 8691 uint32_t fconf, iconf; 8692 int rc; 8693 8694 iconf = mode_to_iconf(mode); 8695 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 8696 /* 8697 * For now we just complain if A_TP_INGRESS_CONFIG is not 8698 * already set to the correct value for the requested filter 8699 * mode. It's not clear if it's safe to write to this register 8700 * on the fly. (And we trust the cached value of the register). 
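	 * Consequently, a requested mode that needs F_VNIC flipped is
	 * rejected with EBUSY below instead of being applied.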
8701 */ 8702 return (EBUSY); 8703 } 8704 8705 fconf = mode_to_fconf(mode); 8706 8707 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8708 "t4setfm"); 8709 if (rc) 8710 return (rc); 8711 8712 if (sc->tids.ftids_in_use > 0) { 8713 rc = EBUSY; 8714 goto done; 8715 } 8716 8717 #ifdef TCP_OFFLOAD 8718 if (uld_active(sc, ULD_TOM)) { 8719 rc = EBUSY; 8720 goto done; 8721 } 8722 #endif 8723 8724 rc = -t4_set_filter_mode(sc, fconf, true); 8725 done: 8726 end_synchronized_op(sc, LOCK_HELD); 8727 return (rc); 8728 } 8729 8730 static inline uint64_t 8731 get_filter_hits(struct adapter *sc, uint32_t fid) 8732 { 8733 uint32_t tcb_addr; 8734 8735 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 8736 (fid + sc->tids.ftid_base) * TCB_SIZE; 8737 8738 if (is_t4(sc)) { 8739 uint64_t hits; 8740 8741 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 8742 return (be64toh(hits)); 8743 } else { 8744 uint32_t hits; 8745 8746 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 8747 return (be32toh(hits)); 8748 } 8749 } 8750 8751 static int 8752 get_filter(struct adapter *sc, struct t4_filter *t) 8753 { 8754 int i, rc, nfilters = sc->tids.nftids; 8755 struct filter_entry *f; 8756 8757 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8758 "t4getf"); 8759 if (rc) 8760 return (rc); 8761 8762 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 8763 t->idx >= nfilters) { 8764 t->idx = 0xffffffff; 8765 goto done; 8766 } 8767 8768 f = &sc->tids.ftid_tab[t->idx]; 8769 for (i = t->idx; i < nfilters; i++, f++) { 8770 if (f->valid) { 8771 t->idx = i; 8772 t->l2tidx = f->l2t ? f->l2t->idx : 0; 8773 t->smtidx = f->smtidx; 8774 if (f->fs.hitcnts) 8775 t->hits = get_filter_hits(sc, t->idx); 8776 else 8777 t->hits = UINT64_MAX; 8778 t->fs = f->fs; 8779 8780 goto done; 8781 } 8782 } 8783 8784 t->idx = 0xffffffff; 8785 done: 8786 end_synchronized_op(sc, LOCK_HELD); 8787 return (0); 8788 } 8789 8790 static int 8791 set_filter(struct adapter *sc, struct t4_filter *t) 8792 { 8793 unsigned int nfilters, nports; 8794 struct filter_entry *f; 8795 int i, rc; 8796 8797 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 8798 if (rc) 8799 return (rc); 8800 8801 nfilters = sc->tids.nftids; 8802 nports = sc->params.nports; 8803 8804 if (nfilters == 0) { 8805 rc = ENOTSUP; 8806 goto done; 8807 } 8808 8809 if (t->idx >= nfilters) { 8810 rc = EINVAL; 8811 goto done; 8812 } 8813 8814 /* Validate against the global filter mode and ingress config */ 8815 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 8816 if (rc != 0) 8817 goto done; 8818 8819 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 8820 rc = EINVAL; 8821 goto done; 8822 } 8823 8824 if (t->fs.val.iport >= nports) { 8825 rc = EINVAL; 8826 goto done; 8827 } 8828 8829 /* Can't specify an iq if not steering to it */ 8830 if (!t->fs.dirsteer && t->fs.iq) { 8831 rc = EINVAL; 8832 goto done; 8833 } 8834 8835 /* IPv6 filter idx must be 4 aligned */ 8836 if (t->fs.type == 1 && 8837 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 8838 rc = EINVAL; 8839 goto done; 8840 } 8841 8842 if (!(sc->flags & FULL_INIT_DONE) && 8843 ((rc = adapter_full_init(sc)) != 0)) 8844 goto done; 8845 8846 if (sc->tids.ftid_tab == NULL) { 8847 KASSERT(sc->tids.ftids_in_use == 0, 8848 ("%s: no memory allocated but filters_in_use > 0", 8849 __func__)); 8850 8851 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 8852 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 8853 if (sc->tids.ftid_tab == NULL) { 8854 rc = ENOMEM; 8855 goto done; 8856 } 
8857 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 8858 } 8859 8860 for (i = 0; i < 4; i++) { 8861 f = &sc->tids.ftid_tab[t->idx + i]; 8862 8863 if (f->pending || f->valid) { 8864 rc = EBUSY; 8865 goto done; 8866 } 8867 if (f->locked) { 8868 rc = EPERM; 8869 goto done; 8870 } 8871 8872 if (t->fs.type == 0) 8873 break; 8874 } 8875 8876 f = &sc->tids.ftid_tab[t->idx]; 8877 f->fs = t->fs; 8878 8879 rc = set_filter_wr(sc, t->idx); 8880 done: 8881 end_synchronized_op(sc, 0); 8882 8883 if (rc == 0) { 8884 mtx_lock(&sc->tids.ftid_lock); 8885 for (;;) { 8886 if (f->pending == 0) { 8887 rc = f->valid ? 0 : EIO; 8888 break; 8889 } 8890 8891 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8892 PCATCH, "t4setfw", 0)) { 8893 rc = EINPROGRESS; 8894 break; 8895 } 8896 } 8897 mtx_unlock(&sc->tids.ftid_lock); 8898 } 8899 return (rc); 8900 } 8901 8902 static int 8903 del_filter(struct adapter *sc, struct t4_filter *t) 8904 { 8905 unsigned int nfilters; 8906 struct filter_entry *f; 8907 int rc; 8908 8909 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 8910 if (rc) 8911 return (rc); 8912 8913 nfilters = sc->tids.nftids; 8914 8915 if (nfilters == 0) { 8916 rc = ENOTSUP; 8917 goto done; 8918 } 8919 8920 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 8921 t->idx >= nfilters) { 8922 rc = EINVAL; 8923 goto done; 8924 } 8925 8926 if (!(sc->flags & FULL_INIT_DONE)) { 8927 rc = EAGAIN; 8928 goto done; 8929 } 8930 8931 f = &sc->tids.ftid_tab[t->idx]; 8932 8933 if (f->pending) { 8934 rc = EBUSY; 8935 goto done; 8936 } 8937 if (f->locked) { 8938 rc = EPERM; 8939 goto done; 8940 } 8941 8942 if (f->valid) { 8943 t->fs = f->fs; /* extra info for the caller */ 8944 rc = del_filter_wr(sc, t->idx); 8945 } 8946 8947 done: 8948 end_synchronized_op(sc, 0); 8949 8950 if (rc == 0) { 8951 mtx_lock(&sc->tids.ftid_lock); 8952 for (;;) { 8953 if (f->pending == 0) { 8954 rc = f->valid ? EIO : 0; 8955 break; 8956 } 8957 8958 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8959 PCATCH, "t4delfw", 0)) { 8960 rc = EINPROGRESS; 8961 break; 8962 } 8963 } 8964 mtx_unlock(&sc->tids.ftid_lock); 8965 } 8966 8967 return (rc); 8968 } 8969 8970 static void 8971 clear_filter(struct filter_entry *f) 8972 { 8973 if (f->l2t) 8974 t4_l2t_release(f->l2t); 8975 8976 bzero(f, sizeof (*f)); 8977 } 8978 8979 static int 8980 set_filter_wr(struct adapter *sc, int fidx) 8981 { 8982 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8983 struct fw_filter_wr *fwr; 8984 unsigned int ftid, vnic_vld, vnic_vld_mask; 8985 struct wrq_cookie cookie; 8986 8987 ASSERT_SYNCHRONIZED_OP(sc); 8988 8989 if (f->fs.newdmac || f->fs.newvlan) { 8990 /* This filter needs an L2T entry; allocate one. 
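		 * A switching entry carries the new DMAC/VLAN that the
		 * hardware applies to packets this filter switches back out.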
*/ 8991 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8992 if (f->l2t == NULL) 8993 return (EAGAIN); 8994 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8995 f->fs.dmac)) { 8996 t4_l2t_release(f->l2t); 8997 f->l2t = NULL; 8998 return (ENOMEM); 8999 } 9000 } 9001 9002 /* Already validated against fconf, iconf */ 9003 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 9004 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 9005 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 9006 vnic_vld = 1; 9007 else 9008 vnic_vld = 0; 9009 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 9010 vnic_vld_mask = 1; 9011 else 9012 vnic_vld_mask = 0; 9013 9014 ftid = sc->tids.ftid_base + fidx; 9015 9016 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 9017 if (fwr == NULL) 9018 return (ENOMEM); 9019 bzero(fwr, sizeof(*fwr)); 9020 9021 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 9022 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 9023 fwr->tid_to_iq = 9024 htobe32(V_FW_FILTER_WR_TID(ftid) | 9025 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 9026 V_FW_FILTER_WR_NOREPLY(0) | 9027 V_FW_FILTER_WR_IQ(f->fs.iq)); 9028 fwr->del_filter_to_l2tix = 9029 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 9030 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 9031 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 9032 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 9033 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 9034 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 9035 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 9036 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 9037 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 9038 f->fs.newvlan == VLAN_REWRITE) | 9039 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 9040 f->fs.newvlan == VLAN_REWRITE) | 9041 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 9042 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 9043 V_FW_FILTER_WR_PRIO(f->fs.prio) | 9044 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 9045 fwr->ethtype = htobe16(f->fs.val.ethtype); 9046 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 9047 fwr->frag_to_ovlan_vldm = 9048 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 9049 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 9050 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 9051 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 9052 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 9053 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 9054 fwr->smac_sel = 0; 9055 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 9056 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 9057 fwr->maci_to_matchtypem = 9058 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 9059 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 9060 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 9061 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 9062 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 9063 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 9064 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 9065 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 9066 fwr->ptcl = f->fs.val.proto; 9067 fwr->ptclm = f->fs.mask.proto; 9068 fwr->ttyp = f->fs.val.tos; 9069 fwr->ttypm = f->fs.mask.tos; 9070 fwr->ivlan = htobe16(f->fs.val.vlan); 9071 fwr->ivlanm = htobe16(f->fs.mask.vlan); 9072 fwr->ovlan = htobe16(f->fs.val.vnic); 9073 fwr->ovlanm = htobe16(f->fs.mask.vnic); 9074 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 9075 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 9076 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 9077 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 9078 fwr->lp = htobe16(f->fs.val.dport); 9079 fwr->lpm = htobe16(f->fs.mask.dport); 9080 fwr->fp = htobe16(f->fs.val.sport); 9081 fwr->fpm = htobe16(f->fs.mask.sport); 9082 if (f->fs.newsmac) 9083 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 9084 9085 f->pending = 1; 9086 sc->tids.ftids_in_use++; 9087 9088 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 9089 return (0); 9090 } 9091 9092 static int 9093 del_filter_wr(struct adapter *sc, int fidx) 9094 { 9095 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 9096 struct fw_filter_wr *fwr; 9097 unsigned int ftid; 9098 struct wrq_cookie cookie; 9099 9100 ftid = sc->tids.ftid_base + fidx; 9101 9102 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 9103 if (fwr == NULL) 9104 return (ENOMEM); 9105 bzero(fwr, sizeof (*fwr)); 9106 9107 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 9108 9109 f->pending = 1; 9110 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 9111 return (0); 9112 } 9113 9114 int 9115 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 9116 { 9117 struct adapter *sc = iq->adapter; 9118 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 9119 unsigned int idx = GET_TID(rpl); 9120 unsigned int rc; 9121 struct filter_entry *f; 9122 9123 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 9124 rss->opcode)); 9125 MPASS(iq == &sc->sge.fwq); 9126 MPASS(is_ftid(sc, idx)); 9127 9128 idx -= sc->tids.ftid_base; 9129 f = &sc->tids.ftid_tab[idx]; 9130 rc = G_COOKIE(rpl->cookie); 9131 9132 mtx_lock(&sc->tids.ftid_lock); 9133 if (rc == FW_FILTER_WR_FLT_ADDED) { 9134 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 9135 __func__, idx)); 9136 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 9137 f->pending = 0; /* asynchronous setup completed */ 9138 f->valid = 1; 9139 } else { 9140 if (rc != FW_FILTER_WR_FLT_DELETED) { 9141 /* Add or delete failed, display an error */ 9142 log(LOG_ERR, 9143 "filter %u setup failed with error %u\n", 9144 idx, rc); 9145 } 9146 9147 
clear_filter(f); 9148 sc->tids.ftids_in_use--; 9149 } 9150 wakeup(&sc->tids.ftid_tab); 9151 mtx_unlock(&sc->tids.ftid_lock); 9152 9153 return (0); 9154 } 9155 9156 static int 9157 set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 9158 { 9159 9160 MPASS(iq->set_tcb_rpl != NULL); 9161 return (iq->set_tcb_rpl(iq, rss, m)); 9162 } 9163 9164 static int 9165 l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 9166 { 9167 9168 MPASS(iq->l2t_write_rpl != NULL); 9169 return (iq->l2t_write_rpl(iq, rss, m)); 9170 } 9171 9172 static int 9173 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 9174 { 9175 int rc; 9176 9177 if (cntxt->cid > M_CTXTQID) 9178 return (EINVAL); 9179 9180 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 9181 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 9182 return (EINVAL); 9183 9184 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 9185 if (rc) 9186 return (rc); 9187 9188 if (sc->flags & FW_OK) { 9189 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 9190 &cntxt->data[0]); 9191 if (rc == 0) 9192 goto done; 9193 } 9194 9195 /* 9196 * Read via firmware failed or wasn't even attempted. Read directly via 9197 * the backdoor. 9198 */ 9199 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 9200 done: 9201 end_synchronized_op(sc, 0); 9202 return (rc); 9203 } 9204 9205 static int 9206 load_fw(struct adapter *sc, struct t4_data *fw) 9207 { 9208 int rc; 9209 uint8_t *fw_data; 9210 9211 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 9212 if (rc) 9213 return (rc); 9214 9215 /* 9216 * The firmware, with the sole exception of the memory parity error 9217 * handler, runs from memory and not flash. It is almost always safe to 9218 * install a new firmware on a running system. Just set bit 1 in 9219 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first. 
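	 * For example (assuming no other debug flags are set):
	 *	sysctl dev.t5nex.0.dflags=2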
9220 */ 9221 if (sc->flags & FULL_INIT_DONE && 9222 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) { 9223 rc = EBUSY; 9224 goto done; 9225 } 9226 9227 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 9228 if (fw_data == NULL) { 9229 rc = ENOMEM; 9230 goto done; 9231 } 9232 9233 rc = copyin(fw->data, fw_data, fw->len); 9234 if (rc == 0) 9235 rc = -t4_load_fw(sc, fw_data, fw->len); 9236 9237 free(fw_data, M_CXGBE); 9238 done: 9239 end_synchronized_op(sc, 0); 9240 return (rc); 9241 } 9242 9243 static int 9244 load_cfg(struct adapter *sc, struct t4_data *cfg) 9245 { 9246 int rc; 9247 uint8_t *cfg_data = NULL; 9248 9249 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 9250 if (rc) 9251 return (rc); 9252 9253 if (cfg->len == 0) { 9254 /* clear */ 9255 rc = -t4_load_cfg(sc, NULL, 0); 9256 goto done; 9257 } 9258 9259 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); 9260 if (cfg_data == NULL) { 9261 rc = ENOMEM; 9262 goto done; 9263 } 9264 9265 rc = copyin(cfg->data, cfg_data, cfg->len); 9266 if (rc == 0) 9267 rc = -t4_load_cfg(sc, cfg_data, cfg->len); 9268 9269 free(cfg_data, M_CXGBE); 9270 done: 9271 end_synchronized_op(sc, 0); 9272 return (rc); 9273 } 9274 9275 static int 9276 load_boot(struct adapter *sc, struct t4_bootrom *br) 9277 { 9278 int rc; 9279 uint8_t *br_data = NULL; 9280 u_int offset; 9281 9282 if (br->len > 1024 * 1024) 9283 return (EFBIG); 9284 9285 if (br->pf_offset == 0) { 9286 /* pfidx */ 9287 if (br->pfidx_addr > 7) 9288 return (EINVAL); 9289 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, 9290 A_PCIE_PF_EXPROM_OFST))); 9291 } else if (br->pf_offset == 1) { 9292 /* offset */ 9293 offset = G_OFFSET(br->pfidx_addr); 9294 } else { 9295 return (EINVAL); 9296 } 9297 9298 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); 9299 if (rc) 9300 return (rc); 9301 9302 if (br->len == 0) { 9303 /* clear */ 9304 rc = -t4_load_boot(sc, NULL, offset, 0); 9305 goto done; 9306 } 9307 9308 br_data = malloc(br->len, M_CXGBE, M_WAITOK); 9309 if (br_data == NULL) { 9310 rc = ENOMEM; 9311 goto done; 9312 } 9313 9314 rc = copyin(br->data, br_data, br->len); 9315 if (rc == 0) 9316 rc = -t4_load_boot(sc, br_data, offset, br->len); 9317 9318 free(br_data, M_CXGBE); 9319 done: 9320 end_synchronized_op(sc, 0); 9321 return (rc); 9322 } 9323 9324 static int 9325 load_bootcfg(struct adapter *sc, struct t4_data *bc) 9326 { 9327 int rc; 9328 uint8_t *bc_data = NULL; 9329 9330 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 9331 if (rc) 9332 return (rc); 9333 9334 if (bc->len == 0) { 9335 /* clear */ 9336 rc = -t4_load_bootcfg(sc, NULL, 0); 9337 goto done; 9338 } 9339 9340 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); 9341 if (bc_data == NULL) { 9342 rc = ENOMEM; 9343 goto done; 9344 } 9345 9346 rc = copyin(bc->data, bc_data, bc->len); 9347 if (rc == 0) 9348 rc = -t4_load_bootcfg(sc, bc_data, bc->len); 9349 9350 free(bc_data, M_CXGBE); 9351 done: 9352 end_synchronized_op(sc, 0); 9353 return (rc); 9354 } 9355 9356 static int 9357 cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump) 9358 { 9359 int rc; 9360 struct cudbg_init *cudbg; 9361 void *handle, *buf; 9362 9363 /* buf is large, don't block if no memory is available */ 9364 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); 9365 if (buf == NULL) 9366 return (ENOMEM); 9367 9368 handle = cudbg_alloc_handle(); 9369 if (handle == NULL) { 9370 rc = ENOMEM; 9371 goto done; 9372 } 9373 9374 cudbg = cudbg_get_init(handle); 9375 cudbg->adap = sc; 9376 cudbg->print = (cudbg_print_cb)printf; 9377 9378 
#ifndef notyet 9379 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", 9380 __func__, dump->wr_flash, dump->len, dump->data); 9381 #endif 9382 9383 if (dump->wr_flash) 9384 cudbg->use_flash = 1; 9385 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); 9386 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); 9387 9388 rc = cudbg_collect(handle, buf, &dump->len); 9389 if (rc != 0) 9390 goto done; 9391 9392 rc = copyout(buf, dump->data, dump->len); 9393 done: 9394 cudbg_free_handle(handle); 9395 free(buf, M_CXGBE); 9396 return (rc); 9397 } 9398 9399 #define MAX_READ_BUF_SIZE (128 * 1024) 9400 static int 9401 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 9402 { 9403 uint32_t addr, remaining, n; 9404 uint32_t *buf; 9405 int rc; 9406 uint8_t *dst; 9407 9408 rc = validate_mem_range(sc, mr->addr, mr->len); 9409 if (rc != 0) 9410 return (rc); 9411 9412 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 9413 addr = mr->addr; 9414 remaining = mr->len; 9415 dst = (void *)mr->data; 9416 9417 while (remaining) { 9418 n = min(remaining, MAX_READ_BUF_SIZE); 9419 read_via_memwin(sc, 2, addr, buf, n); 9420 9421 rc = copyout(buf, dst, n); 9422 if (rc != 0) 9423 break; 9424 9425 dst += n; 9426 remaining -= n; 9427 addr += n; 9428 } 9429 9430 free(buf, M_CXGBE); 9431 return (rc); 9432 } 9433 #undef MAX_READ_BUF_SIZE 9434 9435 static int 9436 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 9437 { 9438 int rc; 9439 9440 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 9441 return (EINVAL); 9442 9443 if (i2cd->len > sizeof(i2cd->data)) 9444 return (EFBIG); 9445 9446 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 9447 if (rc) 9448 return (rc); 9449 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 9450 i2cd->offset, i2cd->len, &i2cd->data[0]); 9451 end_synchronized_op(sc, 0); 9452 9453 return (rc); 9454 } 9455 9456 int 9457 t4_os_find_pci_capability(struct adapter *sc, int cap) 9458 { 9459 int i; 9460 9461 return (pci_find_cap(sc->dev, cap, &i) == 0 ? 
i : 0); 9462 } 9463 9464 int 9465 t4_os_pci_save_state(struct adapter *sc) 9466 { 9467 device_t dev; 9468 struct pci_devinfo *dinfo; 9469 9470 dev = sc->dev; 9471 dinfo = device_get_ivars(dev); 9472 9473 pci_cfg_save(dev, dinfo, 0); 9474 return (0); 9475 } 9476 9477 int 9478 t4_os_pci_restore_state(struct adapter *sc) 9479 { 9480 device_t dev; 9481 struct pci_devinfo *dinfo; 9482 9483 dev = sc->dev; 9484 dinfo = device_get_ivars(dev); 9485 9486 pci_cfg_restore(dev, dinfo); 9487 return (0); 9488 } 9489 9490 void 9491 t4_os_portmod_changed(struct port_info *pi) 9492 { 9493 struct adapter *sc = pi->adapter; 9494 struct vi_info *vi; 9495 struct ifnet *ifp; 9496 static const char *mod_str[] = { 9497 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 9498 }; 9499 9500 PORT_LOCK(pi); 9501 build_medialist(pi, &pi->media); 9502 PORT_UNLOCK(pi); 9503 vi = &pi->vi[0]; 9504 if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) { 9505 init_l1cfg(pi); 9506 end_synchronized_op(sc, LOCK_HELD); 9507 } 9508 9509 ifp = vi->ifp; 9510 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 9511 if_printf(ifp, "transceiver unplugged.\n"); 9512 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 9513 if_printf(ifp, "unknown transceiver inserted.\n"); 9514 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 9515 if_printf(ifp, "unsupported transceiver inserted.\n"); 9516 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 9517 if_printf(ifp, "%dGbps %s transceiver inserted.\n", 9518 port_top_speed(pi), mod_str[pi->mod_type]); 9519 } else { 9520 if_printf(ifp, "transceiver (type %d) inserted.\n", 9521 pi->mod_type); 9522 } 9523 } 9524 9525 void 9526 t4_os_link_changed(struct port_info *pi) 9527 { 9528 struct vi_info *vi; 9529 struct ifnet *ifp; 9530 struct link_config *lc; 9531 int v; 9532 9533 for_each_vi(pi, v, vi) { 9534 ifp = vi->ifp; 9535 if (ifp == NULL) 9536 continue; 9537 9538 lc = &pi->link_cfg; 9539 if (lc->link_ok) { 9540 ifp->if_baudrate = IF_Mbps(lc->speed); 9541 if_link_state_change(ifp, LINK_STATE_UP); 9542 } else { 9543 if_link_state_change(ifp, LINK_STATE_DOWN); 9544 } 9545 } 9546 } 9547 9548 void 9549 t4_iterate(void (*func)(struct adapter *, void *), void *arg) 9550 { 9551 struct adapter *sc; 9552 9553 sx_slock(&t4_list_lock); 9554 SLIST_FOREACH(sc, &t4_list, link) { 9555 /* 9556 * func should not make any assumptions about what state sc is 9557 * in - the only guarantee is that sc->sc_lock is a valid lock. 
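		 * An illustrative callback (count_adapters is hypothetical):
		 *	static void count_adapters(struct adapter *sc, void *a)
		 *	{ (*(int *)a)++; }
		 * used as: int n = 0; t4_iterate(count_adapters, &n);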
9558 */ 9559 func(sc, arg); 9560 } 9561 sx_sunlock(&t4_list_lock); 9562 } 9563 9564 static int 9565 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 9566 struct thread *td) 9567 { 9568 int rc; 9569 struct adapter *sc = dev->si_drv1; 9570 9571 rc = priv_check(td, PRIV_DRIVER); 9572 if (rc != 0) 9573 return (rc); 9574 9575 switch (cmd) { 9576 case CHELSIO_T4_GETREG: { 9577 struct t4_reg *edata = (struct t4_reg *)data; 9578 9579 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9580 return (EFAULT); 9581 9582 if (edata->size == 4) 9583 edata->val = t4_read_reg(sc, edata->addr); 9584 else if (edata->size == 8) 9585 edata->val = t4_read_reg64(sc, edata->addr); 9586 else 9587 return (EINVAL); 9588 9589 break; 9590 } 9591 case CHELSIO_T4_SETREG: { 9592 struct t4_reg *edata = (struct t4_reg *)data; 9593 9594 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9595 return (EFAULT); 9596 9597 if (edata->size == 4) { 9598 if (edata->val & 0xffffffff00000000) 9599 return (EINVAL); 9600 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 9601 } else if (edata->size == 8) 9602 t4_write_reg64(sc, edata->addr, edata->val); 9603 else 9604 return (EINVAL); 9605 break; 9606 } 9607 case CHELSIO_T4_REGDUMP: { 9608 struct t4_regdump *regs = (struct t4_regdump *)data; 9609 int reglen = t4_get_regs_len(sc); 9610 uint8_t *buf; 9611 9612 if (regs->len < reglen) { 9613 regs->len = reglen; /* hint to the caller */ 9614 return (ENOBUFS); 9615 } 9616 9617 regs->len = reglen; 9618 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 9619 get_regs(sc, regs, buf); 9620 rc = copyout(buf, regs->data, reglen); 9621 free(buf, M_CXGBE); 9622 break; 9623 } 9624 case CHELSIO_T4_GET_FILTER_MODE: 9625 rc = get_filter_mode(sc, (uint32_t *)data); 9626 break; 9627 case CHELSIO_T4_SET_FILTER_MODE: 9628 rc = set_filter_mode(sc, *(uint32_t *)data); 9629 break; 9630 case CHELSIO_T4_GET_FILTER: 9631 rc = get_filter(sc, (struct t4_filter *)data); 9632 break; 9633 case CHELSIO_T4_SET_FILTER: 9634 rc = set_filter(sc, (struct t4_filter *)data); 9635 break; 9636 case CHELSIO_T4_DEL_FILTER: 9637 rc = del_filter(sc, (struct t4_filter *)data); 9638 break; 9639 case CHELSIO_T4_GET_SGE_CONTEXT: 9640 rc = get_sge_context(sc, (struct t4_sge_context *)data); 9641 break; 9642 case CHELSIO_T4_LOAD_FW: 9643 rc = load_fw(sc, (struct t4_data *)data); 9644 break; 9645 case CHELSIO_T4_GET_MEM: 9646 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 9647 break; 9648 case CHELSIO_T4_GET_I2C: 9649 rc = read_i2c(sc, (struct t4_i2c_data *)data); 9650 break; 9651 case CHELSIO_T4_CLEAR_STATS: { 9652 int i, v, bg_map; 9653 u_int port_id = *(uint32_t *)data; 9654 struct port_info *pi; 9655 struct vi_info *vi; 9656 9657 if (port_id >= sc->params.nports) 9658 return (EINVAL); 9659 pi = sc->port[port_id]; 9660 if (pi == NULL) 9661 return (EIO); 9662 9663 /* MAC stats */ 9664 t4_clr_port_stats(sc, pi->tx_chan); 9665 pi->tx_parse_error = 0; 9666 pi->tnl_cong_drops = 0; 9667 mtx_lock(&sc->reg_lock); 9668 for_each_vi(pi, v, vi) { 9669 if (vi->flags & VI_INIT_DONE) 9670 t4_clr_vi_stats(sc, vi->viid); 9671 } 9672 bg_map = pi->mps_bg_map; 9673 v = 0; /* reuse */ 9674 while (bg_map) { 9675 i = ffs(bg_map) - 1; 9676 t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 9677 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 9678 bg_map &= ~(1 << i); 9679 } 9680 mtx_unlock(&sc->reg_lock); 9681 9682 /* 9683 * Since this command accepts a port, clear stats for 9684 * all VIs on this port. 
9685 */ 9686 for_each_vi(pi, v, vi) { 9687 if (vi->flags & VI_INIT_DONE) { 9688 struct sge_rxq *rxq; 9689 struct sge_txq *txq; 9690 struct sge_wrq *wrq; 9691 9692 for_each_rxq(vi, i, rxq) { 9693 #if defined(INET) || defined(INET6) 9694 rxq->lro.lro_queued = 0; 9695 rxq->lro.lro_flushed = 0; 9696 #endif 9697 rxq->rxcsum = 0; 9698 rxq->vlan_extraction = 0; 9699 } 9700 9701 for_each_txq(vi, i, txq) { 9702 txq->txcsum = 0; 9703 txq->tso_wrs = 0; 9704 txq->vlan_insertion = 0; 9705 txq->imm_wrs = 0; 9706 txq->sgl_wrs = 0; 9707 txq->txpkt_wrs = 0; 9708 txq->txpkts0_wrs = 0; 9709 txq->txpkts1_wrs = 0; 9710 txq->txpkts0_pkts = 0; 9711 txq->txpkts1_pkts = 0; 9712 mp_ring_reset_stats(txq->r); 9713 } 9714 9715 #ifdef TCP_OFFLOAD 9716 /* nothing to clear for each ofld_rxq */ 9717 9718 for_each_ofld_txq(vi, i, wrq) { 9719 wrq->tx_wrs_direct = 0; 9720 wrq->tx_wrs_copied = 0; 9721 } 9722 #endif 9723 9724 if (IS_MAIN_VI(vi)) { 9725 wrq = &sc->sge.ctrlq[pi->port_id]; 9726 wrq->tx_wrs_direct = 0; 9727 wrq->tx_wrs_copied = 0; 9728 } 9729 } 9730 } 9731 break; 9732 } 9733 case CHELSIO_T4_SCHED_CLASS: 9734 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 9735 break; 9736 case CHELSIO_T4_SCHED_QUEUE: 9737 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 9738 break; 9739 case CHELSIO_T4_GET_TRACER: 9740 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 9741 break; 9742 case CHELSIO_T4_SET_TRACER: 9743 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 9744 break; 9745 case CHELSIO_T4_LOAD_CFG: 9746 rc = load_cfg(sc, (struct t4_data *)data); 9747 break; 9748 case CHELSIO_T4_LOAD_BOOT: 9749 rc = load_boot(sc, (struct t4_bootrom *)data); 9750 break; 9751 case CHELSIO_T4_LOAD_BOOTCFG: 9752 rc = load_bootcfg(sc, (struct t4_data *)data); 9753 break; 9754 case CHELSIO_T4_CUDBG_DUMP: 9755 rc = cudbg_dump(sc, (struct t4_cudbg_dump *)data); 9756 break; 9757 default: 9758 rc = ENOTTY; 9759 } 9760 9761 return (rc); 9762 } 9763 9764 void 9765 t4_db_full(struct adapter *sc) 9766 { 9767 9768 CXGBE_UNIMPLEMENTED(__func__); 9769 } 9770 9771 void 9772 t4_db_dropped(struct adapter *sc) 9773 { 9774 9775 CXGBE_UNIMPLEMENTED(__func__); 9776 } 9777 9778 #ifdef TCP_OFFLOAD 9779 static int 9780 toe_capability(struct vi_info *vi, int enable) 9781 { 9782 int rc; 9783 struct port_info *pi = vi->pi; 9784 struct adapter *sc = pi->adapter; 9785 9786 ASSERT_SYNCHRONIZED_OP(sc); 9787 9788 if (!is_offload(sc)) 9789 return (ENODEV); 9790 9791 if (enable) { 9792 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 9793 /* TOE is already enabled. */ 9794 return (0); 9795 } 9796 9797 /* 9798 * We need the port's queues around so that we're able to send 9799 * and receive CPLs to/from the TOE even if the ifnet for this 9800 * port has never been UP'd administratively. 9801 */ 9802 if (!(vi->flags & VI_INIT_DONE)) { 9803 rc = vi_full_init(vi); 9804 if (rc) 9805 return (rc); 9806 } 9807 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 9808 rc = vi_full_init(&pi->vi[0]); 9809 if (rc) 9810 return (rc); 9811 } 9812 9813 if (isset(&sc->offload_map, pi->port_id)) { 9814 /* TOE is enabled on another VI of this port. 
int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}
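/*
 * Return values of t4_activate_uld, as implemented above: EINVAL for an
 * out-of-range id, EAGAIN when no matching ULD is registered (i.e. the
 * module providing it has not been kldload'ed), otherwise whatever
 * adapter_full_init() or the ULD's activate callback returned (0 on
 * success).
 */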
int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
#endif

/*
 * t = ptr to tunable.
 * nc = number of CPUs.
 * c = compiled in default for that tunable.
 */
static void
calculate_nqueues(int *t, int nc, const int c)
{
	int nq;

	if (*t > 0)
		return;
	nq = *t < 0 ? -*t : c;
	*t = min(nc, nq);
}
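/*
 * Worked example of calculate_nqueues: with nc = 8 CPUs and a compiled-in
 * default of c = 16, a tunable left at 0 becomes min(8, 16) = 8; a tunable
 * set to -4 becomes min(8, 4) = 4; and any positive value set by the user
 * is kept as-is.
 */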
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq < 1) {
#ifdef RSS
		t4_ntxq = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_ntxq, nc, NTXQ);
#endif
	}

	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);

	if (t4_nrxq < 1) {
#ifdef RSS
		t4_nrxq = rss_getnumbuckets();
#else
		calculate_nqueues(&t4_nrxq, nc, NRXQ);
#endif
	}

	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);

#ifdef TCP_OFFLOAD
	calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ);
	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
	calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ);
	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;

	if (t4_rdmacaps_allowed == -1) {
		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
		    FW_CAPS_CONFIG_RDMA_RDMAC;
	}

	if (t4_iscsicaps_allowed == -1) {
		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
		    FW_CAPS_CONFIG_ISCSI_T10DIF;
	}

	if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS)
		t4_tmr_idx_ofld = TMR_IDX_OFLD;

	if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS)
		t4_pktc_idx_ofld = PKTC_IDX_OFLD;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;

	if (t4_rdmacaps_allowed == -1)
		t4_rdmacaps_allowed = 0;

	if (t4_iscsicaps_allowed == -1)
		t4_iscsicaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
#endif

	if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS)
		t4_tmr_idx = TMR_IDX;

	if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS)
		t4_pktc_idx = PKTC_IDX;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;

	/*
	 * Number of VIs to create per-port.  The first VI is the "main"
	 * regular VI for the port.  The rest are additional virtual
	 * interfaces on the same physical port.  Note that the main VI does
	 * not have native netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis < 1)
		t4_num_vis = 1;
	if (t4_num_vis > nitems(vi_mac_funcs)) {
		t4_num_vis = nitems(vi_mac_funcs);
		printf("cxgbe: number of VIs limited to %d\n", t4_num_vis);
	}

	if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) {
		pcie_relaxed_ordering = 1;
#if defined(__i386__) || defined(__amd64__)
		if (cpu_vendor_id == CPU_VENDOR_INTEL)
			pcie_relaxed_ordering = 0;
#endif
	}
}

#ifdef DDB
static void
t4_dump_tcb(struct adapter *sc, int tid)
{
	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;

	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
	save = t4_read_reg(sc, reg);
	base = sc->memwin[2].mw_base;

	/* Dump TCB for the tid */
	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	tcb_addr += tid * TCB_SIZE;

	if (is_t4(sc)) {
		pf = 0;
		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
	}
	t4_write_reg(sc, reg, win_pos | pf);
	t4_read_reg(sc, reg);

	off = tcb_addr - win_pos;
	for (i = 0; i < 4; i++) {
		uint32_t buf[8];
		for (j = 0; j < 8; j++, off += 4)
			buf[j] = htonl(t4_read_reg(sc, base + off));

		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
		    buf[7]);
	}

	t4_write_reg(sc, reg, save);
	t4_read_reg(sc, reg);
}
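/*
 * Note on t4_dump_tcb: the loop above reads 4 rows of 8 32-bit words, i.e.
 * 128 bytes per connection, which is assumed here to match the TCB_SIZE
 * stride used to locate the tid's TCB inside memory window 2.
 */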
static void
t4_dump_devlog(struct adapter *sc)
{
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e e;
	int i, first, j, m, nentries, rc;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		db_printf("devlog params not valid\n");
		return;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);
	m = fwmtype_to_hwmtype(dparams->memtype);

	/* Find the first entry. */
	first = -1;
	for (i = 0; i < nentries && !db_pager_quit; i++) {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			break;

		if (e.timestamp == 0)
			break;

		e.timestamp = be64toh(e.timestamp);
		if (e.timestamp < ftstamp) {
			ftstamp = e.timestamp;
			first = i;
		}
	}

	if (first == -1)
		return;

	i = first;
	do {
		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
		    sizeof(e), (void *)&e);
		if (rc != 0)
			return;

		if (e.timestamp == 0)
			return;

		e.timestamp = be64toh(e.timestamp);
		e.seqno = be32toh(e.seqno);
		for (j = 0; j < 8; j++)
			e.params[j] = be32toh(e.params[j]);

		db_printf("%10d %15ju %8s %8s ",
		    e.seqno, e.timestamp,
		    (e.level < nitems(devlog_level_strings) ?
			devlog_level_strings[e.level] : "UNKNOWN"),
		    (e.facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e.facility] : "UNKNOWN"));
		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
		    e.params[3], e.params[4], e.params[5], e.params[6],
		    e.params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first && !db_pager_quit);
}

static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);

DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int t;
	bool valid;

	valid = false;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		valid = true;
	}
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 devlog <nexus>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}

	t4_dump_devlog(device_get_softc(dev));
}

DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
{
	device_t dev;
	int radix, tid, t;
	bool valid;

	valid = false;
	radix = db_radix;
	db_radix = 10;
	t = db_read_token();
	if (t == tIDENT) {
		dev = device_lookup_by_name(db_tok_string);
		t = db_read_token();
		if (t == tNUMBER) {
			tid = db_tok_number;
			valid = true;
		}
	}
	db_radix = radix;
	db_skip_to_eol();
	if (!valid) {
		db_printf("usage: show t4 tcb <nexus> <tid>\n");
		return;
	}

	if (dev == NULL) {
		db_printf("device not found\n");
		return;
	}
	if (tid < 0) {
		db_printf("invalid tid\n");
		return;
	}

	t4_dump_tcb(device_get_softc(dev), tid);
}
#endif
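/*
 * Example DDB session using the commands defined above (the nexus name and
 * tid are illustrative):
 *
 *	db> show t4 devlog t4nex0
 *	db> show t4 tcb t4nex0 17
 */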
/*
 * Borrowed from cesa_prep_aes_key().
 *
 * NB: The crypto engine wants the words in the decryption key in reverse
 * order.
 */
void
t4_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits)
{
	uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
	uint32_t *dkey;
	int i;

	rijndaelKeySetupEnc(ek, enc_key, kbits);
	dkey = dec_key;
	dkey += (kbits / 8) / 4;

	switch (kbits) {
	case 128:
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 10 + i]);
		break;
	case 192:
		for (i = 0; i < 2; i++)
			*--dkey = htobe32(ek[4 * 11 + 2 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 12 + i]);
		break;
	case 256:
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 13 + i]);
		for (i = 0; i < 4; i++)
			*--dkey = htobe32(ek[4 * 14 + i]);
		break;
	}
	MPASS(dkey == dec_key);
}
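/*
 * Usage sketch (not compiled): deriving the reversed decryption key
 * schedule for a hypothetical 128-bit AES key.  The output buffer must be
 * kbits/8 bytes, i.e. the same size as the encryption key itself.
 */
#if 0
	uint8_t enc_key[16];	/* the AES-128 cipher key */
	uint8_t dec_key[16];	/* receives the key words in reverse order */

	arc4rand(enc_key, sizeof(enc_key), 0);
	t4_aes_getdeckey(dec_key, enc_key, 128);
#endif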
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);
			t4_register_cpl_handler(CPL_L2T_WRITE_RPL,
			    l2t_write_rpl);
			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass, t5_devclass, t6_devclass;
static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
MODULE_VERSION(t6nex, 1);
MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
MODULE_VERSION(cc, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);

DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
MODULE_VERSION(vcc, 1);
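/*
 * Driver attachment hierarchy implied by the DRIVER_MODULE declarations
 * above:
 *
 *	pci -> t4nex -> cxgbe -> vcxgbe		(T4)
 *	pci -> t5nex -> cxl   -> vcxl		(T5)
 *	pci -> t6nex -> cc    -> vcc		(T6)
 *
 * All three nexus drivers share mod_event for module-scope setup and
 * teardown.
 */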