/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe,		vcxgbe_probe),
	DEVMETHOD(device_attach,	vcxgbe_attach),
	DEVMETHOD(device_detach,	vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe,		t6_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};


/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it
 * should provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
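/*
 * For illustration, a few of these tunables as they might be set from
 * /boot/loader.conf (the values here are a hypothetical sketch, not
 * recommendations):
 *
 *	hw.cxgbe.ntxq10g="8"		# up to 8 NIC txq's per 10G port
 *	hw.cxgbe.nrxq10g="4"		# up to 4 NIC rxq's per 10G port
 *	hw.cxgbe.config_file="uwire"	# select the uwire config profile
 */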
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
int t4_ntxq10g = -NTXQ_10G;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
int t4_nrxq10g = -NRXQ_10G;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
int t4_ntxq1g = -NTXQ_1G;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
int t4_nrxq1g = -NRXQ_1G;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -NOFLDTXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -NOFLDRXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -NOFLDTXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -NOFLDRXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
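/*
 * A sketch of how the bitmask above decodes, going by the bit assignments in
 * the comment (bit 0 = INTx, bit 1 = MSI, bit 2 = MSI-X):
 *
 *	hw.cxgbe.interrupt_types="4"	# allow MSI-X only
 *	hw.cxgbe.interrupt_types="6"	# allow MSI-X or MSI, never INTx
 *	hw.cxgbe.interrupt_types="7"	# allow any type (the default)
 */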
/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
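/*
 * By the bit layout above, the plausible values are (an illustrative sketch,
 * not an exhaustive list):
 *
 *	hw.cxgbe.pause_settings="0"	# ignore and never emit PAUSE
 *	hw.cxgbe.pause_settings="1"	# rx_pause only
 *	hw.cxgbe.pause_settings="3"	# rx_pause and tx_pause (the default)
 */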
/*
 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
 * FEC_RESERVED respectively).
 * -1 to run with the firmware default.
 *  0 to disable FEC.
 */
static int t4_fec = -1;
TUNABLE_INT("hw.cxgbe.fec", &t4_fec);

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 *  0 to disable.
 *  1 to enable.
 */
static int t4_autoneg = -1;
TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
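/*
 * The *caps_allowed tunables above share the convention used throughout this
 * file: -1 defers to the driver/firmware default (see tweak_tunables()),
 * 0 disables the capability outright, and a positive value is a mask of
 * FW_CAPS_CONFIG_* bits.  As a hypothetical example, setting
 * hw.cxgbe.toecaps_allowed="0" keeps TOE disabled even on offload-capable
 * hardware.
 */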
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static void init_l1cfg(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
"Chelsio T6225 80"}, 649 {0x6481, "Chelsio T62100 81"}, 650 }; 651 652 #ifdef TCP_OFFLOAD 653 /* 654 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be 655 * exactly the same for both rxq and ofld_rxq. 656 */ 657 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq)); 658 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl)); 659 #endif 660 CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE); 661 662 static int 663 t4_probe(device_t dev) 664 { 665 int i; 666 uint16_t v = pci_get_vendor(dev); 667 uint16_t d = pci_get_device(dev); 668 uint8_t f = pci_get_function(dev); 669 670 if (v != PCI_VENDOR_ID_CHELSIO) 671 return (ENXIO); 672 673 /* Attach only to PF0 of the FPGA */ 674 if (d == 0xa000 && f != 0) 675 return (ENXIO); 676 677 for (i = 0; i < nitems(t4_pciids); i++) { 678 if (d == t4_pciids[i].device) { 679 device_set_desc(dev, t4_pciids[i].desc); 680 return (BUS_PROBE_DEFAULT); 681 } 682 } 683 684 return (ENXIO); 685 } 686 687 static int 688 t5_probe(device_t dev) 689 { 690 int i; 691 uint16_t v = pci_get_vendor(dev); 692 uint16_t d = pci_get_device(dev); 693 uint8_t f = pci_get_function(dev); 694 695 if (v != PCI_VENDOR_ID_CHELSIO) 696 return (ENXIO); 697 698 /* Attach only to PF0 of the FPGA */ 699 if (d == 0xb000 && f != 0) 700 return (ENXIO); 701 702 for (i = 0; i < nitems(t5_pciids); i++) { 703 if (d == t5_pciids[i].device) { 704 device_set_desc(dev, t5_pciids[i].desc); 705 return (BUS_PROBE_DEFAULT); 706 } 707 } 708 709 return (ENXIO); 710 } 711 712 static int 713 t6_probe(device_t dev) 714 { 715 int i; 716 uint16_t v = pci_get_vendor(dev); 717 uint16_t d = pci_get_device(dev); 718 719 if (v != PCI_VENDOR_ID_CHELSIO) 720 return (ENXIO); 721 722 for (i = 0; i < nitems(t6_pciids); i++) { 723 if (d == t6_pciids[i].device) { 724 device_set_desc(dev, t6_pciids[i].desc); 725 return (BUS_PROBE_DEFAULT); 726 } 727 } 728 729 return (ENXIO); 730 } 731 732 static void 733 t5_attribute_workaround(device_t dev) 734 { 735 device_t root_port; 736 uint32_t v; 737 738 /* 739 * The T5 chips do not properly echo the No Snoop and Relaxed 740 * Ordering attributes when replying to a TLP from a Root 741 * Port. As a workaround, find the parent Root Port and 742 * disable No Snoop and Relaxed Ordering. Note that this 743 * affects all devices under this root port. 
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}
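/*
 * To make the table above concrete: on a T5 card chip_id() returns
 * CHELSIO_T5, so t4_init_devnames() selects the second entry and the adapter
 * shows up as t5nex0 with cxl/vcxl port ifnets (and cxlv for VFs).
 */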
static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint8_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within
	 * PCI passthrough environments too, where pci_get_function() could
	 * return a different PF# depending on the passthrough configuration.
	 * We need to use the real PF# in all our communication with the
	 * firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * Number of VIs to create per-port.  The first VI is the "main"
	 * regular VI for the port.  The rest are additional virtual interfaces
	 * on the same physical port.  Note that the main VI does not have
	 * native netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis >= 1)
		num_vis = t4_num_vis;
	else
		num_vis = 1;
	if (num_vis > nitems(vi_mac_funcs)) {
		num_vis = nitems(vi_mac_funcs);
		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
	}

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation
		 * until pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (port_top_speed(pi) >= 10) {
			n10g++;
		} else {
			n1g++;
		}

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */
	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
		num_vis = 1;

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	if (num_vis > 1) {
		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
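	/*
	 * A worked example of the accounting above (hypothetical numbers):
	 * a 2-port 10G adapter with num_vis = 1, nrxq10g = 8, and
	 * ntxq10g = 16 gives nrxq = 16 and ntxq = 32, so neq starts at 48,
	 * the ctrl queues bring it to 48 + 2 + 1 = 51, and niq is 16 + 1 = 17
	 * including the firmware event queue.
	 */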
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		if (num_vis > 1) {
			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldrxq_vi;
			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldtxq_vi;
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_tx_sched(sc);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->tmr_idx = t4_tmr_idx_10g;
				vi->pktc_idx = t4_pktc_idx_10g;
				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
			} else {
				vi->tmr_idx = t4_tmr_idx_1g;
				vi->pktc_idx = t4_pktc_idx_1g;
				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
			}
			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#ifdef TCP_OFFLOAD
			vi->first_ofld_rxq = ofld_rqidx;
			vi->first_ofld_txq = ofld_tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
				    iaq.nofldtxq_vi;
			} else {
				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
				    iaq.nofldtxq_vi;
			}
			ofld_rqidx += vi->nofldrxq;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
			if (j > 0) {
				vi->first_nm_rxq = nm_rqidx;
				vi->first_nm_txq = nm_tqidx;
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
				nm_rqidx += vi->nnmrxq;
				nm_tqidx += vi->nnmtxq;
			}
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	device_delete_children(dev);

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		ifp->if_capabilities |= IFCAP_NETMAP;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this VI */
	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);

	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
		    vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	if (vi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	ifmedia_removeall(&vi->media);
	if_free(vi->ifp);
	vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);

	end_synchronized_op(sc, 0);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > MAX_MTU)
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;
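	/*
	 * A note on the can_sleep/redo_sifflags dance in the next case: the
	 * handler first takes the synchronized op in one locking mode and, if
	 * it turns out the operation requires the other mode, backs out and
	 * retries.  For example, toggling IFF_PROMISC must run with the lock
	 * held (no sleeping), while bringing the interface up or down must be
	 * able to sleep.
	 */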
	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, vi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account
		 * before sending a TSO request our way, so it's sufficient to
		 * toggle IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(sc, &m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}
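	/*
	 * To illustrate the selection below with hypothetical numbers: with
	 * ntxq = 8 and rsrv_noflowq = 1, packets that carry a flowid are
	 * spread over txq 1..7 (flowid % 7, plus the reserved offset), while
	 * packets without a flowid fall through to txq 0, the reserved
	 * no-flow queue.
	 */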
	/* Select a txq. */
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags |= EQ_QFLUSH;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_QFLUSH;
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct fw_vi_stats_vf *s = &vi->stats;

	vi_refresh_stats(vi->pi->adapter, vi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_bcast_frames + s->rx_mcast_frames +
		    s->rx_ucast_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_err_frames);
	case IFCOUNTER_OPACKETS:
		return (s->tx_bcast_frames + s->tx_mcast_frames +
		    s->tx_ucast_frames + s->tx_offload_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_drop_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
		    s->rx_ucast_bytes);
	case IFCOUNTER_OBYTES:
		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
		    s->tx_ucast_bytes + s->tx_offload_bytes);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = 0;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}
sge_txq *txq; 1976 1977 for_each_txq(vi, i, txq) 1978 drops += counter_u64_fetch(txq->r->drops); 1979 } 1980 1981 return (drops); 1982 1983 } 1984 1985 default: 1986 return (if_get_counter_default(ifp, c)); 1987 } 1988 } 1989 1990 static int 1991 cxgbe_media_change(struct ifnet *ifp) 1992 { 1993 struct vi_info *vi = ifp->if_softc; 1994 1995 device_printf(vi->dev, "%s unimplemented.\n", __func__); 1996 1997 return (EOPNOTSUPP); 1998 } 1999 2000 static void 2001 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2002 { 2003 struct vi_info *vi = ifp->if_softc; 2004 struct port_info *pi = vi->pi; 2005 struct ifmedia_entry *cur; 2006 struct link_config *lc = &pi->link_cfg; 2007 2008 /* 2009 * If all the interfaces are administratively down the firmware does not 2010 * report transceiver changes. Refresh port info here so that ifconfig 2011 * displays accurate information at all times. 2012 */ 2013 if (begin_synchronized_op(pi->adapter, NULL, SLEEP_OK | INTR_OK, 2014 "t4med") == 0) { 2015 PORT_LOCK(pi); 2016 if (pi->up_vis == 0) { 2017 t4_update_port_info(pi); 2018 build_medialist(pi, &vi->media); 2019 } 2020 PORT_UNLOCK(pi); 2021 end_synchronized_op(pi->adapter, 0); 2022 } 2023 2024 cur = vi->media.ifm_cur; 2025 2026 ifmr->ifm_status = IFM_AVALID; 2027 if (lc->link_ok == 0) 2028 return; 2029 2030 ifmr->ifm_status |= IFM_ACTIVE; 2031 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); 2032 if (lc->fc & PAUSE_RX) 2033 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2034 if (lc->fc & PAUSE_TX) 2035 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2036 2037 /* active and current will differ iff current media is autoselect. */ 2038 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 2039 return; 2040 2041 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 2042 if (lc->fc & PAUSE_RX) 2043 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2044 if (lc->fc & PAUSE_TX) 2045 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2046 switch (lc->speed) { 2047 case 10000: 2048 ifmr->ifm_active |= IFM_10G_T; 2049 break; 2050 case 1000: 2051 ifmr->ifm_active |= IFM_1000_T; 2052 break; 2053 case 100: 2054 ifmr->ifm_active |= IFM_100_TX; 2055 break; 2056 case 10: 2057 ifmr->ifm_active |= IFM_10_T; 2058 break; 2059 default: 2060 device_printf(vi->dev, "link up but speed unknown (%u)\n", 2061 lc->speed); 2062 } 2063 } 2064 2065 static int 2066 vcxgbe_probe(device_t dev) 2067 { 2068 char buf[128]; 2069 struct vi_info *vi = device_get_softc(dev); 2070 2071 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, 2072 vi - vi->pi->vi); 2073 device_set_desc_copy(dev, buf); 2074 2075 return (BUS_PROBE_DEFAULT); 2076 } 2077 2078 static int 2079 vcxgbe_attach(device_t dev) 2080 { 2081 struct vi_info *vi; 2082 struct port_info *pi; 2083 struct adapter *sc; 2084 int func, index, rc; 2085 u32 param, val; 2086 2087 vi = device_get_softc(dev); 2088 pi = vi->pi; 2089 sc = pi->adapter; 2090 2091 index = vi - pi->vi; 2092 KASSERT(index < nitems(vi_mac_funcs), 2093 ("%s: VI %s doesn't have a MAC func", __func__, 2094 device_get_nameunit(dev))); 2095 func = vi_mac_funcs[index]; 2096 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, 2097 vi->hw_addr, &vi->rss_size, func, 0); 2098 if (rc < 0) { 2099 device_printf(dev, "Failed to allocate virtual interface " 2100 "for port %d: %d\n", pi->port_id, -rc); 2101 return (-rc); 2102 } 2103 vi->viid = rc; 2104 if (chip_id(sc) <= CHELSIO_T5) 2105 vi->smt_idx = (rc & 0x7f) << 1; 2106 else 2107 vi->smt_idx = (rc & 0x7f); 2108 2109 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2110 
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 2111 V_FW_PARAMS_PARAM_YZ(vi->viid); 2112 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2113 if (rc) 2114 vi->rss_base = 0xffff; 2115 else { 2116 /* MPASS((val >> 16) == rss_size); */ 2117 vi->rss_base = val & 0xffff; 2118 } 2119 2120 rc = cxgbe_vi_attach(dev, vi); 2121 if (rc) { 2122 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2123 return (rc); 2124 } 2125 return (0); 2126 } 2127 2128 static int 2129 vcxgbe_detach(device_t dev) 2130 { 2131 struct vi_info *vi; 2132 struct adapter *sc; 2133 2134 vi = device_get_softc(dev); 2135 sc = vi->pi->adapter; 2136 2137 doom_vi(sc, vi); 2138 2139 cxgbe_vi_detach(vi); 2140 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2141 2142 end_synchronized_op(sc, 0); 2143 2144 return (0); 2145 } 2146 2147 void 2148 t4_fatal_err(struct adapter *sc) 2149 { 2150 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 2151 t4_intr_disable(sc); 2152 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 2153 device_get_nameunit(sc->dev)); 2154 } 2155 2156 void 2157 t4_add_adapter(struct adapter *sc) 2158 { 2159 sx_xlock(&t4_list_lock); 2160 SLIST_INSERT_HEAD(&t4_list, sc, link); 2161 sx_xunlock(&t4_list_lock); 2162 } 2163 2164 int 2165 t4_map_bars_0_and_4(struct adapter *sc) 2166 { 2167 sc->regs_rid = PCIR_BAR(0); 2168 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2169 &sc->regs_rid, RF_ACTIVE); 2170 if (sc->regs_res == NULL) { 2171 device_printf(sc->dev, "cannot map registers.\n"); 2172 return (ENXIO); 2173 } 2174 sc->bt = rman_get_bustag(sc->regs_res); 2175 sc->bh = rman_get_bushandle(sc->regs_res); 2176 sc->mmio_len = rman_get_size(sc->regs_res); 2177 setbit(&sc->doorbells, DOORBELL_KDB); 2178 2179 sc->msix_rid = PCIR_BAR(4); 2180 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2181 &sc->msix_rid, RF_ACTIVE); 2182 if (sc->msix_res == NULL) { 2183 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 2184 return (ENXIO); 2185 } 2186 2187 return (0); 2188 } 2189 2190 int 2191 t4_map_bar_2(struct adapter *sc) 2192 { 2193 2194 /* 2195 * T4: only the iWARP driver uses the userspace doorbells. There is no 2196 * need to map them if RDMA is disabled. 2197 */ 2198 if (is_t4(sc) && sc->rdmacaps == 0) 2199 return (0); 2200 2201 sc->udbs_rid = PCIR_BAR(2); 2202 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2203 &sc->udbs_rid, RF_ACTIVE); 2204 if (sc->udbs_res == NULL) { 2205 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 2206 return (ENXIO); 2207 } 2208 sc->udbs_base = rman_get_virtual(sc->udbs_res); 2209 2210 if (chip_id(sc) >= CHELSIO_T5) { 2211 setbit(&sc->doorbells, DOORBELL_UDB); 2212 #if defined(__i386__) || defined(__amd64__) 2213 if (t5_write_combine) { 2214 int rc, mode; 2215 2216 /* 2217 * Enable write combining on BAR2. This is the 2218 * userspace doorbell BAR and is split into 128B 2219 * (UDBS_SEG_SIZE) doorbell regions, each associated 2220 * with an egress queue. The first 64B has the doorbell 2221 * and the second 64B can be used to submit a tx work 2222 * request with an implicit doorbell. 2223 */ 2224 2225 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 2226 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 2227 if (rc == 0) { 2228 clrbit(&sc->doorbells, DOORBELL_UDB); 2229 setbit(&sc->doorbells, DOORBELL_WCWR); 2230 setbit(&sc->doorbells, DOORBELL_UDBWC); 2231 } else { 2232 device_printf(sc->dev, 2233 "couldn't enable write combining: %d\n", 2234 rc); 2235 } 2236 2237 mode = is_t5(sc) ?
V_STATMODE(0) : V_T6_STATMODE(0); 2238 t4_write_reg(sc, A_SGE_STAT_CFG, 2239 V_STATSOURCE_T5(7) | mode); 2240 } 2241 #endif 2242 } 2243 2244 return (0); 2245 } 2246 2247 struct memwin_init { 2248 uint32_t base; 2249 uint32_t aperture; 2250 }; 2251 2252 static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2253 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2254 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2255 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2256 }; 2257 2258 static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2259 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2260 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2261 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2262 }; 2263 2264 static void 2265 setup_memwin(struct adapter *sc) 2266 { 2267 const struct memwin_init *mw_init; 2268 struct memwin *mw; 2269 int i; 2270 uint32_t bar0; 2271 2272 if (is_t4(sc)) { 2273 /* 2274 * Read low 32b of bar0 indirectly via the hardware backdoor 2275 * mechanism. Works from within PCI passthrough environments 2276 * too, where rman_get_start() can return a different value. We 2277 * need to program the T4 memory window decoders with the actual 2278 * addresses that will be coming across the PCIe link. 2279 */ 2280 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2281 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2282 2283 mw_init = &t4_memwin[0]; 2284 } else { 2285 /* T5+ use the relative offset inside the PCIe BAR */ 2286 bar0 = 0; 2287 2288 mw_init = &t5_memwin[0]; 2289 } 2290 2291 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2292 rw_init(&mw->mw_lock, "memory window access"); 2293 mw->mw_base = mw_init->base; 2294 mw->mw_aperture = mw_init->aperture; 2295 mw->mw_curpos = 0; 2296 t4_write_reg(sc, 2297 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2298 (mw->mw_base + bar0) | V_BIR(0) | 2299 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2300 rw_wlock(&mw->mw_lock); 2301 position_memwin(sc, i, 0); 2302 rw_wunlock(&mw->mw_lock); 2303 } 2304 2305 /* flush */ 2306 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2307 } 2308 2309 /* 2310 * Positions the memory window at the given address in the card's address space. 2311 * There are some alignment requirements and the actual position may be at an 2312 * address prior to the requested address. mw->mw_curpos always has the actual 2313 * position of the window. 
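* Illustrative example (addresses are made up): a request for 0x12345 puts
* the window at 0x12340 on T4 (16B alignment) and at 0x12300 on T5+ (128B
* alignment); rw_via_memwin() then adds addr - mw_curpos to the window base
* to reach the exact byte.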
2314 */ 2315 static void 2316 position_memwin(struct adapter *sc, int idx, uint32_t addr) 2317 { 2318 struct memwin *mw; 2319 uint32_t pf; 2320 uint32_t reg; 2321 2322 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2323 mw = &sc->memwin[idx]; 2324 rw_assert(&mw->mw_lock, RA_WLOCKED); 2325 2326 if (is_t4(sc)) { 2327 pf = 0; 2328 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2329 } else { 2330 pf = V_PFNUM(sc->pf); 2331 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2332 } 2333 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2334 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2335 t4_read_reg(sc, reg); /* flush */ 2336 } 2337 2338 static int 2339 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2340 int len, int rw) 2341 { 2342 struct memwin *mw; 2343 uint32_t mw_end, v; 2344 2345 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2346 2347 /* Memory can only be accessed in naturally aligned 4 byte units */ 2348 if (addr & 3 || len & 3 || len <= 0) 2349 return (EINVAL); 2350 2351 mw = &sc->memwin[idx]; 2352 while (len > 0) { 2353 rw_rlock(&mw->mw_lock); 2354 mw_end = mw->mw_curpos + mw->mw_aperture; 2355 if (addr >= mw_end || addr < mw->mw_curpos) { 2356 /* Will need to reposition the window */ 2357 if (!rw_try_upgrade(&mw->mw_lock)) { 2358 rw_runlock(&mw->mw_lock); 2359 rw_wlock(&mw->mw_lock); 2360 } 2361 rw_assert(&mw->mw_lock, RA_WLOCKED); 2362 position_memwin(sc, idx, addr); 2363 rw_downgrade(&mw->mw_lock); 2364 mw_end = mw->mw_curpos + mw->mw_aperture; 2365 } 2366 rw_assert(&mw->mw_lock, RA_RLOCKED); 2367 while (addr < mw_end && len > 0) { 2368 if (rw == 0) { 2369 v = t4_read_reg(sc, mw->mw_base + addr - 2370 mw->mw_curpos); 2371 *val++ = le32toh(v); 2372 } else { 2373 v = *val++; 2374 t4_write_reg(sc, mw->mw_base + addr - 2375 mw->mw_curpos, htole32(v)); 2376 } 2377 addr += 4; 2378 len -= 4; 2379 } 2380 rw_runlock(&mw->mw_lock); 2381 } 2382 2383 return (0); 2384 } 2385 2386 static inline int 2387 read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2388 int len) 2389 { 2390 2391 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2392 } 2393 2394 static inline int 2395 write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2396 const uint32_t *val, int len) 2397 { 2398 2399 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2400 } 2401 2402 static int 2403 t4_range_cmp(const void *a, const void *b) 2404 { 2405 return ((const struct t4_range *)a)->start - 2406 ((const struct t4_range *)b)->start; 2407 } 2408 2409 /* 2410 * Verify that the memory range specified by the addr/len pair is valid within 2411 * the card's address space. 
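* The range is checked against every enabled memory (EDC0, EDC1, MC0, and
* MC1 on T5); a range that spans adjacent memories is accepted only after
* the per-memory ranges below are sorted and merged.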
2412 */ 2413 static int 2414 validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2415 { 2416 struct t4_range mem_ranges[4], *r, *next; 2417 uint32_t em, addr_len; 2418 int i, n, remaining; 2419 2420 /* Memory can only be accessed in naturally aligned 4 byte units */ 2421 if (addr & 3 || len & 3 || len <= 0) 2422 return (EINVAL); 2423 2424 /* Enabled memories */ 2425 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2426 2427 r = &mem_ranges[0]; 2428 n = 0; 2429 bzero(r, sizeof(mem_ranges)); 2430 if (em & F_EDRAM0_ENABLE) { 2431 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2432 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2433 if (r->size > 0) { 2434 r->start = G_EDRAM0_BASE(addr_len) << 20; 2435 if (addr >= r->start && 2436 addr + len <= r->start + r->size) 2437 return (0); 2438 r++; 2439 n++; 2440 } 2441 } 2442 if (em & F_EDRAM1_ENABLE) { 2443 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2444 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2445 if (r->size > 0) { 2446 r->start = G_EDRAM1_BASE(addr_len) << 20; 2447 if (addr >= r->start && 2448 addr + len <= r->start + r->size) 2449 return (0); 2450 r++; 2451 n++; 2452 } 2453 } 2454 if (em & F_EXT_MEM_ENABLE) { 2455 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2456 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2457 if (r->size > 0) { 2458 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2459 if (addr >= r->start && 2460 addr + len <= r->start + r->size) 2461 return (0); 2462 r++; 2463 n++; 2464 } 2465 } 2466 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2467 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2468 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2469 if (r->size > 0) { 2470 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2471 if (addr >= r->start && 2472 addr + len <= r->start + r->size) 2473 return (0); 2474 r++; 2475 n++; 2476 } 2477 } 2478 MPASS(n <= nitems(mem_ranges)); 2479 2480 if (n > 1) { 2481 /* Sort and merge the ranges. */ 2482 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2483 2484 /* Start from index 0 and examine the next n - 1 entries. */ 2485 r = &mem_ranges[0]; 2486 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2487 2488 MPASS(r->size > 0); /* r is a valid entry. */ 2489 next = r + 1; 2490 MPASS(next->size > 0); /* and so is the next one. */ 2491 2492 while (r->start + r->size >= next->start) { 2493 /* Merge the next one into the current entry. */ 2494 r->size = max(r->start + r->size, 2495 next->start + next->size) - r->start; 2496 n--; /* One fewer entry in total. */ 2497 if (--remaining == 0) 2498 goto done; /* short circuit */ 2499 next++; 2500 } 2501 if (next != r + 1) { 2502 /* 2503 * Some entries were merged into r and next 2504 * points to the first valid entry that couldn't 2505 * be merged. 2506 */ 2507 MPASS(next->size > 0); /* must be valid */ 2508 memcpy(r + 1, next, remaining * sizeof(*r)); 2509 #ifdef INVARIANTS 2510 /* 2511 * This is so that the foo->size assertion in the 2512 * next iteration of the loop does the right 2513 * thing for entries that were pulled up and are 2514 * no longer valid. 2515 */ 2516 MPASS(n < nitems(mem_ranges)); 2517 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2518 sizeof(struct t4_range)); 2519 #endif 2520 } 2521 } 2522 done: 2523 /* Done merging the ranges.
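* Illustrative example (sizes are made up): EDC0 at [0, 1GB) and MC0 at
* [1GB, 3GB) merge into a single [0, 3GB) range, so an access that crosses
* the 1GB boundary still validates.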
*/ 2524 MPASS(n > 0); 2525 r = &mem_ranges[0]; 2526 for (i = 0; i < n; i++, r++) { 2527 if (addr >= r->start && 2528 addr + len <= r->start + r->size) 2529 return (0); 2530 } 2531 } 2532 2533 return (EFAULT); 2534 } 2535 2536 static int 2537 fwmtype_to_hwmtype(int mtype) 2538 { 2539 2540 switch (mtype) { 2541 case FW_MEMTYPE_EDC0: 2542 return (MEM_EDC0); 2543 case FW_MEMTYPE_EDC1: 2544 return (MEM_EDC1); 2545 case FW_MEMTYPE_EXTMEM: 2546 return (MEM_MC0); 2547 case FW_MEMTYPE_EXTMEM1: 2548 return (MEM_MC1); 2549 default: 2550 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2551 } 2552 } 2553 2554 /* 2555 * Verify that the memory range specified by the memtype/offset/len pair is 2556 * valid and lies entirely within the memtype specified. The global address of 2557 * the start of the range is returned in addr. 2558 */ 2559 static int 2560 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2561 uint32_t *addr) 2562 { 2563 uint32_t em, addr_len, maddr; 2564 2565 /* Memory can only be accessed in naturally aligned 4 byte units */ 2566 if (off & 3 || len & 3 || len == 0) 2567 return (EINVAL); 2568 2569 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2570 switch (fwmtype_to_hwmtype(mtype)) { 2571 case MEM_EDC0: 2572 if (!(em & F_EDRAM0_ENABLE)) 2573 return (EINVAL); 2574 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2575 maddr = G_EDRAM0_BASE(addr_len) << 20; 2576 break; 2577 case MEM_EDC1: 2578 if (!(em & F_EDRAM1_ENABLE)) 2579 return (EINVAL); 2580 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2581 maddr = G_EDRAM1_BASE(addr_len) << 20; 2582 break; 2583 case MEM_MC: 2584 if (!(em & F_EXT_MEM_ENABLE)) 2585 return (EINVAL); 2586 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2587 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2588 break; 2589 case MEM_MC1: 2590 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2591 return (EINVAL); 2592 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2593 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2594 break; 2595 default: 2596 return (EINVAL); 2597 } 2598 2599 *addr = maddr + off; /* global address */ 2600 return (validate_mem_range(sc, *addr, len)); 2601 } 2602 2603 static int 2604 fixup_devlog_params(struct adapter *sc) 2605 { 2606 struct devlog_params *dparams = &sc->params.devlog; 2607 int rc; 2608 2609 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2610 dparams->size, &dparams->addr); 2611 2612 return (rc); 2613 } 2614 2615 static int 2616 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2617 struct intrs_and_queues *iaq) 2618 { 2619 int rc, itype, navail, nrxq10g, nrxq1g, n; 2620 int nofldrxq10g = 0, nofldrxq1g = 0; 2621 2622 bzero(iaq, sizeof(*iaq)); 2623 2624 iaq->ntxq10g = t4_ntxq10g; 2625 iaq->ntxq1g = t4_ntxq1g; 2626 iaq->ntxq_vi = t4_ntxq_vi; 2627 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2628 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2629 iaq->nrxq_vi = t4_nrxq_vi; 2630 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2631 #ifdef TCP_OFFLOAD 2632 if (is_offload(sc)) { 2633 iaq->nofldtxq10g = t4_nofldtxq10g; 2634 iaq->nofldtxq1g = t4_nofldtxq1g; 2635 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2636 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2637 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2638 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2639 } 2640 #endif 2641 #ifdef DEV_NETMAP 2642 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2643 iaq->nnmrxq_vi = t4_nnmrxq_vi; 2644 #endif 2645 2646 for (itype = INTR_MSIX; itype; itype >>= 1) { 2647 2648 if ((itype & t4_intr_types) == 0) 2649 continue; /* not allowed */ 2650 2651 if (itype == 
INTR_MSIX) 2652 navail = pci_msix_count(sc->dev); 2653 else if (itype == INTR_MSI) 2654 navail = pci_msi_count(sc->dev); 2655 else 2656 navail = 1; 2657 restart: 2658 if (navail == 0) 2659 continue; 2660 2661 iaq->intr_type = itype; 2662 iaq->intr_flags_10g = 0; 2663 iaq->intr_flags_1g = 0; 2664 2665 /* 2666 * Best option: an interrupt vector for errors, one for the 2667 * firmware event queue, and one for every rxq (NIC and TOE) of 2668 * every VI. The VIs that support netmap use the same 2669 * interrupts for the NIC rx queues and the netmap rx queues 2670 * because only one set of queues is active at a time. 2671 */ 2672 iaq->nirq = T4_EXTRA_INTR; 2673 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2674 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2675 iaq->nirq += (n10g + n1g) * (num_vis - 1) * 2676 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */ 2677 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi; 2678 if (iaq->nirq <= navail && 2679 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2680 iaq->intr_flags_10g = INTR_ALL; 2681 iaq->intr_flags_1g = INTR_ALL; 2682 goto allocate; 2683 } 2684 2685 /* Disable the VIs (and netmap) if there aren't enough intrs */ 2686 if (num_vis > 1) { 2687 device_printf(sc->dev, "virtual interfaces disabled " 2688 "because num_vis=%u with current settings " 2689 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, " 2690 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2691 "nnmrxq_vi=%u) would need %u interrupts but " 2692 "only %u are available.\n", num_vis, nrxq10g, 2693 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi, 2694 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq, 2695 navail); 2696 num_vis = 1; 2697 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2698 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2699 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2700 goto restart; 2701 } 2702 2703 /* 2704 * Second best option: a vector for errors, one for the firmware 2705 * event queue, and vectors for either all the NIC rx queues or 2706 * all the TOE rx queues. The queues that don't get vectors 2707 * will forward their interrupts to those that do. 2708 */ 2709 iaq->nirq = T4_EXTRA_INTR; 2710 if (nrxq10g >= nofldrxq10g) { 2711 iaq->intr_flags_10g = INTR_RXQ; 2712 iaq->nirq += n10g * nrxq10g; 2713 } else { 2714 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2715 iaq->nirq += n10g * nofldrxq10g; 2716 } 2717 if (nrxq1g >= nofldrxq1g) { 2718 iaq->intr_flags_1g = INTR_RXQ; 2719 iaq->nirq += n1g * nrxq1g; 2720 } else { 2721 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2722 iaq->nirq += n1g * nofldrxq1g; 2723 } 2724 if (iaq->nirq <= navail && 2725 (itype != INTR_MSI || powerof2(iaq->nirq))) 2726 goto allocate; 2727 2728 /* 2729 * Next best option: an interrupt vector for errors, one for the 2730 * firmware event queue, and at least one per main-VI. At this 2731 * point we know we'll have to downsize nrxq and/or nofldrxq to 2732 * fit what's available to us. 2733 */ 2734 iaq->nirq = T4_EXTRA_INTR; 2735 iaq->nirq += n10g + n1g; 2736 if (iaq->nirq <= navail) { 2737 int leftover = navail - iaq->nirq; 2738 2739 if (n10g > 0) { 2740 int target = max(nrxq10g, nofldrxq10g); 2741 2742 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2743 INTR_RXQ : INTR_OFLD_RXQ; 2744 2745 n = 1; 2746 while (n < target && leftover >= n10g) { 2747 leftover -= n10g; 2748 iaq->nirq += n10g; 2749 n++; 2750 } 2751 iaq->nrxq10g = min(n, nrxq10g); 2752 #ifdef TCP_OFFLOAD 2753 iaq->nofldrxq10g = min(n, nofldrxq10g); 2754 #endif 2755 } 2756 2757 if (n1g > 0) { 2758 int target = max(nrxq1g, nofldrxq1g); 2759 2760 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2761 INTR_RXQ : INTR_OFLD_RXQ; 2762 2763 n = 1; 2764 while (n < target && leftover >= n1g) { 2765 leftover -= n1g; 2766 iaq->nirq += n1g; 2767 n++; 2768 } 2769 iaq->nrxq1g = min(n, nrxq1g); 2770 #ifdef TCP_OFFLOAD 2771 iaq->nofldrxq1g = min(n, nofldrxq1g); 2772 #endif 2773 } 2774 2775 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2776 goto allocate; 2777 } 2778 2779 /* 2780 * Least desirable option: one interrupt vector for everything. 2781 */ 2782 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2783 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2784 #ifdef TCP_OFFLOAD 2785 if (is_offload(sc)) 2786 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2787 #endif 2788 allocate: 2789 navail = iaq->nirq; 2790 rc = 0; 2791 if (itype == INTR_MSIX) 2792 rc = pci_alloc_msix(sc->dev, &navail); 2793 else if (itype == INTR_MSI) 2794 rc = pci_alloc_msi(sc->dev, &navail); 2795 2796 if (rc == 0) { 2797 if (navail == iaq->nirq) 2798 return (0); 2799 2800 /* 2801 * Didn't get the number requested. Use whatever number 2802 * the kernel is willing to allocate (it's in navail). 2803 */ 2804 device_printf(sc->dev, "fewer vectors than requested, " 2805 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2806 itype, iaq->nirq, navail); 2807 pci_release_msi(sc->dev); 2808 goto restart; 2809 } 2810 2811 device_printf(sc->dev, 2812 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2813 rc, itype, iaq->nirq, navail); 2814 } 2815 2816 device_printf(sc->dev, 2817 "failed to find a usable interrupt type. " 2818 "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types, 2819 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2820 2821 return (ENXIO); 2822 } 2823 2824 #define FW_VERSION(chip) ( \ 2825 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2826 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2827 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2828 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2829 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2830 2831 struct fw_info { 2832 uint8_t chip; 2833 char *kld_name; 2834 char *fw_mod_name; 2835 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2836 } fw_info[] = { 2837 { 2838 .chip = CHELSIO_T4, 2839 .kld_name = "t4fw_cfg", 2840 .fw_mod_name = "t4fw", 2841 .fw_hdr = { 2842 .chip = FW_HDR_CHIP_T4, 2843 .fw_ver = htobe32_const(FW_VERSION(T4)), 2844 .intfver_nic = FW_INTFVER(T4, NIC), 2845 .intfver_vnic = FW_INTFVER(T4, VNIC), 2846 .intfver_ofld = FW_INTFVER(T4, OFLD), 2847 .intfver_ri = FW_INTFVER(T4, RI), 2848 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2849 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2850 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2851 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2852 }, 2853 }, { 2854 .chip = CHELSIO_T5, 2855 .kld_name = "t5fw_cfg", 2856 .fw_mod_name = "t5fw", 2857 .fw_hdr = { 2858 .chip = FW_HDR_CHIP_T5, 2859 .fw_ver = htobe32_const(FW_VERSION(T5)), 2860 .intfver_nic = FW_INTFVER(T5, NIC), 2861 .intfver_vnic = FW_INTFVER(T5, VNIC), 2862 .intfver_ofld = FW_INTFVER(T5, OFLD), 2863 .intfver_ri = FW_INTFVER(T5, RI), 2864 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2865 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2866 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2867 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2868 }, 2869 }, { 2870 .chip = CHELSIO_T6, 2871 .kld_name = "t6fw_cfg", 2872 .fw_mod_name = "t6fw", 2873 .fw_hdr = { 2874 .chip = FW_HDR_CHIP_T6, 2875 .fw_ver = htobe32_const(FW_VERSION(T6)), 2876 .intfver_nic = FW_INTFVER(T6, NIC), 2877 .intfver_vnic = FW_INTFVER(T6, VNIC), 2878 .intfver_ofld = FW_INTFVER(T6, OFLD),
2879 .intfver_ri = FW_INTFVER(T6, RI), 2880 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 2881 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2882 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 2883 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2884 }, 2885 } 2886 }; 2887 2888 static struct fw_info * 2889 find_fw_info(int chip) 2890 { 2891 int i; 2892 2893 for (i = 0; i < nitems(fw_info); i++) { 2894 if (fw_info[i].chip == chip) 2895 return (&fw_info[i]); 2896 } 2897 return (NULL); 2898 } 2899 2900 /* 2901 * Is the given firmware API compatible with the one the driver was compiled 2902 * with? 2903 */ 2904 static int 2905 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2906 { 2907 2908 /* short circuit if it's the exact same firmware version */ 2909 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2910 return (1); 2911 2912 /* 2913 * XXX: Is this too conservative? Perhaps I should limit this to the 2914 * features that are supported in the driver. 2915 */ 2916 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2917 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2918 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2919 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2920 return (1); 2921 #undef SAME_INTF 2922 2923 return (0); 2924 } 2925 2926 /* 2927 * The firmware in the KLD is usable, but should it be installed? This routine 2928 * explains itself in detail if it indicates the KLD firmware should be 2929 * installed. 2930 */ 2931 static int 2932 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2933 { 2934 const char *reason; 2935 2936 if (!card_fw_usable) { 2937 reason = "incompatible or unusable"; 2938 goto install; 2939 } 2940 2941 if (k > c) { 2942 reason = "older than the version bundled with this driver"; 2943 goto install; 2944 } 2945 2946 if (t4_fw_install == 2 && k != c) { 2947 reason = "different than the version bundled with this driver"; 2948 goto install; 2949 } 2950 2951 return (0); 2952 2953 install: 2954 if (t4_fw_install == 0) { 2955 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2956 "but the driver is prohibited from installing a different " 2957 "firmware on the card.\n", 2958 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2959 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2960 2961 return (0); 2962 } 2963 2964 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2965 "installing firmware %u.%u.%u.%u on card.\n", 2966 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2967 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2968 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2969 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2970 2971 return (1); 2972 } 2973 2974 /* 2975 * Establish contact with the firmware and determine if we are the master driver 2976 * or not, and whether we are responsible for chip initialization. 
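* The answer comes from the t4_fw_hello() handshake below: the firmware
* nominates one PF as the master, and the device state it returns (e.g.
* DEV_STATE_UNINIT) tells that master whether the chip still needs to be
* configured.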
2977 */ 2978 static int 2979 prep_firmware(struct adapter *sc) 2980 { 2981 const struct firmware *fw = NULL, *default_cfg; 2982 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2983 enum dev_state state; 2984 struct fw_info *fw_info; 2985 struct fw_hdr *card_fw; /* fw on the card */ 2986 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2987 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2988 against */ 2989 2990 /* This is the firmware whose headers the driver was compiled against */ 2991 fw_info = find_fw_info(chip_id(sc)); 2992 if (fw_info == NULL) { 2993 device_printf(sc->dev, 2994 "unable to look up firmware information for chip %d.\n", 2995 chip_id(sc)); 2996 return (EINVAL); 2997 } 2998 drv_fw = &fw_info->fw_hdr; 2999 3000 /* 3001 * The firmware KLD contains many modules. The KLD name is also the 3002 * name of the module that contains the default config file. 3003 */ 3004 default_cfg = firmware_get(fw_info->kld_name); 3005 3006 /* This is the firmware in the KLD */ 3007 fw = firmware_get(fw_info->fw_mod_name); 3008 if (fw != NULL) { 3009 kld_fw = (const void *)fw->data; 3010 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 3011 } else { 3012 kld_fw = NULL; 3013 kld_fw_usable = 0; 3014 } 3015 3016 /* Read the header of the firmware on the card */ 3017 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 3018 rc = -t4_read_flash(sc, FLASH_FW_START, 3019 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 3020 if (rc == 0) { 3021 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 3022 if (card_fw->fw_ver == be32toh(0xffffffff)) { 3023 uint32_t d; 3024 3025 if (!kld_fw_usable) { 3026 device_printf(sc->dev, 3027 "no firmware on the card and no usable " 3028 "firmware bundled with the driver.\n"); 3029 rc = EIO; 3030 goto done; 3031 } else if (t4_fw_install == 0) { 3032 device_printf(sc->dev, 3033 "no firmware on the card and the driver " 3034 "is prohibited from installing new " 3035 "firmware.\n"); 3036 rc = EIO; 3037 goto done; 3038 } 3039 d = be32toh(kld_fw->fw_ver); /* kld_fw != NULL here */ 3040 device_printf(sc->dev, "no firmware on the card, " 3041 "installing firmware %d.%d.%d.%d\n", 3042 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3043 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); 3044 rc = t4_fw_forceinstall(sc, fw->data, fw->datasize); 3045 if (rc < 0) { 3046 rc = -rc; 3047 device_printf(sc->dev, 3048 "firmware install failed: %d.\n", rc); 3049 goto done; 3050 } 3051 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 3052 card_fw_usable = 1; 3053 need_fw_reset = 0; 3054 } 3055 } else { 3056 device_printf(sc->dev, 3057 "Unable to read card's firmware header: %d\n", rc); 3058 card_fw_usable = 0; 3059 } 3060 3061 /* Contact firmware. */ 3062 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 3063 if (rc < 0 || state == DEV_STATE_ERR) { 3064 rc = -rc; 3065 device_printf(sc->dev, 3066 "failed to connect to the firmware: %d, %d.\n", rc, state); 3067 goto done; 3068 } 3069 pf = rc; 3070 if (pf == sc->mbox) 3071 sc->flags |= MASTER_PF; 3072 else if (state == DEV_STATE_UNINIT) { 3073 /* 3074 * We didn't get to be the master so we definitely won't be 3075 * configuring the chip. It's a bug if someone else hasn't 3076 * configured it already.
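* (DEV_STATE_UNINIT without master rights means nobody is going to
* initialize the chip, so the code below bails out with EPROTO rather
* than limp along.)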
3077 */ 3078 device_printf(sc->dev, "couldn't be master(%d), " 3079 "device not already initialized either(%d).\n", rc, state); 3080 rc = EPROTO; 3081 goto done; 3082 } 3083 3084 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 3085 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 3086 /* 3087 * Common case: the firmware on the card is an exact match and 3088 * the KLD is an exact match too, or the KLD is 3089 * absent/incompatible. Note that t4_fw_install = 2 is ignored 3090 * here -- use cxgbetool loadfw if you want to reinstall the 3091 * same firmware as the one on the card. 3092 */ 3093 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 3094 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 3095 be32toh(card_fw->fw_ver))) { 3096 3097 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 3098 if (rc != 0) { 3099 device_printf(sc->dev, 3100 "failed to install firmware: %d\n", rc); 3101 goto done; 3102 } 3103 3104 /* Installed successfully, update the cached header too. */ 3105 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 3106 card_fw_usable = 1; 3107 need_fw_reset = 0; /* already reset as part of load_fw */ 3108 } 3109 3110 if (!card_fw_usable) { 3111 uint32_t d, c, k; 3112 3113 d = ntohl(drv_fw->fw_ver); 3114 c = ntohl(card_fw->fw_ver); 3115 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 3116 3117 device_printf(sc->dev, "Cannot find a usable firmware: " 3118 "fw_install %d, chip state %d, " 3119 "driver compiled with %d.%d.%d.%d, " 3120 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 3121 t4_fw_install, state, 3122 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3123 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 3124 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3125 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 3126 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 3127 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 3128 rc = EINVAL; 3129 goto done; 3130 } 3131 3132 /* Reset device */ 3133 if (need_fw_reset && 3134 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 3135 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 3136 if (rc != ETIMEDOUT && rc != EIO) 3137 t4_fw_bye(sc, sc->mbox); 3138 goto done; 3139 } 3140 sc->flags |= FW_OK; 3141 3142 rc = get_params__pre_init(sc); 3143 if (rc != 0) 3144 goto done; /* error message displayed already */ 3145 3146 /* Partition adapter resources as specified in the config file. 
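* Only the master PF takes this branch, and only when the chip is still
* uninitialized; other PFs skip it and inherit whatever the master set up.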
*/ 3147 if (state == DEV_STATE_UNINIT) { 3148 3149 KASSERT(sc->flags & MASTER_PF, 3150 ("%s: trying to change chip settings when not master.", 3151 __func__)); 3152 3153 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 3154 if (rc != 0) 3155 goto done; /* error message displayed already */ 3156 3157 t4_tweak_chip_settings(sc); 3158 3159 /* get basic stuff going */ 3160 rc = -t4_fw_initialize(sc, sc->mbox); 3161 if (rc != 0) { 3162 device_printf(sc->dev, "fw init failed: %d.\n", rc); 3163 goto done; 3164 } 3165 } else { 3166 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 3167 sc->cfcsum = 0; 3168 } 3169 3170 done: 3171 free(card_fw, M_CXGBE); 3172 if (fw != NULL) 3173 firmware_put(fw, FIRMWARE_UNLOAD); 3174 if (default_cfg != NULL) 3175 firmware_put(default_cfg, FIRMWARE_UNLOAD); 3176 3177 return (rc); 3178 } 3179 3180 #define FW_PARAM_DEV(param) \ 3181 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3182 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3183 #define FW_PARAM_PFVF(param) \ 3184 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3185 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 3186 3187 /* 3188 * Partition chip resources for use between various PFs, VFs, etc. 3189 */ 3190 static int 3191 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 3192 const char *name_prefix) 3193 { 3194 const struct firmware *cfg = NULL; 3195 int rc = 0; 3196 struct fw_caps_config_cmd caps; 3197 uint32_t mtype, moff, finicsum, cfcsum; 3198 3199 /* 3200 * Figure out what configuration file to use. Pick the default config 3201 * file for the card if the user hasn't specified one explicitly. 3202 */ 3203 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 3204 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 3205 /* Card specific overrides go here. */ 3206 if (pci_get_device(sc->dev) == 0x440a) 3207 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 3208 if (is_fpga(sc)) 3209 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 3210 } 3211 3212 /* 3213 * We need to load another module if the profile is anything except 3214 * "default" or "flash". 
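* Illustrative example (profile chosen for the example): with the
* hw.cxgbe.config_file tunable set to "uwire" on a T5 card, the code below
* asks firmware_get() for a module named "t5fw_cfg_uwire".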
3215 */ 3216 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 3217 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3218 char s[32]; 3219 3220 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 3221 cfg = firmware_get(s); 3222 if (cfg == NULL) { 3223 if (default_cfg != NULL) { 3224 device_printf(sc->dev, 3225 "unable to load module \"%s\" for " 3226 "configuration profile \"%s\", will use " 3227 "the default config file instead.\n", 3228 s, sc->cfg_file); 3229 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3230 "%s", DEFAULT_CF); 3231 } else { 3232 device_printf(sc->dev, 3233 "unable to load module \"%s\" for " 3234 "configuration profile \"%s\", will use " 3235 "the config file on the card's flash " 3236 "instead.\n", s, sc->cfg_file); 3237 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3238 "%s", FLASH_CF); 3239 } 3240 } 3241 } 3242 3243 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 3244 default_cfg == NULL) { 3245 device_printf(sc->dev, 3246 "default config file not available, will use the config " 3247 "file on the card's flash instead.\n"); 3248 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 3249 } 3250 3251 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3252 u_int cflen; 3253 const uint32_t *cfdata; 3254 uint32_t param, val, addr; 3255 3256 KASSERT(cfg != NULL || default_cfg != NULL, 3257 ("%s: no config to upload", __func__)); 3258 3259 /* 3260 * Ask the firmware where it wants us to upload the config file. 3261 */ 3262 param = FW_PARAM_DEV(CF); 3263 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3264 if (rc != 0) { 3265 /* No support for config file? Shouldn't happen. */ 3266 device_printf(sc->dev, 3267 "failed to query config file location: %d.\n", rc); 3268 goto done; 3269 } 3270 mtype = G_FW_PARAMS_PARAM_Y(val); 3271 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3272 3273 /* 3274 * XXX: sheer laziness. We deliberately added 4 bytes of 3275 * useless stuffing/comments at the end of the config file so 3276 * it's ok to simply throw away the last remaining bytes when 3277 * the config file is not an exact multiple of 4. This also 3278 * helps with the validate_mt_off_len check. 3279 */ 3280 if (cfg != NULL) { 3281 cflen = cfg->datasize & ~3; 3282 cfdata = cfg->data; 3283 } else { 3284 cflen = default_cfg->datasize & ~3; 3285 cfdata = default_cfg->data; 3286 } 3287 3288 if (cflen > FLASH_CFG_MAX_SIZE) { 3289 device_printf(sc->dev, 3290 "config file too long (%d, max allowed is %d). " 3291 "Will try to use the config on the card, if any.\n", 3292 cflen, FLASH_CFG_MAX_SIZE); 3293 goto use_config_on_flash; 3294 } 3295 3296 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3297 if (rc != 0) { 3298 device_printf(sc->dev, 3299 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 3300 "Will try to use the config on the card, if any.\n", 3301 __func__, mtype, moff, cflen, rc); 3302 goto use_config_on_flash; 3303 } 3304 write_via_memwin(sc, 2, addr, cfdata, cflen); 3305 } else { 3306 use_config_on_flash: 3307 mtype = FW_MEMTYPE_FLASH; 3308 moff = t4_flash_cfg_addr(sc); 3309 } 3310 3311 bzero(&caps, sizeof(caps)); 3312 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3313 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3314 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3315 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3316 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3317 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3318 if (rc != 0) { 3319 device_printf(sc->dev, 3320 "failed to pre-process config file: %d " 3321 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3322 goto done; 3323 } 3324 3325 finicsum = be32toh(caps.finicsum); 3326 cfcsum = be32toh(caps.cfcsum); 3327 if (finicsum != cfcsum) { 3328 device_printf(sc->dev, 3329 "WARNING: config file checksum mismatch: %08x %08x\n", 3330 finicsum, cfcsum); 3331 } 3332 sc->cfcsum = cfcsum; 3333 3334 #define LIMIT_CAPS(x) do { \ 3335 caps.x &= htobe16(t4_##x##_allowed); \ 3336 } while (0) 3337 3338 /* 3339 * Let the firmware know what features will (not) be used so it can tune 3340 * things accordingly. 3341 */ 3342 LIMIT_CAPS(nbmcaps); 3343 LIMIT_CAPS(linkcaps); 3344 LIMIT_CAPS(switchcaps); 3345 LIMIT_CAPS(niccaps); 3346 LIMIT_CAPS(toecaps); 3347 LIMIT_CAPS(rdmacaps); 3348 LIMIT_CAPS(cryptocaps); 3349 LIMIT_CAPS(iscsicaps); 3350 LIMIT_CAPS(fcoecaps); 3351 #undef LIMIT_CAPS 3352 3353 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3354 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3355 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3356 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3357 if (rc != 0) { 3358 device_printf(sc->dev, 3359 "failed to process config file: %d.\n", rc); 3360 } 3361 done: 3362 if (cfg != NULL) 3363 firmware_put(cfg, FIRMWARE_UNLOAD); 3364 return (rc); 3365 } 3366 3367 /* 3368 * Retrieve parameters that are needed (or nice to have) very early. 
3369 */ 3370 static int 3371 get_params__pre_init(struct adapter *sc) 3372 { 3373 int rc; 3374 uint32_t param[2], val[2]; 3375 3376 t4_get_version_info(sc); 3377 3378 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 3379 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 3380 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 3381 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 3382 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 3383 3384 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 3385 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 3386 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 3387 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 3388 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 3389 3390 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 3391 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 3392 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 3393 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 3394 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 3395 3396 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 3397 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 3398 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 3399 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 3400 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 3401 3402 param[0] = FW_PARAM_DEV(PORTVEC); 3403 param[1] = FW_PARAM_DEV(CCLK); 3404 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3405 if (rc != 0) { 3406 device_printf(sc->dev, 3407 "failed to query parameters (pre_init): %d.\n", rc); 3408 return (rc); 3409 } 3410 3411 sc->params.portvec = val[0]; 3412 sc->params.nports = bitcount32(val[0]); 3413 sc->params.vpd.cclk = val[1]; 3414 3415 /* Read device log parameters. */ 3416 rc = -t4_init_devlog_params(sc, 1); 3417 if (rc == 0) 3418 fixup_devlog_params(sc); 3419 else { 3420 device_printf(sc->dev, 3421 "failed to get devlog parameters: %d.\n", rc); 3422 rc = 0; /* devlog isn't critical for device operation */ 3423 } 3424 3425 return (rc); 3426 } 3427 3428 /* 3429 * Retrieve various parameters that are of interest to the driver. The device 3430 * has been initialized by the firmware at this point. 
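* Each START/END parameter pair is turned into a base plus an inclusive
* count, e.g. (hypothetical values) FILTER_START 100 and FILTER_END 227
* yield ftid_base 100 and nftids 227 - 100 + 1 = 128.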
3431 */ 3432 static int 3433 get_params__post_init(struct adapter *sc) 3434 { 3435 int rc; 3436 uint32_t param[7], val[7]; 3437 struct fw_caps_config_cmd caps; 3438 3439 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3440 param[1] = FW_PARAM_PFVF(EQ_START); 3441 param[2] = FW_PARAM_PFVF(FILTER_START); 3442 param[3] = FW_PARAM_PFVF(FILTER_END); 3443 param[4] = FW_PARAM_PFVF(L2T_START); 3444 param[5] = FW_PARAM_PFVF(L2T_END); 3445 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3446 if (rc != 0) { 3447 device_printf(sc->dev, 3448 "failed to query parameters (post_init): %d.\n", rc); 3449 return (rc); 3450 } 3451 3452 sc->sge.iq_start = val[0]; 3453 sc->sge.eq_start = val[1]; 3454 sc->tids.ftid_base = val[2]; 3455 sc->tids.nftids = val[3] - val[2] + 1; 3456 sc->params.ftid_min = val[2]; 3457 sc->params.ftid_max = val[3]; 3458 sc->vres.l2t.start = val[4]; 3459 sc->vres.l2t.size = val[5] - val[4] + 1; 3460 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3461 ("%s: L2 table size (%u) larger than expected (%u)", 3462 __func__, sc->vres.l2t.size, L2T_SIZE)); 3463 3464 /* get capabilities */ 3465 bzero(&caps, sizeof(caps)); 3466 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3467 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3468 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3469 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3470 if (rc != 0) { 3471 device_printf(sc->dev, 3472 "failed to get card capabilities: %d.\n", rc); 3473 return (rc); 3474 } 3475 3476 #define READ_CAPS(x) do { \ 3477 sc->x = be16toh(caps.x); \ 3478 } while (0) 3479 READ_CAPS(nbmcaps); 3480 READ_CAPS(linkcaps); 3481 READ_CAPS(switchcaps); 3482 READ_CAPS(niccaps); 3483 READ_CAPS(toecaps); 3484 READ_CAPS(rdmacaps); 3485 READ_CAPS(cryptocaps); 3486 READ_CAPS(iscsicaps); 3487 READ_CAPS(fcoecaps); 3488 3489 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3490 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3491 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3492 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3493 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3494 if (rc != 0) { 3495 device_printf(sc->dev, 3496 "failed to query NIC parameters: %d.\n", rc); 3497 return (rc); 3498 } 3499 sc->tids.etid_base = val[0]; 3500 sc->params.etid_min = val[0]; 3501 sc->tids.netids = val[1] - val[0] + 1; 3502 sc->params.netids = sc->tids.netids; 3503 sc->params.eo_wr_cred = val[2]; 3504 sc->params.ethoffload = 1; 3505 } 3506 3507 if (sc->toecaps) { 3508 /* query offload-related parameters */ 3509 param[0] = FW_PARAM_DEV(NTID); 3510 param[1] = FW_PARAM_PFVF(SERVER_START); 3511 param[2] = FW_PARAM_PFVF(SERVER_END); 3512 param[3] = FW_PARAM_PFVF(TDDP_START); 3513 param[4] = FW_PARAM_PFVF(TDDP_END); 3514 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3515 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3516 if (rc != 0) { 3517 device_printf(sc->dev, 3518 "failed to query TOE parameters: %d.\n", rc); 3519 return (rc); 3520 } 3521 sc->tids.ntids = val[0]; 3522 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3523 sc->tids.stid_base = val[1]; 3524 sc->tids.nstids = val[2] - val[1] + 1; 3525 sc->vres.ddp.start = val[3]; 3526 sc->vres.ddp.size = val[4] - val[3] + 1; 3527 sc->params.ofldq_wr_cred = val[5]; 3528 sc->params.offload = 1; 3529 } 3530 if (sc->rdmacaps) { 3531 param[0] = FW_PARAM_PFVF(STAG_START); 3532 param[1] = FW_PARAM_PFVF(STAG_END); 3533 param[2] = FW_PARAM_PFVF(RQ_START); 3534 param[3] = FW_PARAM_PFVF(RQ_END); 3535 param[4] = FW_PARAM_PFVF(PBL_START); 3536 param[5] = FW_PARAM_PFVF(PBL_END); 3537 rc =
-t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3538 if (rc != 0) { 3539 device_printf(sc->dev, 3540 "failed to query RDMA parameters(1): %d.\n", rc); 3541 return (rc); 3542 } 3543 sc->vres.stag.start = val[0]; 3544 sc->vres.stag.size = val[1] - val[0] + 1; 3545 sc->vres.rq.start = val[2]; 3546 sc->vres.rq.size = val[3] - val[2] + 1; 3547 sc->vres.pbl.start = val[4]; 3548 sc->vres.pbl.size = val[5] - val[4] + 1; 3549 3550 param[0] = FW_PARAM_PFVF(SQRQ_START); 3551 param[1] = FW_PARAM_PFVF(SQRQ_END); 3552 param[2] = FW_PARAM_PFVF(CQ_START); 3553 param[3] = FW_PARAM_PFVF(CQ_END); 3554 param[4] = FW_PARAM_PFVF(OCQ_START); 3555 param[5] = FW_PARAM_PFVF(OCQ_END); 3556 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3557 if (rc != 0) { 3558 device_printf(sc->dev, 3559 "failed to query RDMA parameters(2): %d.\n", rc); 3560 return (rc); 3561 } 3562 sc->vres.qp.start = val[0]; 3563 sc->vres.qp.size = val[1] - val[0] + 1; 3564 sc->vres.cq.start = val[2]; 3565 sc->vres.cq.size = val[3] - val[2] + 1; 3566 sc->vres.ocq.start = val[4]; 3567 sc->vres.ocq.size = val[5] - val[4] + 1; 3568 3569 param[0] = FW_PARAM_PFVF(SRQ_START); 3570 param[1] = FW_PARAM_PFVF(SRQ_END); 3571 param[2] = FW_PARAM_DEV(MAXORDIRD_QP); 3572 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); 3573 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); 3574 if (rc != 0) { 3575 device_printf(sc->dev, 3576 "failed to query RDMA parameters(3): %d.\n", rc); 3577 return (rc); 3578 } 3579 sc->vres.srq.start = val[0]; 3580 sc->vres.srq.size = val[1] - val[0] + 1; 3581 sc->params.max_ordird_qp = val[2]; 3582 sc->params.max_ird_adapter = val[3]; 3583 } 3584 if (sc->iscsicaps) { 3585 param[0] = FW_PARAM_PFVF(ISCSI_START); 3586 param[1] = FW_PARAM_PFVF(ISCSI_END); 3587 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3588 if (rc != 0) { 3589 device_printf(sc->dev, 3590 "failed to query iSCSI parameters: %d.\n", rc); 3591 return (rc); 3592 } 3593 sc->vres.iscsi.start = val[0]; 3594 sc->vres.iscsi.size = val[1] - val[0] + 1; 3595 } 3596 3597 t4_init_sge_params(sc); 3598 3599 /* 3600 * We've got the params we wanted to query via the firmware. Now grab 3601 * some others directly from the chip. 3602 */ 3603 rc = t4_read_chip_settings(sc); 3604 3605 return (rc); 3606 } 3607 3608 static int 3609 set_params__post_init(struct adapter *sc) 3610 { 3611 uint32_t param, val; 3612 int i, v, shift; 3613 char s[32]; 3614 3615 /* ask for encapsulated CPLs */ 3616 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3617 val = 1; 3618 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3619 3620 /* 3621 * Override the TOE timers with user-provided tunables. This is not the 3622 * recommended way to change the timers (the firmware config file is) so 3623 * these tunables are not documented. 3624 * 3625 * All the timer tunables are in milliseconds.
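* Illustrative example (value is an assumption): setting
* hw.cxgbe.toe.keepalive_idle=7200000 in loader.conf requests a 2 hour
* keepalive idle time; ms_to_tcp_ticks() converts it to hardware ticks.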
3626 */ 3627 if (TUNABLE_INT_FETCH("hw.cxgbe.toe.keepalive_idle", &v)) { 3628 t4_set_reg_field(sc, A_TP_KEEP_IDLE, 3629 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), 3630 V_KEEPALIVEIDLE(ms_to_tcp_ticks(sc, v))); 3631 } 3632 if (TUNABLE_INT_FETCH("hw.cxgbe.toe.keepalive_interval", &v)) { 3633 t4_set_reg_field(sc, A_TP_KEEP_INTVL, 3634 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), 3635 V_KEEPALIVEINTVL(ms_to_tcp_ticks(sc, v))); 3636 } 3637 if (TUNABLE_INT_FETCH("hw.cxgbe.toe.keepalive_count", &v)) { 3638 v &= M_KEEPALIVEMAXR1; 3639 t4_set_reg_field(sc, A_TP_SHIFT_CNT, 3640 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) | 3641 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2), 3642 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v)); 3643 } 3644 if (TUNABLE_INT_FETCH("hw.cxgbe.toe.rexmt_min", &v)) { 3645 t4_set_reg_field(sc, A_TP_RXT_MIN, 3646 V_RXTMIN(M_RXTMIN), V_RXTMIN(ms_to_tcp_ticks(sc, v))); 3647 } 3648 if (TUNABLE_INT_FETCH("hw.cxgbe.toe.rexmt_max", &v)) { 3649 t4_set_reg_field(sc, A_TP_RXT_MAX, 3650 V_RXTMAX(M_RXTMAX), V_RXTMAX(ms_to_tcp_ticks(sc, v))); 3651 } 3652 if (TUNABLE_INT_FETCH("hw.cxgbe.toe.rexmt_count", &v)) { 3653 v &= M_RXTSHIFTMAXR1; 3654 t4_set_reg_field(sc, A_TP_SHIFT_CNT, 3655 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) | 3656 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2), 3657 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v)); 3658 } 3659 for (i = 0; i < 16; i++) { 3660 snprintf(s, sizeof(s), "hw.cxgbe.toe.rexmt_backoff.%d", i); 3661 if (TUNABLE_INT_FETCH(s, &v)) { 3662 v &= M_TIMERBACKOFFINDEX0; 3663 shift = (i & 3) << 3; 3664 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3), 3665 M_TIMERBACKOFFINDEX0 << shift, v << shift); 3666 } 3667 } 3668 return (0); 3669 } 3670 3671 #undef FW_PARAM_PFVF 3672 #undef FW_PARAM_DEV 3673 3674 static void 3675 t4_set_desc(struct adapter *sc) 3676 { 3677 char buf[128]; 3678 struct adapter_params *p = &sc->params; 3679 3680 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); 3681 3682 device_set_desc_copy(sc->dev, buf); 3683 } 3684 3685 static void 3686 build_medialist(struct port_info *pi, struct ifmedia *media) 3687 { 3688 int m; 3689 3690 PORT_LOCK_ASSERT_OWNED(pi); 3691 3692 ifmedia_removeall(media); 3693 3694 /* 3695 * XXX: Would it be better to ifmedia_add all 4 combinations of pause 3696 * settings for every speed instead of just txpause|rxpause? ifconfig 3697 * media display looks much better if autoselect is the only case where 3698 * ifm_current is different from ifm_active. If the user picks anything 3699 * except txpause|rxpause the display is ugly. 
3700 */ 3701 m = IFM_ETHER | IFM_FDX | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 3702 3703 switch(pi->port_type) { 3704 case FW_PORT_TYPE_BT_XFI: 3705 case FW_PORT_TYPE_BT_XAUI: 3706 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3707 /* fall through */ 3708 3709 case FW_PORT_TYPE_BT_SGMII: 3710 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3711 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3712 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3713 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3714 break; 3715 3716 case FW_PORT_TYPE_CX4: 3717 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3718 ifmedia_set(media, m | IFM_10G_CX4); 3719 break; 3720 3721 case FW_PORT_TYPE_QSFP_10G: 3722 case FW_PORT_TYPE_SFP: 3723 case FW_PORT_TYPE_FIBER_XFI: 3724 case FW_PORT_TYPE_FIBER_XAUI: 3725 switch (pi->mod_type) { 3726 3727 case FW_PORT_MOD_TYPE_LR: 3728 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3729 ifmedia_set(media, m | IFM_10G_LR); 3730 break; 3731 3732 case FW_PORT_MOD_TYPE_SR: 3733 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3734 ifmedia_set(media, m | IFM_10G_SR); 3735 break; 3736 3737 case FW_PORT_MOD_TYPE_LRM: 3738 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3739 ifmedia_set(media, m | IFM_10G_LRM); 3740 break; 3741 3742 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3743 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3744 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3745 ifmedia_set(media, m | IFM_10G_TWINAX); 3746 break; 3747 3748 case FW_PORT_MOD_TYPE_NONE: 3749 m &= ~IFM_FDX; 3750 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3751 ifmedia_set(media, m | IFM_NONE); 3752 break; 3753 3754 case FW_PORT_MOD_TYPE_NA: 3755 case FW_PORT_MOD_TYPE_ER: 3756 default: 3757 device_printf(pi->dev, 3758 "unknown port_type (%d), mod_type (%d)\n", 3759 pi->port_type, pi->mod_type); 3760 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3761 ifmedia_set(media, m | IFM_UNKNOWN); 3762 break; 3763 } 3764 break; 3765 3766 case FW_PORT_TYPE_CR_QSFP: 3767 case FW_PORT_TYPE_SFP28: 3768 case FW_PORT_TYPE_KR_SFP28: 3769 switch (pi->mod_type) { 3770 3771 case FW_PORT_MOD_TYPE_SR: 3772 ifmedia_add(media, m | IFM_25G_SR, 0, NULL); 3773 ifmedia_set(media, m | IFM_25G_SR); 3774 break; 3775 3776 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3777 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3778 ifmedia_add(media, m | IFM_25G_CR, 0, NULL); 3779 ifmedia_set(media, m | IFM_25G_CR); 3780 break; 3781 3782 case FW_PORT_MOD_TYPE_NONE: 3783 m &= ~IFM_FDX; 3784 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3785 ifmedia_set(media, m | IFM_NONE); 3786 break; 3787 3788 default: 3789 device_printf(pi->dev, 3790 "unknown port_type (%d), mod_type (%d)\n", 3791 pi->port_type, pi->mod_type); 3792 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3793 ifmedia_set(media, m | IFM_UNKNOWN); 3794 break; 3795 } 3796 break; 3797 3798 case FW_PORT_TYPE_QSFP: 3799 switch (pi->mod_type) { 3800 3801 case FW_PORT_MOD_TYPE_LR: 3802 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3803 ifmedia_set(media, m | IFM_40G_LR4); 3804 break; 3805 3806 case FW_PORT_MOD_TYPE_SR: 3807 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3808 ifmedia_set(media, m | IFM_40G_SR4); 3809 break; 3810 3811 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3812 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3813 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3814 ifmedia_set(media, m | IFM_40G_CR4); 3815 break; 3816 3817 case FW_PORT_MOD_TYPE_NONE: 3818 m &= ~IFM_FDX; 3819 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3820 ifmedia_set(media, m | IFM_NONE); 3821 break; 3822 3823 default: 3824 device_printf(pi->dev, 3825 "unknown port_type (%d), mod_type 
(%d)\n", 3826 pi->port_type, pi->mod_type); 3827 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3828 ifmedia_set(media, m | IFM_UNKNOWN); 3829 break; 3830 } 3831 break; 3832 3833 case FW_PORT_TYPE_KR4_100G: 3834 case FW_PORT_TYPE_CR4_QSFP: 3835 switch (pi->mod_type) { 3836 3837 case FW_PORT_MOD_TYPE_LR: 3838 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL); 3839 ifmedia_set(media, m | IFM_100G_LR4); 3840 break; 3841 3842 case FW_PORT_MOD_TYPE_SR: 3843 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL); 3844 ifmedia_set(media, m | IFM_100G_SR4); 3845 break; 3846 3847 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3848 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3849 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL); 3850 ifmedia_set(media, m | IFM_100G_CR4); 3851 break; 3852 3853 case FW_PORT_MOD_TYPE_NONE: 3854 m &= ~IFM_FDX; 3855 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3856 ifmedia_set(media, m | IFM_NONE); 3857 break; 3858 3859 default: 3860 device_printf(pi->dev, 3861 "unknown port_type (%d), mod_type (%d)\n", 3862 pi->port_type, pi->mod_type); 3863 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3864 ifmedia_set(media, m | IFM_UNKNOWN); 3865 break; 3866 } 3867 break; 3868 3869 default: 3870 device_printf(pi->dev, 3871 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3872 pi->mod_type); 3873 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3874 ifmedia_set(media, m | IFM_UNKNOWN); 3875 break; 3876 } 3877 } 3878 3879 /* 3880 * Update all the requested_* fields in the link config and then send a mailbox 3881 * command to apply the settings. 3882 */ 3883 static void 3884 init_l1cfg(struct port_info *pi) 3885 { 3886 struct adapter *sc = pi->adapter; 3887 struct link_config *lc = &pi->link_cfg; 3888 int rc; 3889 3890 ASSERT_SYNCHRONIZED_OP(sc); 3891 3892 if (t4_autoneg != 0 && lc->supported & FW_PORT_CAP_ANEG) { 3893 lc->requested_aneg = AUTONEG_ENABLE; 3894 lc->requested_speed = 0; 3895 } else { 3896 lc->requested_aneg = AUTONEG_DISABLE; 3897 lc->requested_speed = port_top_speed(pi); /* in Gbps */ 3898 } 3899 3900 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX); 3901 3902 if (t4_fec != -1) { 3903 lc->requested_fec = t4_fec & (FEC_RS | FEC_BASER_RS | 3904 FEC_RESERVED); 3905 } else { 3906 /* Use the suggested value provided by the firmware in acaps */ 3907 if (lc->advertising & FW_PORT_CAP_FEC_RS) 3908 lc->requested_fec = FEC_RS; 3909 else if (lc->advertising & FW_PORT_CAP_FEC_BASER_RS) 3910 lc->requested_fec = FEC_BASER_RS; 3911 else if (lc->advertising & FW_PORT_CAP_FEC_RESERVED) 3912 lc->requested_fec = FEC_RESERVED; 3913 else 3914 lc->requested_fec = 0; 3915 } 3916 3917 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 3918 if (rc != 0) { 3919 device_printf(pi->dev, "l1cfg failed: %d\n", rc); 3920 } else { 3921 lc->fc = lc->requested_fc; 3922 lc->fec = lc->requested_fec; 3923 } 3924 } 3925 3926 #define FW_MAC_EXACT_CHUNK 7 3927 3928 /* 3929 * Program the port's XGMAC based on parameters in ifnet. The caller also 3930 * indicates which parameters should be programmed (the rest are left alone). 
 */
int
update_mac_settings(struct ifnet *ifp, int flags)
{
	int rc = 0;
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT(flags, ("%s: not told what to update.", __func__));

	if (flags & XGMAC_MTU)
		mtu = ifp->if_mtu;

	if (flags & XGMAC_PROMISC)
		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

	if (flags & XGMAC_ALLMULTI)
		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

	if (flags & XGMAC_VLANEX)
		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
		rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
		    allmulti, 1, vlanex, false);
		if (rc) {
			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
			    rc);
			return (rc);
		}
	}

	if (flags & XGMAC_UCADDR) {
		uint8_t ucaddr[ETHER_ADDR_LEN];

		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
		rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
		    ucaddr, true, true);
		if (rc < 0) {
			rc = -rc;
			if_printf(ifp, "change_mac failed: %d\n", rc);
			return (rc);
		} else {
			vi->xact_addr_filt = rc;
			rc = 0;
		}
	}

	if (flags & XGMAC_MCADDRS) {
		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
		int del = 1;
		uint64_t hash = 0;
		struct ifmultiaddr *ifma;
		int i = 0, j;

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mcaddr[i] =
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
			i++;

			if (i == FW_MAC_EXACT_CHUNK) {
				rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
				    del, i, mcaddr, NULL, &hash, 0);
				if (rc < 0) {
					rc = -rc;
					for (j = 0; j < i; j++) {
						if_printf(ifp,
						    "failed to add mc address"
						    " %02x:%02x:%02x:"
						    "%02x:%02x:%02x rc=%d\n",
						    mcaddr[j][0], mcaddr[j][1],
						    mcaddr[j][2], mcaddr[j][3],
						    mcaddr[j][4], mcaddr[j][5],
						    rc);
					}
					goto mcfail;
				}
				del = 0;
				i = 0;
			}
		}
		if (i > 0) {
			rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
			    mcaddr, NULL, &hash, 0);
			if (rc < 0) {
				rc = -rc;
				for (j = 0; j < i; j++) {
					if_printf(ifp,
					    "failed to add mc address"
					    " %02x:%02x:%02x:"
					    "%02x:%02x:%02x rc=%d\n",
					    mcaddr[j][0], mcaddr[j][1],
					    mcaddr[j][2], mcaddr[j][3],
					    mcaddr[j][4], mcaddr[j][5],
					    rc);
				}
				goto mcfail;
			}
		}

		rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
		if (rc != 0)
			if_printf(ifp, "failed to set mc address hash: %d\n",
			    rc);
mcfail:
		if_maddr_runlock(ifp);
	}

	return (rc);
}

/*
 * {begin|end}_synchronized_op must be called from the same thread.
 */
int
begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
    char *wmesg)
{
	int rc, pri;

#ifdef WITNESS
	/* the caller thinks it's ok to sleep, but is it really?
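	 * (With WITNESS compiled in, the WITNESS_WARN below complains if
	 * the thread holds a non-sleepable lock at this point.)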
	 */
	if (flags & SLEEP_OK)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "begin_synchronized_op");
#endif

	if (flags & INTR_OK)
		pri = PCATCH;
	else
		pri = 0;

	ADAPTER_LOCK(sc);
	for (;;) {

		if (vi && IS_DOOMED(vi)) {
			rc = ENXIO;
			goto done;
		}

		if (!IS_BUSY(sc)) {
			rc = 0;
			break;
		}

		if (!(flags & SLEEP_OK)) {
			rc = EBUSY;
			goto done;
		}

		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
			rc = EINTR;
			goto done;
		}
	}

	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = wmesg;
	sc->last_op_thr = curthread;
	sc->last_op_flags = flags;
#endif

done:
	if (!(flags & HOLD_LOCK) || rc)
		ADAPTER_UNLOCK(sc);

	return (rc);
}

/*
 * Tell if_ioctl and if_init that the VI is going away.  This is a
 * special variant of begin_synchronized_op and must be paired with a
 * call to end_synchronized_op.
 */
void
doom_vi(struct adapter *sc, struct vi_info *vi)
{

	ADAPTER_LOCK(sc);
	SET_DOOMED(vi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
	sc->last_op_flags = 0;
#endif
	ADAPTER_UNLOCK(sc);
}

/*
 * {begin|end}_synchronized_op must be called from the same thread.
 */
void
end_synchronized_op(struct adapter *sc, int flags)
{

	if (flags & LOCK_HELD)
		ADAPTER_LOCK_ASSERT_OWNED(sc);
	else
		ADAPTER_LOCK(sc);

	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);
}

static int
cxgbe_init_synchronized(struct vi_info *vi)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = vi->ifp;
	int rc = 0, i;
	struct sge_txq *txq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return (0);	/* already running */

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(vi->flags & VI_INIT_DONE) &&
	    ((rc = vi_full_init(vi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
	 * if this changes.
	 */

	for_each_txq(vi, i, txq) {
		TXQ_LOCK(txq);
		txq->eq.flags |= EQ_ENABLED;
		TXQ_UNLOCK(txq);
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
		sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
		t4_write_reg(sc, is_t4(sc) ?
A_MPS_TRC_RSS_CONTROL : 4198 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 4199 V_QUEUENUMBER(sc->traceq)); 4200 pi->flags |= HAS_TRACEQ; 4201 } 4202 4203 /* all ok */ 4204 PORT_LOCK(pi); 4205 if (pi->up_vis++ == 0) { 4206 t4_update_port_info(pi); 4207 build_medialist(vi->pi, &vi->media); 4208 init_l1cfg(pi); 4209 } 4210 ifp->if_drv_flags |= IFF_DRV_RUNNING; 4211 4212 if (pi->nvi > 1 || sc->flags & IS_VF) 4213 callout_reset(&vi->tick, hz, vi_tick, vi); 4214 else 4215 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 4216 PORT_UNLOCK(pi); 4217 done: 4218 if (rc != 0) 4219 cxgbe_uninit_synchronized(vi); 4220 4221 return (rc); 4222 } 4223 4224 /* 4225 * Idempotent. 4226 */ 4227 static int 4228 cxgbe_uninit_synchronized(struct vi_info *vi) 4229 { 4230 struct port_info *pi = vi->pi; 4231 struct adapter *sc = pi->adapter; 4232 struct ifnet *ifp = vi->ifp; 4233 int rc, i; 4234 struct sge_txq *txq; 4235 4236 ASSERT_SYNCHRONIZED_OP(sc); 4237 4238 if (!(vi->flags & VI_INIT_DONE)) { 4239 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 4240 ("uninited VI is running")); 4241 return (0); 4242 } 4243 4244 /* 4245 * Disable the VI so that all its data in either direction is discarded 4246 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 4247 * tick) intact as the TP can deliver negative advice or data that it's 4248 * holding in its RAM (for an offloaded connection) even after the VI is 4249 * disabled. 4250 */ 4251 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 4252 if (rc) { 4253 if_printf(ifp, "disable_vi failed: %d\n", rc); 4254 return (rc); 4255 } 4256 4257 for_each_txq(vi, i, txq) { 4258 TXQ_LOCK(txq); 4259 txq->eq.flags &= ~EQ_ENABLED; 4260 TXQ_UNLOCK(txq); 4261 } 4262 4263 PORT_LOCK(pi); 4264 if (pi->nvi > 1 || sc->flags & IS_VF) 4265 callout_stop(&vi->tick); 4266 else 4267 callout_stop(&pi->tick); 4268 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4269 PORT_UNLOCK(pi); 4270 return (0); 4271 } 4272 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4273 pi->up_vis--; 4274 if (pi->up_vis > 0) { 4275 PORT_UNLOCK(pi); 4276 return (0); 4277 } 4278 PORT_UNLOCK(pi); 4279 4280 pi->link_cfg.link_ok = 0; 4281 pi->link_cfg.speed = 0; 4282 pi->link_cfg.link_down_rc = 255; 4283 t4_os_link_changed(pi, NULL); 4284 4285 return (0); 4286 } 4287 4288 /* 4289 * It is ok for this function to fail midway and return right away. t4_detach 4290 * will walk the entire sc->irq list and clean up whatever is valid. 4291 */ 4292 int 4293 t4_setup_intr_handlers(struct adapter *sc) 4294 { 4295 int rc, rid, p, q, v; 4296 char s[8]; 4297 struct irq *irq; 4298 struct port_info *pi; 4299 struct vi_info *vi; 4300 struct sge *sge = &sc->sge; 4301 struct sge_rxq *rxq; 4302 #ifdef TCP_OFFLOAD 4303 struct sge_ofld_rxq *ofld_rxq; 4304 #endif 4305 #ifdef DEV_NETMAP 4306 struct sge_nm_rxq *nm_rxq; 4307 #endif 4308 #ifdef RSS 4309 int nbuckets = rss_getnumbuckets(); 4310 #endif 4311 4312 /* 4313 * Setup interrupts. 4314 */ 4315 irq = &sc->irq[0]; 4316 rid = sc->intr_type == INTR_INTX ? 0 : 1; 4317 if (sc->intr_count == 1) 4318 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 4319 4320 /* Multiple interrupts. 
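	 * The layout mirrors the allocation loop below: on a PF the first
	 * vector is the error interrupt and the second the firmware event
	 * queue (a VF has no error vector); the rest go to the NIC/netmap
	 * rx queues and, with TCP_OFFLOAD, the offload rx queues of each VI.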
*/ 4321 if (sc->flags & IS_VF) 4322 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 4323 ("%s: too few intr.", __func__)); 4324 else 4325 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 4326 ("%s: too few intr.", __func__)); 4327 4328 /* The first one is always error intr on PFs */ 4329 if (!(sc->flags & IS_VF)) { 4330 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 4331 if (rc != 0) 4332 return (rc); 4333 irq++; 4334 rid++; 4335 } 4336 4337 /* The second one is always the firmware event queue (first on VFs) */ 4338 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 4339 if (rc != 0) 4340 return (rc); 4341 irq++; 4342 rid++; 4343 4344 for_each_port(sc, p) { 4345 pi = sc->port[p]; 4346 for_each_vi(pi, v, vi) { 4347 vi->first_intr = rid - 1; 4348 4349 if (vi->nnmrxq > 0) { 4350 int n = max(vi->nrxq, vi->nnmrxq); 4351 4352 MPASS(vi->flags & INTR_RXQ); 4353 4354 rxq = &sge->rxq[vi->first_rxq]; 4355 #ifdef DEV_NETMAP 4356 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 4357 #endif 4358 for (q = 0; q < n; q++) { 4359 snprintf(s, sizeof(s), "%x%c%x", p, 4360 'a' + v, q); 4361 if (q < vi->nrxq) 4362 irq->rxq = rxq++; 4363 #ifdef DEV_NETMAP 4364 if (q < vi->nnmrxq) 4365 irq->nm_rxq = nm_rxq++; 4366 #endif 4367 rc = t4_alloc_irq(sc, irq, rid, 4368 t4_vi_intr, irq, s); 4369 if (rc != 0) 4370 return (rc); 4371 irq++; 4372 rid++; 4373 vi->nintr++; 4374 } 4375 } else if (vi->flags & INTR_RXQ) { 4376 for_each_rxq(vi, q, rxq) { 4377 snprintf(s, sizeof(s), "%x%c%x", p, 4378 'a' + v, q); 4379 rc = t4_alloc_irq(sc, irq, rid, 4380 t4_intr, rxq, s); 4381 if (rc != 0) 4382 return (rc); 4383 #ifdef RSS 4384 bus_bind_intr(sc->dev, irq->res, 4385 rss_getcpu(q % nbuckets)); 4386 #endif 4387 irq++; 4388 rid++; 4389 vi->nintr++; 4390 } 4391 } 4392 #ifdef TCP_OFFLOAD 4393 if (vi->flags & INTR_OFLD_RXQ) { 4394 for_each_ofld_rxq(vi, q, ofld_rxq) { 4395 snprintf(s, sizeof(s), "%x%c%x", p, 4396 'A' + v, q); 4397 rc = t4_alloc_irq(sc, irq, rid, 4398 t4_intr, ofld_rxq, s); 4399 if (rc != 0) 4400 return (rc); 4401 irq++; 4402 rid++; 4403 vi->nintr++; 4404 } 4405 } 4406 #endif 4407 } 4408 } 4409 MPASS(irq == &sc->irq[sc->intr_count]); 4410 4411 return (0); 4412 } 4413 4414 int 4415 adapter_full_init(struct adapter *sc) 4416 { 4417 int rc, i; 4418 #ifdef RSS 4419 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4420 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4421 #endif 4422 4423 ASSERT_SYNCHRONIZED_OP(sc); 4424 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4425 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 4426 ("%s: FULL_INIT_DONE already", __func__)); 4427 4428 /* 4429 * queues that belong to the adapter (not any particular port). 
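	 * (the firmware event queue and the control queues, for instance;
	 * the per-VI rx/tx queues are created later, in vi_full_init).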
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	for (i = 0; i < nitems(sc->tq); i++) {
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}
#ifdef RSS
	MPASS(RSS_KEYSIZE == 40);
	rss_getkey((void *)&raw_rss_key[0]);
	for (i = 0; i < nitems(rss_key); i++) {
		rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
	}
	t4_write_rss_key(sc, &rss_key[0], -1);
#endif

	if (!(sc->flags & IS_VF))
		t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);

	return (rc);
}

int
adapter_full_uninit(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	t4_teardown_adapter_queues(sc);

	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
		taskqueue_free(sc->tq[i]);
		sc->tq[i] = NULL;
	}

	sc->flags &= ~FULL_INIT_DONE;

	return (0);
}

#ifdef RSS
#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
    RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
    RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
    RSS_HASHTYPE_RSS_UDP_IPV6)

/* Translates kernel hash types to hardware. */
static int
hashconfig_to_hashen(int hashconfig)
{
	int hashen = 0;

	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
		    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
	}
	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
	}
	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

	return (hashen);
}

/* Translates hardware hash types to kernel. */
static int
hashen_to_hashconfig(int hashen)
{
	int hashconfig = 0;

	if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
		/*
		 * If UDP hashing was enabled it must have been enabled for
		 * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
		 * enabling any 4-tuple hash is a nonsensical configuration.
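		 * The MPASS below asserts exactly that invariant.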
		 */
		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));

		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
	}
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
		hashconfig |= RSS_HASHTYPE_RSS_IPV6;

	return (hashconfig);
}
#endif

int
vi_full_init(struct vi_info *vi)
{
	struct adapter *sc = vi->pi->adapter;
	struct ifnet *ifp = vi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i, j, hashen;
#ifdef RSS
	int nbuckets = rss_getnumbuckets();
	int hashconfig = rss_gethashconfig();
	int extra;
#endif

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((vi->flags & VI_INIT_DONE) == 0,
	    ("%s: VI_INIT_DONE already", __func__));

	sysctl_ctx_init(&vi->ctx);
	vi->flags |= VI_SYSCTL_CTX;

	/*
	 * Allocate tx/rx/fl queues for this VI.
	 */
	rc = t4_setup_vi_queues(vi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this VI.  Save a copy of the RSS table for later use.
	 */
	if (vi->nrxq > vi->rss_size) {
		if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
		    "some queues will never receive traffic.\n", vi->nrxq,
		    vi->rss_size);
	} else if (vi->rss_size % vi->nrxq) {
		if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
		    "expect uneven traffic distribution.\n", vi->nrxq,
		    vi->rss_size);
	}
#ifdef RSS
	if (vi->nrxq != nbuckets) {
		if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
		    "performance will be impacted.\n", vi->nrxq, nbuckets);
	}
#endif
	rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
	for (i = 0; i < vi->rss_size;) {
#ifdef RSS
		j = rss_get_indirection_to_bucket(i);
		j %= vi->nrxq;
		rxq = &sc->sge.rxq[vi->first_rxq + j];
		rss[i++] = rxq->iq.abs_id;
#else
		for_each_rxq(vi, j, rxq) {
			rss[i++] = rxq->iq.abs_id;
			if (i == vi->rss_size)
				break;
		}
#endif
	}

	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
	    vi->rss_size);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

#ifdef RSS
	hashen = hashconfig_to_hashen(hashconfig);

	/*
	 * We may have had to enable some hashes even though the global config
	 * wants them disabled.  This is a potential problem that must be
	 * reported to the user.
	 */
	extra = hashen_to_hashconfig(hashen) ^ hashconfig;

	/*
	 * If we consider only the supported hash types, then the enabled
	 * hashes are a superset of the requested hashes.  In other words,
	 * there cannot be any supported hash that was requested but not
	 * enabled, but there can be hashes that were not requested but had
	 * to be enabled.
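	 * For example, a request for only RSS_HASHTYPE_RSS_UDP_IPV4 sets
	 * the 4-tuple enable bit that also implies TCP/IPv4 hashing, so
	 * RSS_HASHTYPE_RSS_TCP_IPV4 shows up as an "extra" here.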
4634 */ 4635 extra &= SUPPORTED_RSS_HASHTYPES; 4636 MPASS((extra & hashconfig) == 0); 4637 4638 if (extra) { 4639 if_printf(ifp, 4640 "global RSS config (0x%x) cannot be accommodated.\n", 4641 hashconfig); 4642 } 4643 if (extra & RSS_HASHTYPE_RSS_IPV4) 4644 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4645 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4646 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4647 if (extra & RSS_HASHTYPE_RSS_IPV6) 4648 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4649 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4650 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4651 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4652 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4653 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4654 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4655 #else 4656 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4657 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4658 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4659 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4660 #endif 4661 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0); 4662 if (rc != 0) { 4663 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4664 goto done; 4665 } 4666 4667 vi->rss = rss; 4668 vi->flags |= VI_INIT_DONE; 4669 done: 4670 if (rc != 0) 4671 vi_full_uninit(vi); 4672 4673 return (rc); 4674 } 4675 4676 /* 4677 * Idempotent. 4678 */ 4679 int 4680 vi_full_uninit(struct vi_info *vi) 4681 { 4682 struct port_info *pi = vi->pi; 4683 struct adapter *sc = pi->adapter; 4684 int i; 4685 struct sge_rxq *rxq; 4686 struct sge_txq *txq; 4687 #ifdef TCP_OFFLOAD 4688 struct sge_ofld_rxq *ofld_rxq; 4689 struct sge_wrq *ofld_txq; 4690 #endif 4691 4692 if (vi->flags & VI_INIT_DONE) { 4693 4694 /* Need to quiesce queues. */ 4695 4696 /* XXX: Only for the first VI? */ 4697 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 4698 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4699 4700 for_each_txq(vi, i, txq) { 4701 quiesce_txq(sc, txq); 4702 } 4703 4704 #ifdef TCP_OFFLOAD 4705 for_each_ofld_txq(vi, i, ofld_txq) { 4706 quiesce_wrq(sc, ofld_txq); 4707 } 4708 #endif 4709 4710 for_each_rxq(vi, i, rxq) { 4711 quiesce_iq(sc, &rxq->iq); 4712 quiesce_fl(sc, &rxq->fl); 4713 } 4714 4715 #ifdef TCP_OFFLOAD 4716 for_each_ofld_rxq(vi, i, ofld_rxq) { 4717 quiesce_iq(sc, &ofld_rxq->iq); 4718 quiesce_fl(sc, &ofld_rxq->fl); 4719 } 4720 #endif 4721 free(vi->rss, M_CXGBE); 4722 free(vi->nm_rss, M_CXGBE); 4723 } 4724 4725 t4_teardown_vi_queues(vi); 4726 vi->flags &= ~VI_INIT_DONE; 4727 4728 return (0); 4729 } 4730 4731 static void 4732 quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4733 { 4734 struct sge_eq *eq = &txq->eq; 4735 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4736 4737 (void) sc; /* unused */ 4738 4739 #ifdef INVARIANTS 4740 TXQ_LOCK(txq); 4741 MPASS((eq->flags & EQ_ENABLED) == 0); 4742 TXQ_UNLOCK(txq); 4743 #endif 4744 4745 /* Wait for the mp_ring to empty. */ 4746 while (!mp_ring_is_idle(txq->r)) { 4747 mp_ring_check_drainage(txq->r, 0); 4748 pause("rquiesce", 1); 4749 } 4750 4751 /* Then wait for the hardware to finish. */ 4752 while (spg->cidx != htobe16(eq->pidx)) 4753 pause("equiesce", 1); 4754 4755 /* Finally, wait for the driver to reclaim all descriptors. 
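	 * (eq->cidx catches up with eq->pidx as completions are processed,
	 * so equality means every descriptor has been reclaimed.)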
*/ 4756 while (eq->cidx != eq->pidx) 4757 pause("dquiesce", 1); 4758 } 4759 4760 static void 4761 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4762 { 4763 4764 /* XXXTX */ 4765 } 4766 4767 static void 4768 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4769 { 4770 (void) sc; /* unused */ 4771 4772 /* Synchronize with the interrupt handler */ 4773 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4774 pause("iqfree", 1); 4775 } 4776 4777 static void 4778 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4779 { 4780 mtx_lock(&sc->sfl_lock); 4781 FL_LOCK(fl); 4782 fl->flags |= FL_DOOMED; 4783 FL_UNLOCK(fl); 4784 callout_stop(&sc->sfl_callout); 4785 mtx_unlock(&sc->sfl_lock); 4786 4787 KASSERT((fl->flags & FL_STARVING) == 0, 4788 ("%s: still starving", __func__)); 4789 } 4790 4791 static int 4792 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4793 driver_intr_t *handler, void *arg, char *name) 4794 { 4795 int rc; 4796 4797 irq->rid = rid; 4798 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4799 RF_SHAREABLE | RF_ACTIVE); 4800 if (irq->res == NULL) { 4801 device_printf(sc->dev, 4802 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4803 return (ENOMEM); 4804 } 4805 4806 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4807 NULL, handler, arg, &irq->tag); 4808 if (rc != 0) { 4809 device_printf(sc->dev, 4810 "failed to setup interrupt for rid %d, name %s: %d\n", 4811 rid, name, rc); 4812 } else if (name) 4813 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); 4814 4815 return (rc); 4816 } 4817 4818 static int 4819 t4_free_irq(struct adapter *sc, struct irq *irq) 4820 { 4821 if (irq->tag) 4822 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4823 if (irq->res) 4824 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4825 4826 bzero(irq, sizeof(*irq)); 4827 4828 return (0); 4829 } 4830 4831 static void 4832 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4833 { 4834 4835 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4836 t4_get_regs(sc, buf, regs->len); 4837 } 4838 4839 #define A_PL_INDIR_CMD 0x1f8 4840 4841 #define S_PL_AUTOINC 31 4842 #define M_PL_AUTOINC 0x1U 4843 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4844 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4845 4846 #define S_PL_VFID 20 4847 #define M_PL_VFID 0xffU 4848 #define V_PL_VFID(x) ((x) << S_PL_VFID) 4849 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4850 4851 #define S_PL_ADDR 0 4852 #define M_PL_ADDR 0xfffffU 4853 #define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4854 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4855 4856 #define A_PL_INDIR_DATA 0x1fc 4857 4858 static uint64_t 4859 read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4860 { 4861 u32 stats[2]; 4862 4863 mtx_assert(&sc->reg_lock, MA_OWNED); 4864 if (sc->flags & IS_VF) { 4865 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 4866 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 4867 } else { 4868 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4869 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4870 V_PL_ADDR(VF_MPS_REG(reg))); 4871 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4872 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4873 } 4874 return (((uint64_t)stats[1]) << 32 | stats[0]); 4875 } 4876 4877 static void 4878 t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4879 struct fw_vi_stats_vf *stats) 4880 { 4881 4882 #define GET_STAT(name) \ 4883 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4884 4885 
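	/*
	 * Each statistic is a 64-bit counter split across two 32-bit
	 * registers; GET_STAT(FOO) reads A_MPS_VF_STAT_FOO_L and the
	 * adjacent high word via read_vf_stat() above.
	 */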
stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4886 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4887 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4888 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4889 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4890 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4891 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4892 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4893 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4894 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4895 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4896 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4897 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4898 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4899 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4900 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4901 4902 #undef GET_STAT 4903 } 4904 4905 static void 4906 t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4907 { 4908 int reg; 4909 4910 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4911 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4912 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4913 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4914 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4915 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4916 } 4917 4918 static void 4919 vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4920 { 4921 struct timeval tv; 4922 const struct timeval interval = {0, 250000}; /* 250ms */ 4923 4924 if (!(vi->flags & VI_INIT_DONE)) 4925 return; 4926 4927 getmicrotime(&tv); 4928 timevalsub(&tv, &interval); 4929 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4930 return; 4931 4932 mtx_lock(&sc->reg_lock); 4933 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4934 getmicrotime(&vi->last_refreshed); 4935 mtx_unlock(&sc->reg_lock); 4936 } 4937 4938 static void 4939 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4940 { 4941 int i; 4942 u_int v, tnl_cong_drops; 4943 struct timeval tv; 4944 const struct timeval interval = {0, 250000}; /* 250ms */ 4945 4946 getmicrotime(&tv); 4947 timevalsub(&tv, &interval); 4948 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4949 return; 4950 4951 tnl_cong_drops = 0; 4952 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4953 for (i = 0; i < sc->chip_params->nchan; i++) { 4954 if (pi->rx_chan_map & (1 << i)) { 4955 mtx_lock(&sc->reg_lock); 4956 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4957 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4958 mtx_unlock(&sc->reg_lock); 4959 tnl_cong_drops += v; 4960 } 4961 } 4962 pi->tnl_cong_drops = tnl_cong_drops; 4963 getmicrotime(&pi->last_refreshed); 4964 } 4965 4966 static void 4967 cxgbe_tick(void *arg) 4968 { 4969 struct port_info *pi = arg; 4970 struct adapter *sc = pi->adapter; 4971 4972 PORT_LOCK_ASSERT_OWNED(pi); 4973 cxgbe_refresh_stats(sc, pi); 4974 4975 callout_schedule(&pi->tick, hz); 4976 } 4977 4978 void 4979 vi_tick(void *arg) 4980 { 4981 struct vi_info *vi = arg; 4982 struct adapter *sc = vi->pi->adapter; 4983 4984 vi_refresh_stats(sc, vi); 4985 4986 callout_schedule(&vi->tick, hz); 4987 } 4988 4989 static void 4990 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4991 { 4992 struct ifnet *vlan; 4993 4994 if (arg != ifp || ifp->if_type != IFT_ETHER) 4995 return; 4996 4997 vlan = VLAN_DEVAT(ifp, vid); 4998 VLAN_SETCOOKIE(vlan, ifp); 4999 } 5000 5001 /* 5002 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 5003 */ 5004 static 
char *caps_decoder[] = { 5005 "\20\001IPMI\002NCSI", /* 0: NBM */ 5006 "\20\001PPP\002QFC\003DCBX", /* 1: link */ 5007 "\20\001INGRESS\002EGRESS", /* 2: switch */ 5008 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 5009 "\006HASHFILTER\007ETHOFLD", 5010 "\20\001TOE", /* 4: TOE */ 5011 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 5012 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 5013 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 5014 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 5015 "\007T10DIF" 5016 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 5017 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ 5018 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 5019 "\004PO_INITIATOR\005PO_TARGET", 5020 }; 5021 5022 void 5023 t4_sysctls(struct adapter *sc) 5024 { 5025 struct sysctl_ctx_list *ctx; 5026 struct sysctl_oid *oid; 5027 struct sysctl_oid_list *children, *c0; 5028 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 5029 5030 ctx = device_get_sysctl_ctx(sc->dev); 5031 5032 /* 5033 * dev.t4nex.X. 5034 */ 5035 oid = device_get_sysctl_tree(sc->dev); 5036 c0 = children = SYSCTL_CHILDREN(oid); 5037 5038 sc->sc_do_rxcopy = 1; 5039 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 5040 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 5041 5042 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 5043 sc->params.nports, "# of ports"); 5044 5045 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 5046 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 5047 sysctl_bitfield, "A", "available doorbells"); 5048 5049 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 5050 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 5051 5052 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 5053 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 5054 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 5055 "interrupt holdoff timer values (us)"); 5056 5057 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 5058 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 5059 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 5060 "interrupt holdoff packet counter values"); 5061 5062 t4_sge_sysctls(sc, ctx, children); 5063 5064 sc->lro_timeout = 100; 5065 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 5066 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 5067 5068 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 5069 &sc->debug_flags, 0, "flags to enable runtime debugging"); 5070 5071 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 5072 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 5073 5074 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 5075 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 5076 5077 if (sc->flags & IS_VF) 5078 return; 5079 5080 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 5081 NULL, chip_rev(sc), "chip hardware revision"); 5082 5083 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 5084 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 5085 5086 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 5087 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 5088 5089 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 5090 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 5091 5092 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 5093 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); 5094 5095 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 5096 sc->er_version, 0, 
"expansion ROM version"); 5097 5098 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, 5099 sc->bs_version, 0, "bootstrap firmware version"); 5100 5101 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 5102 NULL, sc->params.scfg_vers, "serial config version"); 5103 5104 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 5105 NULL, sc->params.vpd_vers, "VPD version"); 5106 5107 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 5108 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 5109 5110 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 5111 sc->cfcsum, "config file checksum"); 5112 5113 #define SYSCTL_CAP(name, n, text) \ 5114 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 5115 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 5116 sysctl_bitfield, "A", "available " text " capabilities") 5117 5118 SYSCTL_CAP(nbmcaps, 0, "NBM"); 5119 SYSCTL_CAP(linkcaps, 1, "link"); 5120 SYSCTL_CAP(switchcaps, 2, "switch"); 5121 SYSCTL_CAP(niccaps, 3, "NIC"); 5122 SYSCTL_CAP(toecaps, 4, "TCP offload"); 5123 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 5124 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 5125 SYSCTL_CAP(cryptocaps, 7, "crypto"); 5126 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 5127 #undef SYSCTL_CAP 5128 5129 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 5130 NULL, sc->tids.nftids, "number of filters"); 5131 5132 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 5133 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 5134 "chip temperature (in Celsius)"); 5135 5136 #ifdef SBUF_DRAIN 5137 /* 5138 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 5139 */ 5140 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 5141 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 5142 "logs and miscellaneous information"); 5143 children = SYSCTL_CHILDREN(oid); 5144 5145 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 5146 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5147 sysctl_cctrl, "A", "congestion control"); 5148 5149 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 5150 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5151 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 5152 5153 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 5154 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 5155 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 5156 5157 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 5158 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 5159 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 5160 5161 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 5162 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 5163 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 5164 5165 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 5166 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 5167 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 5168 5169 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 5170 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 5171 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 5172 5173 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 5174 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5175 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 5176 "A", "CIM logic analyzer"); 5177 5178 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 5179 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5180 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 5181 5182 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 5183 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 5184 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 5185 5186 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 5187 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 5188 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 5189 5190 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 5191 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 5192 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 5193 5194 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 5195 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 5196 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 5197 5198 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 5199 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 5200 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 5201 5202 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 5203 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 5204 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 5205 5206 if (chip_id(sc) > CHELSIO_T4) { 5207 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 5208 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 5209 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 5210 5211 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 5212 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 5213 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 5214 } 5215 5216 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 5217 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5218 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 5219 5220 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 5221 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5222 sysctl_cim_qcfg, "A", "CIM queue configuration"); 5223 5224 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 5225 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5226 sysctl_cpl_stats, "A", "CPL statistics"); 5227 5228 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 5229 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5230 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 5231 5232 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 5233 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5234 sysctl_devlog, "A", "firmware's device log"); 5235 5236 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 5237 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5238 sysctl_fcoe_stats, "A", "FCoE statistics"); 5239 5240 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 5241 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5242 sysctl_hw_sched, "A", "hardware scheduler "); 5243 5244 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 5245 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5246 sysctl_l2t, "A", "hardware L2 table"); 5247 5248 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 5249 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5250 sysctl_lb_stats, "A", "loopback statistics"); 5251 5252 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 5253 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5254 sysctl_meminfo, "A", "memory regions"); 5255 5256 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 5257 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5258 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 5259 "A", "MPS TCAM entries"); 5260 5261 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 5262 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5263 sysctl_path_mtus, "A", "path MTUs"); 5264 5265 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 5266 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5267 sysctl_pm_stats, "A", "PM statistics"); 5268 5269 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 5270 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5271 sysctl_rdma_stats, "A", "RDMA statistics"); 5272 5273 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 5274 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5275 sysctl_tcp_stats, "A", "TCP statistics"); 5276 5277 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 5278 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5279 sysctl_tids, "A", "TID information"); 5280 5281 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 5282 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5283 sysctl_tp_err_stats, "A", "TP error statistics"); 5284 5285 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 5286 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 5287 "TP logic analyzer event capture mask"); 5288 5289 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 5290 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5291 sysctl_tp_la, "A", "TP logic analyzer"); 5292 5293 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 5294 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5295 sysctl_tx_rate, "A", "Tx rate"); 5296 5297 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 5298 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5299 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 5300 5301 if (chip_id(sc) >= CHELSIO_T5) { 5302 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 5303 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5304 sysctl_wcwr_stats, "A", "write combined work requests"); 5305 } 5306 #endif 5307 5308 #ifdef TCP_OFFLOAD 5309 if (is_offload(sc)) { 5310 int i; 5311 char s[4]; 5312 5313 /* 5314 * dev.t4nex.X.toe. 
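		 * (e.g. dev.t4nex.0.toe.sndbuf; these nodes exist only when
		 * the adapter is TOE capable.)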
5315 */ 5316 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 5317 NULL, "TOE parameters"); 5318 children = SYSCTL_CHILDREN(oid); 5319 5320 sc->tt.sndbuf = 256 * 1024; 5321 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 5322 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 5323 5324 sc->tt.ddp = 0; 5325 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 5326 &sc->tt.ddp, 0, "DDP allowed"); 5327 5328 sc->tt.rx_coalesce = 1; 5329 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 5330 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 5331 5332 sc->tt.tx_align = 1; 5333 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 5334 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 5335 5336 sc->tt.tx_zcopy = 0; 5337 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", 5338 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, 5339 "Enable zero-copy aio_write(2)"); 5340 5341 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 5342 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 5343 "TP timer tick (us)"); 5344 5345 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 5346 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 5347 "TCP timestamp tick (us)"); 5348 5349 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 5350 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 5351 "DACK tick (us)"); 5352 5353 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 5354 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 5355 "IU", "DACK timer (us)"); 5356 5357 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 5358 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 5359 sysctl_tp_timer, "LU", "Minimum retransmit interval (us)"); 5360 5361 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 5362 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 5363 sysctl_tp_timer, "LU", "Maximum retransmit interval (us)"); 5364 5365 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 5366 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 5367 sysctl_tp_timer, "LU", "Persist timer min (us)"); 5368 5369 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 5370 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 5371 sysctl_tp_timer, "LU", "Persist timer max (us)"); 5372 5373 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 5374 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 5375 sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); 5376 5377 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval", 5378 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 5379 sysctl_tp_timer, "LU", "Keepalive interval timer (us)"); 5380 5381 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 5382 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 5383 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 5384 5385 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 5386 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 5387 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 5388 5389 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count", 5390 CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX, 5391 sysctl_tp_shift_cnt, "IU", 5392 "Number of SYN retransmissions before abort"); 5393 5394 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count", 5395 CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2, 5396 sysctl_tp_shift_cnt, "IU", 5397 "Number of retransmissions before abort"); 5398 5399 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count", 5400 CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2, 5401 sysctl_tp_shift_cnt, "IU", 5402 "Number of keepalive probes 
before abort"); 5403 5404 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff", 5405 CTLFLAG_RD, NULL, "TOE retransmit backoffs"); 5406 children = SYSCTL_CHILDREN(oid); 5407 for (i = 0; i < 16; i++) { 5408 snprintf(s, sizeof(s), "%u", i); 5409 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s, 5410 CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff, 5411 "IU", "TOE retransmit backoff"); 5412 } 5413 } 5414 #endif 5415 } 5416 5417 void 5418 vi_sysctls(struct vi_info *vi) 5419 { 5420 struct sysctl_ctx_list *ctx; 5421 struct sysctl_oid *oid; 5422 struct sysctl_oid_list *children; 5423 5424 ctx = device_get_sysctl_ctx(vi->dev); 5425 5426 /* 5427 * dev.v?(cxgbe|cxl).X. 5428 */ 5429 oid = device_get_sysctl_tree(vi->dev); 5430 children = SYSCTL_CHILDREN(oid); 5431 5432 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 5433 vi->viid, "VI identifer"); 5434 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 5435 &vi->nrxq, 0, "# of rx queues"); 5436 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 5437 &vi->ntxq, 0, "# of tx queues"); 5438 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 5439 &vi->first_rxq, 0, "index of first rx queue"); 5440 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 5441 &vi->first_txq, 0, "index of first tx queue"); 5442 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, 5443 vi->rss_size, "size of RSS indirection table"); 5444 5445 if (IS_MAIN_VI(vi)) { 5446 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 5447 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 5448 "Reserve queue 0 for non-flowid packets"); 5449 } 5450 5451 #ifdef TCP_OFFLOAD 5452 if (vi->nofldrxq != 0) { 5453 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 5454 &vi->nofldrxq, 0, 5455 "# of rx queues for offloaded TCP connections"); 5456 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 5457 &vi->nofldtxq, 0, 5458 "# of tx queues for offloaded TCP connections"); 5459 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 5460 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 5461 "index of first TOE rx queue"); 5462 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 5463 CTLFLAG_RD, &vi->first_ofld_txq, 0, 5464 "index of first TOE tx queue"); 5465 } 5466 #endif 5467 #ifdef DEV_NETMAP 5468 if (vi->nnmrxq != 0) { 5469 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 5470 &vi->nnmrxq, 0, "# of netmap rx queues"); 5471 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 5472 &vi->nnmtxq, 0, "# of netmap tx queues"); 5473 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 5474 CTLFLAG_RD, &vi->first_nm_rxq, 0, 5475 "index of first netmap rx queue"); 5476 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 5477 CTLFLAG_RD, &vi->first_nm_txq, 0, 5478 "index of first netmap tx queue"); 5479 } 5480 #endif 5481 5482 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 5483 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 5484 "holdoff timer index"); 5485 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 5486 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 5487 "holdoff packet counter index"); 5488 5489 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 5490 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 5491 "rx queue size"); 5492 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 5493 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 5494 "tx queue size"); 5495 } 5496 5497 static void 
5498 cxgbe_sysctls(struct port_info *pi) 5499 { 5500 struct sysctl_ctx_list *ctx; 5501 struct sysctl_oid *oid; 5502 struct sysctl_oid_list *children, *children2; 5503 struct adapter *sc = pi->adapter; 5504 int i; 5505 char name[16]; 5506 5507 ctx = device_get_sysctl_ctx(pi->dev); 5508 5509 /* 5510 * dev.cxgbe.X. 5511 */ 5512 oid = device_get_sysctl_tree(pi->dev); 5513 children = SYSCTL_CHILDREN(oid); 5514 5515 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 5516 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 5517 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 5518 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 5519 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 5520 "PHY temperature (in Celsius)"); 5521 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 5522 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 5523 "PHY firmware version"); 5524 } 5525 5526 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 5527 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A", 5528 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 5529 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", 5530 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A", 5531 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); 5532 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", 5533 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I", 5534 "autonegotiation (-1 = not supported)"); 5535 5536 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 5537 port_top_speed(pi), "max speed (in Gbps)"); 5538 5539 if (sc->flags & IS_VF) 5540 return; 5541 5542 /* 5543 * dev.(cxgbe|cxl).X.tc. 5544 */ 5545 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 5546 "Tx scheduler traffic classes (cl_rl)"); 5547 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 5548 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; 5549 5550 snprintf(name, sizeof(name), "%d", i); 5551 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 5552 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 5553 "traffic class")); 5554 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD, 5555 &tc->flags, 0, "flags"); 5556 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 5557 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 5558 #ifdef SBUF_DRAIN 5559 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 5560 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 5561 sysctl_tc_params, "A", "traffic class parameters"); 5562 #endif 5563 } 5564 5565 /* 5566 * dev.cxgbe.X.stats. 
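	 * Per-port MPS hardware statistics, e.g. dev.cxgbe.0.stats.tx_frames;
	 * each node reads the corresponding 64-bit MPS register pair.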
5567 */ 5568 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 5569 NULL, "port statistics"); 5570 children = SYSCTL_CHILDREN(oid); 5571 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 5572 &pi->tx_parse_error, 0, 5573 "# of tx packets with invalid length or # of segments"); 5574 5575 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 5576 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 5577 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 5578 sysctl_handle_t4_reg64, "QU", desc) 5579 5580 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 5581 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 5582 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 5583 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 5584 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 5585 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 5586 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 5587 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 5588 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 5589 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 5590 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 5591 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 5592 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 5593 "# of tx frames in this range", 5594 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 5595 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 5596 "# of tx frames in this range", 5597 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 5598 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 5599 "# of tx frames in this range", 5600 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5601 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5602 "# of tx frames in this range", 5603 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5604 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5605 "# of tx frames in this range", 5606 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5607 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5608 "# of tx frames in this range", 5609 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5610 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 5611 "# of tx frames in this range", 5612 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5613 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5614 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5615 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5616 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5617 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5618 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5619 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5620 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5621 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5622 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5623 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5624 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5625 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5626 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5627 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5628 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5629 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames 
transmitted", 5630 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5631 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5632 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5633 5634 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5635 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5636 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5637 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5638 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5639 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5640 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5641 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5642 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5643 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5644 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5645 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5646 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5647 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5648 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5649 "# of frames received with bad FCS", 5650 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5651 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5652 "# of frames received with length error", 5653 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5654 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5655 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5656 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5657 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5658 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5659 "# of rx frames in this range", 5660 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5661 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5662 "# of rx frames in this range", 5663 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5664 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5665 "# of rx frames in this range", 5666 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5667 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5668 "# of rx frames in this range", 5669 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5670 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5671 "# of rx frames in this range", 5672 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5673 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5674 "# of rx frames in this range", 5675 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5676 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5677 "# of rx frames in this range", 5678 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5679 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5680 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5681 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5682 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5683 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5684 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5685 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5686 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5687 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5688 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5689 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5690 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5691 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5692 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5693 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5694 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5695 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5696 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5697 5698 #undef SYSCTL_ADD_T4_REG64 5699 5700 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5701 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5702 &pi->stats.name, desc) 5703 5704 /* We get these from port_stats and they may be stale by up to 1s */ 5705 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5706 "# drops due to buffer-group 0 overflows"); 5707 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5708 "# drops due to buffer-group 1 overflows"); 5709 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5710 "# drops due to buffer-group 2 overflows"); 5711 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5712 "# drops due to buffer-group 3 overflows"); 5713 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5714 "# of buffer-group 0 truncated packets"); 5715 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5716 "# of buffer-group 1 truncated packets"); 5717 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5718 "# of buffer-group 2 truncated packets"); 5719 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5720 "# of buffer-group 3 truncated packets"); 5721 5722 #undef SYSCTL_ADD_T4_PORTSTAT 5723 } 5724 5725 static int 5726 sysctl_int_array(SYSCTL_HANDLER_ARGS) 5727 { 5728 int rc, *i, space = 0; 5729 struct sbuf sb; 5730 5731 sbuf_new_for_sysctl(&sb, NULL, 64, req); 5732 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5733 if (space) 5734 sbuf_printf(&sb, " "); 5735 sbuf_printf(&sb, "%d", *i); 5736 space = 1; 5737 } 5738 rc = sbuf_finish(&sb); 5739 sbuf_delete(&sb); 5740 return (rc); 5741 } 5742 5743 static int 5744 sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5745 { 5746 int rc; 5747 struct sbuf *sb; 5748 5749 rc = sysctl_wire_old_buffer(req, 0); 5750 if (rc != 0) 5751 return(rc); 5752 5753 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5754 if (sb == NULL) 5755 return (ENOMEM); 5756 5757 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5758 rc = sbuf_finish(sb); 5759 sbuf_delete(sb); 5760 5761 return (rc); 5762 } 5763 5764 static int 5765 sysctl_btphy(SYSCTL_HANDLER_ARGS) 5766 { 5767 struct port_info *pi = arg1; 5768 int op = arg2; 5769 struct adapter *sc = pi->adapter; 5770 u_int v; 5771 int rc; 5772 5773 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5774 if (rc) 5775 return (rc); 5776 /* XXX: magic numbers */ 5777 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
0x20 : 0xc820, 5778 &v); 5779 end_synchronized_op(sc, 0); 5780 if (rc) 5781 return (rc); 5782 if (op == 0) 5783 v /= 256; 5784 5785 rc = sysctl_handle_int(oidp, &v, 0, req); 5786 return (rc); 5787 } 5788 5789 static int 5790 sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5791 { 5792 struct vi_info *vi = arg1; 5793 int rc, val; 5794 5795 val = vi->rsrv_noflowq; 5796 rc = sysctl_handle_int(oidp, &val, 0, req); 5797 if (rc != 0 || req->newptr == NULL) 5798 return (rc); 5799 5800 if ((val >= 1) && (vi->ntxq > 1)) 5801 vi->rsrv_noflowq = 1; 5802 else 5803 vi->rsrv_noflowq = 0; 5804 5805 return (rc); 5806 } 5807 5808 static int 5809 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5810 { 5811 struct vi_info *vi = arg1; 5812 struct adapter *sc = vi->pi->adapter; 5813 int idx, rc, i; 5814 struct sge_rxq *rxq; 5815 #ifdef TCP_OFFLOAD 5816 struct sge_ofld_rxq *ofld_rxq; 5817 #endif 5818 uint8_t v; 5819 5820 idx = vi->tmr_idx; 5821 5822 rc = sysctl_handle_int(oidp, &idx, 0, req); 5823 if (rc != 0 || req->newptr == NULL) 5824 return (rc); 5825 5826 if (idx < 0 || idx >= SGE_NTIMERS) 5827 return (EINVAL); 5828 5829 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5830 "t4tmr"); 5831 if (rc) 5832 return (rc); 5833 5834 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5835 for_each_rxq(vi, i, rxq) { 5836 #ifdef atomic_store_rel_8 5837 atomic_store_rel_8(&rxq->iq.intr_params, v); 5838 #else 5839 rxq->iq.intr_params = v; 5840 #endif 5841 } 5842 #ifdef TCP_OFFLOAD 5843 for_each_ofld_rxq(vi, i, ofld_rxq) { 5844 #ifdef atomic_store_rel_8 5845 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5846 #else 5847 ofld_rxq->iq.intr_params = v; 5848 #endif 5849 } 5850 #endif 5851 vi->tmr_idx = idx; 5852 5853 end_synchronized_op(sc, LOCK_HELD); 5854 return (0); 5855 } 5856 5857 static int 5858 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5859 { 5860 struct vi_info *vi = arg1; 5861 struct adapter *sc = vi->pi->adapter; 5862 int idx, rc; 5863 5864 idx = vi->pktc_idx; 5865 5866 rc = sysctl_handle_int(oidp, &idx, 0, req); 5867 if (rc != 0 || req->newptr == NULL) 5868 return (rc); 5869 5870 if (idx < -1 || idx >= SGE_NCOUNTERS) 5871 return (EINVAL); 5872 5873 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5874 "t4pktc"); 5875 if (rc) 5876 return (rc); 5877 5878 if (vi->flags & VI_INIT_DONE) 5879 rc = EBUSY; /* cannot be changed once the queues are created */ 5880 else 5881 vi->pktc_idx = idx; 5882 5883 end_synchronized_op(sc, LOCK_HELD); 5884 return (rc); 5885 } 5886 5887 static int 5888 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5889 { 5890 struct vi_info *vi = arg1; 5891 struct adapter *sc = vi->pi->adapter; 5892 int qsize, rc; 5893 5894 qsize = vi->qsize_rxq; 5895 5896 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5897 if (rc != 0 || req->newptr == NULL) 5898 return (rc); 5899 5900 if (qsize < 128 || (qsize & 7)) 5901 return (EINVAL); 5902 5903 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5904 "t4rxqs"); 5905 if (rc) 5906 return (rc); 5907 5908 if (vi->flags & VI_INIT_DONE) 5909 rc = EBUSY; /* cannot be changed once the queues are created */ 5910 else 5911 vi->qsize_rxq = qsize; 5912 5913 end_synchronized_op(sc, LOCK_HELD); 5914 return (rc); 5915 } 5916 5917 static int 5918 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5919 { 5920 struct vi_info *vi = arg1; 5921 struct adapter *sc = vi->pi->adapter; 5922 int qsize, rc; 5923 5924 qsize = vi->qsize_txq; 5925 5926 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5927 if (rc != 0 || req->newptr == NULL) 5928 return (rc); 5929 5930 
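/* A tx queue must hold between 128 and 65536 descriptors. */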
if (qsize < 128 || qsize > 65536) 5931 return (EINVAL); 5932 5933 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5934 "t4txqs"); 5935 if (rc) 5936 return (rc); 5937 5938 if (vi->flags & VI_INIT_DONE) 5939 rc = EBUSY; /* cannot be changed once the queues are created */ 5940 else 5941 vi->qsize_txq = qsize; 5942 5943 end_synchronized_op(sc, LOCK_HELD); 5944 return (rc); 5945 } 5946 5947 static int 5948 sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5949 { 5950 struct port_info *pi = arg1; 5951 struct adapter *sc = pi->adapter; 5952 struct link_config *lc = &pi->link_cfg; 5953 int rc; 5954 5955 if (req->newptr == NULL) { 5956 struct sbuf *sb; 5957 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5958 5959 rc = sysctl_wire_old_buffer(req, 0); 5960 if (rc != 0) 5961 return(rc); 5962 5963 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5964 if (sb == NULL) 5965 return (ENOMEM); 5966 5967 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5968 rc = sbuf_finish(sb); 5969 sbuf_delete(sb); 5970 } else { 5971 char s[2]; 5972 int n; 5973 5974 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5975 s[1] = 0; 5976 5977 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5978 if (rc != 0) 5979 return(rc); 5980 5981 if (s[1] != 0) 5982 return (EINVAL); 5983 if (s[0] < '0' || s[0] > '9') 5984 return (EINVAL); /* not a number */ 5985 n = s[0] - '0'; 5986 if (n & ~(PAUSE_TX | PAUSE_RX)) 5987 return (EINVAL); /* some other bit is set too */ 5988 5989 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5990 "t4PAUSE"); 5991 if (rc) 5992 return (rc); 5993 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5994 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5995 lc->requested_fc |= n; 5996 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5997 if (rc == 0) { 5998 lc->fc = lc->requested_fc; 5999 } 6000 } 6001 end_synchronized_op(sc, 0); 6002 } 6003 6004 return (rc); 6005 } 6006 6007 static int 6008 sysctl_fec(SYSCTL_HANDLER_ARGS) 6009 { 6010 struct port_info *pi = arg1; 6011 struct adapter *sc = pi->adapter; 6012 struct link_config *lc = &pi->link_cfg; 6013 int rc; 6014 6015 if (req->newptr == NULL) { 6016 struct sbuf *sb; 6017 static char *bits = "\20\1RS\2BASER_RS\3RESERVED"; 6018 6019 rc = sysctl_wire_old_buffer(req, 0); 6020 if (rc != 0) 6021 return(rc); 6022 6023 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6024 if (sb == NULL) 6025 return (ENOMEM); 6026 6027 sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits); 6028 rc = sbuf_finish(sb); 6029 sbuf_delete(sb); 6030 } else { 6031 char s[2]; 6032 int n; 6033 6034 s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC); 6035 s[1] = 0; 6036 6037 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 6038 if (rc != 0) 6039 return(rc); 6040 6041 if (s[1] != 0) 6042 return (EINVAL); 6043 if (s[0] < '0' || s[0] > '9') 6044 return (EINVAL); /* not a number */ 6045 n = s[0] - '0'; 6046 if (n & ~M_FW_PORT_CAP_FEC) 6047 return (EINVAL); /* some other bit is set too */ 6048 6049 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 6050 "t4fec"); 6051 if (rc) 6052 return (rc); 6053 if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) { 6054 lc->requested_fec = n & 6055 G_FW_PORT_CAP_FEC(lc->supported); 6056 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 6057 if (rc == 0) { 6058 lc->fec = lc->requested_fec; 6059 } 6060 } 6061 end_synchronized_op(sc, 0); 6062 } 6063 6064 return (rc); 6065 } 6066 6067 static int 6068 sysctl_autoneg(SYSCTL_HANDLER_ARGS) 6069 { 6070 struct port_info *pi = arg1; 6071 struct adapter *sc = 
pi->adapter; 6072 struct link_config *lc = &pi->link_cfg; 6073 int rc, val, old; 6074 6075 if (lc->supported & FW_PORT_CAP_ANEG) 6076 val = lc->requested_aneg == AUTONEG_ENABLE ? 1 : 0; 6077 else 6078 val = -1; 6079 rc = sysctl_handle_int(oidp, &val, 0, req); 6080 if (rc != 0 || req->newptr == NULL) 6081 return (rc); 6082 if ((lc->supported & FW_PORT_CAP_ANEG) == 0) 6083 return (ENOTSUP); 6084 6085 if (val == 0) 6086 val = AUTONEG_DISABLE; 6087 else if (val == 1) 6088 val = AUTONEG_ENABLE; 6089 else 6090 return (EINVAL); 6091 if (lc->requested_aneg == val) 6092 return (0); /* no change */ 6093 6094 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 6095 "t4aneg"); 6096 if (rc) 6097 return (rc); 6098 old = lc->requested_aneg; 6099 lc->requested_aneg = val; 6100 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 6101 if (rc != 0) 6102 lc->requested_aneg = old; 6103 end_synchronized_op(sc, 0); 6104 return (rc); 6105 } 6106 6107 static int 6108 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 6109 { 6110 struct adapter *sc = arg1; 6111 int reg = arg2; 6112 uint64_t val; 6113 6114 val = t4_read_reg64(sc, reg); 6115 6116 return (sysctl_handle_64(oidp, &val, 0, req)); 6117 } 6118 6119 static int 6120 sysctl_temperature(SYSCTL_HANDLER_ARGS) 6121 { 6122 struct adapter *sc = arg1; 6123 int rc, t; 6124 uint32_t param, val; 6125 6126 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 6127 if (rc) 6128 return (rc); 6129 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 6130 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 6131 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 6132 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 6133 end_synchronized_op(sc, 0); 6134 if (rc) 6135 return (rc); 6136 6137 /* unknown is returned as 0 but we display -1 in that case */ 6138 t = val == 0 ?
-1 : val; 6139 6140 rc = sysctl_handle_int(oidp, &t, 0, req); 6141 return (rc); 6142 } 6143 6144 #ifdef SBUF_DRAIN 6145 static int 6146 sysctl_cctrl(SYSCTL_HANDLER_ARGS) 6147 { 6148 struct adapter *sc = arg1; 6149 struct sbuf *sb; 6150 int rc, i; 6151 uint16_t incr[NMTUS][NCCTRL_WIN]; 6152 static const char *dec_fac[] = { 6153 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 6154 "0.9375" 6155 }; 6156 6157 rc = sysctl_wire_old_buffer(req, 0); 6158 if (rc != 0) 6159 return (rc); 6160 6161 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6162 if (sb == NULL) 6163 return (ENOMEM); 6164 6165 t4_read_cong_tbl(sc, incr); 6166 6167 for (i = 0; i < NCCTRL_WIN; ++i) { 6168 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 6169 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 6170 incr[5][i], incr[6][i], incr[7][i]); 6171 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 6172 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 6173 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 6174 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 6175 } 6176 6177 rc = sbuf_finish(sb); 6178 sbuf_delete(sb); 6179 6180 return (rc); 6181 } 6182 6183 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 6184 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 6185 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 6186 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 6187 }; 6188 6189 static int 6190 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 6191 { 6192 struct adapter *sc = arg1; 6193 struct sbuf *sb; 6194 int rc, i, n, qid = arg2; 6195 uint32_t *buf, *p; 6196 char *qtype; 6197 u_int cim_num_obq = sc->chip_params->cim_num_obq; 6198 6199 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 6200 ("%s: bad qid %d\n", __func__, qid)); 6201 6202 if (qid < CIM_NUM_IBQ) { 6203 /* inbound queue */ 6204 qtype = "IBQ"; 6205 n = 4 * CIM_IBQ_SIZE; 6206 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 6207 rc = t4_read_cim_ibq(sc, qid, buf, n); 6208 } else { 6209 /* outbound queue */ 6210 qtype = "OBQ"; 6211 qid -= CIM_NUM_IBQ; 6212 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 6213 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 6214 rc = t4_read_cim_obq(sc, qid, buf, n); 6215 } 6216 6217 if (rc < 0) { 6218 rc = -rc; 6219 goto done; 6220 } 6221 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 6222 6223 rc = sysctl_wire_old_buffer(req, 0); 6224 if (rc != 0) 6225 goto done; 6226 6227 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6228 if (sb == NULL) { 6229 rc = ENOMEM; 6230 goto done; 6231 } 6232 6233 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 6234 for (i = 0, p = buf; i < n; i += 16, p += 4) 6235 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 6236 p[2], p[3]); 6237 6238 rc = sbuf_finish(sb); 6239 sbuf_delete(sb); 6240 done: 6241 free(buf, M_CXGBE); 6242 return (rc); 6243 } 6244 6245 static int 6246 sysctl_cim_la(SYSCTL_HANDLER_ARGS) 6247 { 6248 struct adapter *sc = arg1; 6249 u_int cfg; 6250 struct sbuf *sb; 6251 uint32_t *buf, *p; 6252 int rc; 6253 6254 MPASS(chip_id(sc) <= CHELSIO_T5); 6255 6256 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 6257 if (rc != 0) 6258 return (rc); 6259 6260 rc = sysctl_wire_old_buffer(req, 0); 6261 if (rc != 0) 6262 return (rc); 6263 6264 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6265 if (sb == NULL) 6266 return (ENOMEM); 6267 6268 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 6269 M_ZERO | M_WAITOK); 6270 6271 rc = 
-t4_cim_read_la(sc, buf, NULL); 6272 if (rc != 0) { 6273 sbuf_delete(sb); goto done; } 6274 6275 sbuf_printf(sb, "Status Data PC%s", 6276 cfg & F_UPDBGLACAPTPCONLY ? "" : 6277 " LS0Stat LS0Addr LS0Data"); 6278 6279 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 6280 if (cfg & F_UPDBGLACAPTPCONLY) { 6281 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 6282 p[6], p[7]); 6283 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 6284 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 6285 p[4] & 0xff, p[5] >> 8); 6286 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 6287 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 6288 p[1] & 0xf, p[2] >> 4); 6289 } else { 6290 sbuf_printf(sb, 6291 "\n %02x %x%07x %x%07x %08x %08x " 6292 "%08x%08x%08x%08x", 6293 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 6294 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 6295 p[6], p[7]); 6296 } 6297 } 6298 6299 rc = sbuf_finish(sb); 6300 sbuf_delete(sb); 6301 done: 6302 free(buf, M_CXGBE); 6303 return (rc); 6304 } 6305 6306 static int 6307 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 6308 { 6309 struct adapter *sc = arg1; 6310 u_int cfg; 6311 struct sbuf *sb; 6312 uint32_t *buf, *p; 6313 int rc; 6314 6315 MPASS(chip_id(sc) > CHELSIO_T5); 6316 6317 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 6318 if (rc != 0) 6319 return (rc); 6320 6321 rc = sysctl_wire_old_buffer(req, 0); 6322 if (rc != 0) 6323 return (rc); 6324 6325 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6326 if (sb == NULL) 6327 return (ENOMEM); 6328 6329 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 6330 M_ZERO | M_WAITOK); 6331 6332 rc = -t4_cim_read_la(sc, buf, NULL); 6333 if (rc != 0) { 6334 sbuf_delete(sb); goto done; } 6335 6336 sbuf_printf(sb, "Status Inst Data PC%s", 6337 cfg & F_UPDBGLACAPTPCONLY ? "" : 6338 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 6339 6340 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 6341 if (cfg & F_UPDBGLACAPTPCONLY) { 6342 sbuf_printf(sb, "\n %02x %08x %08x %08x", 6343 p[3] & 0xff, p[2], p[1], p[0]); 6344 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 6345 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 6346 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 6347 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 6348 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 6349 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 6350 p[6] >> 16); 6351 } else { 6352 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 6353 "%08x %08x %08x %08x %08x %08x", 6354 (p[9] >> 16) & 0xff, 6355 p[9] & 0xffff, p[8] >> 16, 6356 p[8] & 0xffff, p[7] >> 16, 6357 p[7] & 0xffff, p[6] >> 16, 6358 p[2], p[1], p[0], p[5], p[4], p[3]); 6359 } 6360 } 6361 6362 rc = sbuf_finish(sb); 6363 sbuf_delete(sb); 6364 done: 6365 free(buf, M_CXGBE); 6366 return (rc); 6367 } 6368 6369 static int 6370 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 6371 { 6372 struct adapter *sc = arg1; 6373 u_int i; 6374 struct sbuf *sb; 6375 uint32_t *buf, *p; 6376 int rc; 6377 6378 rc = sysctl_wire_old_buffer(req, 0); 6379 if (rc != 0) 6380 return (rc); 6381 6382 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6383 if (sb == NULL) 6384 return (ENOMEM); 6385 6386 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 6387 M_ZERO | M_WAITOK); 6388 6389 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 6390 p = buf; 6391 6392 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6393 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 6394 p[1], p[0]); 6395 } 6396 6397 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 6398 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6399
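/* Second half of the MA LA: unpack the Cnt, ID, Tag, UE, Data, RDY and VLD fields from each 5-word entry with shifts and masks. */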
sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 6400 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 6401 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 6402 (p[1] >> 2) | ((p[2] & 3) << 30), 6403 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 6404 p[0] & 1); 6405 } 6406 6407 rc = sbuf_finish(sb); 6408 sbuf_delete(sb); 6409 free(buf, M_CXGBE); 6410 return (rc); 6411 } 6412 6413 static int 6414 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 6415 { 6416 struct adapter *sc = arg1; 6417 u_int i; 6418 struct sbuf *sb; 6419 uint32_t *buf, *p; 6420 int rc; 6421 6422 rc = sysctl_wire_old_buffer(req, 0); 6423 if (rc != 0) 6424 return (rc); 6425 6426 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6427 if (sb == NULL) 6428 return (ENOMEM); 6429 6430 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 6431 M_ZERO | M_WAITOK); 6432 6433 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 6434 p = buf; 6435 6436 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 6437 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6438 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 6439 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 6440 p[4], p[3], p[2], p[1], p[0]); 6441 } 6442 6443 sbuf_printf(sb, "\n\nCntl ID Data"); 6444 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6445 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 6446 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 6447 } 6448 6449 rc = sbuf_finish(sb); 6450 sbuf_delete(sb); 6451 free(buf, M_CXGBE); 6452 return (rc); 6453 } 6454 6455 static int 6456 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 6457 { 6458 struct adapter *sc = arg1; 6459 struct sbuf *sb; 6460 int rc, i; 6461 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6462 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6463 uint16_t thres[CIM_NUM_IBQ]; 6464 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 6465 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 6466 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 6467 6468 cim_num_obq = sc->chip_params->cim_num_obq; 6469 if (is_t4(sc)) { 6470 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 6471 obq_rdaddr = A_UP_OBQ_0_REALADDR; 6472 } else { 6473 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 6474 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 6475 } 6476 nq = CIM_NUM_IBQ + cim_num_obq; 6477 6478 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 6479 if (rc == 0) 6480 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 6481 if (rc != 0) 6482 return (rc); 6483 6484 t4_read_cimq_cfg(sc, base, size, thres); 6485 6486 rc = sysctl_wire_old_buffer(req, 0); 6487 if (rc != 0) 6488 return (rc); 6489 6490 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6491 if (sb == NULL) 6492 return (ENOMEM); 6493 6494 sbuf_printf(sb, 6495 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 6496 6497 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 6498 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 6499 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 6500 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6501 G_QUEREMFLITS(p[2]) * 16); 6502 for ( ; i < nq; i++, p += 4, wr += 2) 6503 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 6504 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 6505 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6506 G_QUEREMFLITS(p[2]) * 16); 6507 6508 rc = sbuf_finish(sb); 6509 sbuf_delete(sb); 6510 6511 return (rc); 6512 } 6513 6514 static int 6515 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 6516 { 6517 struct adapter *sc = arg1; 6518 struct sbuf *sb; 6519 int rc; 6520 struct tp_cpl_stats stats; 6521 6522 
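/* Wire the request's output buffer before collecting the stats; the sbuf created below drains directly into it. */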
rc = sysctl_wire_old_buffer(req, 0); 6523 if (rc != 0) 6524 return (rc); 6525 6526 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6527 if (sb == NULL) 6528 return (ENOMEM); 6529 6530 mtx_lock(&sc->reg_lock); 6531 t4_tp_get_cpl_stats(sc, &stats); 6532 mtx_unlock(&sc->reg_lock); 6533 6534 if (sc->chip_params->nchan > 2) { 6535 sbuf_printf(sb, " channel 0 channel 1" 6536 " channel 2 channel 3"); 6537 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 6538 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 6539 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 6540 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 6541 } else { 6542 sbuf_printf(sb, " channel 0 channel 1"); 6543 sbuf_printf(sb, "\nCPL requests: %10u %10u", 6544 stats.req[0], stats.req[1]); 6545 sbuf_printf(sb, "\nCPL responses: %10u %10u", 6546 stats.rsp[0], stats.rsp[1]); 6547 } 6548 6549 rc = sbuf_finish(sb); 6550 sbuf_delete(sb); 6551 6552 return (rc); 6553 } 6554 6555 static int 6556 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 6557 { 6558 struct adapter *sc = arg1; 6559 struct sbuf *sb; 6560 int rc; 6561 struct tp_usm_stats stats; 6562 6563 rc = sysctl_wire_old_buffer(req, 0); 6564 if (rc != 0) 6565 return(rc); 6566 6567 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6568 if (sb == NULL) 6569 return (ENOMEM); 6570 6571 t4_get_usm_stats(sc, &stats); 6572 6573 sbuf_printf(sb, "Frames: %u\n", stats.frames); 6574 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 6575 sbuf_printf(sb, "Drops: %u", stats.drops); 6576 6577 rc = sbuf_finish(sb); 6578 sbuf_delete(sb); 6579 6580 return (rc); 6581 } 6582 6583 static const char * const devlog_level_strings[] = { 6584 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 6585 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 6586 [FW_DEVLOG_LEVEL_ERR] = "ERR", 6587 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 6588 [FW_DEVLOG_LEVEL_INFO] = "INFO", 6589 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 6590 }; 6591 6592 static const char * const devlog_facility_strings[] = { 6593 [FW_DEVLOG_FACILITY_CORE] = "CORE", 6594 [FW_DEVLOG_FACILITY_CF] = "CF", 6595 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 6596 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 6597 [FW_DEVLOG_FACILITY_RES] = "RES", 6598 [FW_DEVLOG_FACILITY_HW] = "HW", 6599 [FW_DEVLOG_FACILITY_FLR] = "FLR", 6600 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 6601 [FW_DEVLOG_FACILITY_PHY] = "PHY", 6602 [FW_DEVLOG_FACILITY_MAC] = "MAC", 6603 [FW_DEVLOG_FACILITY_PORT] = "PORT", 6604 [FW_DEVLOG_FACILITY_VI] = "VI", 6605 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 6606 [FW_DEVLOG_FACILITY_ACL] = "ACL", 6607 [FW_DEVLOG_FACILITY_TM] = "TM", 6608 [FW_DEVLOG_FACILITY_QFC] = "QFC", 6609 [FW_DEVLOG_FACILITY_DCB] = "DCB", 6610 [FW_DEVLOG_FACILITY_ETH] = "ETH", 6611 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 6612 [FW_DEVLOG_FACILITY_RI] = "RI", 6613 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 6614 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 6615 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 6616 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 6617 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 6618 }; 6619 6620 static int 6621 sysctl_devlog(SYSCTL_HANDLER_ARGS) 6622 { 6623 struct adapter *sc = arg1; 6624 struct devlog_params *dparams = &sc->params.devlog; 6625 struct fw_devlog_e *buf, *e; 6626 int i, j, rc, nentries, first = 0; 6627 struct sbuf *sb; 6628 uint64_t ftstamp = UINT64_MAX; 6629 6630 if (dparams->addr == 0) 6631 return (ENXIO); 6632 6633 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 6634 if (buf == NULL) 6635 return (ENOMEM); 6636 6637 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 6638 if (rc != 0) 6639 goto done; 6640 
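/* The firmware's device log is a circular buffer; find the entry with the lowest timestamp, which is where the log begins. */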
6641 nentries = dparams->size / sizeof(struct fw_devlog_e); 6642 for (i = 0; i < nentries; i++) { 6643 e = &buf[i]; 6644 6645 if (e->timestamp == 0) 6646 break; /* end */ 6647 6648 e->timestamp = be64toh(e->timestamp); 6649 e->seqno = be32toh(e->seqno); 6650 for (j = 0; j < 8; j++) 6651 e->params[j] = be32toh(e->params[j]); 6652 6653 if (e->timestamp < ftstamp) { 6654 ftstamp = e->timestamp; 6655 first = i; 6656 } 6657 } 6658 6659 if (buf[first].timestamp == 0) 6660 goto done; /* nothing in the log */ 6661 6662 rc = sysctl_wire_old_buffer(req, 0); 6663 if (rc != 0) 6664 goto done; 6665 6666 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6667 if (sb == NULL) { 6668 rc = ENOMEM; 6669 goto done; 6670 } 6671 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 6672 "Seq#", "Tstamp", "Level", "Facility", "Message"); 6673 6674 i = first; 6675 do { 6676 e = &buf[i]; 6677 if (e->timestamp == 0) 6678 break; /* end */ 6679 6680 sbuf_printf(sb, "%10d %15ju %8s %8s ", 6681 e->seqno, e->timestamp, 6682 (e->level < nitems(devlog_level_strings) ? 6683 devlog_level_strings[e->level] : "UNKNOWN"), 6684 (e->facility < nitems(devlog_facility_strings) ? 6685 devlog_facility_strings[e->facility] : "UNKNOWN")); 6686 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 6687 e->params[2], e->params[3], e->params[4], 6688 e->params[5], e->params[6], e->params[7]); 6689 6690 if (++i == nentries) 6691 i = 0; 6692 } while (i != first); 6693 6694 rc = sbuf_finish(sb); 6695 sbuf_delete(sb); 6696 done: 6697 free(buf, M_CXGBE); 6698 return (rc); 6699 } 6700 6701 static int 6702 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6703 { 6704 struct adapter *sc = arg1; 6705 struct sbuf *sb; 6706 int rc; 6707 struct tp_fcoe_stats stats[MAX_NCHAN]; 6708 int i, nchan = sc->chip_params->nchan; 6709 6710 rc = sysctl_wire_old_buffer(req, 0); 6711 if (rc != 0) 6712 return (rc); 6713 6714 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6715 if (sb == NULL) 6716 return (ENOMEM); 6717 6718 for (i = 0; i < nchan; i++) 6719 t4_get_fcoe_stats(sc, i, &stats[i]); 6720 6721 if (nchan > 2) { 6722 sbuf_printf(sb, " channel 0 channel 1" 6723 " channel 2 channel 3"); 6724 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6725 stats[0].octets_ddp, stats[1].octets_ddp, 6726 stats[2].octets_ddp, stats[3].octets_ddp); 6727 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6728 stats[0].frames_ddp, stats[1].frames_ddp, 6729 stats[2].frames_ddp, stats[3].frames_ddp); 6730 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6731 stats[0].frames_drop, stats[1].frames_drop, 6732 stats[2].frames_drop, stats[3].frames_drop); 6733 } else { 6734 sbuf_printf(sb, " channel 0 channel 1"); 6735 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6736 stats[0].octets_ddp, stats[1].octets_ddp); 6737 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6738 stats[0].frames_ddp, stats[1].frames_ddp); 6739 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6740 stats[0].frames_drop, stats[1].frames_drop); 6741 } 6742 6743 rc = sbuf_finish(sb); 6744 sbuf_delete(sb); 6745 6746 return (rc); 6747 } 6748 6749 static int 6750 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6751 { 6752 struct adapter *sc = arg1; 6753 struct sbuf *sb; 6754 int rc, i; 6755 unsigned int map, kbps, ipg, mode; 6756 unsigned int pace_tab[NTX_SCHED]; 6757 6758 rc = sysctl_wire_old_buffer(req, 0); 6759 if (rc != 0) 6760 return (rc); 6761 6762 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6763 if (sb == NULL) 6764 return (ENOMEM); 6765 6766 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6767 mode = G_TIMERMODE(t4_read_reg(sc, 
A_TP_MOD_CONFIG)); 6768 t4_read_pace_tbl(sc, pace_tab); 6769 6770 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6771 "Class IPG (0.1 ns) Flow IPG (us)"); 6772 6773 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6774 t4_get_tx_sched(sc, i, &kbps, &ipg); 6775 sbuf_printf(sb, "\n %u %-5s %u ", i, 6776 (mode & (1 << i)) ? "flow" : "class", map & 3); 6777 if (kbps) 6778 sbuf_printf(sb, "%9u ", kbps); 6779 else 6780 sbuf_printf(sb, " disabled "); 6781 6782 if (ipg) 6783 sbuf_printf(sb, "%13u ", ipg); 6784 else 6785 sbuf_printf(sb, " disabled "); 6786 6787 if (pace_tab[i]) 6788 sbuf_printf(sb, "%10u", pace_tab[i]); 6789 else 6790 sbuf_printf(sb, " disabled"); 6791 } 6792 6793 rc = sbuf_finish(sb); 6794 sbuf_delete(sb); 6795 6796 return (rc); 6797 } 6798 6799 static int 6800 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6801 { 6802 struct adapter *sc = arg1; 6803 struct sbuf *sb; 6804 int rc, i, j; 6805 uint64_t *p0, *p1; 6806 struct lb_port_stats s[2]; 6807 static const char *stat_name[] = { 6808 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6809 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6810 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6811 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6812 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6813 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6814 "BG2FramesTrunc:", "BG3FramesTrunc:" 6815 }; 6816 6817 rc = sysctl_wire_old_buffer(req, 0); 6818 if (rc != 0) 6819 return (rc); 6820 6821 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6822 if (sb == NULL) 6823 return (ENOMEM); 6824 6825 memset(s, 0, sizeof(s)); 6826 6827 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6828 t4_get_lb_stats(sc, i, &s[0]); 6829 t4_get_lb_stats(sc, i + 1, &s[1]); 6830 6831 p0 = &s[0].octets; 6832 p1 = &s[1].octets; 6833 sbuf_printf(sb, "%s Loopback %u" 6834 " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); 6835 6836 for (j = 0; j < nitems(stat_name); j++) 6837 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6838 *p0++, *p1++); 6839 } 6840 6841 rc = sbuf_finish(sb); 6842 sbuf_delete(sb); 6843 6844 return (rc); 6845 } 6846 6847 static int 6848 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6849 { 6850 int rc = 0; 6851 struct port_info *pi = arg1; 6852 struct link_config *lc = &pi->link_cfg; 6853 struct sbuf *sb; 6854 6855 rc = sysctl_wire_old_buffer(req, 0); 6856 if (rc != 0) 6857 return(rc); 6858 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6859 if (sb == NULL) 6860 return (ENOMEM); 6861 6862 if (lc->link_ok || lc->link_down_rc == 255) 6863 sbuf_printf(sb, "n/a"); 6864 else 6865 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); 6866 6867 rc = sbuf_finish(sb); 6868 sbuf_delete(sb); 6869 6870 return (rc); 6871 } 6872 6873 struct mem_desc { 6874 unsigned int base; 6875 unsigned int limit; 6876 unsigned int idx; 6877 }; 6878 6879 static int 6880 mem_desc_cmp(const void *a, const void *b) 6881 { 6882 return ((const struct mem_desc *)a)->base - 6883 ((const struct mem_desc *)b)->base; 6884 } 6885 6886 static void 6887 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6888 unsigned int to) 6889 { 6890 unsigned int size; 6891 6892 if (from == to) 6893 return; 6894 6895 size = to - from + 1; 6896 if (size == 0) 6897 return; 6898 6899 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6900 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6901 } 6902 6903 static int 6904 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6905 { 6906 struct adapter *sc = arg1; 6907 struct sbuf *sb; 6908 int rc, i, n; 6909 uint32_t lo, hi, used, alloc; 6910 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6911 static const char *region[] = { 6912 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6913 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6914 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6915 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6916 "RQUDP region:", "PBL region:", "TXPBL region:", 6917 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6918 "On-chip queues:" 6919 }; 6920 struct mem_desc avail[4]; 6921 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6922 struct mem_desc *md = mem; 6923 6924 rc = sysctl_wire_old_buffer(req, 0); 6925 if (rc != 0) 6926 return (rc); 6927 6928 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6929 if (sb == NULL) 6930 return (ENOMEM); 6931 6932 for (i = 0; i < nitems(mem); i++) { 6933 mem[i].limit = 0; 6934 mem[i].idx = i; 6935 } 6936 6937 /* Find and sort the populated memory ranges */ 6938 i = 0; 6939 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6940 if (lo & F_EDRAM0_ENABLE) { 6941 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6942 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6943 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6944 avail[i].idx = 0; 6945 i++; 6946 } 6947 if (lo & F_EDRAM1_ENABLE) { 6948 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6949 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6950 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6951 avail[i].idx = 1; 6952 i++; 6953 } 6954 if (lo & F_EXT_MEM_ENABLE) { 6955 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6956 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6957 avail[i].limit = avail[i].base + 6958 (G_EXT_MEM_SIZE(hi) << 20); 6959 avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ 6960 i++; 6961 } 6962 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 6963 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6964 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6965 avail[i].limit = avail[i].base + 6966 (G_EXT_MEM1_SIZE(hi) << 20); 6967 avail[i].idx = 4; 6968 i++; 6969 } 6970 if (!i) { /* no memory available */ 6971 sbuf_delete(sb); return (0); } 6972 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6973 6974 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6975 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6976 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6977 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6978 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6979 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6980 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6981 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6982 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6983 6984 /* the next few have explicit upper bounds */ 6985 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6986 md->limit = md->base - 1 + 6987 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6988 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6989 md++; 6990 6991 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6992 md->limit = md->base - 1 + 6993 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6994 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6995 md++; 6996 6997 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6998 if (chip_id(sc) <= CHELSIO_T5) 6999 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 7000 else 7001 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 7002 md->limit = 0; 7003 } else { 7004 md->base = 0; 7005 md->idx = nitems(region); /* hide it */ 7006 } 7007 md++; 7008 7009 #define ulp_region(reg) \ 7010 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 7011 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 7012 7013 ulp_region(RX_ISCSI); 7014 ulp_region(RX_TDDP); 7015 ulp_region(TX_TPT); 7016 ulp_region(RX_STAG); 7017 ulp_region(RX_RQ); 7018 ulp_region(RX_RQUDP); 7019 ulp_region(RX_PBL); 7020 ulp_region(TX_PBL); 7021 #undef ulp_region 7022 7023 md->base = 0; 7024 md->idx = nitems(region); 7025 if (!is_t4(sc)) { 7026 uint32_t size = 0; 7027 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 7028 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 7029 7030 if (is_t5(sc)) { 7031 if (sge_ctrl & F_VFIFO_ENABLE) 7032 size = G_DBVFIFO_SIZE(fifo_size); 7033 } else 7034 size = G_T6_DBVFIFO_SIZE(fifo_size); 7035 7036 if (size) { 7037 md->base = G_BASEADDR(t4_read_reg(sc, 7038 A_SGE_DBVFIFO_BADDR)); 7039 md->limit = md->base + (size << 2) - 1; 7040 } 7041 } 7042 md++; 7043 7044 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 7045 md->limit = 0; 7046 md++; 7047 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 7048 md->limit = 0; 7049 md++; 7050 7051 md->base = sc->vres.ocq.start; 7052 if (sc->vres.ocq.size) 7053 md->limit = md->base + sc->vres.ocq.size - 1; 7054 else 7055 md->idx = nitems(region); /* hide it */ 7056 md++; 7057 7058 /* add any address-space holes, there can be up to 3 */ 7059 for (n = 0; n < i - 1; n++) 7060 if (avail[n].limit < avail[n + 1].base) 7061 (md++)->base = avail[n].limit; 7062 if (avail[n].limit) 7063 (md++)->base = avail[n].limit; 7064 7065 n = md - mem; 7066 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 7067 7068 for (lo = 0; lo < i; lo++) 7069 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 7070 avail[lo].limit - 1); 7071 7072 sbuf_printf(sb, "\n"); 7073 for (i
= 0; i < n; i++) { 7074 if (mem[i].idx >= nitems(region)) 7075 continue; /* skip holes */ 7076 if (!mem[i].limit) 7077 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 7078 mem_region_show(sb, region[mem[i].idx], mem[i].base, 7079 mem[i].limit); 7080 } 7081 7082 sbuf_printf(sb, "\n"); 7083 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 7084 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 7085 mem_region_show(sb, "uP RAM:", lo, hi); 7086 7087 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 7088 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 7089 mem_region_show(sb, "uP Extmem2:", lo, hi); 7090 7091 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 7092 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 7093 G_PMRXMAXPAGE(lo), 7094 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 7095 (lo & F_PMRXNUMCHN) ? 2 : 1); 7096 7097 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 7098 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 7099 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 7100 G_PMTXMAXPAGE(lo), 7101 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 7102 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 7103 sbuf_printf(sb, "%u p-structs\n", 7104 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 7105 7106 for (i = 0; i < 4; i++) { 7107 if (chip_id(sc) > CHELSIO_T5) 7108 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 7109 else 7110 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 7111 if (is_t5(sc)) { 7112 used = G_T5_USED(lo); 7113 alloc = G_T5_ALLOC(lo); 7114 } else { 7115 used = G_USED(lo); 7116 alloc = G_ALLOC(lo); 7117 } 7118 /* For T6 these are MAC buffer groups */ 7119 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 7120 i, used, alloc); 7121 } 7122 for (i = 0; i < sc->chip_params->nchan; i++) { 7123 if (chip_id(sc) > CHELSIO_T5) 7124 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 7125 else 7126 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 7127 if (is_t5(sc)) { 7128 used = G_T5_USED(lo); 7129 alloc = G_T5_ALLOC(lo); 7130 } else { 7131 used = G_USED(lo); 7132 alloc = G_ALLOC(lo); 7133 } 7134 /* For T6 these are MAC buffer groups */ 7135 sbuf_printf(sb, 7136 "\nLoopback %d using %u pages out of %u allocated", 7137 i, used, alloc); 7138 } 7139 7140 rc = sbuf_finish(sb); 7141 sbuf_delete(sb); 7142 7143 return (rc); 7144 } 7145 7146 static inline void 7147 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 7148 { 7149 *mask = x | y; 7150 y = htobe64(y); 7151 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 7152 } 7153 7154 static int 7155 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 7156 { 7157 struct adapter *sc = arg1; 7158 struct sbuf *sb; 7159 int rc, i; 7160 7161 MPASS(chip_id(sc) <= CHELSIO_T5); 7162 7163 rc = sysctl_wire_old_buffer(req, 0); 7164 if (rc != 0) 7165 return (rc); 7166 7167 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7168 if (sb == NULL) 7169 return (ENOMEM); 7170 7171 sbuf_printf(sb, 7172 "Idx Ethernet address Mask Vld Ports PF" 7173 " VF Replication P0 P1 P2 P3 ML"); 7174 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 7175 uint64_t tcamx, tcamy, mask; 7176 uint32_t cls_lo, cls_hi; 7177 uint8_t addr[ETHER_ADDR_LEN]; 7178 7179 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 7180 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 7181 if (tcamx & tcamy) 7182 continue; 7183 tcamxy2valmask(tcamx, tcamy, addr, &mask); 7184 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 7185 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 7186 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 7187 " %c %#x%4u%4d", 
i, addr[0], addr[1], addr[2], 7188 addr[3], addr[4], addr[5], (uintmax_t)mask, 7189 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 7190 G_PORTMAP(cls_hi), G_PF(cls_lo), 7191 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); 7192 7193 if (cls_lo & F_REPLICATE) { 7194 struct fw_ldst_cmd ldst_cmd; 7195 7196 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 7197 ldst_cmd.op_to_addrspace = 7198 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 7199 F_FW_CMD_REQUEST | F_FW_CMD_READ | 7200 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 7201 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 7202 ldst_cmd.u.mps.rplc.fid_idx = 7203 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 7204 V_FW_LDST_CMD_IDX(i)); 7205 7206 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 7207 "t4mps"); 7208 if (rc) 7209 break; 7210 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 7211 sizeof(ldst_cmd), &ldst_cmd); 7212 end_synchronized_op(sc, 0); 7213 7214 if (rc != 0) { 7215 sbuf_printf(sb, "%36d", rc); 7216 rc = 0; 7217 } else { 7218 sbuf_printf(sb, " %08x %08x %08x %08x", 7219 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 7220 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 7221 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 7222 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 7223 } 7224 } else 7225 sbuf_printf(sb, "%36s", ""); 7226 7227 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 7228 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 7229 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 7230 } 7231 7232 if (rc) 7233 (void) sbuf_finish(sb); 7234 else 7235 rc = sbuf_finish(sb); 7236 sbuf_delete(sb); 7237 7238 return (rc); 7239 } 7240 7241 static int 7242 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 7243 { 7244 struct adapter *sc = arg1; 7245 struct sbuf *sb; 7246 int rc, i; 7247 7248 MPASS(chip_id(sc) > CHELSIO_T5); 7249 7250 rc = sysctl_wire_old_buffer(req, 0); 7251 if (rc != 0) 7252 return (rc); 7253 7254 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7255 if (sb == NULL) 7256 return (ENOMEM); 7257 7258 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 7259 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 7260 " Replication" 7261 " P0 P1 P2 P3 ML\n"); 7262 7263 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 7264 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 7265 uint16_t ivlan; 7266 uint64_t tcamx, tcamy, val, mask; 7267 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 7268 uint8_t addr[ETHER_ADDR_LEN]; 7269 7270 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 7271 if (i < 256) 7272 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 7273 else 7274 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 7275 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 7276 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 7277 tcamy = G_DMACH(val) << 32; 7278 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 7279 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 7280 lookup_type = G_DATALKPTYPE(data2); 7281 port_num = G_DATAPORTNUM(data2); 7282 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7283 /* Inner header VNI */ 7284 vniy = ((data2 & F_DATAVIDH2) << 23) | 7285 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 7286 dip_hit = data2 & F_DATADIPHIT; 7287 vlan_vld = 0; 7288 } else { 7289 vniy = 0; 7290 dip_hit = 0; 7291 vlan_vld = data2 & F_DATAVIDH2; 7292 ivlan = G_VIDL(val); 7293 } 7294 7295 ctl |= V_CTLXYBITSEL(1); 7296 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 7297 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 7298 tcamx = G_DMACH(val) << 32; 7299 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 7300 data2 = t4_read_reg(sc, 
A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 7301 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7302 /* Inner header VNI mask */ 7303 vnix = ((data2 & F_DATAVIDH2) << 23) | 7304 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 7305 } else 7306 vnix = 0; 7307 7308 if (tcamx & tcamy) 7309 continue; 7310 tcamxy2valmask(tcamx, tcamy, addr, &mask); 7311 7312 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 7313 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 7314 7315 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7316 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 7317 "%012jx %06x %06x - - %3c" 7318 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 7319 addr[1], addr[2], addr[3], addr[4], addr[5], 7320 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 7321 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 7322 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 7323 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 7324 } else { 7325 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 7326 "%012jx - - ", i, addr[0], addr[1], 7327 addr[2], addr[3], addr[4], addr[5], 7328 (uintmax_t)mask); 7329 7330 if (vlan_vld) 7331 sbuf_printf(sb, "%4u Y ", ivlan); 7332 else 7333 sbuf_printf(sb, " - N "); 7334 7335 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 7336 lookup_type ? 'I' : 'O', port_num, 7337 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 7338 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 7339 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 7340 } 7341 7342 7343 if (cls_lo & F_T6_REPLICATE) { 7344 struct fw_ldst_cmd ldst_cmd; 7345 7346 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 7347 ldst_cmd.op_to_addrspace = 7348 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 7349 F_FW_CMD_REQUEST | F_FW_CMD_READ | 7350 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 7351 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 7352 ldst_cmd.u.mps.rplc.fid_idx = 7353 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 7354 V_FW_LDST_CMD_IDX(i)); 7355 7356 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 7357 "t6mps"); 7358 if (rc) 7359 break; 7360 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 7361 sizeof(ldst_cmd), &ldst_cmd); 7362 end_synchronized_op(sc, 0); 7363 7364 if (rc != 0) { 7365 sbuf_printf(sb, "%72d", rc); 7366 rc = 0; 7367 } else { 7368 sbuf_printf(sb, " %08x %08x %08x %08x" 7369 " %08x %08x %08x %08x", 7370 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 7371 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 7372 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 7373 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 7374 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 7375 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 7376 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 7377 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 7378 } 7379 } else 7380 sbuf_printf(sb, "%72s", ""); 7381 7382 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 7383 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 7384 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 7385 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 7386 } 7387 7388 if (rc) 7389 (void) sbuf_finish(sb); 7390 else 7391 rc = sbuf_finish(sb); 7392 sbuf_delete(sb); 7393 7394 return (rc); 7395 } 7396 7397 static int 7398 sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 7399 { 7400 struct adapter *sc = arg1; 7401 struct sbuf *sb; 7402 int rc; 7403 uint16_t mtus[NMTUS]; 7404 7405 rc = sysctl_wire_old_buffer(req, 0); 7406 if (rc != 0) 7407 return (rc); 7408 7409 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7410 if (sb == NULL) 7411 return (ENOMEM); 7412 7413 t4_read_mtu_tbl(sc, mtus, NULL); 7414 7415 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 7416 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 
7417 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 7418 mtus[14], mtus[15]); 7419 7420 rc = sbuf_finish(sb); 7421 sbuf_delete(sb); 7422 7423 return (rc); 7424 } 7425 7426 static int 7427 sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 7428 { 7429 struct adapter *sc = arg1; 7430 struct sbuf *sb; 7431 int rc, i; 7432 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 7433 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 7434 static const char *tx_stats[MAX_PM_NSTATS] = { 7435 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 7436 "Tx FIFO wait", NULL, "Tx latency" 7437 }; 7438 static const char *rx_stats[MAX_PM_NSTATS] = { 7439 "Read:", "Write bypass:", "Write mem:", "Flush:", 7440 "Rx FIFO wait", NULL, "Rx latency" 7441 }; 7442 7443 rc = sysctl_wire_old_buffer(req, 0); 7444 if (rc != 0) 7445 return (rc); 7446 7447 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7448 if (sb == NULL) 7449 return (ENOMEM); 7450 7451 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 7452 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 7453 7454 sbuf_printf(sb, " Tx pcmds Tx bytes"); 7455 for (i = 0; i < 4; i++) { 7456 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7457 tx_cyc[i]); 7458 } 7459 7460 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 7461 for (i = 0; i < 4; i++) { 7462 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7463 rx_cyc[i]); 7464 } 7465 7466 if (chip_id(sc) > CHELSIO_T5) { 7467 sbuf_printf(sb, 7468 "\n Total wait Total occupancy"); 7469 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7470 tx_cyc[i]); 7471 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7472 rx_cyc[i]); 7473 7474 i += 2; 7475 MPASS(i < nitems(tx_stats)); 7476 7477 sbuf_printf(sb, 7478 "\n Reads Total wait"); 7479 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7480 tx_cyc[i]); 7481 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7482 rx_cyc[i]); 7483 } 7484 7485 rc = sbuf_finish(sb); 7486 sbuf_delete(sb); 7487 7488 return (rc); 7489 } 7490 7491 static int 7492 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 7493 { 7494 struct adapter *sc = arg1; 7495 struct sbuf *sb; 7496 int rc; 7497 struct tp_rdma_stats stats; 7498 7499 rc = sysctl_wire_old_buffer(req, 0); 7500 if (rc != 0) 7501 return (rc); 7502 7503 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7504 if (sb == NULL) 7505 return (ENOMEM); 7506 7507 mtx_lock(&sc->reg_lock); 7508 t4_tp_get_rdma_stats(sc, &stats); 7509 mtx_unlock(&sc->reg_lock); 7510 7511 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod); 7512 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt); 7513 7514 rc = sbuf_finish(sb); 7515 sbuf_delete(sb); 7516 7517 return (rc); 7518 } 7519 7520 static int 7521 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 7522 { 7523 struct adapter *sc = arg1; 7524 struct sbuf *sb; 7525 int rc; 7526 struct tp_tcp_stats v4, v6; 7527 7528 rc = sysctl_wire_old_buffer(req, 0); 7529 if (rc != 0) 7530 return (rc); 7531 7532 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7533 if (sb == NULL) 7534 return (ENOMEM); 7535 7536 mtx_lock(&sc->reg_lock); 7537 t4_tp_get_tcp_stats(sc, &v4, &v6); 7538 mtx_unlock(&sc->reg_lock); 7539 7540 sbuf_printf(sb, 7541 " IP IPv6\n"); 7542 sbuf_printf(sb, "OutRsts: %20u %20u\n", 7543 v4.tcp_out_rsts, v6.tcp_out_rsts); 7544 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 7545 v4.tcp_in_segs, v6.tcp_in_segs); 7546 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 7547 v4.tcp_out_segs, v6.tcp_out_segs); 7548 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 7549 v4.tcp_retrans_segs,
v6.tcp_retrans_segs); 7550 7551 rc = sbuf_finish(sb); 7552 sbuf_delete(sb); 7553 7554 return (rc); 7555 } 7556 7557 static int 7558 sysctl_tids(SYSCTL_HANDLER_ARGS) 7559 { 7560 struct adapter *sc = arg1; 7561 struct sbuf *sb; 7562 int rc; 7563 struct tid_info *t = &sc->tids; 7564 7565 rc = sysctl_wire_old_buffer(req, 0); 7566 if (rc != 0) 7567 return (rc); 7568 7569 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7570 if (sb == NULL) 7571 return (ENOMEM); 7572 7573 if (t->natids) { 7574 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 7575 t->atids_in_use); 7576 } 7577 7578 if (t->ntids) { 7579 sbuf_printf(sb, "TID range: "); 7580 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 7581 uint32_t b, hb; 7582 7583 if (chip_id(sc) <= CHELSIO_T5) { 7584 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 7585 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 7586 } else { 7587 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); 7588 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); 7589 } 7590 7591 if (b) 7592 sbuf_printf(sb, "0-%u, ", b - 1); 7593 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1); 7594 } else 7595 sbuf_printf(sb, "0-%u", t->ntids - 1); 7596 sbuf_printf(sb, ", in use: %u\n", 7597 atomic_load_acq_int(&t->tids_in_use)); 7598 } 7599 7600 if (t->nstids) { 7601 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 7602 t->stid_base + t->nstids - 1, t->stids_in_use); 7603 } 7604 7605 if (t->nftids) { 7606 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 7607 t->ftid_base + t->nftids - 1); 7608 } 7609 7610 if (t->netids) { 7611 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 7612 t->etid_base + t->netids - 1); 7613 } 7614 7615 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 7616 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 7617 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 7618 7619 rc = sbuf_finish(sb); 7620 sbuf_delete(sb); 7621 7622 return (rc); 7623 } 7624 7625 static int 7626 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 7627 { 7628 struct adapter *sc = arg1; 7629 struct sbuf *sb; 7630 int rc; 7631 struct tp_err_stats stats; 7632 7633 rc = sysctl_wire_old_buffer(req, 0); 7634 if (rc != 0) 7635 return (rc); 7636 7637 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7638 if (sb == NULL) 7639 return (ENOMEM); 7640 7641 mtx_lock(&sc->reg_lock); 7642 t4_tp_get_err_stats(sc, &stats); 7643 mtx_unlock(&sc->reg_lock); 7644 7645 if (sc->chip_params->nchan > 2) { 7646 sbuf_printf(sb, " channel 0 channel 1" 7647 " channel 2 channel 3\n"); 7648 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 7649 stats.mac_in_errs[0], stats.mac_in_errs[1], 7650 stats.mac_in_errs[2], stats.mac_in_errs[3]); 7651 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 7652 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 7653 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 7654 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 7655 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 7656 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 7657 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 7658 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 7659 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 7660 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 7661 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 7662 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 7663 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 7664 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 7665 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 7666 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 7667 stats.ofld_vlan_drops[0], 
stats.ofld_vlan_drops[1], 7668 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 7669 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 7670 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 7671 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 7672 } else { 7673 sbuf_printf(sb, " channel 0 channel 1\n"); 7674 sbuf_printf(sb, "macInErrs: %10u %10u\n", 7675 stats.mac_in_errs[0], stats.mac_in_errs[1]); 7676 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 7677 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 7678 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 7679 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 7680 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 7681 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 7682 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 7683 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 7684 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 7685 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 7686 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 7687 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 7688 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 7689 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 7690 } 7691 7692 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 7693 stats.ofld_no_neigh, stats.ofld_cong_defer); 7694 7695 rc = sbuf_finish(sb); 7696 sbuf_delete(sb); 7697 7698 return (rc); 7699 } 7700 7701 static int 7702 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 7703 { 7704 struct adapter *sc = arg1; 7705 struct tp_params *tpp = &sc->params.tp; 7706 u_int mask; 7707 int rc; 7708 7709 mask = tpp->la_mask >> 16; 7710 rc = sysctl_handle_int(oidp, &mask, 0, req); 7711 if (rc != 0 || req->newptr == NULL) 7712 return (rc); 7713 if (mask > 0xffff) 7714 return (EINVAL); 7715 tpp->la_mask = mask << 16; 7716 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7717 7718 return (0); 7719 } 7720 7721 struct field_desc { 7722 const char *name; 7723 u_int start; 7724 u_int width; 7725 }; 7726 7727 static void 7728 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7729 { 7730 char buf[32]; 7731 int line_size = 0; 7732 7733 while (f->name) { 7734 uint64_t mask = (1ULL << f->width) - 1; 7735 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7736 ((uintmax_t)v >> f->start) & mask); 7737 7738 if (line_size + len >= 79) { 7739 line_size = 8; 7740 sbuf_printf(sb, "\n "); 7741 } 7742 sbuf_printf(sb, "%s ", buf); 7743 line_size += len + 1; 7744 f++; 7745 } 7746 sbuf_printf(sb, "\n"); 7747 } 7748 7749 static const struct field_desc tp_la0[] = { 7750 { "RcfOpCodeOut", 60, 4 }, 7751 { "State", 56, 4 }, 7752 { "WcfState", 52, 4 }, 7753 { "RcfOpcSrcOut", 50, 2 }, 7754 { "CRxError", 49, 1 }, 7755 { "ERxError", 48, 1 }, 7756 { "SanityFailed", 47, 1 }, 7757 { "SpuriousMsg", 46, 1 }, 7758 { "FlushInputMsg", 45, 1 }, 7759 { "FlushInputCpl", 44, 1 }, 7760 { "RssUpBit", 43, 1 }, 7761 { "RssFilterHit", 42, 1 }, 7762 { "Tid", 32, 10 }, 7763 { "InitTcb", 31, 1 }, 7764 { "LineNumber", 24, 7 }, 7765 { "Emsg", 23, 1 }, 7766 { "EdataOut", 22, 1 }, 7767 { "Cmsg", 21, 1 }, 7768 { "CdataOut", 20, 1 }, 7769 { "EreadPdu", 19, 1 }, 7770 { "CreadPdu", 18, 1 }, 7771 { "TunnelPkt", 17, 1 }, 7772 { "RcfPeerFin", 16, 1 }, 7773 { "RcfReasonOut", 12, 4 }, 7774 { "TxCchannel", 10, 2 }, 7775 { "RcfTxChannel", 8, 2 }, 7776 { "RxEchannel", 6, 2 }, 7777 { "RcfRxChannel", 5, 1 }, 7778 { "RcfDataOutSrdy", 4, 1 }, 7779 { "RxDvld", 3, 1 }, 7780 { "RxOoDvld", 2, 1 }, 7781 { "RxCongestion", 1, 1 }, 7782 { "TxCongestion", 0, 1 }, 7783 { NULL } 7784 }; 7785 7786 static const struct 
field_desc tp_la1[] = { 7787 { "CplCmdIn", 56, 8 }, 7788 { "CplCmdOut", 48, 8 }, 7789 { "ESynOut", 47, 1 }, 7790 { "EAckOut", 46, 1 }, 7791 { "EFinOut", 45, 1 }, 7792 { "ERstOut", 44, 1 }, 7793 { "SynIn", 43, 1 }, 7794 { "AckIn", 42, 1 }, 7795 { "FinIn", 41, 1 }, 7796 { "RstIn", 40, 1 }, 7797 { "DataIn", 39, 1 }, 7798 { "DataInVld", 38, 1 }, 7799 { "PadIn", 37, 1 }, 7800 { "RxBufEmpty", 36, 1 }, 7801 { "RxDdp", 35, 1 }, 7802 { "RxFbCongestion", 34, 1 }, 7803 { "TxFbCongestion", 33, 1 }, 7804 { "TxPktSumSrdy", 32, 1 }, 7805 { "RcfUlpType", 28, 4 }, 7806 { "Eread", 27, 1 }, 7807 { "Ebypass", 26, 1 }, 7808 { "Esave", 25, 1 }, 7809 { "Static0", 24, 1 }, 7810 { "Cread", 23, 1 }, 7811 { "Cbypass", 22, 1 }, 7812 { "Csave", 21, 1 }, 7813 { "CPktOut", 20, 1 }, 7814 { "RxPagePoolFull", 18, 2 }, 7815 { "RxLpbkPkt", 17, 1 }, 7816 { "TxLpbkPkt", 16, 1 }, 7817 { "RxVfValid", 15, 1 }, 7818 { "SynLearned", 14, 1 }, 7819 { "SetDelEntry", 13, 1 }, 7820 { "SetInvEntry", 12, 1 }, 7821 { "CpcmdDvld", 11, 1 }, 7822 { "CpcmdSave", 10, 1 }, 7823 { "RxPstructsFull", 8, 2 }, 7824 { "EpcmdDvld", 7, 1 }, 7825 { "EpcmdFlush", 6, 1 }, 7826 { "EpcmdTrimPrefix", 5, 1 }, 7827 { "EpcmdTrimPostfix", 4, 1 }, 7828 { "ERssIp4Pkt", 3, 1 }, 7829 { "ERssIp6Pkt", 2, 1 }, 7830 { "ERssTcpUdpPkt", 1, 1 }, 7831 { "ERssFceFipPkt", 0, 1 }, 7832 { NULL } 7833 }; 7834 7835 static const struct field_desc tp_la2[] = { 7836 { "CplCmdIn", 56, 8 }, 7837 { "MpsVfVld", 55, 1 }, 7838 { "MpsPf", 52, 3 }, 7839 { "MpsVf", 44, 8 }, 7840 { "SynIn", 43, 1 }, 7841 { "AckIn", 42, 1 }, 7842 { "FinIn", 41, 1 }, 7843 { "RstIn", 40, 1 }, 7844 { "DataIn", 39, 1 }, 7845 { "DataInVld", 38, 1 }, 7846 { "PadIn", 37, 1 }, 7847 { "RxBufEmpty", 36, 1 }, 7848 { "RxDdp", 35, 1 }, 7849 { "RxFbCongestion", 34, 1 }, 7850 { "TxFbCongestion", 33, 1 }, 7851 { "TxPktSumSrdy", 32, 1 }, 7852 { "RcfUlpType", 28, 4 }, 7853 { "Eread", 27, 1 }, 7854 { "Ebypass", 26, 1 }, 7855 { "Esave", 25, 1 }, 7856 { "Static0", 24, 1 }, 7857 { "Cread", 23, 1 }, 7858 { "Cbypass", 22, 1 }, 7859 { "Csave", 21, 1 }, 7860 { "CPktOut", 20, 1 }, 7861 { "RxPagePoolFull", 18, 2 }, 7862 { "RxLpbkPkt", 17, 1 }, 7863 { "TxLpbkPkt", 16, 1 }, 7864 { "RxVfValid", 15, 1 }, 7865 { "SynLearned", 14, 1 }, 7866 { "SetDelEntry", 13, 1 }, 7867 { "SetInvEntry", 12, 1 }, 7868 { "CpcmdDvld", 11, 1 }, 7869 { "CpcmdSave", 10, 1 }, 7870 { "RxPstructsFull", 8, 2 }, 7871 { "EpcmdDvld", 7, 1 }, 7872 { "EpcmdFlush", 6, 1 }, 7873 { "EpcmdTrimPrefix", 5, 1 }, 7874 { "EpcmdTrimPostfix", 4, 1 }, 7875 { "ERssIp4Pkt", 3, 1 }, 7876 { "ERssIp6Pkt", 2, 1 }, 7877 { "ERssTcpUdpPkt", 1, 1 }, 7878 { "ERssFceFipPkt", 0, 1 }, 7879 { NULL } 7880 }; 7881 7882 static void 7883 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7884 { 7885 7886 field_desc_show(sb, *p, tp_la0); 7887 } 7888 7889 static void 7890 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7891 { 7892 7893 if (idx) 7894 sbuf_printf(sb, "\n"); 7895 field_desc_show(sb, p[0], tp_la0); 7896 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7897 field_desc_show(sb, p[1], tp_la0); 7898 } 7899 7900 static void 7901 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7902 { 7903 7904 if (idx) 7905 sbuf_printf(sb, "\n"); 7906 field_desc_show(sb, p[0], tp_la0); 7907 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7908 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7909 } 7910 7911 static int 7912 sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7913 { 7914 struct adapter *sc = arg1; 7915 struct sbuf *sb; 7916 uint64_t *buf, *p; 7917 int rc; 7918 u_int i, inc; 7919 void (*show_func)(struct sbuf *, uint64_t *, int); 7920 7921 rc = sysctl_wire_old_buffer(req, 0); 7922 if (rc != 0) 7923 return (rc); 7924 7925 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7926 if (sb == NULL) 7927 return (ENOMEM); 7928 7929 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7930 7931 t4_tp_read_la(sc, buf, NULL); 7932 p = buf; 7933 7934 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7935 case 2: 7936 inc = 2; 7937 show_func = tp_la_show2; 7938 break; 7939 case 3: 7940 inc = 2; 7941 show_func = tp_la_show3; 7942 break; 7943 default: 7944 inc = 1; 7945 show_func = tp_la_show; 7946 } 7947 7948 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7949 (*show_func)(sb, p, i); 7950 7951 rc = sbuf_finish(sb); 7952 sbuf_delete(sb); 7953 free(buf, M_CXGBE); 7954 return (rc); 7955 } 7956 7957 static int 7958 sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7959 { 7960 struct adapter *sc = arg1; 7961 struct sbuf *sb; 7962 int rc; 7963 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7964 7965 rc = sysctl_wire_old_buffer(req, 0); 7966 if (rc != 0) 7967 return (rc); 7968 7969 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7970 if (sb == NULL) 7971 return (ENOMEM); 7972 7973 t4_get_chan_txrate(sc, nrate, orate); 7974 7975 if (sc->chip_params->nchan > 2) { 7976 sbuf_printf(sb, " channel 0 channel 1" 7977 " channel 2 channel 3\n"); 7978 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7979 nrate[0], nrate[1], nrate[2], nrate[3]); 7980 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7981 orate[0], orate[1], orate[2], orate[3]); 7982 } else { 7983 sbuf_printf(sb, " channel 0 channel 1\n"); 7984 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7985 nrate[0], nrate[1]); 7986 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7987 orate[0], orate[1]); 7988 } 7989 7990 rc = sbuf_finish(sb); 7991 sbuf_delete(sb); 7992 7993 return (rc); 7994 } 7995 7996 static int 7997 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7998 { 7999 struct adapter *sc = arg1; 8000 struct sbuf *sb; 8001 uint32_t *buf, *p; 8002 int rc, i; 8003 8004 rc = sysctl_wire_old_buffer(req, 0); 8005 if (rc != 0) 8006 return (rc); 8007 8008 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8009 if (sb == NULL) 8010 return (ENOMEM); 8011 8012 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 8013 M_ZERO | M_WAITOK); 8014 8015 t4_ulprx_read_la(sc, buf); 8016 p = buf; 8017 8018 sbuf_printf(sb, " Pcmd Type Message" 8019 " Data"); 8020 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 8021 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 8022 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 8023 } 8024 8025 rc = sbuf_finish(sb); 8026 sbuf_delete(sb); 8027 free(buf, M_CXGBE); 8028 return (rc); 8029 } 8030 8031 static int 8032 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 8033 { 8034 struct adapter *sc = arg1; 8035 struct sbuf *sb; 8036 int rc, v; 8037 8038 MPASS(chip_id(sc) >= CHELSIO_T5); 8039 8040 rc = sysctl_wire_old_buffer(req, 0); 8041 if (rc != 0) 8042 return (rc); 8043 8044 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8045 if (sb == NULL) 8046 return (ENOMEM); 8047 8048 v = t4_read_reg(sc, A_SGE_STAT_CFG); 8049 if (G_STATSOURCE_T5(v) == 7) { 8050 int mode; 8051 8052 mode = is_t5(sc) ? 
G_STATMODE(v) : G_T6_STATMODE(v); 8053 if (mode == 0) { 8054 sbuf_printf(sb, "total %d, incomplete %d", 8055 t4_read_reg(sc, A_SGE_STAT_TOTAL), 8056 t4_read_reg(sc, A_SGE_STAT_MATCH)); 8057 } else if (mode == 1) { 8058 sbuf_printf(sb, "total %d, data overflow %d", 8059 t4_read_reg(sc, A_SGE_STAT_TOTAL), 8060 t4_read_reg(sc, A_SGE_STAT_MATCH)); 8061 } else { 8062 sbuf_printf(sb, "unknown mode %d", mode); 8063 } 8064 } 8065 rc = sbuf_finish(sb); 8066 sbuf_delete(sb); 8067 8068 return (rc); 8069 } 8070 8071 static int 8072 sysctl_tc_params(SYSCTL_HANDLER_ARGS) 8073 { 8074 struct adapter *sc = arg1; 8075 struct tx_cl_rl_params tc; 8076 struct sbuf *sb; 8077 int i, rc, port_id, mbps, gbps; 8078 8079 rc = sysctl_wire_old_buffer(req, 0); 8080 if (rc != 0) 8081 return (rc); 8082 8083 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8084 if (sb == NULL) 8085 return (ENOMEM); 8086 8087 port_id = arg2 >> 16; 8088 MPASS(port_id < sc->params.nports); 8089 MPASS(sc->port[port_id] != NULL); 8090 i = arg2 & 0xffff; 8091 MPASS(i < sc->chip_params->nsched_cls); 8092 8093 mtx_lock(&sc->tc_lock); 8094 tc = sc->port[port_id]->sched_params->cl_rl[i]; 8095 mtx_unlock(&sc->tc_lock); 8096 8097 if (tc.flags & TX_CLRL_ERROR) { 8098 sbuf_printf(sb, "error"); 8099 goto done; 8100 } 8101 8102 if (tc.ratemode == SCHED_CLASS_RATEMODE_REL) { 8103 /* XXX: top speed or actual link speed? */ 8104 gbps = port_top_speed(sc->port[port_id]); 8105 sbuf_printf(sb, " %u%% of %uGbps", tc.maxrate, gbps); 8106 } else if (tc.ratemode == SCHED_CLASS_RATEMODE_ABS) { 8107 switch (tc.rateunit) { 8108 case SCHED_CLASS_RATEUNIT_BITS: 8109 mbps = tc.maxrate / 1000; 8110 gbps = tc.maxrate / 1000000; 8111 if (tc.maxrate == gbps * 1000000) 8112 sbuf_printf(sb, " %uGbps", gbps); 8113 else if (tc.maxrate == mbps * 1000) 8114 sbuf_printf(sb, " %uMbps", mbps); 8115 else 8116 sbuf_printf(sb, " %uKbps", tc.maxrate); 8117 break; 8118 case SCHED_CLASS_RATEUNIT_PKTS: 8119 sbuf_printf(sb, " %upps", tc.maxrate); 8120 break; 8121 default: 8122 rc = ENXIO; 8123 goto done; 8124 } 8125 } 8126 8127 switch (tc.mode) { 8128 case SCHED_CLASS_MODE_CLASS: 8129 sbuf_printf(sb, " aggregate"); 8130 break; 8131 case SCHED_CLASS_MODE_FLOW: 8132 sbuf_printf(sb, " per-flow"); 8133 break; 8134 default: 8135 rc = ENXIO; 8136 goto done; 8137 } 8138 8139 done: 8140 if (rc == 0) 8141 rc = sbuf_finish(sb); 8142 sbuf_delete(sb); 8143 8144 return (rc); 8145 } 8146 #endif 8147 8148 #ifdef TCP_OFFLOAD 8149 static void 8150 unit_conv(char *buf, size_t len, u_int val, u_int factor) 8151 { 8152 u_int rem = val % factor; 8153 8154 if (rem == 0) 8155 snprintf(buf, len, "%u", val / factor); 8156 else { 8157 while (rem % 10 == 0) 8158 rem /= 10; 8159 snprintf(buf, len, "%u.%u", val / factor, rem); 8160 } 8161 } 8162 8163 static int 8164 sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 8165 { 8166 struct adapter *sc = arg1; 8167 char buf[16]; 8168 u_int res, re; 8169 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8170 8171 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 8172 switch (arg2) { 8173 case 0: 8174 /* timer_tick */ 8175 re = G_TIMERRESOLUTION(res); 8176 break; 8177 case 1: 8178 /* TCP timestamp tick */ 8179 re = G_TIMESTAMPRESOLUTION(res); 8180 break; 8181 case 2: 8182 /* DACK tick */ 8183 re = G_DELAYEDACKRESOLUTION(res); 8184 break; 8185 default: 8186 return (EDOOFUS); 8187 } 8188 8189 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); 8190 8191 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 8192 } 8193 8194 static int 8195 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 8196 { 
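	/*
	 * Worked example (illustrative numbers, not from any particular
	 * card): the VPD reports the core clock in kHz, so a 200MHz part
	 * has cclk = 200000 and cclk_ps = 1000000000 / 200000 = 5000ps per
	 * cycle.  If the delayed-ACK resolution field reads 10, one DACK
	 * tick is (5000 << 10)ps, which the integer division below turns
	 * into 5us, so the sysctl reports 5 * A_TP_DACK_TIMER microseconds.
	 */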
8197 struct adapter *sc = arg1; 8198 u_int res, dack_re, v; 8199 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8200 8201 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 8202 dack_re = G_DELAYEDACKRESOLUTION(res); 8203 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 8204 8205 return (sysctl_handle_int(oidp, &v, 0, req)); 8206 } 8207 8208 static int 8209 sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 8210 { 8211 struct adapter *sc = arg1; 8212 int reg = arg2; 8213 u_int tre; 8214 u_long tp_tick_us, v; 8215 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8216 8217 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 8218 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 8219 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || 8220 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); 8221 8222 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 8223 tp_tick_us = (cclk_ps << tre) / 1000000; 8224 8225 if (reg == A_TP_INIT_SRTT) 8226 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 8227 else 8228 v = tp_tick_us * t4_read_reg(sc, reg); 8229 8230 return (sysctl_handle_long(oidp, &v, 0, req)); 8231 } 8232 8233 /* 8234 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is 8235 * passed to this function. 8236 */ 8237 static int 8238 sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS) 8239 { 8240 struct adapter *sc = arg1; 8241 int idx = arg2; 8242 u_int v; 8243 8244 MPASS(idx >= 0 && idx <= 24); 8245 8246 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf; 8247 8248 return (sysctl_handle_int(oidp, &v, 0, req)); 8249 } 8250 8251 static int 8252 sysctl_tp_backoff(SYSCTL_HANDLER_ARGS) 8253 { 8254 struct adapter *sc = arg1; 8255 int idx = arg2; 8256 u_int shift, v, r; 8257 8258 MPASS(idx >= 0 && idx < 16); 8259 8260 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3); 8261 shift = (idx & 3) << 3; 8262 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0; 8263 8264 return (sysctl_handle_int(oidp, &v, 0, req)); 8265 } 8266 #endif 8267 8268 static uint32_t 8269 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 8270 { 8271 uint32_t mode; 8272 8273 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 8274 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 8275 8276 if (fconf & F_FRAGMENTATION) 8277 mode |= T4_FILTER_IP_FRAGMENT; 8278 8279 if (fconf & F_MPSHITTYPE) 8280 mode |= T4_FILTER_MPS_HIT_TYPE; 8281 8282 if (fconf & F_MACMATCH) 8283 mode |= T4_FILTER_MAC_IDX; 8284 8285 if (fconf & F_ETHERTYPE) 8286 mode |= T4_FILTER_ETH_TYPE; 8287 8288 if (fconf & F_PROTOCOL) 8289 mode |= T4_FILTER_IP_PROTO; 8290 8291 if (fconf & F_TOS) 8292 mode |= T4_FILTER_IP_TOS; 8293 8294 if (fconf & F_VLAN) 8295 mode |= T4_FILTER_VLAN; 8296 8297 if (fconf & F_VNIC_ID) { 8298 mode |= T4_FILTER_VNIC; 8299 if (iconf & F_VNIC) 8300 mode |= T4_FILTER_IC_VNIC; 8301 } 8302 8303 if (fconf & F_PORT) 8304 mode |= T4_FILTER_PORT; 8305 8306 if (fconf & F_FCOE) 8307 mode |= T4_FILTER_FCoE; 8308 8309 return (mode); 8310 } 8311 8312 static uint32_t 8313 mode_to_fconf(uint32_t mode) 8314 { 8315 uint32_t fconf = 0; 8316 8317 if (mode & T4_FILTER_IP_FRAGMENT) 8318 fconf |= F_FRAGMENTATION; 8319 8320 if (mode & T4_FILTER_MPS_HIT_TYPE) 8321 fconf |= F_MPSHITTYPE; 8322 8323 if (mode & T4_FILTER_MAC_IDX) 8324 fconf |= F_MACMATCH; 8325 8326 if (mode & T4_FILTER_ETH_TYPE) 8327 fconf |= F_ETHERTYPE; 8328 8329 if (mode & T4_FILTER_IP_PROTO) 8330 fconf |= F_PROTOCOL; 8331 8332 if (mode & T4_FILTER_IP_TOS) 8333 fconf |= F_TOS; 8334 8335 if (mode & T4_FILTER_VLAN) 8336 fconf |= F_VLAN; 8337 8338 if (mode 
& T4_FILTER_VNIC) 8339 fconf |= F_VNIC_ID; 8340 8341 if (mode & T4_FILTER_PORT) 8342 fconf |= F_PORT; 8343 8344 if (mode & T4_FILTER_FCoE) 8345 fconf |= F_FCOE; 8346 8347 return (fconf); 8348 } 8349 8350 static uint32_t 8351 mode_to_iconf(uint32_t mode) 8352 { 8353 8354 if (mode & T4_FILTER_IC_VNIC) 8355 return (F_VNIC); 8356 return (0); 8357 } 8358 8359 static int check_fspec_against_fconf_iconf(struct adapter *sc, 8360 struct t4_filter_specification *fs) 8361 { 8362 struct tp_params *tpp = &sc->params.tp; 8363 uint32_t fconf = 0; 8364 8365 if (fs->val.frag || fs->mask.frag) 8366 fconf |= F_FRAGMENTATION; 8367 8368 if (fs->val.matchtype || fs->mask.matchtype) 8369 fconf |= F_MPSHITTYPE; 8370 8371 if (fs->val.macidx || fs->mask.macidx) 8372 fconf |= F_MACMATCH; 8373 8374 if (fs->val.ethtype || fs->mask.ethtype) 8375 fconf |= F_ETHERTYPE; 8376 8377 if (fs->val.proto || fs->mask.proto) 8378 fconf |= F_PROTOCOL; 8379 8380 if (fs->val.tos || fs->mask.tos) 8381 fconf |= F_TOS; 8382 8383 if (fs->val.vlan_vld || fs->mask.vlan_vld) 8384 fconf |= F_VLAN; 8385 8386 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 8387 fconf |= F_VNIC_ID; 8388 if (tpp->ingress_config & F_VNIC) 8389 return (EINVAL); 8390 } 8391 8392 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 8393 fconf |= F_VNIC_ID; 8394 if ((tpp->ingress_config & F_VNIC) == 0) 8395 return (EINVAL); 8396 } 8397 8398 if (fs->val.iport || fs->mask.iport) 8399 fconf |= F_PORT; 8400 8401 if (fs->val.fcoe || fs->mask.fcoe) 8402 fconf |= F_FCOE; 8403 8404 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 8405 return (E2BIG); 8406 8407 return (0); 8408 } 8409 8410 static int 8411 get_filter_mode(struct adapter *sc, uint32_t *mode) 8412 { 8413 struct tp_params *tpp = &sc->params.tp; 8414 8415 /* 8416 * We trust the cached values of the relevant TP registers. This means 8417 * things work reliably only if writes to those registers are always via 8418 * t4_set_filter_mode. 8419 */ 8420 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 8421 8422 return (0); 8423 } 8424 8425 static int 8426 set_filter_mode(struct adapter *sc, uint32_t mode) 8427 { 8428 struct tp_params *tpp = &sc->params.tp; 8429 uint32_t fconf, iconf; 8430 int rc; 8431 8432 iconf = mode_to_iconf(mode); 8433 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 8434 /* 8435 * For now we just complain if A_TP_INGRESS_CONFIG is not 8436 * already set to the correct value for the requested filter 8437 * mode. It's not clear if it's safe to write to this register 8438 * on the fly. (And we trust the cached value of the register). 
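		 * In practice this means the outer-VLAN vs. PF/VF
		 * interpretation of the VNIC field must already match what
		 * the caller asked for; a mode that needs the other
		 * interpretation gets EBUSY below.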
8439 */ 8440 return (EBUSY); 8441 } 8442 8443 fconf = mode_to_fconf(mode); 8444 8445 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8446 "t4setfm"); 8447 if (rc) 8448 return (rc); 8449 8450 if (sc->tids.ftids_in_use > 0) { 8451 rc = EBUSY; 8452 goto done; 8453 } 8454 8455 #ifdef TCP_OFFLOAD 8456 if (uld_active(sc, ULD_TOM)) { 8457 rc = EBUSY; 8458 goto done; 8459 } 8460 #endif 8461 8462 rc = -t4_set_filter_mode(sc, fconf); 8463 done: 8464 end_synchronized_op(sc, LOCK_HELD); 8465 return (rc); 8466 } 8467 8468 static inline uint64_t 8469 get_filter_hits(struct adapter *sc, uint32_t fid) 8470 { 8471 uint32_t tcb_addr; 8472 8473 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 8474 (fid + sc->tids.ftid_base) * TCB_SIZE; 8475 8476 if (is_t4(sc)) { 8477 uint64_t hits; 8478 8479 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 8480 return (be64toh(hits)); 8481 } else { 8482 uint32_t hits; 8483 8484 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 8485 return (be32toh(hits)); 8486 } 8487 } 8488 8489 static int 8490 get_filter(struct adapter *sc, struct t4_filter *t) 8491 { 8492 int i, rc, nfilters = sc->tids.nftids; 8493 struct filter_entry *f; 8494 8495 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8496 "t4getf"); 8497 if (rc) 8498 return (rc); 8499 8500 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 8501 t->idx >= nfilters) { 8502 t->idx = 0xffffffff; 8503 goto done; 8504 } 8505 8506 f = &sc->tids.ftid_tab[t->idx]; 8507 for (i = t->idx; i < nfilters; i++, f++) { 8508 if (f->valid) { 8509 t->idx = i; 8510 t->l2tidx = f->l2t ? f->l2t->idx : 0; 8511 t->smtidx = f->smtidx; 8512 if (f->fs.hitcnts) 8513 t->hits = get_filter_hits(sc, t->idx); 8514 else 8515 t->hits = UINT64_MAX; 8516 t->fs = f->fs; 8517 8518 goto done; 8519 } 8520 } 8521 8522 t->idx = 0xffffffff; 8523 done: 8524 end_synchronized_op(sc, LOCK_HELD); 8525 return (0); 8526 } 8527 8528 static int 8529 set_filter(struct adapter *sc, struct t4_filter *t) 8530 { 8531 unsigned int nfilters, nports; 8532 struct filter_entry *f; 8533 int i, rc; 8534 8535 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 8536 if (rc) 8537 return (rc); 8538 8539 nfilters = sc->tids.nftids; 8540 nports = sc->params.nports; 8541 8542 if (nfilters == 0) { 8543 rc = ENOTSUP; 8544 goto done; 8545 } 8546 8547 if (t->idx >= nfilters) { 8548 rc = EINVAL; 8549 goto done; 8550 } 8551 8552 /* Validate against the global filter mode and ingress config */ 8553 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 8554 if (rc != 0) 8555 goto done; 8556 8557 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 8558 rc = EINVAL; 8559 goto done; 8560 } 8561 8562 if (t->fs.val.iport >= nports) { 8563 rc = EINVAL; 8564 goto done; 8565 } 8566 8567 /* Can't specify an iq if not steering to it */ 8568 if (!t->fs.dirsteer && t->fs.iq) { 8569 rc = EINVAL; 8570 goto done; 8571 } 8572 8573 /* IPv6 filter idx must be 4 aligned */ 8574 if (t->fs.type == 1 && 8575 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 8576 rc = EINVAL; 8577 goto done; 8578 } 8579 8580 if (!(sc->flags & FULL_INIT_DONE) && 8581 ((rc = adapter_full_init(sc)) != 0)) 8582 goto done; 8583 8584 if (sc->tids.ftid_tab == NULL) { 8585 KASSERT(sc->tids.ftids_in_use == 0, 8586 ("%s: no memory allocated but filters_in_use > 0", 8587 __func__)); 8588 8589 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 8590 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 8591 if (sc->tids.ftid_tab == NULL) { 8592 rc = ENOMEM; 8593 goto done; 8594 } 8595 
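		/*
		 * Table allocated; create the lock that protects the
		 * pending/valid handshake with t4_filter_rpl.
		 */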
mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 8596 } 8597 8598 for (i = 0; i < 4; i++) { 8599 f = &sc->tids.ftid_tab[t->idx + i]; 8600 8601 if (f->pending || f->valid) { 8602 rc = EBUSY; 8603 goto done; 8604 } 8605 if (f->locked) { 8606 rc = EPERM; 8607 goto done; 8608 } 8609 8610 if (t->fs.type == 0) 8611 break; 8612 } 8613 8614 f = &sc->tids.ftid_tab[t->idx]; 8615 f->fs = t->fs; 8616 8617 rc = set_filter_wr(sc, t->idx); 8618 done: 8619 end_synchronized_op(sc, 0); 8620 8621 if (rc == 0) { 8622 mtx_lock(&sc->tids.ftid_lock); 8623 for (;;) { 8624 if (f->pending == 0) { 8625 rc = f->valid ? 0 : EIO; 8626 break; 8627 } 8628 8629 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8630 PCATCH, "t4setfw", 0)) { 8631 rc = EINPROGRESS; 8632 break; 8633 } 8634 } 8635 mtx_unlock(&sc->tids.ftid_lock); 8636 } 8637 return (rc); 8638 } 8639 8640 static int 8641 del_filter(struct adapter *sc, struct t4_filter *t) 8642 { 8643 unsigned int nfilters; 8644 struct filter_entry *f; 8645 int rc; 8646 8647 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 8648 if (rc) 8649 return (rc); 8650 8651 nfilters = sc->tids.nftids; 8652 8653 if (nfilters == 0) { 8654 rc = ENOTSUP; 8655 goto done; 8656 } 8657 8658 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 8659 t->idx >= nfilters) { 8660 rc = EINVAL; 8661 goto done; 8662 } 8663 8664 if (!(sc->flags & FULL_INIT_DONE)) { 8665 rc = EAGAIN; 8666 goto done; 8667 } 8668 8669 f = &sc->tids.ftid_tab[t->idx]; 8670 8671 if (f->pending) { 8672 rc = EBUSY; 8673 goto done; 8674 } 8675 if (f->locked) { 8676 rc = EPERM; 8677 goto done; 8678 } 8679 8680 if (f->valid) { 8681 t->fs = f->fs; /* extra info for the caller */ 8682 rc = del_filter_wr(sc, t->idx); 8683 } 8684 8685 done: 8686 end_synchronized_op(sc, 0); 8687 8688 if (rc == 0) { 8689 mtx_lock(&sc->tids.ftid_lock); 8690 for (;;) { 8691 if (f->pending == 0) { 8692 rc = f->valid ? EIO : 0; 8693 break; 8694 } 8695 8696 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8697 PCATCH, "t4delfw", 0)) { 8698 rc = EINPROGRESS; 8699 break; 8700 } 8701 } 8702 mtx_unlock(&sc->tids.ftid_lock); 8703 } 8704 8705 return (rc); 8706 } 8707 8708 static void 8709 clear_filter(struct filter_entry *f) 8710 { 8711 if (f->l2t) 8712 t4_l2t_release(f->l2t); 8713 8714 bzero(f, sizeof (*f)); 8715 } 8716 8717 static int 8718 set_filter_wr(struct adapter *sc, int fidx) 8719 { 8720 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8721 struct fw_filter_wr *fwr; 8722 unsigned int ftid, vnic_vld, vnic_vld_mask; 8723 struct wrq_cookie cookie; 8724 8725 ASSERT_SYNCHRONIZED_OP(sc); 8726 8727 if (f->fs.newdmac || f->fs.newvlan) { 8728 /* This filter needs an L2T entry; allocate one. 
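		 * The switching entry carries the new VLAN and destination
		 * MAC that will be applied to packets the filter switches.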
*/ 8729 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8730 if (f->l2t == NULL) 8731 return (EAGAIN); 8732 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8733 f->fs.dmac)) { 8734 t4_l2t_release(f->l2t); 8735 f->l2t = NULL; 8736 return (ENOMEM); 8737 } 8738 } 8739 8740 /* Already validated against fconf, iconf */ 8741 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 8742 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 8743 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 8744 vnic_vld = 1; 8745 else 8746 vnic_vld = 0; 8747 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 8748 vnic_vld_mask = 1; 8749 else 8750 vnic_vld_mask = 0; 8751 8752 ftid = sc->tids.ftid_base + fidx; 8753 8754 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8755 if (fwr == NULL) 8756 return (ENOMEM); 8757 bzero(fwr, sizeof(*fwr)); 8758 8759 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 8760 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 8761 fwr->tid_to_iq = 8762 htobe32(V_FW_FILTER_WR_TID(ftid) | 8763 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 8764 V_FW_FILTER_WR_NOREPLY(0) | 8765 V_FW_FILTER_WR_IQ(f->fs.iq)); 8766 fwr->del_filter_to_l2tix = 8767 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 8768 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 8769 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 8770 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 8771 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 8772 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 8773 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 8774 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 8775 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 8776 f->fs.newvlan == VLAN_REWRITE) | 8777 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 8778 f->fs.newvlan == VLAN_REWRITE) | 8779 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 8780 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 8781 V_FW_FILTER_WR_PRIO(f->fs.prio) | 8782 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 8783 fwr->ethtype = htobe16(f->fs.val.ethtype); 8784 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 8785 fwr->frag_to_ovlan_vldm = 8786 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 8787 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 8788 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 8789 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 8790 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 8791 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 8792 fwr->smac_sel = 0; 8793 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 8794 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 8795 fwr->maci_to_matchtypem = 8796 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 8797 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 8798 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 8799 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 8800 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 8801 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 8802 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 8803 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 8804 fwr->ptcl = f->fs.val.proto; 8805 fwr->ptclm = f->fs.mask.proto; 8806 fwr->ttyp = f->fs.val.tos; 8807 fwr->ttypm = f->fs.mask.tos; 8808 fwr->ivlan = htobe16(f->fs.val.vlan); 8809 fwr->ivlanm = htobe16(f->fs.mask.vlan); 8810 fwr->ovlan = htobe16(f->fs.val.vnic); 8811 fwr->ovlanm = htobe16(f->fs.mask.vnic); 8812 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 8813 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 8814 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 8815 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 8816 fwr->lp = htobe16(f->fs.val.dport); 8817 fwr->lpm = htobe16(f->fs.mask.dport); 8818 fwr->fp = htobe16(f->fs.val.sport); 8819 fwr->fpm = htobe16(f->fs.mask.sport); 8820 if (f->fs.newsmac) 8821 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 8822 8823 f->pending = 1; 8824 sc->tids.ftids_in_use++; 8825 8826 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8827 return (0); 8828 } 8829 8830 static int 8831 del_filter_wr(struct adapter *sc, int fidx) 8832 { 8833 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8834 struct fw_filter_wr *fwr; 8835 unsigned int ftid; 8836 struct wrq_cookie cookie; 8837 8838 ftid = sc->tids.ftid_base + fidx; 8839 8840 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8841 if (fwr == NULL) 8842 return (ENOMEM); 8843 bzero(fwr, sizeof (*fwr)); 8844 8845 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 8846 8847 f->pending = 1; 8848 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8849 return (0); 8850 } 8851 8852 int 8853 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8854 { 8855 struct adapter *sc = iq->adapter; 8856 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 8857 unsigned int idx = GET_TID(rpl); 8858 unsigned int rc; 8859 struct filter_entry *f; 8860 8861 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 8862 rss->opcode)); 8863 MPASS(iq == &sc->sge.fwq); 8864 MPASS(is_ftid(sc, idx)); 8865 8866 idx -= sc->tids.ftid_base; 8867 f = &sc->tids.ftid_tab[idx]; 8868 rc = G_COOKIE(rpl->cookie); 8869 8870 mtx_lock(&sc->tids.ftid_lock); 8871 if (rc == FW_FILTER_WR_FLT_ADDED) { 8872 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 8873 __func__, idx)); 8874 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 8875 f->pending = 0; /* asynchronous setup completed */ 8876 f->valid = 1; 8877 } else { 8878 if (rc != FW_FILTER_WR_FLT_DELETED) { 8879 /* Add or delete failed, display an error */ 8880 log(LOG_ERR, 8881 "filter %u setup failed with error %u\n", 8882 idx, rc); 8883 } 8884 8885 
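		/*
		 * A delete completed or an add failed; either way release
		 * the L2T entry (if any) and reclaim the slot.
		 */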
clear_filter(f); 8886 sc->tids.ftids_in_use--; 8887 } 8888 wakeup(&sc->tids.ftid_tab); 8889 mtx_unlock(&sc->tids.ftid_lock); 8890 8891 return (0); 8892 } 8893 8894 static int 8895 set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8896 { 8897 8898 MPASS(iq->set_tcb_rpl != NULL); 8899 return (iq->set_tcb_rpl(iq, rss, m)); 8900 } 8901 8902 static int 8903 l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8904 { 8905 8906 MPASS(iq->l2t_write_rpl != NULL); 8907 return (iq->l2t_write_rpl(iq, rss, m)); 8908 } 8909 8910 static int 8911 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 8912 { 8913 int rc; 8914 8915 if (cntxt->cid > M_CTXTQID) 8916 return (EINVAL); 8917 8918 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 8919 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 8920 return (EINVAL); 8921 8922 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 8923 if (rc) 8924 return (rc); 8925 8926 if (sc->flags & FW_OK) { 8927 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 8928 &cntxt->data[0]); 8929 if (rc == 0) 8930 goto done; 8931 } 8932 8933 /* 8934 * Read via firmware failed or wasn't even attempted. Read directly via 8935 * the backdoor. 8936 */ 8937 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 8938 done: 8939 end_synchronized_op(sc, 0); 8940 return (rc); 8941 } 8942 8943 static int 8944 load_fw(struct adapter *sc, struct t4_data *fw) 8945 { 8946 int rc; 8947 uint8_t *fw_data; 8948 8949 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 8950 if (rc) 8951 return (rc); 8952 8953 if (sc->flags & FULL_INIT_DONE) { 8954 rc = EBUSY; 8955 goto done; 8956 } 8957 8958 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 8959 if (fw_data == NULL) { 8960 rc = ENOMEM; 8961 goto done; 8962 } 8963 8964 rc = copyin(fw->data, fw_data, fw->len); 8965 if (rc == 0) 8966 rc = -t4_load_fw(sc, fw_data, fw->len); 8967 8968 free(fw_data, M_CXGBE); 8969 done: 8970 end_synchronized_op(sc, 0); 8971 return (rc); 8972 } 8973 8974 static int 8975 load_cfg(struct adapter *sc, struct t4_data *cfg) 8976 { 8977 int rc; 8978 uint8_t *cfg_data = NULL; 8979 8980 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 8981 if (rc) 8982 return (rc); 8983 8984 if (cfg->len == 0) { 8985 /* clear */ 8986 rc = -t4_load_cfg(sc, NULL, 0); 8987 goto done; 8988 } 8989 8990 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); 8991 if (cfg_data == NULL) { 8992 rc = ENOMEM; 8993 goto done; 8994 } 8995 8996 rc = copyin(cfg->data, cfg_data, cfg->len); 8997 if (rc == 0) 8998 rc = -t4_load_cfg(sc, cfg_data, cfg->len); 8999 9000 free(cfg_data, M_CXGBE); 9001 done: 9002 end_synchronized_op(sc, 0); 9003 return (rc); 9004 } 9005 9006 static int 9007 load_boot(struct adapter *sc, struct t4_bootrom *br) 9008 { 9009 int rc; 9010 uint8_t *br_data = NULL; 9011 u_int offset; 9012 9013 if (br->len > 1024 * 1024) 9014 return (EFBIG); 9015 9016 if (br->pf_offset == 0) { 9017 /* pfidx */ 9018 if (br->pfidx_addr > 7) 9019 return (EINVAL); 9020 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, 9021 A_PCIE_PF_EXPROM_OFST))); 9022 } else if (br->pf_offset == 1) { 9023 /* offset */ 9024 offset = G_OFFSET(br->pfidx_addr); 9025 } else { 9026 return (EINVAL); 9027 } 9028 9029 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); 9030 if (rc) 9031 return (rc); 9032 9033 if (br->len == 0) { 9034 /* clear */ 9035 rc = -t4_load_boot(sc, NULL, offset, 0); 9036 goto done; 9037 } 
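	/* Non-empty image: stage it in kernel memory, then write it out. */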
9038 9039 br_data = malloc(br->len, M_CXGBE, M_WAITOK); 9040 if (br_data == NULL) { 9041 rc = ENOMEM; 9042 goto done; 9043 } 9044 9045 rc = copyin(br->data, br_data, br->len); 9046 if (rc == 0) 9047 rc = -t4_load_boot(sc, br_data, offset, br->len); 9048 9049 free(br_data, M_CXGBE); 9050 done: 9051 end_synchronized_op(sc, 0); 9052 return (rc); 9053 } 9054 9055 static int 9056 load_bootcfg(struct adapter *sc, struct t4_data *bc) 9057 { 9058 int rc; 9059 uint8_t *bc_data = NULL; 9060 9061 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 9062 if (rc) 9063 return (rc); 9064 9065 if (bc->len == 0) { 9066 /* clear */ 9067 rc = -t4_load_bootcfg(sc, NULL, 0); 9068 goto done; 9069 } 9070 9071 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); 9072 if (bc_data == NULL) { 9073 rc = ENOMEM; 9074 goto done; 9075 } 9076 9077 rc = copyin(bc->data, bc_data, bc->len); 9078 if (rc == 0) 9079 rc = -t4_load_bootcfg(sc, bc_data, bc->len); 9080 9081 free(bc_data, M_CXGBE); 9082 done: 9083 end_synchronized_op(sc, 0); 9084 return (rc); 9085 } 9086 9087 #define MAX_READ_BUF_SIZE (128 * 1024) 9088 static int 9089 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 9090 { 9091 uint32_t addr, remaining, n; 9092 uint32_t *buf; 9093 int rc; 9094 uint8_t *dst; 9095 9096 rc = validate_mem_range(sc, mr->addr, mr->len); 9097 if (rc != 0) 9098 return (rc); 9099 9100 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 9101 addr = mr->addr; 9102 remaining = mr->len; 9103 dst = (void *)mr->data; 9104 9105 while (remaining) { 9106 n = min(remaining, MAX_READ_BUF_SIZE); 9107 read_via_memwin(sc, 2, addr, buf, n); 9108 9109 rc = copyout(buf, dst, n); 9110 if (rc != 0) 9111 break; 9112 9113 dst += n; 9114 remaining -= n; 9115 addr += n; 9116 } 9117 9118 free(buf, M_CXGBE); 9119 return (rc); 9120 } 9121 #undef MAX_READ_BUF_SIZE 9122 9123 static int 9124 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 9125 { 9126 int rc; 9127 9128 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 9129 return (EINVAL); 9130 9131 if (i2cd->len > sizeof(i2cd->data)) 9132 return (EFBIG); 9133 9134 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 9135 if (rc) 9136 return (rc); 9137 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 9138 i2cd->offset, i2cd->len, &i2cd->data[0]); 9139 end_synchronized_op(sc, 0); 9140 9141 return (rc); 9142 } 9143 9144 int 9145 t4_os_find_pci_capability(struct adapter *sc, int cap) 9146 { 9147 int i; 9148 9149 return (pci_find_cap(sc->dev, cap, &i) == 0 ? 
i : 0); 9150 } 9151 9152 int 9153 t4_os_pci_save_state(struct adapter *sc) 9154 { 9155 device_t dev; 9156 struct pci_devinfo *dinfo; 9157 9158 dev = sc->dev; 9159 dinfo = device_get_ivars(dev); 9160 9161 pci_cfg_save(dev, dinfo, 0); 9162 return (0); 9163 } 9164 9165 int 9166 t4_os_pci_restore_state(struct adapter *sc) 9167 { 9168 device_t dev; 9169 struct pci_devinfo *dinfo; 9170 9171 dev = sc->dev; 9172 dinfo = device_get_ivars(dev); 9173 9174 pci_cfg_restore(dev, dinfo); 9175 return (0); 9176 } 9177 9178 void 9179 t4_os_portmod_changed(struct port_info *pi, int old_ptype, int old_mtype, 9180 struct link_config *old_lc) 9181 { 9182 struct vi_info *vi; 9183 struct ifnet *ifp; 9184 int v; 9185 static const char *mod_str[] = { 9186 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 9187 }; 9188 9189 PORT_LOCK(pi); 9190 for_each_vi(pi, v, vi) { 9191 build_medialist(pi, &vi->media); 9192 } 9193 PORT_UNLOCK(pi); 9194 if (begin_synchronized_op(pi->adapter, vi, HOLD_LOCK, "t4mod") == 0) { 9195 init_l1cfg(pi); 9196 end_synchronized_op(pi->adapter, LOCK_HELD); 9197 } 9198 9199 ifp = pi->vi[0].ifp; 9200 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 9201 if_printf(ifp, "transceiver unplugged.\n"); 9202 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 9203 if_printf(ifp, "unknown transceiver inserted.\n"); 9204 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 9205 if_printf(ifp, "unsupported transceiver inserted.\n"); 9206 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 9207 if_printf(ifp, "%dGbps %s transceiver inserted.\n", 9208 port_top_speed(pi), mod_str[pi->mod_type]); 9209 } else { 9210 if_printf(ifp, "transceiver (type %d) inserted.\n", 9211 pi->mod_type); 9212 } 9213 } 9214 9215 void 9216 t4_os_link_changed(struct port_info *pi, struct link_config *old_lc) 9217 { 9218 struct vi_info *vi; 9219 struct ifnet *ifp; 9220 struct link_config *lc; 9221 int v; 9222 9223 for_each_vi(pi, v, vi) { 9224 ifp = vi->ifp; 9225 if (ifp == NULL) 9226 continue; 9227 9228 lc = &pi->link_cfg; 9229 if (lc->link_ok) { 9230 ifp->if_baudrate = IF_Mbps(lc->speed); 9231 if_link_state_change(ifp, LINK_STATE_UP); 9232 } else { 9233 if_link_state_change(ifp, LINK_STATE_DOWN); 9234 } 9235 } 9236 } 9237 9238 void 9239 t4_iterate(void (*func)(struct adapter *, void *), void *arg) 9240 { 9241 struct adapter *sc; 9242 9243 sx_slock(&t4_list_lock); 9244 SLIST_FOREACH(sc, &t4_list, link) { 9245 /* 9246 * func should not make any assumptions about what state sc is 9247 * in - the only guarantee is that sc->sc_lock is a valid lock. 
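		 * (It may, for example, be partway through attach or detach.)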
9248 */ 9249 func(sc, arg); 9250 } 9251 sx_sunlock(&t4_list_lock); 9252 } 9253 9254 static int 9255 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 9256 struct thread *td) 9257 { 9258 int rc; 9259 struct adapter *sc = dev->si_drv1; 9260 9261 rc = priv_check(td, PRIV_DRIVER); 9262 if (rc != 0) 9263 return (rc); 9264 9265 switch (cmd) { 9266 case CHELSIO_T4_GETREG: { 9267 struct t4_reg *edata = (struct t4_reg *)data; 9268 9269 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9270 return (EFAULT); 9271 9272 if (edata->size == 4) 9273 edata->val = t4_read_reg(sc, edata->addr); 9274 else if (edata->size == 8) 9275 edata->val = t4_read_reg64(sc, edata->addr); 9276 else 9277 return (EINVAL); 9278 9279 break; 9280 } 9281 case CHELSIO_T4_SETREG: { 9282 struct t4_reg *edata = (struct t4_reg *)data; 9283 9284 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9285 return (EFAULT); 9286 9287 if (edata->size == 4) { 9288 if (edata->val & 0xffffffff00000000) 9289 return (EINVAL); 9290 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 9291 } else if (edata->size == 8) 9292 t4_write_reg64(sc, edata->addr, edata->val); 9293 else 9294 return (EINVAL); 9295 break; 9296 } 9297 case CHELSIO_T4_REGDUMP: { 9298 struct t4_regdump *regs = (struct t4_regdump *)data; 9299 int reglen = t4_get_regs_len(sc); 9300 uint8_t *buf; 9301 9302 if (regs->len < reglen) { 9303 regs->len = reglen; /* hint to the caller */ 9304 return (ENOBUFS); 9305 } 9306 9307 regs->len = reglen; 9308 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 9309 get_regs(sc, regs, buf); 9310 rc = copyout(buf, regs->data, reglen); 9311 free(buf, M_CXGBE); 9312 break; 9313 } 9314 case CHELSIO_T4_GET_FILTER_MODE: 9315 rc = get_filter_mode(sc, (uint32_t *)data); 9316 break; 9317 case CHELSIO_T4_SET_FILTER_MODE: 9318 rc = set_filter_mode(sc, *(uint32_t *)data); 9319 break; 9320 case CHELSIO_T4_GET_FILTER: 9321 rc = get_filter(sc, (struct t4_filter *)data); 9322 break; 9323 case CHELSIO_T4_SET_FILTER: 9324 rc = set_filter(sc, (struct t4_filter *)data); 9325 break; 9326 case CHELSIO_T4_DEL_FILTER: 9327 rc = del_filter(sc, (struct t4_filter *)data); 9328 break; 9329 case CHELSIO_T4_GET_SGE_CONTEXT: 9330 rc = get_sge_context(sc, (struct t4_sge_context *)data); 9331 break; 9332 case CHELSIO_T4_LOAD_FW: 9333 rc = load_fw(sc, (struct t4_data *)data); 9334 break; 9335 case CHELSIO_T4_GET_MEM: 9336 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 9337 break; 9338 case CHELSIO_T4_GET_I2C: 9339 rc = read_i2c(sc, (struct t4_i2c_data *)data); 9340 break; 9341 case CHELSIO_T4_CLEAR_STATS: { 9342 int i, v; 9343 u_int port_id = *(uint32_t *)data; 9344 struct port_info *pi; 9345 struct vi_info *vi; 9346 9347 if (port_id >= sc->params.nports) 9348 return (EINVAL); 9349 pi = sc->port[port_id]; 9350 if (pi == NULL) 9351 return (EIO); 9352 9353 /* MAC stats */ 9354 t4_clr_port_stats(sc, pi->tx_chan); 9355 pi->tx_parse_error = 0; 9356 mtx_lock(&sc->reg_lock); 9357 for_each_vi(pi, v, vi) { 9358 if (vi->flags & VI_INIT_DONE) 9359 t4_clr_vi_stats(sc, vi->viid); 9360 } 9361 mtx_unlock(&sc->reg_lock); 9362 9363 /* 9364 * Since this command accepts a port, clear stats for 9365 * all VIs on this port. 
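		 * The queue counters touched below are software statistics
		 * kept by the driver, not hardware registers.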
9366 */ 9367 for_each_vi(pi, v, vi) { 9368 if (vi->flags & VI_INIT_DONE) { 9369 struct sge_rxq *rxq; 9370 struct sge_txq *txq; 9371 struct sge_wrq *wrq; 9372 9373 for_each_rxq(vi, i, rxq) { 9374 #if defined(INET) || defined(INET6) 9375 rxq->lro.lro_queued = 0; 9376 rxq->lro.lro_flushed = 0; 9377 #endif 9378 rxq->rxcsum = 0; 9379 rxq->vlan_extraction = 0; 9380 } 9381 9382 for_each_txq(vi, i, txq) { 9383 txq->txcsum = 0; 9384 txq->tso_wrs = 0; 9385 txq->vlan_insertion = 0; 9386 txq->imm_wrs = 0; 9387 txq->sgl_wrs = 0; 9388 txq->txpkt_wrs = 0; 9389 txq->txpkts0_wrs = 0; 9390 txq->txpkts1_wrs = 0; 9391 txq->txpkts0_pkts = 0; 9392 txq->txpkts1_pkts = 0; 9393 mp_ring_reset_stats(txq->r); 9394 } 9395 9396 #ifdef TCP_OFFLOAD 9397 /* nothing to clear for each ofld_rxq */ 9398 9399 for_each_ofld_txq(vi, i, wrq) { 9400 wrq->tx_wrs_direct = 0; 9401 wrq->tx_wrs_copied = 0; 9402 } 9403 #endif 9404 9405 if (IS_MAIN_VI(vi)) { 9406 wrq = &sc->sge.ctrlq[pi->port_id]; 9407 wrq->tx_wrs_direct = 0; 9408 wrq->tx_wrs_copied = 0; 9409 } 9410 } 9411 } 9412 break; 9413 } 9414 case CHELSIO_T4_SCHED_CLASS: 9415 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 9416 break; 9417 case CHELSIO_T4_SCHED_QUEUE: 9418 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 9419 break; 9420 case CHELSIO_T4_GET_TRACER: 9421 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 9422 break; 9423 case CHELSIO_T4_SET_TRACER: 9424 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 9425 break; 9426 case CHELSIO_T4_LOAD_CFG: 9427 rc = load_cfg(sc, (struct t4_data *)data); 9428 break; 9429 case CHELSIO_T4_LOAD_BOOT: 9430 rc = load_boot(sc, (struct t4_bootrom *)data); 9431 break; 9432 case CHELSIO_T4_LOAD_BOOTCFG: 9433 rc = load_bootcfg(sc, (struct t4_data *)data); 9434 break; 9435 default: 9436 rc = ENOTTY; 9437 } 9438 9439 return (rc); 9440 } 9441 9442 void 9443 t4_db_full(struct adapter *sc) 9444 { 9445 9446 CXGBE_UNIMPLEMENTED(__func__); 9447 } 9448 9449 void 9450 t4_db_dropped(struct adapter *sc) 9451 { 9452 9453 CXGBE_UNIMPLEMENTED(__func__); 9454 } 9455 9456 #ifdef TCP_OFFLOAD 9457 static int 9458 toe_capability(struct vi_info *vi, int enable) 9459 { 9460 int rc; 9461 struct port_info *pi = vi->pi; 9462 struct adapter *sc = pi->adapter; 9463 9464 ASSERT_SYNCHRONIZED_OP(sc); 9465 9466 if (!is_offload(sc)) 9467 return (ENODEV); 9468 9469 if (enable) { 9470 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 9471 /* TOE is already enabled. */ 9472 return (0); 9473 } 9474 9475 /* 9476 * We need the port's queues around so that we're able to send 9477 * and receive CPLs to/from the TOE even if the ifnet for this 9478 * port has never been UP'd administratively. 9479 */ 9480 if (!(vi->flags & VI_INIT_DONE)) { 9481 rc = vi_full_init(vi); 9482 if (rc) 9483 return (rc); 9484 } 9485 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 9486 rc = vi_full_init(&pi->vi[0]); 9487 if (rc) 9488 return (rc); 9489 } 9490 9491 if (isset(&sc->offload_map, pi->port_id)) { 9492 /* TOE is enabled on another VI of this port. 
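			 * All VIs of a port share one TOE instance; uld_vis
			 * counts how many of them currently have it enabled.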
*/ 9493 pi->uld_vis++; 9494 return (0); 9495 } 9496 9497 if (!uld_active(sc, ULD_TOM)) { 9498 rc = t4_activate_uld(sc, ULD_TOM); 9499 if (rc == EAGAIN) { 9500 log(LOG_WARNING, 9501 "You must kldload t4_tom.ko before trying " 9502 "to enable TOE on a cxgbe interface.\n"); 9503 } 9504 if (rc != 0) 9505 return (rc); 9506 KASSERT(sc->tom_softc != NULL, 9507 ("%s: TOM activated but softc NULL", __func__)); 9508 KASSERT(uld_active(sc, ULD_TOM), 9509 ("%s: TOM activated but flag not set", __func__)); 9510 } 9511 9512 /* Activate iWARP and iSCSI too, if the modules are loaded. */ 9513 if (!uld_active(sc, ULD_IWARP)) 9514 (void) t4_activate_uld(sc, ULD_IWARP); 9515 if (!uld_active(sc, ULD_ISCSI)) 9516 (void) t4_activate_uld(sc, ULD_ISCSI); 9517 9518 pi->uld_vis++; 9519 setbit(&sc->offload_map, pi->port_id); 9520 } else { 9521 pi->uld_vis--; 9522 9523 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) 9524 return (0); 9525 9526 KASSERT(uld_active(sc, ULD_TOM), 9527 ("%s: TOM never initialized?", __func__)); 9528 clrbit(&sc->offload_map, pi->port_id); 9529 } 9530 9531 return (0); 9532 } 9533 9534 /* 9535 * Add an upper layer driver to the global list. 9536 */ 9537 int 9538 t4_register_uld(struct uld_info *ui) 9539 { 9540 int rc = 0; 9541 struct uld_info *u; 9542 9543 sx_xlock(&t4_uld_list_lock); 9544 SLIST_FOREACH(u, &t4_uld_list, link) { 9545 if (u->uld_id == ui->uld_id) { 9546 rc = EEXIST; 9547 goto done; 9548 } 9549 } 9550 9551 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 9552 ui->refcount = 0; 9553 done: 9554 sx_xunlock(&t4_uld_list_lock); 9555 return (rc); 9556 } 9557 9558 int 9559 t4_unregister_uld(struct uld_info *ui) 9560 { 9561 int rc = EINVAL; 9562 struct uld_info *u; 9563 9564 sx_xlock(&t4_uld_list_lock); 9565 9566 SLIST_FOREACH(u, &t4_uld_list, link) { 9567 if (u == ui) { 9568 if (ui->refcount > 0) { 9569 rc = EBUSY; 9570 goto done; 9571 } 9572 9573 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 9574 rc = 0; 9575 goto done; 9576 } 9577 } 9578 done: 9579 sx_xunlock(&t4_uld_list_lock); 9580 return (rc); 9581 } 9582 9583 int 9584 t4_activate_uld(struct adapter *sc, int id) 9585 { 9586 int rc; 9587 struct uld_info *ui; 9588 9589 ASSERT_SYNCHRONIZED_OP(sc); 9590 9591 if (id < 0 || id > ULD_MAX) 9592 return (EINVAL); 9593 rc = EAGAIN; /* kldload the module with this ULD and try again.
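	 * Only ULDs that registered themselves with t4_register_uld appear
	 * on the list searched below.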
*/ 9594 9595 sx_slock(&t4_uld_list_lock); 9596 9597 SLIST_FOREACH(ui, &t4_uld_list, link) { 9598 if (ui->uld_id == id) { 9599 if (!(sc->flags & FULL_INIT_DONE)) { 9600 rc = adapter_full_init(sc); 9601 if (rc != 0) 9602 break; 9603 } 9604 9605 rc = ui->activate(sc); 9606 if (rc == 0) { 9607 setbit(&sc->active_ulds, id); 9608 ui->refcount++; 9609 } 9610 break; 9611 } 9612 } 9613 9614 sx_sunlock(&t4_uld_list_lock); 9615 9616 return (rc); 9617 } 9618 9619 int 9620 t4_deactivate_uld(struct adapter *sc, int id) 9621 { 9622 int rc; 9623 struct uld_info *ui; 9624 9625 ASSERT_SYNCHRONIZED_OP(sc); 9626 9627 if (id < 0 || id > ULD_MAX) 9628 return (EINVAL); 9629 rc = ENXIO; 9630 9631 sx_slock(&t4_uld_list_lock); 9632 9633 SLIST_FOREACH(ui, &t4_uld_list, link) { 9634 if (ui->uld_id == id) { 9635 rc = ui->deactivate(sc); 9636 if (rc == 0) { 9637 clrbit(&sc->active_ulds, id); 9638 ui->refcount--; 9639 } 9640 break; 9641 } 9642 } 9643 9644 sx_sunlock(&t4_uld_list_lock); 9645 9646 return (rc); 9647 } 9648 9649 int 9650 uld_active(struct adapter *sc, int uld_id) 9651 { 9652 9653 MPASS(uld_id >= 0 && uld_id <= ULD_MAX); 9654 9655 return (isset(&sc->active_ulds, uld_id)); 9656 } 9657 #endif 9658 9659 /* 9660 * t = ptr to tunable. 9661 * nc = number of CPUs. 9662 * c = compiled in default for that tunable. 9663 */ 9664 static void 9665 calculate_nqueues(int *t, int nc, const int c) 9666 { 9667 int nq; 9668 9669 if (*t > 0) 9670 return; 9671 nq = *t < 0 ? -*t : c; 9672 *t = min(nc, nq); 9673 } 9674 9675 /* 9676 * Come up with reasonable defaults for some of the tunables, provided they're 9677 * not set by the user (in which case we'll use the values as is). 9678 */ 9679 static void 9680 tweak_tunables(void) 9681 { 9682 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 9683 9684 if (t4_ntxq10g < 1) { 9685 #ifdef RSS 9686 t4_ntxq10g = rss_getnumbuckets(); 9687 #else 9688 calculate_nqueues(&t4_ntxq10g, nc, NTXQ_10G); 9689 #endif 9690 } 9691 9692 if (t4_ntxq1g < 1) { 9693 #ifdef RSS 9694 /* XXX: way too many for 1GbE? */ 9695 t4_ntxq1g = rss_getnumbuckets(); 9696 #else 9697 calculate_nqueues(&t4_ntxq1g, nc, NTXQ_1G); 9698 #endif 9699 } 9700 9701 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); 9702 9703 if (t4_nrxq10g < 1) { 9704 #ifdef RSS 9705 t4_nrxq10g = rss_getnumbuckets(); 9706 #else 9707 calculate_nqueues(&t4_nrxq10g, nc, NRXQ_10G); 9708 #endif 9709 } 9710 9711 if (t4_nrxq1g < 1) { 9712 #ifdef RSS 9713 /* XXX: way too many for 1GbE? 
*/ 9714 t4_nrxq1g = rss_getnumbuckets(); 9715 #else 9716 calculate_nqueues(&t4_nrxq1g, nc, NRXQ_1G); 9717 #endif 9718 } 9719 9720 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); 9721 9722 #ifdef TCP_OFFLOAD 9723 calculate_nqueues(&t4_nofldtxq10g, nc, NOFLDTXQ_10G); 9724 calculate_nqueues(&t4_nofldtxq1g, nc, NOFLDTXQ_1G); 9725 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); 9726 calculate_nqueues(&t4_nofldrxq10g, nc, NOFLDRXQ_10G); 9727 calculate_nqueues(&t4_nofldrxq1g, nc, NOFLDRXQ_1G); 9728 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); 9729 9730 if (t4_toecaps_allowed == -1) 9731 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 9732 9733 if (t4_rdmacaps_allowed == -1) { 9734 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 9735 FW_CAPS_CONFIG_RDMA_RDMAC; 9736 } 9737 9738 if (t4_iscsicaps_allowed == -1) { 9739 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 9740 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 9741 FW_CAPS_CONFIG_ISCSI_T10DIF; 9742 } 9743 #else 9744 if (t4_toecaps_allowed == -1) 9745 t4_toecaps_allowed = 0; 9746 9747 if (t4_rdmacaps_allowed == -1) 9748 t4_rdmacaps_allowed = 0; 9749 9750 if (t4_iscsicaps_allowed == -1) 9751 t4_iscsicaps_allowed = 0; 9752 #endif 9753 9754 #ifdef DEV_NETMAP 9755 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); 9756 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); 9757 #endif 9758 9759 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS) 9760 t4_tmr_idx_10g = TMR_IDX_10G; 9761 9762 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS) 9763 t4_pktc_idx_10g = PKTC_IDX_10G; 9764 9765 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS) 9766 t4_tmr_idx_1g = TMR_IDX_1G; 9767 9768 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS) 9769 t4_pktc_idx_1g = PKTC_IDX_1G; 9770 9771 if (t4_qsize_txq < 128) 9772 t4_qsize_txq = 128; 9773 9774 if (t4_qsize_rxq < 128) 9775 t4_qsize_rxq = 128; 9776 while (t4_qsize_rxq & 7) 9777 t4_qsize_rxq++; 9778 9779 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 9780 } 9781 9782 #ifdef DDB 9783 static void 9784 t4_dump_tcb(struct adapter *sc, int tid) 9785 { 9786 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 9787 9788 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 9789 save = t4_read_reg(sc, reg); 9790 base = sc->memwin[2].mw_base; 9791 9792 /* Dump TCB for the tid */ 9793 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 9794 tcb_addr += tid * TCB_SIZE; 9795 9796 if (is_t4(sc)) { 9797 pf = 0; 9798 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 9799 } else { 9800 pf = V_PFNUM(sc->pf); 9801 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 9802 } 9803 t4_write_reg(sc, reg, win_pos | pf); 9804 t4_read_reg(sc, reg); 9805 9806 off = tcb_addr - win_pos; 9807 for (i = 0; i < 4; i++) { 9808 uint32_t buf[8]; 9809 for (j = 0; j < 8; j++, off += 4) 9810 buf[j] = htonl(t4_read_reg(sc, base + off)); 9811 9812 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 9813 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 9814 buf[7]); 9815 } 9816 9817 t4_write_reg(sc, reg, save); 9818 t4_read_reg(sc, reg); 9819 } 9820 9821 static void 9822 t4_dump_devlog(struct adapter *sc) 9823 { 9824 struct devlog_params *dparams = &sc->params.devlog; 9825 struct fw_devlog_e e; 9826 int i, first, j, m, nentries, rc; 9827 uint64_t ftstamp = UINT64_MAX; 9828 9829 if (dparams->start == 0) { 9830 db_printf("devlog params not valid\n"); 9831 return; 9832 } 9833 9834 nentries = dparams->size / sizeof(struct fw_devlog_e); 9835 m = fwmtype_to_hwmtype(dparams->memtype); 9836 9837 /* Find the 
first entry. */ 9838 first = -1; 9839 for (i = 0; i < nentries && !db_pager_quit; i++) { 9840 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9841 sizeof(e), (void *)&e); 9842 if (rc != 0) 9843 break; 9844 9845 if (e.timestamp == 0) 9846 break; 9847 9848 e.timestamp = be64toh(e.timestamp); 9849 if (e.timestamp < ftstamp) { 9850 ftstamp = e.timestamp; 9851 first = i; 9852 } 9853 } 9854 9855 if (first == -1) 9856 return; 9857 9858 i = first; 9859 do { 9860 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9861 sizeof(e), (void *)&e); 9862 if (rc != 0) 9863 return; 9864 9865 if (e.timestamp == 0) 9866 return; 9867 9868 e.timestamp = be64toh(e.timestamp); 9869 e.seqno = be32toh(e.seqno); 9870 for (j = 0; j < 8; j++) 9871 e.params[j] = be32toh(e.params[j]); 9872 9873 db_printf("%10d %15ju %8s %8s ", 9874 e.seqno, e.timestamp, 9875 (e.level < nitems(devlog_level_strings) ? 9876 devlog_level_strings[e.level] : "UNKNOWN"), 9877 (e.facility < nitems(devlog_facility_strings) ? 9878 devlog_facility_strings[e.facility] : "UNKNOWN")); 9879 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], 9880 e.params[3], e.params[4], e.params[5], e.params[6], 9881 e.params[7]); 9882 9883 if (++i == nentries) 9884 i = 0; 9885 } while (i != first && !db_pager_quit); 9886 } 9887 9888 static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); 9889 _DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); 9890 9891 DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) 9892 { 9893 device_t dev; 9894 int t; 9895 bool valid; 9896 9897 valid = false; 9898 t = db_read_token(); 9899 if (t == tIDENT) { 9900 dev = device_lookup_by_name(db_tok_string); 9901 valid = true; 9902 } 9903 db_skip_to_eol(); 9904 if (!valid) { 9905 db_printf("usage: show t4 devlog <nexus>\n"); 9906 return; 9907 } 9908 9909 if (dev == NULL) { 9910 db_printf("device not found\n"); 9911 return; 9912 } 9913 9914 t4_dump_devlog(device_get_softc(dev)); 9915 } 9916 9917 DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) 9918 { 9919 device_t dev; 9920 int radix, tid, t; 9921 bool valid; 9922 9923 valid = false; 9924 radix = db_radix; 9925 db_radix = 10; 9926 t = db_read_token(); 9927 if (t == tIDENT) { 9928 dev = device_lookup_by_name(db_tok_string); 9929 t = db_read_token(); 9930 if (t == tNUMBER) { 9931 tid = db_tok_number; 9932 valid = true; 9933 } 9934 } 9935 db_radix = radix; 9936 db_skip_to_eol(); 9937 if (!valid) { 9938 db_printf("usage: show t4 tcb <nexus> <tid>\n"); 9939 return; 9940 } 9941 9942 if (dev == NULL) { 9943 db_printf("device not found\n"); 9944 return; 9945 } 9946 if (tid < 0) { 9947 db_printf("invalid tid\n"); 9948 return; 9949 } 9950 9951 t4_dump_tcb(device_get_softc(dev), tid); 9952 } 9953 #endif 9954 9955 static struct sx mlu; /* mod load unload */ 9956 SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); 9957 9958 static int 9959 mod_event(module_t mod, int cmd, void *arg) 9960 { 9961 int rc = 0; 9962 static int loaded = 0; 9963 9964 switch (cmd) { 9965 case MOD_LOAD: 9966 sx_xlock(&mlu); 9967 if (loaded++ == 0) { 9968 t4_sge_modload(); 9969 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl); 9970 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl); 9971 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); 9972 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); 9973 sx_init(&t4_list_lock, "T4/T5 adapters"); 9974 SLIST_INIT(&t4_list); 9975 #ifdef TCP_OFFLOAD 9976 sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); 9977 SLIST_INIT(&t4_uld_list); 9978 #endif 9979 t4_tracer_modload(); 
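			/*
			 * Last first-load step: pick sane defaults for any
			 * tunables the administrator left unset.
			 */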
9980 tweak_tunables(); 9981 } 9982 sx_xunlock(&mlu); 9983 break; 9984 9985 case MOD_UNLOAD: 9986 sx_xlock(&mlu); 9987 if (--loaded == 0) { 9988 int tries; 9989 9990 sx_slock(&t4_list_lock); 9991 if (!SLIST_EMPTY(&t4_list)) { 9992 rc = EBUSY; 9993 sx_sunlock(&t4_list_lock); 9994 goto done_unload; 9995 } 9996 #ifdef TCP_OFFLOAD 9997 sx_slock(&t4_uld_list_lock); 9998 if (!SLIST_EMPTY(&t4_uld_list)) { 9999 rc = EBUSY; 10000 sx_sunlock(&t4_uld_list_lock); 10001 sx_sunlock(&t4_list_lock); 10002 goto done_unload; 10003 } 10004 #endif 10005 tries = 0; 10006 while (tries++ < 5 && t4_sge_extfree_refs() != 0) { 10007 uprintf("%ju clusters with custom free routine " 10008 "still in use.\n", t4_sge_extfree_refs()); 10009 pause("t4unload", 2 * hz); 10010 } 10011 #ifdef TCP_OFFLOAD 10012 sx_sunlock(&t4_uld_list_lock); 10013 #endif 10014 sx_sunlock(&t4_list_lock); 10015 10016 if (t4_sge_extfree_refs() == 0) { 10017 t4_tracer_modunload(); 10018 #ifdef TCP_OFFLOAD 10019 sx_destroy(&t4_uld_list_lock); 10020 #endif 10021 sx_destroy(&t4_list_lock); 10022 t4_sge_modunload(); 10023 loaded = 0; 10024 } else { 10025 rc = EBUSY; 10026 loaded++; /* undo earlier decrement */ 10027 } 10028 } 10029 done_unload: 10030 sx_xunlock(&mlu); 10031 break; 10032 } 10033 10034 return (rc); 10035 } 10036 10037 static devclass_t t4_devclass, t5_devclass, t6_devclass; 10038 static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; 10039 static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; 10040 10041 DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 10042 MODULE_VERSION(t4nex, 1); 10043 MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 10044 #ifdef DEV_NETMAP 10045 MODULE_DEPEND(t4nex, netmap, 1, 1, 1); 10046 #endif /* DEV_NETMAP */ 10047 10048 DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 10049 MODULE_VERSION(t5nex, 1); 10050 MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 10051 #ifdef DEV_NETMAP 10052 MODULE_DEPEND(t5nex, netmap, 1, 1, 1); 10053 #endif /* DEV_NETMAP */ 10054 10055 DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); 10056 MODULE_VERSION(t6nex, 1); 10057 MODULE_DEPEND(t6nex, firmware, 1, 1, 1); 10058 #ifdef DEV_NETMAP 10059 MODULE_DEPEND(t6nex, netmap, 1, 1, 1); 10060 #endif /* DEV_NETMAP */ 10061 10062 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 10063 MODULE_VERSION(cxgbe, 1); 10064 10065 DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 10066 MODULE_VERSION(cxl, 1); 10067 10068 DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); 10069 MODULE_VERSION(cc, 1); 10070 10071 DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 10072 MODULE_VERSION(vcxgbe, 1); 10073 10074 DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 10075 MODULE_VERSION(vcxl, 1); 10076 10077 DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); 10078 MODULE_VERSION(vcc, 1); 10079
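/*
 * Illustrative usage sketch (not part of the driver): a minimal userland
 * program that exercises the CHELSIO_T4_GETREG ioctl handled by t4_ioctl()
 * above.  It assumes the struct t4_reg and ioctl definitions from
 * t4_ioctl.h and a /dev/t4nex0 node created by this driver; the register
 * offset is a made-up example and must be 4-byte aligned and within the
 * device's mmio_len, or the ioctl fails with EFAULT.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include "t4_ioctl.h"
 *
 *	int
 *	main(void)
 *	{
 *		struct t4_reg reg = { .addr = 0x1000, .size = 4 };
 *		int fd = open("/dev/t4nex0", O_RDWR);
 *
 *		if (fd < 0)
 *			return (1);
 *		if (ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *			printf("0x%08x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 *		close(fd);
 *		return (0);
 *	}
 */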