/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
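
/*
 * For example (values illustrative only, not recommendations), the queue
 * counts below can be overridden from loader.conf:
 *   hw.cxgbe.ntxq10g="8"
 *   hw.cxgbe.nrxq10g="4"
 */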

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);

#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);

#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);

#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
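
/*
 * Example (illustrative) loader.conf settings for the two bitmask tunables
 * above: hw.cxgbe.interrupt_types="4" restricts the driver to MSI-X (7, the
 * default, allows any type), and hw.cxgbe.pause_settings="1" heeds incoming
 * PAUSE frames but never transmits them (3, the default, enables both).
 */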

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
	uint16_t nnmtxq10g;	/* # of netmap txq's for each 10G port */
	uint16_t nnmrxq10g;	/* # of netmap rxq's for each 10G port */
	uint16_t nnmtxq1g;	/* # of netmap txq's for each 1G port */
	uint16_t nnmrxq1g;	/* # of netmap rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
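/*
 * i.e. (a sketch of what the asserts below guarantee) given a pointer to
 * either flavor of iq, the fl can be recovered with the same arithmetic:
 *   fl = (struct sge_fl *)((char *)iq +
 *       (offsetof(struct sge_rxq, fl) - offsetof(struct sge_rxq, iq)));
 */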
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	const char *pcie_ts;

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}
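
	/*
	 * Note: the MPS calculation above follows the standard PCIe encoding
	 * of the device control register's 3-bit max payload size field,
	 * where a field value of n selects a payload of 128 << n bytes.
	 */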

	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within
	 * PCI passthrough environments too, where pci_get_function() could
	 * return a different PF# depending on the passthrough configuration.
	 * We need to use the real PF# in all our communication with the
	 * firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.requested_fc |= t4_pause_settings;
		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.fc |= t4_pause_settings;

		rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
		if (rc != 0) {
			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done;	/* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
	s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->flags |= iaq.intr_flags_10g;
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->flags |= iaq.intr_flags_1g;
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}
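
		/*
		 * If txq 0 is reserved (hw.cxgbe.rsrv_noflowq) it carries only
		 * traffic without a flowid; see the txq selection logic in
		 * cxgbe_transmit().
		 */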
		if (pi->ntxq > 1)
			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
		else
			pi->rsrv_noflowq = 0;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;
#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
#ifdef DEV_NETMAP
		pi->first_nm_rxq = nm_rqidx;
		pi->first_nm_txq = nm_tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nnmrxq = iaq.nnmrxq10g;
			pi->nnmtxq = iaq.nnmtxq10g;
		} else {
			pi->nnmrxq = iaq.nnmrxq1g;
			pi->nnmtxq = iaq.nnmtxq1g;
		}
		nm_rqidx += pi->nnmrxq;
		nm_tqidx += pi->nnmtxq;
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	switch (sc->params.pci.speed) {
	case 0x1:
		pcie_ts = "2.5";
		break;
	case 0x2:
		pcie_ts = "5.0";
		break;
	case 0x3:
		pcie_ts = "8.0";
		break;
	default:
		pcie_ts = "??";
		break;
	}
	device_printf(dev,
	    "PCIe x%d (%s GT/s) (%d), %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, pcie_ts, sc->params.pci.speed,
	    sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->regwin_lock))
		mtx_destroy(&sc->regwin_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;
	char *s;
	int n, o;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi, &pi->media);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

	n = 128;
	s = malloc(n, M_CXGBE, M_WAITOK);
	o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
	MPASS(n > o);
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
		    pi->nofldtxq, pi->nofldrxq);
		MPASS(n > o);
	}
#endif
#ifdef DEV_NETMAP
	o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
	    pi->nnmrxq);
	MPASS(n > o);
#endif
	device_printf(dev, "%s\n", s);
	free(s, M_CXGBE);

#ifdef DEV_NETMAP
	/* nm_media handled here to keep implementation private to this file */
	ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi, &pi->nm_media);
	create_netmap_ifnet(pi);	/* logs errors if something fails */
#endif
	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

#ifdef DEV_NETMAP
	/* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
	destroy_netmap_ifnet(pi);
#endif

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, pi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(pi);
			}
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(pi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;
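
		/*
		 * Note on the redo_sifflags retries above: MAC settings are
		 * updated with the lock held (no sleeping), while init/uninit
		 * must be able to sleep, so the handler restarts itself with
		 * the appropriate synchronization mode when it guessed wrong.
		 */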

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(&m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	txq = &sc->sge.txq[pi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq)) +
		    pi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}
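
/*
 * A worked example of the txq selection above: with ntxq = 4 and
 * rsrv_noflowq = 1, packets carrying a flowid land on txq[1..3]
 * (1 + flowid % 3) while packets without one stay on txq[0].
 */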

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_ENABLED;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
		}
	}
	if_qflush(ifp);
}

static uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames - s->rx_pause);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames - s->tx_pause);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets - s->rx_pause * 64);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets - s->tx_pause * 64);

	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames - s->rx_pause);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames - s->tx_pause);

	case IFCOUNTER_IQDROPS:
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (pi->flags & PORT_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(pi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);
	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia *media = NULL;
	struct ifmedia_entry *cur;
	int speed = pi->link_cfg.speed;
#ifdef INVARIANTS
	int data = (pi->port_type << 8) | pi->mod_type;
#endif

	if (ifp == pi->ifp)
		media = &pi->media;
#ifdef DEV_NETMAP
	else if (ifp == pi->nm_ifp)
		media = &pi->nm_media;
#endif
	MPASS(media != NULL);

	cur = media->ifm_cur;
	MPASS(cur->ifm_data == data);

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}

static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
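
/*
 * Note on the V_WINDOW encoding above: the window size field holds
 * ilog2(aperture) - 10, i.e. the aperture expressed as a power of two
 * with a 1KB minimum.
 */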

/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}
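
/*
 * The BAR decodes above rely on the hardware expressing BASE and SIZE in
 * MB units, hence the << 20 to convert each to a byte address/length.
 */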

static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}

static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}
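
/*
 * A caller sketch for memwin_info() above and position_memwin() below:
 * memwin_info() reports where window n sits in BAR0 and how large it is,
 * and position_memwin() retargets the window at a chip address, so a
 * 4-byte read of chip address addr is roughly (assuming the returned
 * offset stays below the aperture):
 *   memwin_info(sc, n, &base, &aperture);
 *   off = position_memwin(sc, n, addr);
 *   val = t4_read_reg(sc, base + off);
 */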
 */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);

	return (addr - start);
}

static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;
	int nnmrxq10g = 0, nnmrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
	iaq->rsrv_noflowq = t4_rsrv_noflowq;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif
#ifdef DEV_NETMAP
	iaq->nnmtxq10g = t4_nnmtxq10g;
	iaq->nnmtxq1g = t4_nnmtxq1g;
	iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g;
	iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g;
#endif

	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags_10g = 0;
		iaq->intr_flags_1g = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one for every rxq (NIC, TOE, and
		 * netmap).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags_10g = INTR_ALL;
			iaq->intr_flags_1g = INTR_ALL;
			goto allocate;
		}

		/*
		 * Second best option: a vector for errors, one for the firmware
		 * event queue, and vectors for either all the NIC rx queues or
		 * all the TOE rx queues.  The queues that don't get vectors
		 * will forward their interrupts to those that do.
		 *
		 * Note: netmap rx queues cannot be created early and so they
		 * can't be set up to receive forwarded interrupts for others.
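		 *
		 * For example: 2 x 10G ports with nrxq10g = 8 and nofldrxq10g =
		 * 2 ask for T4_EXTRA_INTR + 2 * 8 vectors here, and each port's
		 * 2 TOE rx queues forward their interrupts to 2 of its NIC rxqs.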
2005 */ 2006 iaq->nirq = T4_EXTRA_INTR; 2007 if (nrxq10g >= nofldrxq10g) { 2008 iaq->intr_flags_10g = INTR_RXQ; 2009 iaq->nirq += n10g * nrxq10g; 2010 #ifdef DEV_NETMAP 2011 iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g); 2012 #endif 2013 } else { 2014 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2015 iaq->nirq += n10g * nofldrxq10g; 2016 #ifdef DEV_NETMAP 2017 iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g); 2018 #endif 2019 } 2020 if (nrxq1g >= nofldrxq1g) { 2021 iaq->intr_flags_1g = INTR_RXQ; 2022 iaq->nirq += n1g * nrxq1g; 2023 #ifdef DEV_NETMAP 2024 iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g); 2025 #endif 2026 } else { 2027 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2028 iaq->nirq += n1g * nofldrxq1g; 2029 #ifdef DEV_NETMAP 2030 iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g); 2031 #endif 2032 } 2033 if (iaq->nirq <= navail && 2034 (itype != INTR_MSI || powerof2(iaq->nirq))) 2035 goto allocate; 2036 2037 /* 2038 * Next best option: an interrupt vector for errors, one for the 2039 * firmware event queue, and at least one per port. At this 2040 * point we know we'll have to downsize nrxq and/or nofldrxq 2041 * and/or nnmrxq to fit what's available to us. 2042 */ 2043 iaq->nirq = T4_EXTRA_INTR; 2044 iaq->nirq += n10g + n1g; 2045 if (iaq->nirq <= navail) { 2046 int leftover = navail - iaq->nirq; 2047 2048 if (n10g > 0) { 2049 int target = max(nrxq10g, nofldrxq10g); 2050 2051 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2052 INTR_RXQ : INTR_OFLD_RXQ; 2053 2054 n = 1; 2055 while (n < target && leftover >= n10g) { 2056 leftover -= n10g; 2057 iaq->nirq += n10g; 2058 n++; 2059 } 2060 iaq->nrxq10g = min(n, nrxq10g); 2061 #ifdef TCP_OFFLOAD 2062 iaq->nofldrxq10g = min(n, nofldrxq10g); 2063 #endif 2064 #ifdef DEV_NETMAP 2065 iaq->nnmrxq10g = min(n, nnmrxq10g); 2066 #endif 2067 } 2068 2069 if (n1g > 0) { 2070 int target = max(nrxq1g, nofldrxq1g); 2071 2072 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 2073 INTR_RXQ : INTR_OFLD_RXQ; 2074 2075 n = 1; 2076 while (n < target && leftover >= n1g) { 2077 leftover -= n1g; 2078 iaq->nirq += n1g; 2079 n++; 2080 } 2081 iaq->nrxq1g = min(n, nrxq1g); 2082 #ifdef TCP_OFFLOAD 2083 iaq->nofldrxq1g = min(n, nofldrxq1g); 2084 #endif 2085 #ifdef DEV_NETMAP 2086 iaq->nnmrxq1g = min(n, nnmrxq1g); 2087 #endif 2088 } 2089 2090 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2091 goto allocate; 2092 } 2093 2094 /* 2095 * Least desirable option: one interrupt vector for everything. 2096 */ 2097 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2098 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2099 #ifdef TCP_OFFLOAD 2100 if (is_offload(sc)) 2101 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2102 #endif 2103 #ifdef DEV_NETMAP 2104 iaq->nnmrxq10g = iaq->nnmrxq1g = 1; 2105 #endif 2106 2107 allocate: 2108 navail = iaq->nirq; 2109 rc = 0; 2110 if (itype == INTR_MSIX) 2111 rc = pci_alloc_msix(sc->dev, &navail); 2112 else if (itype == INTR_MSI) 2113 rc = pci_alloc_msi(sc->dev, &navail); 2114 2115 if (rc == 0) { 2116 if (navail == iaq->nirq) 2117 return (0); 2118 2119 /* 2120 * Didn't get the number requested. Use whatever number 2121 * the kernel is willing to allocate (it's in navail). 2122 */ 2123 device_printf(sc->dev, "fewer vectors than requested, " 2124 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2125 itype, iaq->nirq, navail); 2126 pci_release_msi(sc->dev); 2127 goto restart; 2128 } 2129 2130 device_printf(sc->dev, 2131 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2132 itype, rc, iaq->nirq, navail); 2133 } 2134 2135 device_printf(sc->dev, 2136 "failed to find a usable interrupt type. 
" 2137 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2138 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2139 2140 return (ENXIO); 2141 } 2142 2143 #define FW_VERSION(chip) ( \ 2144 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2145 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2146 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2147 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2148 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2149 2150 struct fw_info { 2151 uint8_t chip; 2152 char *kld_name; 2153 char *fw_mod_name; 2154 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2155 } fw_info[] = { 2156 { 2157 .chip = CHELSIO_T4, 2158 .kld_name = "t4fw_cfg", 2159 .fw_mod_name = "t4fw", 2160 .fw_hdr = { 2161 .chip = FW_HDR_CHIP_T4, 2162 .fw_ver = htobe32_const(FW_VERSION(T4)), 2163 .intfver_nic = FW_INTFVER(T4, NIC), 2164 .intfver_vnic = FW_INTFVER(T4, VNIC), 2165 .intfver_ofld = FW_INTFVER(T4, OFLD), 2166 .intfver_ri = FW_INTFVER(T4, RI), 2167 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2168 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2169 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2170 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2171 }, 2172 }, { 2173 .chip = CHELSIO_T5, 2174 .kld_name = "t5fw_cfg", 2175 .fw_mod_name = "t5fw", 2176 .fw_hdr = { 2177 .chip = FW_HDR_CHIP_T5, 2178 .fw_ver = htobe32_const(FW_VERSION(T5)), 2179 .intfver_nic = FW_INTFVER(T5, NIC), 2180 .intfver_vnic = FW_INTFVER(T5, VNIC), 2181 .intfver_ofld = FW_INTFVER(T5, OFLD), 2182 .intfver_ri = FW_INTFVER(T5, RI), 2183 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2184 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2185 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2186 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2187 }, 2188 } 2189 }; 2190 2191 static struct fw_info * 2192 find_fw_info(int chip) 2193 { 2194 int i; 2195 2196 for (i = 0; i < nitems(fw_info); i++) { 2197 if (fw_info[i].chip == chip) 2198 return (&fw_info[i]); 2199 } 2200 return (NULL); 2201 } 2202 2203 /* 2204 * Is the given firmware API compatible with the one the driver was compiled 2205 * with? 2206 */ 2207 static int 2208 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2209 { 2210 2211 /* short circuit if it's the exact same firmware version */ 2212 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2213 return (1); 2214 2215 /* 2216 * XXX: Is this too conservative? Perhaps I should limit this to the 2217 * features that are supported in the driver. 2218 */ 2219 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2220 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2221 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2222 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2223 return (1); 2224 #undef SAME_INTF 2225 2226 return (0); 2227 } 2228 2229 /* 2230 * The firmware in the KLD is usable, but should it be installed? This routine 2231 * explains itself in detail if it indicates the KLD firmware should be 2232 * installed. 
2233 */ 2234 static int 2235 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2236 { 2237 const char *reason; 2238 2239 if (!card_fw_usable) { 2240 reason = "incompatible or unusable"; 2241 goto install; 2242 } 2243 2244 if (k > c) { 2245 reason = "older than the version bundled with this driver"; 2246 goto install; 2247 } 2248 2249 if (t4_fw_install == 2 && k != c) { 2250 reason = "different than the version bundled with this driver"; 2251 goto install; 2252 } 2253 2254 return (0); 2255 2256 install: 2257 if (t4_fw_install == 0) { 2258 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2259 "but the driver is prohibited from installing a different " 2260 "firmware on the card.\n", 2261 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2262 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2263 2264 return (0); 2265 } 2266 2267 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2268 "installing firmware %u.%u.%u.%u on card.\n", 2269 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2270 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2271 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2272 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2273 2274 return (1); 2275 } 2276 /* 2277 * Establish contact with the firmware and determine if we are the master driver 2278 * or not, and whether we are responsible for chip initialization. 2279 */ 2280 static int 2281 prep_firmware(struct adapter *sc) 2282 { 2283 const struct firmware *fw = NULL, *default_cfg; 2284 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2285 enum dev_state state; 2286 struct fw_info *fw_info; 2287 struct fw_hdr *card_fw; /* fw on the card */ 2288 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2289 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2290 against */ 2291 2292 /* Contact firmware. */ 2293 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2294 if (rc < 0 || state == DEV_STATE_ERR) { 2295 rc = -rc; 2296 device_printf(sc->dev, 2297 "failed to connect to the firmware: %d, %d.\n", rc, state); 2298 return (rc); 2299 } 2300 pf = rc; 2301 if (pf == sc->mbox) 2302 sc->flags |= MASTER_PF; 2303 else if (state == DEV_STATE_UNINIT) { 2304 /* 2305 * We didn't get to be the master so we definitely won't be 2306 * configuring the chip. It's a bug if someone else hasn't 2307 * configured it already. 2308 */ 2309 device_printf(sc->dev, "couldn't be master(%d), " 2310 "device not already initialized either(%d).\n", rc, state); 2311 return (EDOOFUS); 2312 } 2313 2314 /* This is the firmware whose headers the driver was compiled against */ 2315 fw_info = find_fw_info(chip_id(sc)); 2316 if (fw_info == NULL) { 2317 device_printf(sc->dev, 2318 "unable to look up firmware information for chip %d.\n", 2319 chip_id(sc)); 2320 return (EINVAL); 2321 } 2322 drv_fw = &fw_info->fw_hdr; 2323 2324 /* 2325 * The firmware KLD contains many modules. The KLD name is also the 2326 * name of the module that contains the default config file. 
2327 */ 2328 default_cfg = firmware_get(fw_info->kld_name); 2329 2330 /* Read the header of the firmware on the card */ 2331 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2332 rc = -t4_read_flash(sc, FLASH_FW_START, 2333 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2334 if (rc == 0) 2335 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2336 else { 2337 device_printf(sc->dev, 2338 "Unable to read card's firmware header: %d\n", rc); 2339 card_fw_usable = 0; 2340 } 2341 2342 /* This is the firmware in the KLD */ 2343 fw = firmware_get(fw_info->fw_mod_name); 2344 if (fw != NULL) { 2345 kld_fw = (const void *)fw->data; 2346 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2347 } else { 2348 kld_fw = NULL; 2349 kld_fw_usable = 0; 2350 } 2351 2352 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2353 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2354 /* 2355 * Common case: the firmware on the card is an exact match and 2356 * the KLD is an exact match too, or the KLD is 2357 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2358 * here -- use cxgbetool loadfw if you want to reinstall the 2359 * same firmware as the one on the card. 2360 */ 2361 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2362 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2363 be32toh(card_fw->fw_ver))) { 2364 2365 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2366 if (rc != 0) { 2367 device_printf(sc->dev, 2368 "failed to install firmware: %d\n", rc); 2369 goto done; 2370 } 2371 2372 /* Installed successfully, update the cached header too. */ 2373 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2374 card_fw_usable = 1; 2375 need_fw_reset = 0; /* already reset as part of load_fw */ 2376 } 2377 2378 if (!card_fw_usable) { 2379 uint32_t d, c, k; 2380 2381 d = ntohl(drv_fw->fw_ver); 2382 c = ntohl(card_fw->fw_ver); 2383 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 2384 2385 device_printf(sc->dev, "Cannot find a usable firmware: " 2386 "fw_install %d, chip state %d, " 2387 "driver compiled with %d.%d.%d.%d, " 2388 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2389 t4_fw_install, state, 2390 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2391 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2392 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2393 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2394 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2395 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2396 rc = EINVAL; 2397 goto done; 2398 } 2399 2400 /* We're using whatever's on the card and it's known to be good. */ 2401 sc->params.fw_vers = ntohl(card_fw->fw_ver); 2402 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 2403 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2404 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2405 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2406 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2407 t4_get_tp_version(sc, &sc->params.tp_vers); 2408 2409 /* Reset device */ 2410 if (need_fw_reset && 2411 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2412 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2413 if (rc != ETIMEDOUT && rc != EIO) 2414 t4_fw_bye(sc, sc->mbox); 2415 goto done; 2416 } 2417 sc->flags |= FW_OK; 2418 2419 rc = get_params__pre_init(sc); 2420 if (rc != 0) 2421 goto done; /* error message displayed already */ 2422 2423 /* Partition adapter resources as specified in the config file. 
*/ 2424 if (state == DEV_STATE_UNINIT) { 2425 2426 KASSERT(sc->flags & MASTER_PF, 2427 ("%s: trying to change chip settings when not master.", 2428 __func__)); 2429 2430 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2431 if (rc != 0) 2432 goto done; /* error message displayed already */ 2433 2434 t4_tweak_chip_settings(sc); 2435 2436 /* get basic stuff going */ 2437 rc = -t4_fw_initialize(sc, sc->mbox); 2438 if (rc != 0) { 2439 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2440 goto done; 2441 } 2442 } else { 2443 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2444 sc->cfcsum = 0; 2445 } 2446 2447 done: 2448 free(card_fw, M_CXGBE); 2449 if (fw != NULL) 2450 firmware_put(fw, FIRMWARE_UNLOAD); 2451 if (default_cfg != NULL) 2452 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2453 2454 return (rc); 2455 } 2456 2457 #define FW_PARAM_DEV(param) \ 2458 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2459 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2460 #define FW_PARAM_PFVF(param) \ 2461 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2462 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2463 2464 /* 2465 * Partition chip resources for use between various PFs, VFs, etc. 2466 */ 2467 static int 2468 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2469 const char *name_prefix) 2470 { 2471 const struct firmware *cfg = NULL; 2472 int rc = 0; 2473 struct fw_caps_config_cmd caps; 2474 uint32_t mtype, moff, finicsum, cfcsum; 2475 2476 /* 2477 * Figure out what configuration file to use. Pick the default config 2478 * file for the card if the user hasn't specified one explicitly. 2479 */ 2480 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2481 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2482 /* Card specific overrides go here. */ 2483 if (pci_get_device(sc->dev) == 0x440a) 2484 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2485 if (is_fpga(sc)) 2486 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2487 } 2488 2489 /* 2490 * We need to load another module if the profile is anything except 2491 * "default" or "flash". 
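	 * The module name is "<kld_name>_<profile>"; e.g. a profile named
	 * "uwire" on a T4 card would be looked up as "t4fw_cfg_uwire".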
2492 */ 2493 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 2494 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2495 char s[32]; 2496 2497 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 2498 cfg = firmware_get(s); 2499 if (cfg == NULL) { 2500 if (default_cfg != NULL) { 2501 device_printf(sc->dev, 2502 "unable to load module \"%s\" for " 2503 "configuration profile \"%s\", will use " 2504 "the default config file instead.\n", 2505 s, sc->cfg_file); 2506 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2507 "%s", DEFAULT_CF); 2508 } else { 2509 device_printf(sc->dev, 2510 "unable to load module \"%s\" for " 2511 "configuration profile \"%s\", will use " 2512 "the config file on the card's flash " 2513 "instead.\n", s, sc->cfg_file); 2514 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2515 "%s", FLASH_CF); 2516 } 2517 } 2518 } 2519 2520 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 2521 default_cfg == NULL) { 2522 device_printf(sc->dev, 2523 "default config file not available, will use the config " 2524 "file on the card's flash instead.\n"); 2525 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 2526 } 2527 2528 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2529 u_int cflen, i, n; 2530 const uint32_t *cfdata; 2531 uint32_t param, val, addr, off, mw_base, mw_aperture; 2532 2533 KASSERT(cfg != NULL || default_cfg != NULL, 2534 ("%s: no config to upload", __func__)); 2535 2536 /* 2537 * Ask the firmware where it wants us to upload the config file. 2538 */ 2539 param = FW_PARAM_DEV(CF); 2540 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 2541 if (rc != 0) { 2542 /* No support for config file? Shouldn't happen. */ 2543 device_printf(sc->dev, 2544 "failed to query config file location: %d.\n", rc); 2545 goto done; 2546 } 2547 mtype = G_FW_PARAMS_PARAM_Y(val); 2548 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 2549 2550 /* 2551 * XXX: sheer laziness. We deliberately added 4 bytes of 2552 * useless stuffing/comments at the end of the config file so 2553 * it's ok to simply throw away the last remaining bytes when 2554 * the config file is not an exact multiple of 4. This also 2555 * helps with the validate_mt_off_len check. 2556 */ 2557 if (cfg != NULL) { 2558 cflen = cfg->datasize & ~3; 2559 cfdata = cfg->data; 2560 } else { 2561 cflen = default_cfg->datasize & ~3; 2562 cfdata = default_cfg->data; 2563 } 2564 2565 if (cflen > FLASH_CFG_MAX_SIZE) { 2566 device_printf(sc->dev, 2567 "config file too long (%d, max allowed is %d). " 2568 "Will try to use the config on the card, if any.\n", 2569 cflen, FLASH_CFG_MAX_SIZE); 2570 goto use_config_on_flash; 2571 } 2572 2573 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 2574 if (rc != 0) { 2575 device_printf(sc->dev, 2576 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 2577 "Will try to use the config on the card, if any.\n", 2578 __func__, mtype, moff, cflen, rc); 2579 goto use_config_on_flash; 2580 } 2581 2582 memwin_info(sc, 2, &mw_base, &mw_aperture); 2583 while (cflen) { 2584 off = position_memwin(sc, 2, addr); 2585 n = min(cflen, mw_aperture - off); 2586 for (i = 0; i < n; i += 4) 2587 t4_write_reg(sc, mw_base + off + i, *cfdata++); 2588 cflen -= n; 2589 addr += n; 2590 } 2591 } else { 2592 use_config_on_flash: 2593 mtype = FW_MEMTYPE_FLASH; 2594 moff = t4_flash_cfg_addr(sc); 2595 } 2596 2597 bzero(&caps, sizeof(caps)); 2598 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2599 F_FW_CMD_REQUEST | F_FW_CMD_READ); 2600 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 2601 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 2602 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 2603 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 2604 if (rc != 0) { 2605 device_printf(sc->dev, 2606 "failed to pre-process config file: %d " 2607 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 2608 goto done; 2609 } 2610 2611 finicsum = be32toh(caps.finicsum); 2612 cfcsum = be32toh(caps.cfcsum); 2613 if (finicsum != cfcsum) { 2614 device_printf(sc->dev, 2615 "WARNING: config file checksum mismatch: %08x %08x\n", 2616 finicsum, cfcsum); 2617 } 2618 sc->cfcsum = cfcsum; 2619 2620 #define LIMIT_CAPS(x) do { \ 2621 caps.x &= htobe16(t4_##x##_allowed); \ 2622 } while (0) 2623 2624 /* 2625 * Let the firmware know what features will (not) be used so it can tune 2626 * things accordingly. 2627 */ 2628 LIMIT_CAPS(linkcaps); 2629 LIMIT_CAPS(niccaps); 2630 LIMIT_CAPS(toecaps); 2631 LIMIT_CAPS(rdmacaps); 2632 LIMIT_CAPS(iscsicaps); 2633 LIMIT_CAPS(fcoecaps); 2634 #undef LIMIT_CAPS 2635 2636 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2637 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 2638 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 2639 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 2640 if (rc != 0) { 2641 device_printf(sc->dev, 2642 "failed to process config file: %d.\n", rc); 2643 } 2644 done: 2645 if (cfg != NULL) 2646 firmware_put(cfg, FIRMWARE_UNLOAD); 2647 return (rc); 2648 } 2649 2650 /* 2651 * Retrieve parameters that are needed (or nice to have) very early. 2652 */ 2653 static int 2654 get_params__pre_init(struct adapter *sc) 2655 { 2656 int rc; 2657 uint32_t param[2], val[2]; 2658 struct fw_devlog_cmd cmd; 2659 struct devlog_params *dlog = &sc->params.devlog; 2660 2661 param[0] = FW_PARAM_DEV(PORTVEC); 2662 param[1] = FW_PARAM_DEV(CCLK); 2663 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 2664 if (rc != 0) { 2665 device_printf(sc->dev, 2666 "failed to query parameters (pre_init): %d.\n", rc); 2667 return (rc); 2668 } 2669 2670 sc->params.portvec = val[0]; 2671 sc->params.nports = bitcount32(val[0]); 2672 sc->params.vpd.cclk = val[1]; 2673 2674 /* Read device log parameters. 
	 */
	bzero(&cmd, sizeof(cmd));
	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = be32toh(cmd.memsize_devlog);
	}

	return (rc);
}

/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;
	sc->params.ftid_min = val[2];
	sc->params.ftid_max = val[3];
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;
	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
	    ("%s: L2 table size (%u) larger than expected (%u)",
	    __func__, sc->vres.l2t.size, L2T_SIZE));

	/* get capabilities */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

#define READ_CAPS(x) do { \
	sc->x = be16toh(caps.x); \
} while (0)
	READ_CAPS(linkcaps);
	READ_CAPS(niccaps);
	READ_CAPS(toecaps);
	READ_CAPS(rdmacaps);
	READ_CAPS(iscsicaps);
	READ_CAPS(fcoecaps);

	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query NIC parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.etid_base = val[0];
		sc->params.etid_min = val[0];
		sc->tids.netids = val[1] - val[0] + 1;
		sc->params.netids = sc->tids.netids;
		sc->params.eo_wr_cred = val[2];
		sc->params.ethoffload = 1;
	}

	if (sc->toecaps) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (sc->rdmacaps) {
		param[0] = FW_PARAM_PFVF(STAG_START);
		param[1] = FW_PARAM_PFVF(STAG_END);
		param[2] = FW_PARAM_PFVF(RQ_START);
		param[3] = FW_PARAM_PFVF(RQ_END);
		param[4] = FW_PARAM_PFVF(PBL_START);
		param[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(1): %d.\n", rc);
			return (rc);
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;

		param[0] = FW_PARAM_PFVF(SQRQ_START);
		param[1] = FW_PARAM_PFVF(SQRQ_END);
		param[2] = FW_PARAM_PFVF(CQ_START);
		param[3] = FW_PARAM_PFVF(CQ_END);
		param[4] = FW_PARAM_PFVF(OCQ_START);
		param[5] = FW_PARAM_PFVF(OCQ_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(2): %d.\n", rc);
			return (rc);
		}
		sc->vres.qp.start = val[0];
		sc->vres.qp.size = val[1] - val[0] + 1;
		sc->vres.cq.start = val[2];
		sc->vres.cq.size = val[3] - val[2] + 1;
		sc->vres.ocq.start = val[4];
		sc->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (sc->iscsicaps) {
		param[0] = FW_PARAM_PFVF(ISCSI_START);
		param[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			return (rc);
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}

	/*
	 * We've got the params we wanted to query via the firmware.  Now grab
	 * some others directly from the chip.
	 */
	rc = t4_read_chip_settings(sc);

	return (rc);
}

static int
set_params__post_init(struct adapter *sc)
{
	uint32_t param, val;

	/* ask for encapsulated CPLs */
	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val = 1;
	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);

	return (0);
}

#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

static void
t4_set_desc(struct adapter *sc)
{
	char buf[128];
	struct adapter_params *p = &sc->params;

	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? 
"R" : "", 2879 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec); 2880 2881 device_set_desc_copy(sc->dev, buf); 2882 } 2883 2884 static void 2885 build_medialist(struct port_info *pi, struct ifmedia *media) 2886 { 2887 int data, m; 2888 2889 PORT_LOCK(pi); 2890 2891 ifmedia_removeall(media); 2892 2893 m = IFM_ETHER | IFM_FDX; 2894 data = (pi->port_type << 8) | pi->mod_type; 2895 2896 switch(pi->port_type) { 2897 case FW_PORT_TYPE_BT_XFI: 2898 ifmedia_add(media, m | IFM_10G_T, data, NULL); 2899 break; 2900 2901 case FW_PORT_TYPE_BT_XAUI: 2902 ifmedia_add(media, m | IFM_10G_T, data, NULL); 2903 /* fall through */ 2904 2905 case FW_PORT_TYPE_BT_SGMII: 2906 ifmedia_add(media, m | IFM_1000_T, data, NULL); 2907 ifmedia_add(media, m | IFM_100_TX, data, NULL); 2908 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL); 2909 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 2910 break; 2911 2912 case FW_PORT_TYPE_CX4: 2913 ifmedia_add(media, m | IFM_10G_CX4, data, NULL); 2914 ifmedia_set(media, m | IFM_10G_CX4); 2915 break; 2916 2917 case FW_PORT_TYPE_QSFP_10G: 2918 case FW_PORT_TYPE_SFP: 2919 case FW_PORT_TYPE_FIBER_XFI: 2920 case FW_PORT_TYPE_FIBER_XAUI: 2921 switch (pi->mod_type) { 2922 2923 case FW_PORT_MOD_TYPE_LR: 2924 ifmedia_add(media, m | IFM_10G_LR, data, NULL); 2925 ifmedia_set(media, m | IFM_10G_LR); 2926 break; 2927 2928 case FW_PORT_MOD_TYPE_SR: 2929 ifmedia_add(media, m | IFM_10G_SR, data, NULL); 2930 ifmedia_set(media, m | IFM_10G_SR); 2931 break; 2932 2933 case FW_PORT_MOD_TYPE_LRM: 2934 ifmedia_add(media, m | IFM_10G_LRM, data, NULL); 2935 ifmedia_set(media, m | IFM_10G_LRM); 2936 break; 2937 2938 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 2939 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 2940 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL); 2941 ifmedia_set(media, m | IFM_10G_TWINAX); 2942 break; 2943 2944 case FW_PORT_MOD_TYPE_NONE: 2945 m &= ~IFM_FDX; 2946 ifmedia_add(media, m | IFM_NONE, data, NULL); 2947 ifmedia_set(media, m | IFM_NONE); 2948 break; 2949 2950 case FW_PORT_MOD_TYPE_NA: 2951 case FW_PORT_MOD_TYPE_ER: 2952 default: 2953 device_printf(pi->dev, 2954 "unknown port_type (%d), mod_type (%d)\n", 2955 pi->port_type, pi->mod_type); 2956 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2957 ifmedia_set(media, m | IFM_UNKNOWN); 2958 break; 2959 } 2960 break; 2961 2962 case FW_PORT_TYPE_QSFP: 2963 switch (pi->mod_type) { 2964 2965 case FW_PORT_MOD_TYPE_LR: 2966 ifmedia_add(media, m | IFM_40G_LR4, data, NULL); 2967 ifmedia_set(media, m | IFM_40G_LR4); 2968 break; 2969 2970 case FW_PORT_MOD_TYPE_SR: 2971 ifmedia_add(media, m | IFM_40G_SR4, data, NULL); 2972 ifmedia_set(media, m | IFM_40G_SR4); 2973 break; 2974 2975 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 2976 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 2977 ifmedia_add(media, m | IFM_40G_CR4, data, NULL); 2978 ifmedia_set(media, m | IFM_40G_CR4); 2979 break; 2980 2981 case FW_PORT_MOD_TYPE_NONE: 2982 m &= ~IFM_FDX; 2983 ifmedia_add(media, m | IFM_NONE, data, NULL); 2984 ifmedia_set(media, m | IFM_NONE); 2985 break; 2986 2987 default: 2988 device_printf(pi->dev, 2989 "unknown port_type (%d), mod_type (%d)\n", 2990 pi->port_type, pi->mod_type); 2991 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2992 ifmedia_set(media, m | IFM_UNKNOWN); 2993 break; 2994 } 2995 break; 2996 2997 default: 2998 device_printf(pi->dev, 2999 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3000 pi->mod_type); 3001 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 3002 ifmedia_set(media, m | IFM_UNKNOWN); 3003 break; 3004 } 3005 3006 PORT_UNLOCK(pi); 3007 } 3008 3009 
#define FW_MAC_EXACT_CHUNK 7 3010 3011 /* 3012 * Program the port's XGMAC based on parameters in ifnet. The caller also 3013 * indicates which parameters should be programmed (the rest are left alone). 3014 */ 3015 int 3016 update_mac_settings(struct ifnet *ifp, int flags) 3017 { 3018 int rc = 0; 3019 struct port_info *pi = ifp->if_softc; 3020 struct adapter *sc = pi->adapter; 3021 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3022 uint16_t viid = 0xffff; 3023 int16_t *xact_addr_filt = NULL; 3024 3025 ASSERT_SYNCHRONIZED_OP(sc); 3026 KASSERT(flags, ("%s: not told what to update.", __func__)); 3027 3028 if (ifp == pi->ifp) { 3029 viid = pi->viid; 3030 xact_addr_filt = &pi->xact_addr_filt; 3031 } 3032 #ifdef DEV_NETMAP 3033 else if (ifp == pi->nm_ifp) { 3034 viid = pi->nm_viid; 3035 xact_addr_filt = &pi->nm_xact_addr_filt; 3036 } 3037 #endif 3038 if (flags & XGMAC_MTU) 3039 mtu = ifp->if_mtu; 3040 3041 if (flags & XGMAC_PROMISC) 3042 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3043 3044 if (flags & XGMAC_ALLMULTI) 3045 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3046 3047 if (flags & XGMAC_VLANEX) 3048 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 3049 3050 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3051 rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti, 3052 1, vlanex, false); 3053 if (rc) { 3054 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3055 rc); 3056 return (rc); 3057 } 3058 } 3059 3060 if (flags & XGMAC_UCADDR) { 3061 uint8_t ucaddr[ETHER_ADDR_LEN]; 3062 3063 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3064 rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr, 3065 true, true); 3066 if (rc < 0) { 3067 rc = -rc; 3068 if_printf(ifp, "change_mac failed: %d\n", rc); 3069 return (rc); 3070 } else { 3071 *xact_addr_filt = rc; 3072 rc = 0; 3073 } 3074 } 3075 3076 if (flags & XGMAC_MCADDRS) { 3077 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3078 int del = 1; 3079 uint64_t hash = 0; 3080 struct ifmultiaddr *ifma; 3081 int i = 0, j; 3082 3083 if_maddr_rlock(ifp); 3084 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3085 if (ifma->ifma_addr->sa_family != AF_LINK) 3086 continue; 3087 mcaddr[i] = 3088 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3089 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3090 i++; 3091 3092 if (i == FW_MAC_EXACT_CHUNK) { 3093 rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, 3094 i, mcaddr, NULL, &hash, 0); 3095 if (rc < 0) { 3096 rc = -rc; 3097 for (j = 0; j < i; j++) { 3098 if_printf(ifp, 3099 "failed to add mc address" 3100 " %02x:%02x:%02x:" 3101 "%02x:%02x:%02x rc=%d\n", 3102 mcaddr[j][0], mcaddr[j][1], 3103 mcaddr[j][2], mcaddr[j][3], 3104 mcaddr[j][4], mcaddr[j][5], 3105 rc); 3106 } 3107 goto mcfail; 3108 } 3109 del = 0; 3110 i = 0; 3111 } 3112 } 3113 if (i > 0) { 3114 rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i, 3115 mcaddr, NULL, &hash, 0); 3116 if (rc < 0) { 3117 rc = -rc; 3118 for (j = 0; j < i; j++) { 3119 if_printf(ifp, 3120 "failed to add mc address" 3121 " %02x:%02x:%02x:" 3122 "%02x:%02x:%02x rc=%d\n", 3123 mcaddr[j][0], mcaddr[j][1], 3124 mcaddr[j][2], mcaddr[j][3], 3125 mcaddr[j][4], mcaddr[j][5], 3126 rc); 3127 } 3128 goto mcfail; 3129 } 3130 } 3131 3132 rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0); 3133 if (rc != 0) 3134 if_printf(ifp, "failed to set mc address hash: %d", rc); 3135 mcfail: 3136 if_maddr_runlock(ifp); 3137 } 3138 3139 return (rc); 3140 } 3141 3142 /* 3143 * {begin|end}_synchronized_op must be called from the same thread. 
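 *
 * A typical caller looks like this sketch ("t4expl" is just an example
 * wait-message string):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4expl");
 *	if (rc == 0) {
 *		... operate on the adapter while it's marked busy ...
 *		end_synchronized_op(sc, 0);
 *	}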
3144 */ 3145 int 3146 begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags, 3147 char *wmesg) 3148 { 3149 int rc, pri; 3150 3151 #ifdef WITNESS 3152 /* the caller thinks it's ok to sleep, but is it really? */ 3153 if (flags & SLEEP_OK) 3154 pause("t4slptst", 1); 3155 #endif 3156 3157 if (INTR_OK) 3158 pri = PCATCH; 3159 else 3160 pri = 0; 3161 3162 ADAPTER_LOCK(sc); 3163 for (;;) { 3164 3165 if (pi && IS_DOOMED(pi)) { 3166 rc = ENXIO; 3167 goto done; 3168 } 3169 3170 if (!IS_BUSY(sc)) { 3171 rc = 0; 3172 break; 3173 } 3174 3175 if (!(flags & SLEEP_OK)) { 3176 rc = EBUSY; 3177 goto done; 3178 } 3179 3180 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3181 rc = EINTR; 3182 goto done; 3183 } 3184 } 3185 3186 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3187 SET_BUSY(sc); 3188 #ifdef INVARIANTS 3189 sc->last_op = wmesg; 3190 sc->last_op_thr = curthread; 3191 #endif 3192 3193 done: 3194 if (!(flags & HOLD_LOCK) || rc) 3195 ADAPTER_UNLOCK(sc); 3196 3197 return (rc); 3198 } 3199 3200 /* 3201 * {begin|end}_synchronized_op must be called from the same thread. 3202 */ 3203 void 3204 end_synchronized_op(struct adapter *sc, int flags) 3205 { 3206 3207 if (flags & LOCK_HELD) 3208 ADAPTER_LOCK_ASSERT_OWNED(sc); 3209 else 3210 ADAPTER_LOCK(sc); 3211 3212 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3213 CLR_BUSY(sc); 3214 wakeup(&sc->flags); 3215 ADAPTER_UNLOCK(sc); 3216 } 3217 3218 static int 3219 cxgbe_init_synchronized(struct port_info *pi) 3220 { 3221 struct adapter *sc = pi->adapter; 3222 struct ifnet *ifp = pi->ifp; 3223 int rc = 0, i; 3224 struct sge_txq *txq; 3225 3226 ASSERT_SYNCHRONIZED_OP(sc); 3227 3228 if (isset(&sc->open_device_map, pi->port_id)) { 3229 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, 3230 ("mismatch between open_device_map and if_drv_flags")); 3231 return (0); /* already running */ 3232 } 3233 3234 if (!(sc->flags & FULL_INIT_DONE) && 3235 ((rc = adapter_full_init(sc)) != 0)) 3236 return (rc); /* error message displayed already */ 3237 3238 if (!(pi->flags & PORT_INIT_DONE) && 3239 ((rc = port_full_init(pi)) != 0)) 3240 return (rc); /* error message displayed already */ 3241 3242 rc = update_mac_settings(ifp, XGMAC_ALL); 3243 if (rc) 3244 goto done; /* error message displayed already */ 3245 3246 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true); 3247 if (rc != 0) { 3248 if_printf(ifp, "enable_vi failed: %d\n", rc); 3249 goto done; 3250 } 3251 3252 /* 3253 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3254 * if this changes. 3255 */ 3256 3257 for_each_txq(pi, i, txq) { 3258 TXQ_LOCK(txq); 3259 txq->eq.flags |= EQ_ENABLED; 3260 TXQ_UNLOCK(txq); 3261 } 3262 3263 /* 3264 * The first iq of the first port to come up is used for tracing. 3265 */ 3266 if (sc->traceq < 0) { 3267 sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id; 3268 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : 3269 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3270 V_QUEUENUMBER(sc->traceq)); 3271 pi->flags |= HAS_TRACEQ; 3272 } 3273 3274 /* all ok */ 3275 setbit(&sc->open_device_map, pi->port_id); 3276 PORT_LOCK(pi); 3277 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3278 PORT_UNLOCK(pi); 3279 3280 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3281 done: 3282 if (rc != 0) 3283 cxgbe_uninit_synchronized(pi); 3284 3285 return (rc); 3286 } 3287 3288 /* 3289 * Idempotent. 
3290 */ 3291 static int 3292 cxgbe_uninit_synchronized(struct port_info *pi) 3293 { 3294 struct adapter *sc = pi->adapter; 3295 struct ifnet *ifp = pi->ifp; 3296 int rc, i; 3297 struct sge_txq *txq; 3298 3299 ASSERT_SYNCHRONIZED_OP(sc); 3300 3301 if (!(pi->flags & PORT_INIT_DONE)) { 3302 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3303 ("uninited port is running")); 3304 return (0); 3305 } 3306 3307 /* 3308 * Disable the VI so that all its data in either direction is discarded 3309 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 3310 * tick) intact as the TP can deliver negative advice or data that it's 3311 * holding in its RAM (for an offloaded connection) even after the VI is 3312 * disabled. 3313 */ 3314 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false); 3315 if (rc) { 3316 if_printf(ifp, "disable_vi failed: %d\n", rc); 3317 return (rc); 3318 } 3319 3320 for_each_txq(pi, i, txq) { 3321 TXQ_LOCK(txq); 3322 txq->eq.flags &= ~EQ_ENABLED; 3323 TXQ_UNLOCK(txq); 3324 } 3325 3326 clrbit(&sc->open_device_map, pi->port_id); 3327 PORT_LOCK(pi); 3328 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3329 PORT_UNLOCK(pi); 3330 3331 pi->link_cfg.link_ok = 0; 3332 pi->link_cfg.speed = 0; 3333 pi->linkdnrc = -1; 3334 t4_os_link_changed(sc, pi->port_id, 0, -1); 3335 3336 return (0); 3337 } 3338 3339 /* 3340 * It is ok for this function to fail midway and return right away. t4_detach 3341 * will walk the entire sc->irq list and clean up whatever is valid. 3342 */ 3343 static int 3344 setup_intr_handlers(struct adapter *sc) 3345 { 3346 int rc, rid, p, q; 3347 char s[8]; 3348 struct irq *irq; 3349 struct port_info *pi; 3350 struct sge_rxq *rxq; 3351 #ifdef TCP_OFFLOAD 3352 struct sge_ofld_rxq *ofld_rxq; 3353 #endif 3354 #ifdef DEV_NETMAP 3355 struct sge_nm_rxq *nm_rxq; 3356 #endif 3357 3358 /* 3359 * Setup interrupts. 3360 */ 3361 irq = &sc->irq[0]; 3362 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3363 if (sc->intr_count == 1) 3364 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3365 3366 /* Multiple interrupts. 
*/ 3367 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3368 ("%s: too few intr.", __func__)); 3369 3370 /* The first one is always error intr */ 3371 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3372 if (rc != 0) 3373 return (rc); 3374 irq++; 3375 rid++; 3376 3377 /* The second one is always the firmware event queue */ 3378 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt"); 3379 if (rc != 0) 3380 return (rc); 3381 irq++; 3382 rid++; 3383 3384 for_each_port(sc, p) { 3385 pi = sc->port[p]; 3386 3387 if (pi->flags & INTR_RXQ) { 3388 for_each_rxq(pi, q, rxq) { 3389 snprintf(s, sizeof(s), "%d.%d", p, q); 3390 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq, 3391 s); 3392 if (rc != 0) 3393 return (rc); 3394 irq++; 3395 rid++; 3396 } 3397 } 3398 #ifdef TCP_OFFLOAD 3399 if (pi->flags & INTR_OFLD_RXQ) { 3400 for_each_ofld_rxq(pi, q, ofld_rxq) { 3401 snprintf(s, sizeof(s), "%d,%d", p, q); 3402 rc = t4_alloc_irq(sc, irq, rid, t4_intr, 3403 ofld_rxq, s); 3404 if (rc != 0) 3405 return (rc); 3406 irq++; 3407 rid++; 3408 } 3409 } 3410 #endif 3411 #ifdef DEV_NETMAP 3412 if (pi->flags & INTR_NM_RXQ) { 3413 for_each_nm_rxq(pi, q, nm_rxq) { 3414 snprintf(s, sizeof(s), "%d-%d", p, q); 3415 rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr, 3416 nm_rxq, s); 3417 if (rc != 0) 3418 return (rc); 3419 irq++; 3420 rid++; 3421 } 3422 } 3423 #endif 3424 } 3425 MPASS(irq == &sc->irq[sc->intr_count]); 3426 3427 return (0); 3428 } 3429 3430 int 3431 adapter_full_init(struct adapter *sc) 3432 { 3433 int rc, i; 3434 3435 ASSERT_SYNCHRONIZED_OP(sc); 3436 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3437 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 3438 ("%s: FULL_INIT_DONE already", __func__)); 3439 3440 /* 3441 * queues that belong to the adapter (not any particular port). 3442 */ 3443 rc = t4_setup_adapter_queues(sc); 3444 if (rc != 0) 3445 goto done; 3446 3447 for (i = 0; i < nitems(sc->tq); i++) { 3448 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 3449 taskqueue_thread_enqueue, &sc->tq[i]); 3450 if (sc->tq[i] == NULL) { 3451 device_printf(sc->dev, 3452 "failed to allocate task queue %d\n", i); 3453 rc = ENOMEM; 3454 goto done; 3455 } 3456 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 3457 device_get_nameunit(sc->dev), i); 3458 } 3459 3460 t4_intr_enable(sc); 3461 sc->flags |= FULL_INIT_DONE; 3462 done: 3463 if (rc != 0) 3464 adapter_full_uninit(sc); 3465 3466 return (rc); 3467 } 3468 3469 int 3470 adapter_full_uninit(struct adapter *sc) 3471 { 3472 int i; 3473 3474 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3475 3476 t4_teardown_adapter_queues(sc); 3477 3478 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 3479 taskqueue_free(sc->tq[i]); 3480 sc->tq[i] = NULL; 3481 } 3482 3483 sc->flags &= ~FULL_INIT_DONE; 3484 3485 return (0); 3486 } 3487 3488 int 3489 port_full_init(struct port_info *pi) 3490 { 3491 struct adapter *sc = pi->adapter; 3492 struct ifnet *ifp = pi->ifp; 3493 uint16_t *rss; 3494 struct sge_rxq *rxq; 3495 int rc, i, j; 3496 3497 ASSERT_SYNCHRONIZED_OP(sc); 3498 KASSERT((pi->flags & PORT_INIT_DONE) == 0, 3499 ("%s: PORT_INIT_DONE already", __func__)); 3500 3501 sysctl_ctx_init(&pi->ctx); 3502 pi->flags |= PORT_SYSCTL_CTX; 3503 3504 /* 3505 * Allocate tx/rx/fl queues for this port. 3506 */ 3507 rc = t4_setup_port_queues(pi); 3508 if (rc != 0) 3509 goto done; /* error message displayed already */ 3510 3511 /* 3512 * Setup RSS for this port. Save a copy of the RSS table for later use. 
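	 *
	 * The table is filled round-robin with the absolute IDs of this port's
	 * rx queues; e.g. with rss_size 128 and 4 NIC rxqs each queue ID
	 * appears 32 times.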
3513 */ 3514 rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 3515 for (i = 0; i < pi->rss_size;) { 3516 for_each_rxq(pi, j, rxq) { 3517 rss[i++] = rxq->iq.abs_id; 3518 if (i == pi->rss_size) 3519 break; 3520 } 3521 } 3522 3523 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss, 3524 pi->rss_size); 3525 if (rc != 0) { 3526 if_printf(ifp, "rss_config failed: %d\n", rc); 3527 goto done; 3528 } 3529 3530 pi->rss = rss; 3531 pi->flags |= PORT_INIT_DONE; 3532 done: 3533 if (rc != 0) 3534 port_full_uninit(pi); 3535 3536 return (rc); 3537 } 3538 3539 /* 3540 * Idempotent. 3541 */ 3542 int 3543 port_full_uninit(struct port_info *pi) 3544 { 3545 struct adapter *sc = pi->adapter; 3546 int i; 3547 struct sge_rxq *rxq; 3548 struct sge_txq *txq; 3549 #ifdef TCP_OFFLOAD 3550 struct sge_ofld_rxq *ofld_rxq; 3551 struct sge_wrq *ofld_txq; 3552 #endif 3553 3554 if (pi->flags & PORT_INIT_DONE) { 3555 3556 /* Need to quiesce queues. */ 3557 3558 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 3559 3560 for_each_txq(pi, i, txq) { 3561 quiesce_txq(sc, txq); 3562 } 3563 3564 #ifdef TCP_OFFLOAD 3565 for_each_ofld_txq(pi, i, ofld_txq) { 3566 quiesce_wrq(sc, ofld_txq); 3567 } 3568 #endif 3569 3570 for_each_rxq(pi, i, rxq) { 3571 quiesce_iq(sc, &rxq->iq); 3572 quiesce_fl(sc, &rxq->fl); 3573 } 3574 3575 #ifdef TCP_OFFLOAD 3576 for_each_ofld_rxq(pi, i, ofld_rxq) { 3577 quiesce_iq(sc, &ofld_rxq->iq); 3578 quiesce_fl(sc, &ofld_rxq->fl); 3579 } 3580 #endif 3581 free(pi->rss, M_CXGBE); 3582 } 3583 3584 t4_teardown_port_queues(pi); 3585 pi->flags &= ~PORT_INIT_DONE; 3586 3587 return (0); 3588 } 3589 3590 static void 3591 quiesce_txq(struct adapter *sc, struct sge_txq *txq) 3592 { 3593 struct sge_eq *eq = &txq->eq; 3594 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 3595 3596 (void) sc; /* unused */ 3597 3598 #ifdef INVARIANTS 3599 TXQ_LOCK(txq); 3600 MPASS((eq->flags & EQ_ENABLED) == 0); 3601 TXQ_UNLOCK(txq); 3602 #endif 3603 3604 /* Wait for the mp_ring to empty. */ 3605 while (!mp_ring_is_idle(txq->r)) { 3606 mp_ring_check_drainage(txq->r, 0); 3607 pause("rquiesce", 1); 3608 } 3609 3610 /* Then wait for the hardware to finish. */ 3611 while (spg->cidx != htobe16(eq->pidx)) 3612 pause("equiesce", 1); 3613 3614 /* Finally, wait for the driver to reclaim all descriptors. 
*/ 3615 while (eq->cidx != eq->pidx) 3616 pause("dquiesce", 1); 3617 } 3618 3619 static void 3620 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 3621 { 3622 3623 /* XXXTX */ 3624 } 3625 3626 static void 3627 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 3628 { 3629 (void) sc; /* unused */ 3630 3631 /* Synchronize with the interrupt handler */ 3632 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 3633 pause("iqfree", 1); 3634 } 3635 3636 static void 3637 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 3638 { 3639 mtx_lock(&sc->sfl_lock); 3640 FL_LOCK(fl); 3641 fl->flags |= FL_DOOMED; 3642 FL_UNLOCK(fl); 3643 mtx_unlock(&sc->sfl_lock); 3644 3645 callout_drain(&sc->sfl_callout); 3646 KASSERT((fl->flags & FL_STARVING) == 0, 3647 ("%s: still starving", __func__)); 3648 } 3649 3650 static int 3651 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 3652 driver_intr_t *handler, void *arg, char *name) 3653 { 3654 int rc; 3655 3656 irq->rid = rid; 3657 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 3658 RF_SHAREABLE | RF_ACTIVE); 3659 if (irq->res == NULL) { 3660 device_printf(sc->dev, 3661 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 3662 return (ENOMEM); 3663 } 3664 3665 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 3666 NULL, handler, arg, &irq->tag); 3667 if (rc != 0) { 3668 device_printf(sc->dev, 3669 "failed to setup interrupt for rid %d, name %s: %d\n", 3670 rid, name, rc); 3671 } else if (name) 3672 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 3673 3674 return (rc); 3675 } 3676 3677 static int 3678 t4_free_irq(struct adapter *sc, struct irq *irq) 3679 { 3680 if (irq->tag) 3681 bus_teardown_intr(sc->dev, irq->res, irq->tag); 3682 if (irq->res) 3683 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 3684 3685 bzero(irq, sizeof(*irq)); 3686 3687 return (0); 3688 } 3689 3690 static void 3691 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start, 3692 unsigned int end) 3693 { 3694 uint32_t *p = (uint32_t *)(buf + start); 3695 3696 for ( ; start <= end; start += sizeof(uint32_t)) 3697 *p++ = t4_read_reg(sc, start); 3698 } 3699 3700 static void 3701 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 3702 { 3703 int i, n; 3704 const unsigned int *reg_ranges; 3705 static const unsigned int t4_reg_ranges[] = { 3706 0x1008, 0x1108, 3707 0x1180, 0x11b4, 3708 0x11fc, 0x123c, 3709 0x1300, 0x173c, 3710 0x1800, 0x18fc, 3711 0x3000, 0x30d8, 3712 0x30e0, 0x5924, 3713 0x5960, 0x59d4, 3714 0x5a00, 0x5af8, 3715 0x6000, 0x6098, 3716 0x6100, 0x6150, 3717 0x6200, 0x6208, 3718 0x6240, 0x6248, 3719 0x6280, 0x6338, 3720 0x6370, 0x638c, 3721 0x6400, 0x643c, 3722 0x6500, 0x6524, 3723 0x6a00, 0x6a38, 3724 0x6a60, 0x6a78, 3725 0x6b00, 0x6b84, 3726 0x6bf0, 0x6c84, 3727 0x6cf0, 0x6d84, 3728 0x6df0, 0x6e84, 3729 0x6ef0, 0x6f84, 3730 0x6ff0, 0x7084, 3731 0x70f0, 0x7184, 3732 0x71f0, 0x7284, 3733 0x72f0, 0x7384, 3734 0x73f0, 0x7450, 3735 0x7500, 0x7530, 3736 0x7600, 0x761c, 3737 0x7680, 0x76cc, 3738 0x7700, 0x7798, 3739 0x77c0, 0x77fc, 3740 0x7900, 0x79fc, 3741 0x7b00, 0x7c38, 3742 0x7d00, 0x7efc, 3743 0x8dc0, 0x8e1c, 3744 0x8e30, 0x8e78, 3745 0x8ea0, 0x8f6c, 3746 0x8fc0, 0x9074, 3747 0x90fc, 0x90fc, 3748 0x9400, 0x9458, 3749 0x9600, 0x96bc, 3750 0x9800, 0x9808, 3751 0x9820, 0x983c, 3752 0x9850, 0x9864, 3753 0x9c00, 0x9c6c, 3754 0x9c80, 0x9cec, 3755 0x9d00, 0x9d6c, 3756 0x9d80, 0x9dec, 3757 0x9e00, 0x9e6c, 3758 0x9e80, 0x9eec, 3759 0x9f00, 0x9f6c, 3760 0x9f80, 0x9fec, 3761 0xd004, 
0xd03c, 3762 0xdfc0, 0xdfe0, 3763 0xe000, 0xea7c, 3764 0xf000, 0x11110, 3765 0x11118, 0x11190, 3766 0x19040, 0x1906c, 3767 0x19078, 0x19080, 3768 0x1908c, 0x19124, 3769 0x19150, 0x191b0, 3770 0x191d0, 0x191e8, 3771 0x19238, 0x1924c, 3772 0x193f8, 0x19474, 3773 0x19490, 0x194f8, 3774 0x19800, 0x19f30, 3775 0x1a000, 0x1a06c, 3776 0x1a0b0, 0x1a120, 3777 0x1a128, 0x1a138, 3778 0x1a190, 0x1a1c4, 3779 0x1a1fc, 0x1a1fc, 3780 0x1e040, 0x1e04c, 3781 0x1e284, 0x1e28c, 3782 0x1e2c0, 0x1e2c0, 3783 0x1e2e0, 0x1e2e0, 3784 0x1e300, 0x1e384, 3785 0x1e3c0, 0x1e3c8, 3786 0x1e440, 0x1e44c, 3787 0x1e684, 0x1e68c, 3788 0x1e6c0, 0x1e6c0, 3789 0x1e6e0, 0x1e6e0, 3790 0x1e700, 0x1e784, 3791 0x1e7c0, 0x1e7c8, 3792 0x1e840, 0x1e84c, 3793 0x1ea84, 0x1ea8c, 3794 0x1eac0, 0x1eac0, 3795 0x1eae0, 0x1eae0, 3796 0x1eb00, 0x1eb84, 3797 0x1ebc0, 0x1ebc8, 3798 0x1ec40, 0x1ec4c, 3799 0x1ee84, 0x1ee8c, 3800 0x1eec0, 0x1eec0, 3801 0x1eee0, 0x1eee0, 3802 0x1ef00, 0x1ef84, 3803 0x1efc0, 0x1efc8, 3804 0x1f040, 0x1f04c, 3805 0x1f284, 0x1f28c, 3806 0x1f2c0, 0x1f2c0, 3807 0x1f2e0, 0x1f2e0, 3808 0x1f300, 0x1f384, 3809 0x1f3c0, 0x1f3c8, 3810 0x1f440, 0x1f44c, 3811 0x1f684, 0x1f68c, 3812 0x1f6c0, 0x1f6c0, 3813 0x1f6e0, 0x1f6e0, 3814 0x1f700, 0x1f784, 3815 0x1f7c0, 0x1f7c8, 3816 0x1f840, 0x1f84c, 3817 0x1fa84, 0x1fa8c, 3818 0x1fac0, 0x1fac0, 3819 0x1fae0, 0x1fae0, 3820 0x1fb00, 0x1fb84, 3821 0x1fbc0, 0x1fbc8, 3822 0x1fc40, 0x1fc4c, 3823 0x1fe84, 0x1fe8c, 3824 0x1fec0, 0x1fec0, 3825 0x1fee0, 0x1fee0, 3826 0x1ff00, 0x1ff84, 3827 0x1ffc0, 0x1ffc8, 3828 0x20000, 0x2002c, 3829 0x20100, 0x2013c, 3830 0x20190, 0x201c8, 3831 0x20200, 0x20318, 3832 0x20400, 0x20528, 3833 0x20540, 0x20614, 3834 0x21000, 0x21040, 3835 0x2104c, 0x21060, 3836 0x210c0, 0x210ec, 3837 0x21200, 0x21268, 3838 0x21270, 0x21284, 3839 0x212fc, 0x21388, 3840 0x21400, 0x21404, 3841 0x21500, 0x21518, 3842 0x2152c, 0x2153c, 3843 0x21550, 0x21554, 3844 0x21600, 0x21600, 3845 0x21608, 0x21628, 3846 0x21630, 0x2163c, 3847 0x21700, 0x2171c, 3848 0x21780, 0x2178c, 3849 0x21800, 0x21c38, 3850 0x21c80, 0x21d7c, 3851 0x21e00, 0x21e04, 3852 0x22000, 0x2202c, 3853 0x22100, 0x2213c, 3854 0x22190, 0x221c8, 3855 0x22200, 0x22318, 3856 0x22400, 0x22528, 3857 0x22540, 0x22614, 3858 0x23000, 0x23040, 3859 0x2304c, 0x23060, 3860 0x230c0, 0x230ec, 3861 0x23200, 0x23268, 3862 0x23270, 0x23284, 3863 0x232fc, 0x23388, 3864 0x23400, 0x23404, 3865 0x23500, 0x23518, 3866 0x2352c, 0x2353c, 3867 0x23550, 0x23554, 3868 0x23600, 0x23600, 3869 0x23608, 0x23628, 3870 0x23630, 0x2363c, 3871 0x23700, 0x2371c, 3872 0x23780, 0x2378c, 3873 0x23800, 0x23c38, 3874 0x23c80, 0x23d7c, 3875 0x23e00, 0x23e04, 3876 0x24000, 0x2402c, 3877 0x24100, 0x2413c, 3878 0x24190, 0x241c8, 3879 0x24200, 0x24318, 3880 0x24400, 0x24528, 3881 0x24540, 0x24614, 3882 0x25000, 0x25040, 3883 0x2504c, 0x25060, 3884 0x250c0, 0x250ec, 3885 0x25200, 0x25268, 3886 0x25270, 0x25284, 3887 0x252fc, 0x25388, 3888 0x25400, 0x25404, 3889 0x25500, 0x25518, 3890 0x2552c, 0x2553c, 3891 0x25550, 0x25554, 3892 0x25600, 0x25600, 3893 0x25608, 0x25628, 3894 0x25630, 0x2563c, 3895 0x25700, 0x2571c, 3896 0x25780, 0x2578c, 3897 0x25800, 0x25c38, 3898 0x25c80, 0x25d7c, 3899 0x25e00, 0x25e04, 3900 0x26000, 0x2602c, 3901 0x26100, 0x2613c, 3902 0x26190, 0x261c8, 3903 0x26200, 0x26318, 3904 0x26400, 0x26528, 3905 0x26540, 0x26614, 3906 0x27000, 0x27040, 3907 0x2704c, 0x27060, 3908 0x270c0, 0x270ec, 3909 0x27200, 0x27268, 3910 0x27270, 0x27284, 3911 0x272fc, 0x27388, 3912 0x27400, 0x27404, 3913 0x27500, 0x27518, 3914 0x2752c, 0x2753c, 3915 0x27550, 0x27554, 3916 
0x27600, 0x27600, 3917 0x27608, 0x27628, 3918 0x27630, 0x2763c, 3919 0x27700, 0x2771c, 3920 0x27780, 0x2778c, 3921 0x27800, 0x27c38, 3922 0x27c80, 0x27d7c, 3923 0x27e00, 0x27e04 3924 }; 3925 static const unsigned int t5_reg_ranges[] = { 3926 0x1008, 0x1148, 3927 0x1180, 0x11b4, 3928 0x11fc, 0x123c, 3929 0x1280, 0x173c, 3930 0x1800, 0x18fc, 3931 0x3000, 0x3028, 3932 0x3060, 0x30d8, 3933 0x30e0, 0x30fc, 3934 0x3140, 0x357c, 3935 0x35a8, 0x35cc, 3936 0x35ec, 0x35ec, 3937 0x3600, 0x5624, 3938 0x56cc, 0x575c, 3939 0x580c, 0x5814, 3940 0x5890, 0x58bc, 3941 0x5940, 0x59dc, 3942 0x59fc, 0x5a18, 3943 0x5a60, 0x5a9c, 3944 0x5b94, 0x5bfc, 3945 0x6000, 0x6040, 3946 0x6058, 0x614c, 3947 0x7700, 0x7798, 3948 0x77c0, 0x78fc, 3949 0x7b00, 0x7c54, 3950 0x7d00, 0x7efc, 3951 0x8dc0, 0x8de0, 3952 0x8df8, 0x8e84, 3953 0x8ea0, 0x8f84, 3954 0x8fc0, 0x90f8, 3955 0x9400, 0x9470, 3956 0x9600, 0x96f4, 3957 0x9800, 0x9808, 3958 0x9820, 0x983c, 3959 0x9850, 0x9864, 3960 0x9c00, 0x9c6c, 3961 0x9c80, 0x9cec, 3962 0x9d00, 0x9d6c, 3963 0x9d80, 0x9dec, 3964 0x9e00, 0x9e6c, 3965 0x9e80, 0x9eec, 3966 0x9f00, 0x9f6c, 3967 0x9f80, 0xa020, 3968 0xd004, 0xd03c, 3969 0xdfc0, 0xdfe0, 3970 0xe000, 0x11088, 3971 0x1109c, 0x11110, 3972 0x11118, 0x1117c, 3973 0x11190, 0x11204, 3974 0x19040, 0x1906c, 3975 0x19078, 0x19080, 3976 0x1908c, 0x19124, 3977 0x19150, 0x191b0, 3978 0x191d0, 0x191e8, 3979 0x19238, 0x19290, 3980 0x193f8, 0x19474, 3981 0x19490, 0x194cc, 3982 0x194f0, 0x194f8, 3983 0x19c00, 0x19c60, 3984 0x19c94, 0x19e10, 3985 0x19e50, 0x19f34, 3986 0x19f40, 0x19f50, 3987 0x19f90, 0x19fe4, 3988 0x1a000, 0x1a06c, 3989 0x1a0b0, 0x1a120, 3990 0x1a128, 0x1a138, 3991 0x1a190, 0x1a1c4, 3992 0x1a1fc, 0x1a1fc, 3993 0x1e008, 0x1e00c, 3994 0x1e040, 0x1e04c, 3995 0x1e284, 0x1e290, 3996 0x1e2c0, 0x1e2c0, 3997 0x1e2e0, 0x1e2e0, 3998 0x1e300, 0x1e384, 3999 0x1e3c0, 0x1e3c8, 4000 0x1e408, 0x1e40c, 4001 0x1e440, 0x1e44c, 4002 0x1e684, 0x1e690, 4003 0x1e6c0, 0x1e6c0, 4004 0x1e6e0, 0x1e6e0, 4005 0x1e700, 0x1e784, 4006 0x1e7c0, 0x1e7c8, 4007 0x1e808, 0x1e80c, 4008 0x1e840, 0x1e84c, 4009 0x1ea84, 0x1ea90, 4010 0x1eac0, 0x1eac0, 4011 0x1eae0, 0x1eae0, 4012 0x1eb00, 0x1eb84, 4013 0x1ebc0, 0x1ebc8, 4014 0x1ec08, 0x1ec0c, 4015 0x1ec40, 0x1ec4c, 4016 0x1ee84, 0x1ee90, 4017 0x1eec0, 0x1eec0, 4018 0x1eee0, 0x1eee0, 4019 0x1ef00, 0x1ef84, 4020 0x1efc0, 0x1efc8, 4021 0x1f008, 0x1f00c, 4022 0x1f040, 0x1f04c, 4023 0x1f284, 0x1f290, 4024 0x1f2c0, 0x1f2c0, 4025 0x1f2e0, 0x1f2e0, 4026 0x1f300, 0x1f384, 4027 0x1f3c0, 0x1f3c8, 4028 0x1f408, 0x1f40c, 4029 0x1f440, 0x1f44c, 4030 0x1f684, 0x1f690, 4031 0x1f6c0, 0x1f6c0, 4032 0x1f6e0, 0x1f6e0, 4033 0x1f700, 0x1f784, 4034 0x1f7c0, 0x1f7c8, 4035 0x1f808, 0x1f80c, 4036 0x1f840, 0x1f84c, 4037 0x1fa84, 0x1fa90, 4038 0x1fac0, 0x1fac0, 4039 0x1fae0, 0x1fae0, 4040 0x1fb00, 0x1fb84, 4041 0x1fbc0, 0x1fbc8, 4042 0x1fc08, 0x1fc0c, 4043 0x1fc40, 0x1fc4c, 4044 0x1fe84, 0x1fe90, 4045 0x1fec0, 0x1fec0, 4046 0x1fee0, 0x1fee0, 4047 0x1ff00, 0x1ff84, 4048 0x1ffc0, 0x1ffc8, 4049 0x30000, 0x30030, 4050 0x30100, 0x30144, 4051 0x30190, 0x301d0, 4052 0x30200, 0x30318, 4053 0x30400, 0x3052c, 4054 0x30540, 0x3061c, 4055 0x30800, 0x30834, 4056 0x308c0, 0x30908, 4057 0x30910, 0x309ac, 4058 0x30a00, 0x30a2c, 4059 0x30a44, 0x30a50, 4060 0x30a74, 0x30c24, 4061 0x30d00, 0x30d00, 4062 0x30d08, 0x30d14, 4063 0x30d1c, 0x30d20, 4064 0x30d3c, 0x30d50, 4065 0x31200, 0x3120c, 4066 0x31220, 0x31220, 4067 0x31240, 0x31240, 4068 0x31600, 0x3160c, 4069 0x31a00, 0x31a1c, 4070 0x31e00, 0x31e20, 4071 0x31e38, 0x31e3c, 4072 0x31e80, 0x31e80, 4073 0x31e88, 0x31ea8, 
4074 0x31eb0, 0x31eb4, 4075 0x31ec8, 0x31ed4, 4076 0x31fb8, 0x32004, 4077 0x32200, 0x32200, 4078 0x32208, 0x32240, 4079 0x32248, 0x32280, 4080 0x32288, 0x322c0, 4081 0x322c8, 0x322fc, 4082 0x32600, 0x32630, 4083 0x32a00, 0x32abc, 4084 0x32b00, 0x32b70, 4085 0x33000, 0x33048, 4086 0x33060, 0x3309c, 4087 0x330f0, 0x33148, 4088 0x33160, 0x3319c, 4089 0x331f0, 0x332e4, 4090 0x332f8, 0x333e4, 4091 0x333f8, 0x33448, 4092 0x33460, 0x3349c, 4093 0x334f0, 0x33548, 4094 0x33560, 0x3359c, 4095 0x335f0, 0x336e4, 4096 0x336f8, 0x337e4, 4097 0x337f8, 0x337fc, 4098 0x33814, 0x33814, 4099 0x3382c, 0x3382c, 4100 0x33880, 0x3388c, 4101 0x338e8, 0x338ec, 4102 0x33900, 0x33948, 4103 0x33960, 0x3399c, 4104 0x339f0, 0x33ae4, 4105 0x33af8, 0x33b10, 4106 0x33b28, 0x33b28, 4107 0x33b3c, 0x33b50, 4108 0x33bf0, 0x33c10, 4109 0x33c28, 0x33c28, 4110 0x33c3c, 0x33c50, 4111 0x33cf0, 0x33cfc, 4112 0x34000, 0x34030, 4113 0x34100, 0x34144, 4114 0x34190, 0x341d0, 4115 0x34200, 0x34318, 4116 0x34400, 0x3452c, 4117 0x34540, 0x3461c, 4118 0x34800, 0x34834, 4119 0x348c0, 0x34908, 4120 0x34910, 0x349ac, 4121 0x34a00, 0x34a2c, 4122 0x34a44, 0x34a50, 4123 0x34a74, 0x34c24, 4124 0x34d00, 0x34d00, 4125 0x34d08, 0x34d14, 4126 0x34d1c, 0x34d20, 4127 0x34d3c, 0x34d50, 4128 0x35200, 0x3520c, 4129 0x35220, 0x35220, 4130 0x35240, 0x35240, 4131 0x35600, 0x3560c, 4132 0x35a00, 0x35a1c, 4133 0x35e00, 0x35e20, 4134 0x35e38, 0x35e3c, 4135 0x35e80, 0x35e80, 4136 0x35e88, 0x35ea8, 4137 0x35eb0, 0x35eb4, 4138 0x35ec8, 0x35ed4, 4139 0x35fb8, 0x36004, 4140 0x36200, 0x36200, 4141 0x36208, 0x36240, 4142 0x36248, 0x36280, 4143 0x36288, 0x362c0, 4144 0x362c8, 0x362fc, 4145 0x36600, 0x36630, 4146 0x36a00, 0x36abc, 4147 0x36b00, 0x36b70, 4148 0x37000, 0x37048, 4149 0x37060, 0x3709c, 4150 0x370f0, 0x37148, 4151 0x37160, 0x3719c, 4152 0x371f0, 0x372e4, 4153 0x372f8, 0x373e4, 4154 0x373f8, 0x37448, 4155 0x37460, 0x3749c, 4156 0x374f0, 0x37548, 4157 0x37560, 0x3759c, 4158 0x375f0, 0x376e4, 4159 0x376f8, 0x377e4, 4160 0x377f8, 0x377fc, 4161 0x37814, 0x37814, 4162 0x3782c, 0x3782c, 4163 0x37880, 0x3788c, 4164 0x378e8, 0x378ec, 4165 0x37900, 0x37948, 4166 0x37960, 0x3799c, 4167 0x379f0, 0x37ae4, 4168 0x37af8, 0x37b10, 4169 0x37b28, 0x37b28, 4170 0x37b3c, 0x37b50, 4171 0x37bf0, 0x37c10, 4172 0x37c28, 0x37c28, 4173 0x37c3c, 0x37c50, 4174 0x37cf0, 0x37cfc, 4175 0x38000, 0x38030, 4176 0x38100, 0x38144, 4177 0x38190, 0x381d0, 4178 0x38200, 0x38318, 4179 0x38400, 0x3852c, 4180 0x38540, 0x3861c, 4181 0x38800, 0x38834, 4182 0x388c0, 0x38908, 4183 0x38910, 0x389ac, 4184 0x38a00, 0x38a2c, 4185 0x38a44, 0x38a50, 4186 0x38a74, 0x38c24, 4187 0x38d00, 0x38d00, 4188 0x38d08, 0x38d14, 4189 0x38d1c, 0x38d20, 4190 0x38d3c, 0x38d50, 4191 0x39200, 0x3920c, 4192 0x39220, 0x39220, 4193 0x39240, 0x39240, 4194 0x39600, 0x3960c, 4195 0x39a00, 0x39a1c, 4196 0x39e00, 0x39e20, 4197 0x39e38, 0x39e3c, 4198 0x39e80, 0x39e80, 4199 0x39e88, 0x39ea8, 4200 0x39eb0, 0x39eb4, 4201 0x39ec8, 0x39ed4, 4202 0x39fb8, 0x3a004, 4203 0x3a200, 0x3a200, 4204 0x3a208, 0x3a240, 4205 0x3a248, 0x3a280, 4206 0x3a288, 0x3a2c0, 4207 0x3a2c8, 0x3a2fc, 4208 0x3a600, 0x3a630, 4209 0x3aa00, 0x3aabc, 4210 0x3ab00, 0x3ab70, 4211 0x3b000, 0x3b048, 4212 0x3b060, 0x3b09c, 4213 0x3b0f0, 0x3b148, 4214 0x3b160, 0x3b19c, 4215 0x3b1f0, 0x3b2e4, 4216 0x3b2f8, 0x3b3e4, 4217 0x3b3f8, 0x3b448, 4218 0x3b460, 0x3b49c, 4219 0x3b4f0, 0x3b548, 4220 0x3b560, 0x3b59c, 4221 0x3b5f0, 0x3b6e4, 4222 0x3b6f8, 0x3b7e4, 4223 0x3b7f8, 0x3b7fc, 4224 0x3b814, 0x3b814, 4225 0x3b82c, 0x3b82c, 4226 0x3b880, 0x3b88c, 4227 0x3b8e8, 0x3b8ec, 4228 
0x3b900, 0x3b948, 4229 0x3b960, 0x3b99c, 4230 0x3b9f0, 0x3bae4, 4231 0x3baf8, 0x3bb10, 4232 0x3bb28, 0x3bb28, 4233 0x3bb3c, 0x3bb50, 4234 0x3bbf0, 0x3bc10, 4235 0x3bc28, 0x3bc28, 4236 0x3bc3c, 0x3bc50, 4237 0x3bcf0, 0x3bcfc, 4238 0x3c000, 0x3c030, 4239 0x3c100, 0x3c144, 4240 0x3c190, 0x3c1d0, 4241 0x3c200, 0x3c318, 4242 0x3c400, 0x3c52c, 4243 0x3c540, 0x3c61c, 4244 0x3c800, 0x3c834, 4245 0x3c8c0, 0x3c908, 4246 0x3c910, 0x3c9ac, 4247 0x3ca00, 0x3ca2c, 4248 0x3ca44, 0x3ca50, 4249 0x3ca74, 0x3cc24, 4250 0x3cd00, 0x3cd00, 4251 0x3cd08, 0x3cd14, 4252 0x3cd1c, 0x3cd20, 4253 0x3cd3c, 0x3cd50, 4254 0x3d200, 0x3d20c, 4255 0x3d220, 0x3d220, 4256 0x3d240, 0x3d240, 4257 0x3d600, 0x3d60c, 4258 0x3da00, 0x3da1c, 4259 0x3de00, 0x3de20, 4260 0x3de38, 0x3de3c, 4261 0x3de80, 0x3de80, 4262 0x3de88, 0x3dea8, 4263 0x3deb0, 0x3deb4, 4264 0x3dec8, 0x3ded4, 4265 0x3dfb8, 0x3e004, 4266 0x3e200, 0x3e200, 4267 0x3e208, 0x3e240, 4268 0x3e248, 0x3e280, 4269 0x3e288, 0x3e2c0, 4270 0x3e2c8, 0x3e2fc, 4271 0x3e600, 0x3e630, 4272 0x3ea00, 0x3eabc, 4273 0x3eb00, 0x3eb70, 4274 0x3f000, 0x3f048, 4275 0x3f060, 0x3f09c, 4276 0x3f0f0, 0x3f148, 4277 0x3f160, 0x3f19c, 4278 0x3f1f0, 0x3f2e4, 4279 0x3f2f8, 0x3f3e4, 4280 0x3f3f8, 0x3f448, 4281 0x3f460, 0x3f49c, 4282 0x3f4f0, 0x3f548, 4283 0x3f560, 0x3f59c, 4284 0x3f5f0, 0x3f6e4, 4285 0x3f6f8, 0x3f7e4, 4286 0x3f7f8, 0x3f7fc, 4287 0x3f814, 0x3f814, 4288 0x3f82c, 0x3f82c, 4289 0x3f880, 0x3f88c, 4290 0x3f8e8, 0x3f8ec, 4291 0x3f900, 0x3f948, 4292 0x3f960, 0x3f99c, 4293 0x3f9f0, 0x3fae4, 4294 0x3faf8, 0x3fb10, 4295 0x3fb28, 0x3fb28, 4296 0x3fb3c, 0x3fb50, 4297 0x3fbf0, 0x3fc10, 4298 0x3fc28, 0x3fc28, 4299 0x3fc3c, 0x3fc50, 4300 0x3fcf0, 0x3fcfc, 4301 0x40000, 0x4000c, 4302 0x40040, 0x40068, 4303 0x4007c, 0x40144, 4304 0x40180, 0x4018c, 4305 0x40200, 0x40298, 4306 0x402ac, 0x4033c, 4307 0x403f8, 0x403fc, 4308 0x41304, 0x413c4, 4309 0x41400, 0x4141c, 4310 0x41480, 0x414d0, 4311 0x44000, 0x44078, 4312 0x440c0, 0x44278, 4313 0x442c0, 0x44478, 4314 0x444c0, 0x44678, 4315 0x446c0, 0x44878, 4316 0x448c0, 0x449fc, 4317 0x45000, 0x45068, 4318 0x45080, 0x45084, 4319 0x450a0, 0x450b0, 4320 0x45200, 0x45268, 4321 0x45280, 0x45284, 4322 0x452a0, 0x452b0, 4323 0x460c0, 0x460e4, 4324 0x47000, 0x4708c, 4325 0x47200, 0x47250, 4326 0x47400, 0x47420, 4327 0x47600, 0x47618, 4328 0x47800, 0x47814, 4329 0x48000, 0x4800c, 4330 0x48040, 0x48068, 4331 0x4807c, 0x48144, 4332 0x48180, 0x4818c, 4333 0x48200, 0x48298, 4334 0x482ac, 0x4833c, 4335 0x483f8, 0x483fc, 4336 0x49304, 0x493c4, 4337 0x49400, 0x4941c, 4338 0x49480, 0x494d0, 4339 0x4c000, 0x4c078, 4340 0x4c0c0, 0x4c278, 4341 0x4c2c0, 0x4c478, 4342 0x4c4c0, 0x4c678, 4343 0x4c6c0, 0x4c878, 4344 0x4c8c0, 0x4c9fc, 4345 0x4d000, 0x4d068, 4346 0x4d080, 0x4d084, 4347 0x4d0a0, 0x4d0b0, 4348 0x4d200, 0x4d268, 4349 0x4d280, 0x4d284, 4350 0x4d2a0, 0x4d2b0, 4351 0x4e0c0, 0x4e0e4, 4352 0x4f000, 0x4f08c, 4353 0x4f200, 0x4f250, 4354 0x4f400, 0x4f420, 4355 0x4f600, 0x4f618, 4356 0x4f800, 0x4f814, 4357 0x50000, 0x500cc, 4358 0x50400, 0x50400, 4359 0x50800, 0x508cc, 4360 0x50c00, 0x50c00, 4361 0x51000, 0x5101c, 4362 0x51300, 0x51308, 4363 }; 4364 4365 if (is_t4(sc)) { 4366 reg_ranges = &t4_reg_ranges[0]; 4367 n = nitems(t4_reg_ranges); 4368 } else { 4369 reg_ranges = &t5_reg_ranges[0]; 4370 n = nitems(t5_reg_ranges); 4371 } 4372 4373 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4374 for (i = 0; i < n; i += 2) 4375 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]); 4376 } 4377 4378 static void 4379 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4380 { 
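/* Pull the port's MAC/MPS counters and the per-channel tunnel congestion drop totals into pi->stats. The work is rate limited: if the previous refresh was less than 250ms ago (see 'interval' below) the function returns without touching the hardware, so callers such as cxgbe_tick() may invoke it freely. */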
4381 int i; 4382 u_int v, tnl_cong_drops; 4383 struct timeval tv; 4384 const struct timeval interval = {0, 250000}; /* 250ms */ 4385 4386 getmicrotime(&tv); 4387 timevalsub(&tv, &interval); 4388 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4389 return; 4390 4391 tnl_cong_drops = 0; 4392 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4393 for (i = 0; i < NCHAN; i++) { 4394 if (pi->rx_chan_map & (1 << i)) { 4395 mtx_lock(&sc->regwin_lock); 4396 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4397 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4398 mtx_unlock(&sc->regwin_lock); 4399 tnl_cong_drops += v; 4400 } 4401 } 4402 pi->tnl_cong_drops = tnl_cong_drops; 4403 getmicrotime(&pi->last_refreshed); 4404 } 4405 4406 static void 4407 cxgbe_tick(void *arg) 4408 { 4409 struct port_info *pi = arg; 4410 struct adapter *sc = pi->adapter; 4411 struct ifnet *ifp = pi->ifp; 4412 4413 PORT_LOCK(pi); 4414 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4415 PORT_UNLOCK(pi); 4416 return; /* without scheduling another callout */ 4417 } 4418 4419 cxgbe_refresh_stats(sc, pi); 4420 4421 callout_schedule(&pi->tick, hz); 4422 PORT_UNLOCK(pi); 4423 } 4424 4425 static void 4426 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4427 { 4428 struct ifnet *vlan; 4429 4430 if (arg != ifp || ifp->if_type != IFT_ETHER) 4431 return; 4432 4433 vlan = VLAN_DEVAT(ifp, vid); 4434 VLAN_SETCOOKIE(vlan, ifp); 4435 } 4436 4437 static int 4438 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4439 { 4440 4441 #ifdef INVARIANTS 4442 panic("%s: opcode 0x%02x on iq %p with payload %p", 4443 __func__, rss->opcode, iq, m); 4444 #else 4445 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n", 4446 __func__, rss->opcode, iq, m); 4447 m_freem(m); 4448 #endif 4449 return (EDOOFUS); 4450 } 4451 4452 int 4453 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 4454 { 4455 uintptr_t *loc, new; 4456 4457 if (opcode >= nitems(sc->cpl_handler)) 4458 return (EINVAL); 4459 4460 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; 4461 loc = (uintptr_t *) &sc->cpl_handler[opcode]; 4462 atomic_store_rel_ptr(loc, new); 4463 4464 return (0); 4465 } 4466 4467 static int 4468 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) 4469 { 4470 4471 #ifdef INVARIANTS 4472 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); 4473 #else 4474 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", 4475 __func__, iq, ctrl); 4476 #endif 4477 return (EDOOFUS); 4478 } 4479 4480 int 4481 t4_register_an_handler(struct adapter *sc, an_handler_t h) 4482 { 4483 uintptr_t *loc, new; 4484 4485 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled; 4486 loc = (uintptr_t *) &sc->an_handler; 4487 atomic_store_rel_ptr(loc, new); 4488 4489 return (0); 4490 } 4491 4492 static int 4493 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) 4494 { 4495 const struct cpl_fw6_msg *cpl = 4496 __containerof(rpl, struct cpl_fw6_msg, data[0]); 4497 4498 #ifdef INVARIANTS 4499 panic("%s: fw_msg type %d", __func__, cpl->type); 4500 #else 4501 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); 4502 #endif 4503 return (EDOOFUS); 4504 } 4505 4506 int 4507 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 4508 { 4509 uintptr_t *loc, new; 4510 4511 if (type >= nitems(sc->fw_msg_handler)) 4512 return (EINVAL); 4513 4514 /* 4515 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 4516 * handler dispatch table. 
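A handler registered here for those subtypes would never be invoked.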
Reject any attempt to install a handler for 4517 * this subtype. 4518 */ 4519 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) 4520 return (EINVAL); 4521 4522 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled; 4523 loc = (uintptr_t *) &sc->fw_msg_handler[type]; 4524 atomic_store_rel_ptr(loc, new); 4525 4526 return (0); 4527 } 4528 4529 static int 4530 t4_sysctls(struct adapter *sc) 4531 { 4532 struct sysctl_ctx_list *ctx; 4533 struct sysctl_oid *oid; 4534 struct sysctl_oid_list *children, *c0; /* %b-style strings for sysctl_bitfield: \20 selects hex output and each \<n> that follows names bit n (bit 1 is the LSB). */ 4535 static char *caps[] = { 4536 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */ 4537 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL" /* caps[1] niccaps */ 4538 "\6HASHFILTER\7ETHOFLD", 4539 "\20\1TOE", /* caps[2] toecaps */ 4540 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */ 4541 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */ 4542 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD" 4543 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD", 4544 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */ 4545 "\4PO_INITIATOR\5PO_TARGET" 4546 }; 4547 static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB"; 4548 4549 ctx = device_get_sysctl_ctx(sc->dev); 4550 4551 /* 4552 * dev.t4nex.X. 4553 */ 4554 oid = device_get_sysctl_tree(sc->dev); 4555 c0 = children = SYSCTL_CHILDREN(oid); 4556 4557 sc->sc_do_rxcopy = 1; 4558 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4559 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4560 4561 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4562 sc->params.nports, "# of ports"); 4563 4564 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4565 NULL, chip_rev(sc), "chip hardware revision"); 4566 4567 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4568 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4569 4570 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4571 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4572 4573 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4574 sc->cfcsum, "config file checksum"); 4575 4576 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4577 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4578 sysctl_bitfield, "A", "available doorbells"); 4579 4580 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps", 4581 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps, 4582 sysctl_bitfield, "A", "available link capabilities"); 4583 4584 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps", 4585 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps, 4586 sysctl_bitfield, "A", "available NIC capabilities"); 4587 4588 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps", 4589 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps, 4590 sysctl_bitfield, "A", "available TCP offload capabilities"); 4591 4592 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps", 4593 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps, 4594 sysctl_bitfield, "A", "available RDMA capabilities"); 4595 4596 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps", 4597 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps, 4598 sysctl_bitfield, "A", "available iSCSI capabilities"); 4599 4600 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps", 4601 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps, 4602 sysctl_bitfield, "A", "available FCoE capabilities"); 4603 4604 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4605 sc->params.vpd.cclk, "core clock frequency (in kHz)"); 4606 4607 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4608 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val, 4609
sizeof(sc->sge.timer_val), sysctl_int_array, "A", 4610 "interrupt holdoff timer values (us)"); 4611 4612 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4613 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val, 4614 sizeof(sc->sge.counter_val), sysctl_int_array, "A", 4615 "interrupt holdoff packet counter values"); 4616 4617 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4618 NULL, sc->tids.nftids, "number of filters"); 4619 4620 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4621 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4622 "chip temperature (in Celsius)"); 4623 4624 t4_sge_sysctls(sc, ctx, children); 4625 4626 sc->lro_timeout = 100; 4627 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4628 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4629 4630 #ifdef SBUF_DRAIN 4631 /* 4632 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4633 */ 4634 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4635 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4636 "logs and miscellaneous information"); 4637 children = SYSCTL_CHILDREN(oid); 4638 4639 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4640 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4641 sysctl_cctrl, "A", "congestion control"); 4642 4643 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4644 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4645 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4646 4647 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4648 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4649 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4650 4651 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4652 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4653 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4654 4655 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4656 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4657 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4658 4659 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4660 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4661 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4662 4663 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4664 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4665 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4666 4667 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4668 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4669 sysctl_cim_la, "A", "CIM logic analyzer"); 4670 4671 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4672 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4673 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4674 4675 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4676 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4677 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4678 4679 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4680 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4681 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4682 4683 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4684 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4685 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4686 4687 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4688 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4689 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4690 4691 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4692 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4693 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4694 4695 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4696 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4697 sysctl_cim_ibq_obq, "A", "CIM 
OBQ 5 (NCSI)"); 4698 4699 if (is_t5(sc)) { 4700 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4701 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4702 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4703 4704 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4705 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4706 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4707 } 4708 4709 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4710 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4711 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4712 4713 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4714 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4715 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4716 4717 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4718 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4719 sysctl_cpl_stats, "A", "CPL statistics"); 4720 4721 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4722 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4723 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4724 4725 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4726 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4727 sysctl_devlog, "A", "firmware's device log"); 4728 4729 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4730 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4731 sysctl_fcoe_stats, "A", "FCoE statistics"); 4732 4733 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4734 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4735 sysctl_hw_sched, "A", "hardware scheduler "); 4736 4737 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4738 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4739 sysctl_l2t, "A", "hardware L2 table"); 4740 4741 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4742 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4743 sysctl_lb_stats, "A", "loopback statistics"); 4744 4745 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4746 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4747 sysctl_meminfo, "A", "memory regions"); 4748 4749 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4750 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4751 sysctl_mps_tcam, "A", "MPS TCAM entries"); 4752 4753 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4754 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4755 sysctl_path_mtus, "A", "path MTUs"); 4756 4757 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4758 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4759 sysctl_pm_stats, "A", "PM statistics"); 4760 4761 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4762 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4763 sysctl_rdma_stats, "A", "RDMA statistics"); 4764 4765 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4766 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4767 sysctl_tcp_stats, "A", "TCP statistics"); 4768 4769 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4770 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4771 sysctl_tids, "A", "TID information"); 4772 4773 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4774 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4775 sysctl_tp_err_stats, "A", "TP error statistics"); 4776 4777 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4778 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4779 sysctl_tp_la, "A", "TP logic analyzer"); 4780 4781 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4782 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4783 sysctl_tx_rate, "A", "Tx rate"); 4784 4785 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4786 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4787 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4788 4789 if (is_t5(sc)) { 4790 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4791 
CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4792 sysctl_wcwr_stats, "A", "write combined work requests"); 4793 } 4794 #endif 4795 4796 #ifdef TCP_OFFLOAD 4797 if (is_offload(sc)) { 4798 /* 4799 * dev.t4nex.X.toe. 4800 */ 4801 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4802 NULL, "TOE parameters"); 4803 children = SYSCTL_CHILDREN(oid); 4804 4805 sc->tt.sndbuf = 256 * 1024; 4806 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4807 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4808 4809 sc->tt.ddp = 0; 4810 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4811 &sc->tt.ddp, 0, "DDP allowed"); 4812 4813 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 4814 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 4815 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 4816 4817 sc->tt.ddp_thres = 4818 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 4819 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 4820 &sc->tt.ddp_thres, 0, "DDP threshold"); 4821 4822 sc->tt.rx_coalesce = 1; 4823 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 4824 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 4825 4826 sc->tt.tx_align = 1; 4827 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 4828 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 4829 } 4830 #endif 4831 4832 4833 return (0); 4834 } 4835 4836 static int 4837 cxgbe_sysctls(struct port_info *pi) 4838 { 4839 struct sysctl_ctx_list *ctx; 4840 struct sysctl_oid *oid; 4841 struct sysctl_oid_list *children; 4842 struct adapter *sc = pi->adapter; 4843 4844 ctx = device_get_sysctl_ctx(pi->dev); 4845 4846 /* 4847 * dev.cxgbe.X. 4848 */ 4849 oid = device_get_sysctl_tree(pi->dev); 4850 children = SYSCTL_CHILDREN(oid); 4851 4852 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 4853 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 4854 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 4855 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 4856 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 4857 "PHY temperature (in Celsius)"); 4858 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 4859 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 4860 "PHY firmware version"); 4861 } 4862 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 4863 &pi->nrxq, 0, "# of rx queues"); 4864 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 4865 &pi->ntxq, 0, "# of tx queues"); 4866 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 4867 &pi->first_rxq, 0, "index of first rx queue"); 4868 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 4869 &pi->first_txq, 0, "index of first tx queue"); 4870 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT | 4871 CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU", 4872 "Reserve queue 0 for non-flowid packets"); 4873 4874 #ifdef TCP_OFFLOAD 4875 if (is_offload(sc)) { 4876 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 4877 &pi->nofldrxq, 0, 4878 "# of rx queues for offloaded TCP connections"); 4879 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 4880 &pi->nofldtxq, 0, 4881 "# of tx queues for offloaded TCP connections"); 4882 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 4883 CTLFLAG_RD, &pi->first_ofld_rxq, 0, 4884 "index of first TOE rx queue"); 4885 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 4886 CTLFLAG_RD, &pi->first_ofld_txq, 0, 4887 "index of first TOE tx queue"); 4888 } 4889 
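/* Illustrative userland usage; the device unit and the value shown are hypothetical: "sysctl dev.cxgbe.0.nofldrxq" -> "dev.cxgbe.0.nofldrxq: 2" */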
#endif 4890 #ifdef DEV_NETMAP 4891 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 4892 &pi->nnmrxq, 0, "# of rx queues for netmap"); 4893 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 4894 &pi->nnmtxq, 0, "# of tx queues for netmap"); 4895 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 4896 CTLFLAG_RD, &pi->first_nm_rxq, 0, 4897 "index of first netmap rx queue"); 4898 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 4899 CTLFLAG_RD, &pi->first_nm_txq, 0, 4900 "index of first netmap tx queue"); 4901 #endif 4902 4903 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 4904 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I", 4905 "holdoff timer index"); 4906 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 4907 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I", 4908 "holdoff packet counter index"); 4909 4910 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 4911 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I", 4912 "rx queue size"); 4913 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 4914 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I", 4915 "tx queue size"); 4916 4917 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 4918 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings, 4919 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 4920 4921 /* 4922 * dev.cxgbe.X.stats. 4923 */ 4924 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 4925 NULL, "port statistics"); 4926 children = SYSCTL_CHILDREN(oid); 4927 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 4928 &pi->tx_parse_error, 0, 4929 "# of tx packets with invalid length or # of segments"); 4930 4931 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 4932 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 4933 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 4934 sysctl_handle_t4_reg64, "QU", desc) 4935 4936 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 4937 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 4938 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 4939 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 4940 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 4941 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 4942 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 4943 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 4944 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 4945 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 4946 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 4947 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 4948 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 4949 "# of tx frames in this range", 4950 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 4951 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 4952 "# of tx frames in this range", 4953 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 4954 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 4955 "# of tx frames in this range", 4956 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 4957 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 4958 "# of tx frames in this range", 4959 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 4960 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 4961 "# of tx frames in this range", 4962 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 4963 SYSCTL_ADD_T4_REG64(pi, 
"tx_frames_1024_1518", 4964 "# of tx frames in this range", 4965 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 4966 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 4967 "# of tx frames in this range", 4968 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 4969 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 4970 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 4971 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 4972 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 4973 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 4974 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 4975 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 4976 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 4977 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 4978 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 4979 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 4980 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 4981 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 4982 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 4983 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 4984 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 4985 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 4986 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 4987 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 4988 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 4989 4990 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 4991 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 4992 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 4993 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 4994 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 4995 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 4996 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 4997 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 4998 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 4999 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5000 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5001 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5002 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5003 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5004 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5005 "# of frames received with bad FCS", 5006 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5007 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5008 "# of frames received with length error", 5009 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5010 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5011 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5012 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5013 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5014 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5015 "# of rx frames in this range", 5016 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5017 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5018 "# of rx frames in this range", 5019 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5020 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5021 "# of rx frames in this range", 
5022 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5023 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5024 "# of rx frames in this range", 5025 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5026 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5027 "# of rx frames in this range", 5028 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5029 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5030 "# of rx frames in this range", 5031 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5032 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5033 "# of rx frames in this range", 5034 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5035 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5036 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5037 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5038 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5039 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5040 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5041 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5042 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5043 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5044 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5045 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5046 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5047 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5048 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5049 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5050 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5051 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5052 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5053 5054 #undef SYSCTL_ADD_T4_REG64 5055 5056 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5057 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5058 &pi->stats.name, desc) 5059 5060 /* We get these from port_stats and they may be stale by up to 1s */ 5061 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5062 "# drops due to buffer-group 0 overflows"); 5063 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5064 "# drops due to buffer-group 1 overflows"); 5065 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5066 "# drops due to buffer-group 2 overflows"); 5067 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5068 "# drops due to buffer-group 3 overflows"); 5069 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5070 "# of buffer-group 0 truncated packets"); 5071 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5072 "# of buffer-group 1 truncated packets"); 5073 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5074 "# of buffer-group 2 truncated packets"); 5075 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5076 "# of buffer-group 3 truncated packets"); 5077 5078 #undef SYSCTL_ADD_T4_PORTSTAT 5079 5080 return (0); 5081 } 5082 5083 static int 5084 sysctl_int_array(SYSCTL_HANDLER_ARGS) 5085 { /* arg1 points at an array of ints; arg2 is the array's size in bytes. */ 5086 int rc, *i; 5087 struct sbuf sb; 5088 5089 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 5090 for (i = arg1; arg2; arg2 -= sizeof(int), i++) 5091 sbuf_printf(&sb, "%d ", *i); 5092 sbuf_trim(&sb); 5093 sbuf_finish(&sb); 5094 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 5095 sbuf_delete(&sb); 5096 return (rc); 5097 } 5098 5099 static int 5100 sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5101 { 5102 int rc; 5103 struct sbuf *sb; 5104 5105 rc = sysctl_wire_old_buffer(req, 0); 5106 if (rc != 0) 5107 return (rc); 5108 5109 sb
= sbuf_new_for_sysctl(NULL, NULL, 128, req); 5110 if (sb == NULL) 5111 return (ENOMEM); 5112 5113 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5114 rc = sbuf_finish(sb); 5115 sbuf_delete(sb); 5116 5117 return (rc); 5118 } 5119 5120 static int 5121 sysctl_btphy(SYSCTL_HANDLER_ARGS) 5122 { 5123 struct port_info *pi = arg1; 5124 int op = arg2; 5125 struct adapter *sc = pi->adapter; 5126 u_int v; 5127 int rc; 5128 5129 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt"); 5130 if (rc) 5131 return (rc); 5132 /* XXX: magic numbers */ 5133 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, 5134 &v); 5135 end_synchronized_op(sc, 0); 5136 if (rc) 5137 return (rc); 5138 if (op == 0) 5139 v /= 256; 5140 5141 rc = sysctl_handle_int(oidp, &v, 0, req); 5142 return (rc); 5143 } 5144 5145 static int 5146 sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5147 { 5148 struct port_info *pi = arg1; 5149 int rc, val; 5150 5151 val = pi->rsrv_noflowq; 5152 rc = sysctl_handle_int(oidp, &val, 0, req); 5153 if (rc != 0 || req->newptr == NULL) 5154 return (rc); 5155 5156 if ((val >= 1) && (pi->ntxq > 1)) 5157 pi->rsrv_noflowq = 1; 5158 else 5159 pi->rsrv_noflowq = 0; 5160 5161 return (rc); 5162 } 5163 5164 static int 5165 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5166 { 5167 struct port_info *pi = arg1; 5168 struct adapter *sc = pi->adapter; 5169 int idx, rc, i; 5170 struct sge_rxq *rxq; 5171 #ifdef TCP_OFFLOAD 5172 struct sge_ofld_rxq *ofld_rxq; 5173 #endif 5174 uint8_t v; 5175 5176 idx = pi->tmr_idx; 5177 5178 rc = sysctl_handle_int(oidp, &idx, 0, req); 5179 if (rc != 0 || req->newptr == NULL) 5180 return (rc); 5181 5182 if (idx < 0 || idx >= SGE_NTIMERS) 5183 return (EINVAL); 5184 5185 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5186 "t4tmr"); 5187 if (rc) 5188 return (rc); 5189 5190 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1); 5191 for_each_rxq(pi, i, rxq) { 5192 #ifdef atomic_store_rel_8 5193 atomic_store_rel_8(&rxq->iq.intr_params, v); 5194 #else 5195 rxq->iq.intr_params = v; 5196 #endif 5197 } 5198 #ifdef TCP_OFFLOAD 5199 for_each_ofld_rxq(pi, i, ofld_rxq) { 5200 #ifdef atomic_store_rel_8 5201 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5202 #else 5203 ofld_rxq->iq.intr_params = v; 5204 #endif 5205 } 5206 #endif 5207 pi->tmr_idx = idx; 5208 5209 end_synchronized_op(sc, LOCK_HELD); 5210 return (0); 5211 } 5212 5213 static int 5214 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5215 { 5216 struct port_info *pi = arg1; 5217 struct adapter *sc = pi->adapter; 5218 int idx, rc; 5219 5220 idx = pi->pktc_idx; 5221 5222 rc = sysctl_handle_int(oidp, &idx, 0, req); 5223 if (rc != 0 || req->newptr == NULL) 5224 return (rc); 5225 5226 if (idx < -1 || idx >= SGE_NCOUNTERS) 5227 return (EINVAL); 5228 5229 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5230 "t4pktc"); 5231 if (rc) 5232 return (rc); 5233 5234 if (pi->flags & PORT_INIT_DONE) 5235 rc = EBUSY; /* cannot be changed once the queues are created */ 5236 else 5237 pi->pktc_idx = idx; 5238 5239 end_synchronized_op(sc, LOCK_HELD); 5240 return (rc); 5241 } 5242 5243 static int 5244 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5245 { 5246 struct port_info *pi = arg1; 5247 struct adapter *sc = pi->adapter; 5248 int qsize, rc; 5249 5250 qsize = pi->qsize_rxq; 5251 5252 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5253 if (rc != 0 || req->newptr == NULL) 5254 return (rc); 5255 5256 if (qsize < 128 || (qsize & 7)) 5257 return (EINVAL); 5258 5259 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | 
SLEEP_OK | INTR_OK, 5260 "t4rxqs"); 5261 if (rc) 5262 return (rc); 5263 5264 if (pi->flags & PORT_INIT_DONE) 5265 rc = EBUSY; /* cannot be changed once the queues are created */ 5266 else 5267 pi->qsize_rxq = qsize; 5268 5269 end_synchronized_op(sc, LOCK_HELD); 5270 return (rc); 5271 } 5272 5273 static int 5274 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5275 { 5276 struct port_info *pi = arg1; 5277 struct adapter *sc = pi->adapter; 5278 int qsize, rc; 5279 5280 qsize = pi->qsize_txq; 5281 5282 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5283 if (rc != 0 || req->newptr == NULL) 5284 return (rc); 5285 5286 if (qsize < 128 || qsize > 65536) 5287 return (EINVAL); 5288 5289 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5290 "t4txqs"); 5291 if (rc) 5292 return (rc); 5293 5294 if (pi->flags & PORT_INIT_DONE) 5295 rc = EBUSY; /* cannot be changed once the queues are created */ 5296 else 5297 pi->qsize_txq = qsize; 5298 5299 end_synchronized_op(sc, LOCK_HELD); 5300 return (rc); 5301 } 5302 5303 static int 5304 sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5305 { 5306 struct port_info *pi = arg1; 5307 struct adapter *sc = pi->adapter; 5308 struct link_config *lc = &pi->link_cfg; 5309 int rc; 5310 5311 if (req->newptr == NULL) { 5312 struct sbuf *sb; 5313 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5314 5315 rc = sysctl_wire_old_buffer(req, 0); 5316 if (rc != 0) 5317 return(rc); 5318 5319 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5320 if (sb == NULL) 5321 return (ENOMEM); 5322 5323 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5324 rc = sbuf_finish(sb); 5325 sbuf_delete(sb); 5326 } else { 5327 char s[2]; 5328 int n; 5329 5330 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5331 s[1] = 0; 5332 5333 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5334 if (rc != 0) 5335 return(rc); 5336 5337 if (s[1] != 0) 5338 return (EINVAL); 5339 if (s[0] < '0' || s[0] > '9') 5340 return (EINVAL); /* not a number */ 5341 n = s[0] - '0'; 5342 if (n & ~(PAUSE_TX | PAUSE_RX)) 5343 return (EINVAL); /* some other bit is set too */ 5344 5345 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4PAUSE"); 5346 if (rc) 5347 return (rc); 5348 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5349 int link_ok = lc->link_ok; 5350 5351 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5352 lc->requested_fc |= n; 5353 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, lc); 5354 lc->link_ok = link_ok; /* restore */ 5355 } 5356 end_synchronized_op(sc, 0); 5357 } 5358 5359 return (rc); 5360 } 5361 5362 static int 5363 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5364 { 5365 struct adapter *sc = arg1; 5366 int reg = arg2; 5367 uint64_t val; 5368 5369 val = t4_read_reg64(sc, reg); 5370 5371 return (sysctl_handle_64(oidp, &val, 0, req)); 5372 } 5373 5374 static int 5375 sysctl_temperature(SYSCTL_HANDLER_ARGS) 5376 { 5377 struct adapter *sc = arg1; 5378 int rc, t; 5379 uint32_t param, val; 5380 5381 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5382 if (rc) 5383 return (rc); 5384 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5385 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5386 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5387 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5388 end_synchronized_op(sc, 0); 5389 if (rc) 5390 return (rc); 5391 5392 /* unknown is returned as 0 but we display -1 in that case */ 5393 t = val == 0 ? 
-1 : val; 5394 5395 rc = sysctl_handle_int(oidp, &t, 0, req); 5396 return (rc); 5397 } 5398 5399 #ifdef SBUF_DRAIN 5400 static int 5401 sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5402 { 5403 struct adapter *sc = arg1; 5404 struct sbuf *sb; 5405 int rc, i; 5406 uint16_t incr[NMTUS][NCCTRL_WIN]; 5407 static const char *dec_fac[] = { 5408 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5409 "0.9375" 5410 }; 5411 5412 rc = sysctl_wire_old_buffer(req, 0); 5413 if (rc != 0) 5414 return (rc); 5415 5416 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5417 if (sb == NULL) 5418 return (ENOMEM); 5419 5420 t4_read_cong_tbl(sc, incr); 5421 5422 for (i = 0; i < NCCTRL_WIN; ++i) { 5423 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5424 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5425 incr[5][i], incr[6][i], incr[7][i]); 5426 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5427 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5428 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5429 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5430 } 5431 5432 rc = sbuf_finish(sb); 5433 sbuf_delete(sb); 5434 5435 return (rc); 5436 } 5437 5438 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5439 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5440 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5441 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5442 }; 5443 5444 static int 5445 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5446 { 5447 struct adapter *sc = arg1; 5448 struct sbuf *sb; 5449 int rc, i, n, qid = arg2; 5450 uint32_t *buf, *p; 5451 char *qtype; 5452 u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5; 5453 5454 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5455 ("%s: bad qid %d\n", __func__, qid)); 5456 5457 if (qid < CIM_NUM_IBQ) { 5458 /* inbound queue */ 5459 qtype = "IBQ"; 5460 n = 4 * CIM_IBQ_SIZE; 5461 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5462 rc = t4_read_cim_ibq(sc, qid, buf, n); 5463 } else { 5464 /* outbound queue */ 5465 qtype = "OBQ"; 5466 qid -= CIM_NUM_IBQ; 5467 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5468 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5469 rc = t4_read_cim_obq(sc, qid, buf, n); 5470 } 5471 5472 if (rc < 0) { 5473 rc = -rc; 5474 goto done; 5475 } 5476 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5477 5478 rc = sysctl_wire_old_buffer(req, 0); 5479 if (rc != 0) 5480 goto done; 5481 5482 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5483 if (sb == NULL) { 5484 rc = ENOMEM; 5485 goto done; 5486 } 5487 5488 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 5489 for (i = 0, p = buf; i < n; i += 16, p += 4) 5490 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5491 p[2], p[3]); 5492 5493 rc = sbuf_finish(sb); 5494 sbuf_delete(sb); 5495 done: 5496 free(buf, M_CXGBE); 5497 return (rc); 5498 } 5499 5500 static int 5501 sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5502 { 5503 struct adapter *sc = arg1; 5504 u_int cfg; 5505 struct sbuf *sb; 5506 uint32_t *buf, *p; 5507 int rc; 5508 5509 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5510 if (rc != 0) 5511 return (rc); 5512 5513 rc = sysctl_wire_old_buffer(req, 0); 5514 if (rc != 0) 5515 return (rc); 5516 5517 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5518 if (sb == NULL) 5519 return (ENOMEM); 5520 5521 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5522 M_ZERO | M_WAITOK); 5523 5524 rc = -t4_cim_read_la(sc, buf, NULL); 5525 if 
(rc != 0) { 5526 sbuf_delete(sb); /* the done: label frees only buf; don't leak sb */ goto done; } 5527 5528 sbuf_printf(sb, "Status Data PC%s", 5529 cfg & F_UPDBGLACAPTPCONLY ? "" : 5530 " LS0Stat LS0Addr LS0Data"); 5531 5532 KASSERT((sc->params.cim_la_size & 7) == 0, 5533 ("%s: p will walk off the end of buf", __func__)); 5534 5535 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) { 5536 if (cfg & F_UPDBGLACAPTPCONLY) { 5537 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5538 p[6], p[7]); 5539 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5540 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5541 p[4] & 0xff, p[5] >> 8); 5542 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5543 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5544 p[1] & 0xf, p[2] >> 4); 5545 } else { 5546 sbuf_printf(sb, 5547 "\n %02x %x%07x %x%07x %08x %08x " 5548 "%08x%08x%08x%08x", 5549 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5550 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5551 p[6], p[7]); 5552 } 5553 } 5554 5555 rc = sbuf_finish(sb); 5556 sbuf_delete(sb); 5557 done: 5558 free(buf, M_CXGBE); 5559 return (rc); 5560 } 5561 5562 static int 5563 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 5564 { 5565 struct adapter *sc = arg1; 5566 u_int i; 5567 struct sbuf *sb; 5568 uint32_t *buf, *p; 5569 int rc; 5570 5571 rc = sysctl_wire_old_buffer(req, 0); 5572 if (rc != 0) 5573 return (rc); 5574 5575 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5576 if (sb == NULL) 5577 return (ENOMEM); 5578 5579 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 5580 M_ZERO | M_WAITOK); 5581 5582 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 5583 p = buf; 5584 5585 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5586 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 5587 p[1], p[0]); 5588 } 5589 5590 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 5591 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5592 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 5593 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 5594 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 5595 (p[1] >> 2) | ((p[2] & 3) << 30), 5596 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 5597 p[0] & 1); 5598 } 5599 5600 rc = sbuf_finish(sb); 5601 sbuf_delete(sb); 5602 free(buf, M_CXGBE); 5603 return (rc); 5604 } 5605 5606 static int 5607 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 5608 { 5609 struct adapter *sc = arg1; 5610 u_int i; 5611 struct sbuf *sb; 5612 uint32_t *buf, *p; 5613 int rc; 5614 5615 rc = sysctl_wire_old_buffer(req, 0); 5616 if (rc != 0) 5617 return (rc); 5618 5619 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5620 if (sb == NULL) 5621 return (ENOMEM); 5622 5623 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 5624 M_ZERO | M_WAITOK); 5625 5626 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 5627 p = buf; 5628 5629 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 5630 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { /* PIF LA entries, to match the allocation above */ 5631 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 5632 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 5633 p[4], p[3], p[2], p[1], p[0]); 5634 } 5635 5636 sbuf_printf(sb, "\n\nCntl ID Data"); 5637 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5638 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 5639 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 5640 } 5641 5642 rc = sbuf_finish(sb); 5643 sbuf_delete(sb); 5644 free(buf, M_CXGBE); 5645 return (rc); 5646 } 5647 5648 static int 5649 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 5650 { 5651 struct adapter *sc = arg1; 5652 struct sbuf *sb; 5653 int rc, i; 5654 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5655 uint16_t
size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5656 uint16_t thres[CIM_NUM_IBQ]; 5657 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 5658 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 5659 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 5660 5661 if (is_t4(sc)) { 5662 cim_num_obq = CIM_NUM_OBQ; 5663 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 5664 obq_rdaddr = A_UP_OBQ_0_REALADDR; 5665 } else { 5666 cim_num_obq = CIM_NUM_OBQ_T5; 5667 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 5668 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 5669 } 5670 nq = CIM_NUM_IBQ + cim_num_obq; 5671 5672 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 5673 if (rc == 0) 5674 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 5675 if (rc != 0) 5676 return (rc); 5677 5678 t4_read_cimq_cfg(sc, base, size, thres); 5679 5680 rc = sysctl_wire_old_buffer(req, 0); 5681 if (rc != 0) 5682 return (rc); 5683 5684 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5685 if (sb == NULL) 5686 return (ENOMEM); 5687 5688 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 5689 5690 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 5691 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 5692 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 5693 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5694 G_QUEREMFLITS(p[2]) * 16); 5695 for ( ; i < nq; i++, p += 4, wr += 2) 5696 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 5697 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 5698 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5699 G_QUEREMFLITS(p[2]) * 16); 5700 5701 rc = sbuf_finish(sb); 5702 sbuf_delete(sb); 5703 5704 return (rc); 5705 } 5706 5707 static int 5708 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 5709 { 5710 struct adapter *sc = arg1; 5711 struct sbuf *sb; 5712 int rc; 5713 struct tp_cpl_stats stats; 5714 5715 rc = sysctl_wire_old_buffer(req, 0); 5716 if (rc != 0) 5717 return (rc); 5718 5719 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5720 if (sb == NULL) 5721 return (ENOMEM); 5722 5723 t4_tp_get_cpl_stats(sc, &stats); 5724 5725 sbuf_printf(sb, " channel 0 channel 1 channel 2 " 5726 "channel 3\n"); 5727 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n", 5728 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 5729 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u", 5730 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 5731 5732 rc = sbuf_finish(sb); 5733 sbuf_delete(sb); 5734 5735 return (rc); 5736 } 5737 5738 static int 5739 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 5740 { 5741 struct adapter *sc = arg1; 5742 struct sbuf *sb; 5743 int rc; 5744 struct tp_usm_stats stats; 5745 5746 rc = sysctl_wire_old_buffer(req, 0); 5747 if (rc != 0) 5748 return(rc); 5749 5750 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5751 if (sb == NULL) 5752 return (ENOMEM); 5753 5754 t4_get_usm_stats(sc, &stats); 5755 5756 sbuf_printf(sb, "Frames: %u\n", stats.frames); 5757 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 5758 sbuf_printf(sb, "Drops: %u", stats.drops); 5759 5760 rc = sbuf_finish(sb); 5761 sbuf_delete(sb); 5762 5763 return (rc); 5764 } 5765 5766 const char *devlog_level_strings[] = { 5767 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 5768 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 5769 [FW_DEVLOG_LEVEL_ERR] = "ERR", 5770 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 5771 [FW_DEVLOG_LEVEL_INFO] = "INFO", 5772 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 5773 }; 5774 5775 const char *devlog_facility_strings[] = { 5776 [FW_DEVLOG_FACILITY_CORE] = "CORE", 5777 [FW_DEVLOG_FACILITY_CF] = "CF", 5778 
[FW_DEVLOG_FACILITY_SCHED] = "SCHED", 5779 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 5780 [FW_DEVLOG_FACILITY_RES] = "RES", 5781 [FW_DEVLOG_FACILITY_HW] = "HW", 5782 [FW_DEVLOG_FACILITY_FLR] = "FLR", 5783 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 5784 [FW_DEVLOG_FACILITY_PHY] = "PHY", 5785 [FW_DEVLOG_FACILITY_MAC] = "MAC", 5786 [FW_DEVLOG_FACILITY_PORT] = "PORT", 5787 [FW_DEVLOG_FACILITY_VI] = "VI", 5788 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 5789 [FW_DEVLOG_FACILITY_ACL] = "ACL", 5790 [FW_DEVLOG_FACILITY_TM] = "TM", 5791 [FW_DEVLOG_FACILITY_QFC] = "QFC", 5792 [FW_DEVLOG_FACILITY_DCB] = "DCB", 5793 [FW_DEVLOG_FACILITY_ETH] = "ETH", 5794 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 5795 [FW_DEVLOG_FACILITY_RI] = "RI", 5796 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 5797 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 5798 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 5799 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" 5800 }; 5801 5802 static int 5803 sysctl_devlog(SYSCTL_HANDLER_ARGS) 5804 { 5805 struct adapter *sc = arg1; 5806 struct devlog_params *dparams = &sc->params.devlog; 5807 struct fw_devlog_e *buf, *e; 5808 int i, j, rc, nentries, first = 0, m; 5809 struct sbuf *sb; 5810 uint64_t ftstamp = UINT64_MAX; 5811 5812 if (dparams->start == 0) { 5813 dparams->memtype = FW_MEMTYPE_EDC0; 5814 dparams->start = 0x84000; 5815 dparams->size = 32768; 5816 } 5817 5818 nentries = dparams->size / sizeof(struct fw_devlog_e); 5819 5820 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 5821 if (buf == NULL) 5822 return (ENOMEM); 5823 5824 m = fwmtype_to_hwmtype(dparams->memtype); 5825 rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf); 5826 if (rc != 0) 5827 goto done; 5828 5829 for (i = 0; i < nentries; i++) { 5830 e = &buf[i]; 5831 5832 if (e->timestamp == 0) 5833 break; /* end */ 5834 5835 e->timestamp = be64toh(e->timestamp); 5836 e->seqno = be32toh(e->seqno); 5837 for (j = 0; j < 8; j++) 5838 e->params[j] = be32toh(e->params[j]); 5839 5840 if (e->timestamp < ftstamp) { 5841 ftstamp = e->timestamp; 5842 first = i; 5843 } 5844 } 5845 5846 if (buf[first].timestamp == 0) 5847 goto done; /* nothing in the log */ 5848 5849 rc = sysctl_wire_old_buffer(req, 0); 5850 if (rc != 0) 5851 goto done; 5852 5853 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5854 if (sb == NULL) { 5855 rc = ENOMEM; 5856 goto done; 5857 } 5858 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 5859 "Seq#", "Tstamp", "Level", "Facility", "Message"); 5860 5861 i = first; 5862 do { 5863 e = &buf[i]; 5864 if (e->timestamp == 0) 5865 break; /* end */ 5866 5867 sbuf_printf(sb, "%10d %15ju %8s %8s ", 5868 e->seqno, e->timestamp, 5869 (e->level < nitems(devlog_level_strings) ? 5870 devlog_level_strings[e->level] : "UNKNOWN"), 5871 (e->facility < nitems(devlog_facility_strings) ? 
5872 devlog_facility_strings[e->facility] : "UNKNOWN")); 5873 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 5874 e->params[2], e->params[3], e->params[4], 5875 e->params[5], e->params[6], e->params[7]); 5876 5877 if (++i == nentries) 5878 i = 0; 5879 } while (i != first); 5880 5881 rc = sbuf_finish(sb); 5882 sbuf_delete(sb); 5883 done: 5884 free(buf, M_CXGBE); 5885 return (rc); 5886 } 5887 5888 static int 5889 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 5890 { 5891 struct adapter *sc = arg1; 5892 struct sbuf *sb; 5893 int rc; 5894 struct tp_fcoe_stats stats[4]; 5895 5896 rc = sysctl_wire_old_buffer(req, 0); 5897 if (rc != 0) 5898 return (rc); 5899 5900 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5901 if (sb == NULL) 5902 return (ENOMEM); 5903 5904 t4_get_fcoe_stats(sc, 0, &stats[0]); 5905 t4_get_fcoe_stats(sc, 1, &stats[1]); 5906 t4_get_fcoe_stats(sc, 2, &stats[2]); 5907 t4_get_fcoe_stats(sc, 3, &stats[3]); 5908 5909 sbuf_printf(sb, " channel 0 channel 1 " 5910 "channel 2 channel 3\n"); 5911 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n", 5912 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP, 5913 stats[3].octetsDDP); 5914 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP, 5915 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP); 5916 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u", 5917 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop, 5918 stats[3].framesDrop); 5919 5920 rc = sbuf_finish(sb); 5921 sbuf_delete(sb); 5922 5923 return (rc); 5924 } 5925 5926 static int 5927 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 5928 { 5929 struct adapter *sc = arg1; 5930 struct sbuf *sb; 5931 int rc, i; 5932 unsigned int map, kbps, ipg, mode; 5933 unsigned int pace_tab[NTX_SCHED]; 5934 5935 rc = sysctl_wire_old_buffer(req, 0); 5936 if (rc != 0) 5937 return (rc); 5938 5939 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5940 if (sb == NULL) 5941 return (ENOMEM); 5942 5943 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 5944 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 5945 t4_read_pace_tbl(sc, pace_tab); 5946 5947 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 5948 "Class IPG (0.1 ns) Flow IPG (us)"); 5949 5950 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 5951 t4_get_tx_sched(sc, i, &kbps, &ipg); 5952 sbuf_printf(sb, "\n %u %-5s %u ", i, 5953 (mode & (1 << i)) ? 
"flow" : "class", map & 3); 5954 if (kbps) 5955 sbuf_printf(sb, "%9u ", kbps); 5956 else 5957 sbuf_printf(sb, " disabled "); 5958 5959 if (ipg) 5960 sbuf_printf(sb, "%13u ", ipg); 5961 else 5962 sbuf_printf(sb, " disabled "); 5963 5964 if (pace_tab[i]) 5965 sbuf_printf(sb, "%10u", pace_tab[i]); 5966 else 5967 sbuf_printf(sb, " disabled"); 5968 } 5969 5970 rc = sbuf_finish(sb); 5971 sbuf_delete(sb); 5972 5973 return (rc); 5974 } 5975 5976 static int 5977 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 5978 { 5979 struct adapter *sc = arg1; 5980 struct sbuf *sb; 5981 int rc, i, j; 5982 uint64_t *p0, *p1; 5983 struct lb_port_stats s[2]; 5984 static const char *stat_name[] = { 5985 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 5986 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 5987 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 5988 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 5989 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 5990 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 5991 "BG2FramesTrunc:", "BG3FramesTrunc:" 5992 }; 5993 5994 rc = sysctl_wire_old_buffer(req, 0); 5995 if (rc != 0) 5996 return (rc); 5997 5998 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5999 if (sb == NULL) 6000 return (ENOMEM); 6001 6002 memset(s, 0, sizeof(s)); 6003 6004 for (i = 0; i < 4; i += 2) { 6005 t4_get_lb_stats(sc, i, &s[0]); 6006 t4_get_lb_stats(sc, i + 1, &s[1]); 6007 6008 p0 = &s[0].octets; 6009 p1 = &s[1].octets; 6010 sbuf_printf(sb, "%s Loopback %u" 6011 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 6012 6013 for (j = 0; j < nitems(stat_name); j++) 6014 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6015 *p0++, *p1++); 6016 } 6017 6018 rc = sbuf_finish(sb); 6019 sbuf_delete(sb); 6020 6021 return (rc); 6022 } 6023 6024 static int 6025 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6026 { 6027 int rc = 0; 6028 struct port_info *pi = arg1; 6029 struct sbuf *sb; 6030 static const char *linkdnreasons[] = { 6031 "non-specific", "remote fault", "autoneg failed", "reserved3", 6032 "PHY overheated", "unknown", "rx los", "reserved7" 6033 }; 6034 6035 rc = sysctl_wire_old_buffer(req, 0); 6036 if (rc != 0) 6037 return(rc); 6038 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6039 if (sb == NULL) 6040 return (ENOMEM); 6041 6042 if (pi->linkdnrc < 0) 6043 sbuf_printf(sb, "n/a"); 6044 else if (pi->linkdnrc < nitems(linkdnreasons)) 6045 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]); 6046 else 6047 sbuf_printf(sb, "%d", pi->linkdnrc); 6048 6049 rc = sbuf_finish(sb); 6050 sbuf_delete(sb); 6051 6052 return (rc); 6053 } 6054 6055 struct mem_desc { 6056 unsigned int base; 6057 unsigned int limit; 6058 unsigned int idx; 6059 }; 6060 6061 static int 6062 mem_desc_cmp(const void *a, const void *b) 6063 { 6064 return ((const struct mem_desc *)a)->base - 6065 ((const struct mem_desc *)b)->base; 6066 } 6067 6068 static void 6069 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6070 unsigned int to) 6071 { 6072 unsigned int size; 6073 6074 size = to - from + 1; 6075 if (size == 0) 6076 return; 6077 6078 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6079 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6080 } 6081 6082 static int 6083 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6084 { 6085 struct adapter *sc = arg1; 6086 struct sbuf *sb; 6087 int rc, i, n; 6088 uint32_t lo, hi, used, alloc; 6089 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6090 static const char *region[] = 
{ 6091 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6092 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6093 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6094 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6095 "RQUDP region:", "PBL region:", "TXPBL region:", 6096 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6097 "On-chip queues:" 6098 }; 6099 struct mem_desc avail[4]; 6100 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6101 struct mem_desc *md = mem; 6102 6103 rc = sysctl_wire_old_buffer(req, 0); 6104 if (rc != 0) 6105 return (rc); 6106 6107 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6108 if (sb == NULL) 6109 return (ENOMEM); 6110 6111 for (i = 0; i < nitems(mem); i++) { 6112 mem[i].limit = 0; 6113 mem[i].idx = i; 6114 } 6115 6116 /* Find and sort the populated memory ranges */ 6117 i = 0; 6118 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6119 if (lo & F_EDRAM0_ENABLE) { 6120 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6121 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6122 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6123 avail[i].idx = 0; 6124 i++; 6125 } 6126 if (lo & F_EDRAM1_ENABLE) { 6127 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6128 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6129 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6130 avail[i].idx = 1; 6131 i++; 6132 } 6133 if (lo & F_EXT_MEM_ENABLE) { 6134 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6135 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6136 avail[i].limit = avail[i].base + 6137 (G_EXT_MEM_SIZE(hi) << 20); 6138 avail[i].idx = is_t4(sc) ? 2 : 3; /* Call it MC for T4 */ 6139 i++; 6140 } 6141 if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) { 6142 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6143 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6144 avail[i].limit = avail[i].base + 6145 (G_EXT_MEM1_SIZE(hi) << 20); 6146 avail[i].idx = 4; 6147 i++; 6148 } 6149 if (!i) /* no memory available */ 6150 return 0; 6151 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6152 6153 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6154 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6155 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6156 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6157 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6158 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6159 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6160 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6161 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6162 6163 /* the next few have explicit upper bounds */ 6164 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6165 md->limit = md->base - 1 + 6166 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6167 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6168 md++; 6169 6170 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6171 md->limit = md->base - 1 + 6172 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6173 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6174 md++; 6175 6176 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6177 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 6178 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6179 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1; 6180 } else { 6181 md->base = 0; 6182 md->idx = nitems(region); /* hide it */ 6183 } 6184 md++; 6185 6186 #define ulp_region(reg) \ 6187 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 6188 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 6189 6190 
ulp_region(RX_ISCSI); 6191 ulp_region(RX_TDDP); 6192 ulp_region(TX_TPT); 6193 ulp_region(RX_STAG); 6194 ulp_region(RX_RQ); 6195 ulp_region(RX_RQUDP); 6196 ulp_region(RX_PBL); 6197 ulp_region(TX_PBL); 6198 #undef ulp_region 6199 6200 md->base = 0; 6201 md->idx = nitems(region); 6202 if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) { 6203 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR)); 6204 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc, 6205 A_SGE_DBVFIFO_SIZE))) << 2) - 1; 6206 } 6207 md++; 6208 6209 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6210 md->limit = md->base + sc->tids.ntids - 1; 6211 md++; 6212 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6213 md->limit = md->base + sc->tids.ntids - 1; 6214 md++; 6215 6216 md->base = sc->vres.ocq.start; 6217 if (sc->vres.ocq.size) 6218 md->limit = md->base + sc->vres.ocq.size - 1; 6219 else 6220 md->idx = nitems(region); /* hide it */ 6221 md++; 6222 6223 /* add any address-space holes, there can be up to 3 */ 6224 for (n = 0; n < i - 1; n++) 6225 if (avail[n].limit < avail[n + 1].base) 6226 (md++)->base = avail[n].limit; 6227 if (avail[n].limit) 6228 (md++)->base = avail[n].limit; 6229 6230 n = md - mem; 6231 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6232 6233 for (lo = 0; lo < i; lo++) 6234 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6235 avail[lo].limit - 1); 6236 6237 sbuf_printf(sb, "\n"); 6238 for (i = 0; i < n; i++) { 6239 if (mem[i].idx >= nitems(region)) 6240 continue; /* skip holes */ 6241 if (!mem[i].limit) 6242 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6243 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6244 mem[i].limit); 6245 } 6246 6247 sbuf_printf(sb, "\n"); 6248 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6249 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6250 mem_region_show(sb, "uP RAM:", lo, hi); 6251 6252 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6253 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6254 mem_region_show(sb, "uP Extmem2:", lo, hi); 6255 6256 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6257 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6258 G_PMRXMAXPAGE(lo), 6259 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6260 (lo & F_PMRXNUMCHN) ? 2 : 1); 6261 6262 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6263 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6264 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6265 G_PMTXMAXPAGE(lo), 6266 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6267 hi >= (1 << 20) ? 
'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6268 sbuf_printf(sb, "%u p-structs\n", 6269 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6270 6271 for (i = 0; i < 4; i++) { 6272 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6273 if (is_t4(sc)) { 6274 used = G_USED(lo); 6275 alloc = G_ALLOC(lo); 6276 } else { 6277 used = G_T5_USED(lo); 6278 alloc = G_T5_ALLOC(lo); 6279 } 6280 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6281 i, used, alloc); 6282 } 6283 for (i = 0; i < 4; i++) { 6284 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6285 if (is_t4(sc)) { 6286 used = G_USED(lo); 6287 alloc = G_ALLOC(lo); 6288 } else { 6289 used = G_T5_USED(lo); 6290 alloc = G_T5_ALLOC(lo); 6291 } 6292 sbuf_printf(sb, 6293 "\nLoopback %d using %u pages out of %u allocated", 6294 i, used, alloc); 6295 } 6296 6297 rc = sbuf_finish(sb); 6298 sbuf_delete(sb); 6299 6300 return (rc); 6301 } 6302 6303 static inline void 6304 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6305 { 6306 *mask = x | y; 6307 y = htobe64(y); 6308 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6309 } 6310 6311 static int 6312 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6313 { 6314 struct adapter *sc = arg1; 6315 struct sbuf *sb; 6316 int rc, i, n; 6317 6318 rc = sysctl_wire_old_buffer(req, 0); 6319 if (rc != 0) 6320 return (rc); 6321 6322 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6323 if (sb == NULL) 6324 return (ENOMEM); 6325 6326 sbuf_printf(sb, 6327 "Idx Ethernet address Mask Vld Ports PF" 6328 " VF Replication P0 P1 P2 P3 ML"); 6329 n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES : 6330 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 6331 for (i = 0; i < n; i++) { 6332 uint64_t tcamx, tcamy, mask; 6333 uint32_t cls_lo, cls_hi; 6334 uint8_t addr[ETHER_ADDR_LEN]; 6335 6336 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6337 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6338 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6339 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6340 6341 if (tcamx & tcamy) 6342 continue; 6343 6344 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6345 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6346 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], 6347 addr[3], addr[4], addr[5], (uintmax_t)mask, 6348 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6349 G_PORTMAP(cls_hi), G_PF(cls_lo), 6350 (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); 6351 6352 if (cls_lo & F_REPLICATE) { 6353 struct fw_ldst_cmd ldst_cmd; 6354 6355 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6356 ldst_cmd.op_to_addrspace = 6357 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6358 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6359 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6360 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6361 ldst_cmd.u.mps.fid_ctl = 6362 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6363 V_FW_LDST_CMD_CTL(i)); 6364 6365 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6366 "t4mps"); 6367 if (rc) 6368 break; 6369 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6370 sizeof(ldst_cmd), &ldst_cmd); 6371 end_synchronized_op(sc, 0); 6372 6373 if (rc != 0) { 6374 sbuf_printf(sb, 6375 " ------------ error %3u ------------", rc); 6376 rc = 0; 6377 } else { 6378 sbuf_printf(sb, " %08x %08x %08x %08x", 6379 be32toh(ldst_cmd.u.mps.rplc127_96), 6380 be32toh(ldst_cmd.u.mps.rplc95_64), 6381 be32toh(ldst_cmd.u.mps.rplc63_32), 6382 be32toh(ldst_cmd.u.mps.rplc31_0)); 6383 } 6384 } else 6385 sbuf_printf(sb, "%36s", ""); 6386 6387 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6388 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6389 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6390 } 6391 6392 if (rc) 6393 (void) sbuf_finish(sb); 6394 else 6395 rc = sbuf_finish(sb); 6396 sbuf_delete(sb); 6397 6398 return (rc); 6399 } 6400 6401 static int 6402 sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 6403 { 6404 struct adapter *sc = arg1; 6405 struct sbuf *sb; 6406 int rc; 6407 uint16_t mtus[NMTUS]; 6408 6409 rc = sysctl_wire_old_buffer(req, 0); 6410 if (rc != 0) 6411 return (rc); 6412 6413 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6414 if (sb == NULL) 6415 return (ENOMEM); 6416 6417 t4_read_mtu_tbl(sc, mtus, NULL); 6418 6419 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 6420 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 6421 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 6422 mtus[14], mtus[15]); 6423 6424 rc = sbuf_finish(sb); 6425 sbuf_delete(sb); 6426 6427 return (rc); 6428 } 6429 6430 static int 6431 sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 6432 { 6433 struct adapter *sc = arg1; 6434 struct sbuf *sb; 6435 int rc, i; 6436 uint32_t cnt[PM_NSTATS]; 6437 uint64_t cyc[PM_NSTATS]; 6438 static const char *rx_stats[] = { 6439 "Read:", "Write bypass:", "Write mem:", "Flush:" 6440 }; 6441 static const char *tx_stats[] = { 6442 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:" 6443 }; 6444 6445 rc = sysctl_wire_old_buffer(req, 0); 6446 if (rc != 0) 6447 return (rc); 6448 6449 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6450 if (sb == NULL) 6451 return (ENOMEM); 6452 6453 t4_pmtx_get_stats(sc, cnt, cyc); 6454 sbuf_printf(sb, " Tx pcmds Tx bytes"); 6455 for (i = 0; i < ARRAY_SIZE(tx_stats); i++) 6456 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i], 6457 cyc[i]); 6458 6459 t4_pmrx_get_stats(sc, cnt, cyc); 6460 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 6461 for (i = 0; i < ARRAY_SIZE(rx_stats); i++) 6462 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i], 6463 cyc[i]); 6464 6465 rc = sbuf_finish(sb); 6466 sbuf_delete(sb); 6467 6468 return (rc); 6469 } 6470 6471 static int 6472 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 6473 { 6474 struct adapter *sc = arg1; 6475 struct sbuf *sb; 6476 int rc; 6477 struct tp_rdma_stats stats; 6478 6479 rc = sysctl_wire_old_buffer(req, 0); 6480 if (rc != 0) 6481 return (rc); 6482 6483 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6484 if (sb == 
NULL) 6485 return (ENOMEM); 6486 6487 t4_tp_get_rdma_stats(sc, &stats); 6488 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod); 6489 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt); 6490 6491 rc = sbuf_finish(sb); 6492 sbuf_delete(sb); 6493 6494 return (rc); 6495 } 6496 6497 static int 6498 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 6499 { 6500 struct adapter *sc = arg1; 6501 struct sbuf *sb; 6502 int rc; 6503 struct tp_tcp_stats v4, v6; 6504 6505 rc = sysctl_wire_old_buffer(req, 0); 6506 if (rc != 0) 6507 return (rc); 6508 6509 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6510 if (sb == NULL) 6511 return (ENOMEM); 6512 6513 t4_tp_get_tcp_stats(sc, &v4, &v6); 6514 sbuf_printf(sb, 6515 " IP IPv6\n"); 6516 sbuf_printf(sb, "OutRsts: %20u %20u\n", 6517 v4.tcpOutRsts, v6.tcpOutRsts); 6518 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 6519 v4.tcpInSegs, v6.tcpInSegs); 6520 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 6521 v4.tcpOutSegs, v6.tcpOutSegs); 6522 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 6523 v4.tcpRetransSegs, v6.tcpRetransSegs); 6524 6525 rc = sbuf_finish(sb); 6526 sbuf_delete(sb); 6527 6528 return (rc); 6529 } 6530 6531 static int 6532 sysctl_tids(SYSCTL_HANDLER_ARGS) 6533 { 6534 struct adapter *sc = arg1; 6535 struct sbuf *sb; 6536 int rc; 6537 struct tid_info *t = &sc->tids; 6538 6539 rc = sysctl_wire_old_buffer(req, 0); 6540 if (rc != 0) 6541 return (rc); 6542 6543 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6544 if (sb == NULL) 6545 return (ENOMEM); 6546 6547 if (t->natids) { 6548 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 6549 t->atids_in_use); 6550 } 6551 6552 if (t->ntids) { 6553 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6554 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 6555 6556 if (b) { 6557 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1, 6558 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6559 t->ntids - 1); 6560 } else { 6561 sbuf_printf(sb, "TID range: %u-%u", 6562 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6563 t->ntids - 1); 6564 } 6565 } else 6566 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1); 6567 sbuf_printf(sb, ", in use: %u\n", 6568 atomic_load_acq_int(&t->tids_in_use)); 6569 } 6570 6571 if (t->nstids) { 6572 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 6573 t->stid_base + t->nstids - 1, t->stids_in_use); 6574 } 6575 6576 if (t->nftids) { 6577 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 6578 t->ftid_base + t->nftids - 1); 6579 } 6580 6581 if (t->netids) { 6582 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 6583 t->etid_base + t->netids - 1); 6584 } 6585 6586 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 6587 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 6588 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 6589 6590 rc = sbuf_finish(sb); 6591 sbuf_delete(sb); 6592 6593 return (rc); 6594 } 6595 6596 static int 6597 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 6598 { 6599 struct adapter *sc = arg1; 6600 struct sbuf *sb; 6601 int rc; 6602 struct tp_err_stats stats; 6603 6604 rc = sysctl_wire_old_buffer(req, 0); 6605 if (rc != 0) 6606 return (rc); 6607 6608 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6609 if (sb == NULL) 6610 return (ENOMEM); 6611 6612 t4_tp_get_err_stats(sc, &stats); 6613 6614 sbuf_printf(sb, " channel 0 channel 1 channel 2 " 6615 "channel 3\n"); 6616 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 6617 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2], 6618 stats.macInErrs[3]); 6619 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 6620
stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2], 6621 stats.hdrInErrs[3]); 6622 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 6623 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2], 6624 stats.tcpInErrs[3]); 6625 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 6626 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2], 6627 stats.tcp6InErrs[3]); 6628 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 6629 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2], 6630 stats.tnlCongDrops[3]); 6631 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 6632 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2], 6633 stats.tnlTxDrops[3]); 6634 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 6635 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1], 6636 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]); 6637 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 6638 stats.ofldChanDrops[0], stats.ofldChanDrops[1], 6639 stats.ofldChanDrops[2], stats.ofldChanDrops[3]); 6640 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 6641 stats.ofldNoNeigh, stats.ofldCongDefer); 6642 6643 rc = sbuf_finish(sb); 6644 sbuf_delete(sb); 6645 6646 return (rc); 6647 } 6648 6649 struct field_desc { 6650 const char *name; 6651 u_int start; 6652 u_int width; 6653 }; 6654 6655 static void 6656 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 6657 { 6658 char buf[32]; 6659 int line_size = 0; 6660 6661 while (f->name) { 6662 uint64_t mask = (1ULL << f->width) - 1; 6663 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 6664 ((uintmax_t)v >> f->start) & mask); 6665 6666 if (line_size + len >= 79) { 6667 line_size = 8; 6668 sbuf_printf(sb, "\n "); 6669 } 6670 sbuf_printf(sb, "%s ", buf); 6671 line_size += len + 1; 6672 f++; 6673 } 6674 sbuf_printf(sb, "\n"); 6675 } 6676 6677 static struct field_desc tp_la0[] = { 6678 { "RcfOpCodeOut", 60, 4 }, 6679 { "State", 56, 4 }, 6680 { "WcfState", 52, 4 }, 6681 { "RcfOpcSrcOut", 50, 2 }, 6682 { "CRxError", 49, 1 }, 6683 { "ERxError", 48, 1 }, 6684 { "SanityFailed", 47, 1 }, 6685 { "SpuriousMsg", 46, 1 }, 6686 { "FlushInputMsg", 45, 1 }, 6687 { "FlushInputCpl", 44, 1 }, 6688 { "RssUpBit", 43, 1 }, 6689 { "RssFilterHit", 42, 1 }, 6690 { "Tid", 32, 10 }, 6691 { "InitTcb", 31, 1 }, 6692 { "LineNumber", 24, 7 }, 6693 { "Emsg", 23, 1 }, 6694 { "EdataOut", 22, 1 }, 6695 { "Cmsg", 21, 1 }, 6696 { "CdataOut", 20, 1 }, 6697 { "EreadPdu", 19, 1 }, 6698 { "CreadPdu", 18, 1 }, 6699 { "TunnelPkt", 17, 1 }, 6700 { "RcfPeerFin", 16, 1 }, 6701 { "RcfReasonOut", 12, 4 }, 6702 { "TxCchannel", 10, 2 }, 6703 { "RcfTxChannel", 8, 2 }, 6704 { "RxEchannel", 6, 2 }, 6705 { "RcfRxChannel", 5, 1 }, 6706 { "RcfDataOutSrdy", 4, 1 }, 6707 { "RxDvld", 3, 1 }, 6708 { "RxOoDvld", 2, 1 }, 6709 { "RxCongestion", 1, 1 }, 6710 { "TxCongestion", 0, 1 }, 6711 { NULL } 6712 }; 6713 6714 static struct field_desc tp_la1[] = { 6715 { "CplCmdIn", 56, 8 }, 6716 { "CplCmdOut", 48, 8 }, 6717 { "ESynOut", 47, 1 }, 6718 { "EAckOut", 46, 1 }, 6719 { "EFinOut", 45, 1 }, 6720 { "ERstOut", 44, 1 }, 6721 { "SynIn", 43, 1 }, 6722 { "AckIn", 42, 1 }, 6723 { "FinIn", 41, 1 }, 6724 { "RstIn", 40, 1 }, 6725 { "DataIn", 39, 1 }, 6726 { "DataInVld", 38, 1 }, 6727 { "PadIn", 37, 1 }, 6728 { "RxBufEmpty", 36, 1 }, 6729 { "RxDdp", 35, 1 }, 6730 { "RxFbCongestion", 34, 1 }, 6731 { "TxFbCongestion", 33, 1 }, 6732 { "TxPktSumSrdy", 32, 1 }, 6733 { "RcfUlpType", 28, 4 }, 6734 { "Eread", 27, 1 }, 6735 { "Ebypass", 26, 1 }, 6736 { "Esave", 25, 1 }, 6737 { 
"Static0", 24, 1 }, 6738 { "Cread", 23, 1 }, 6739 { "Cbypass", 22, 1 }, 6740 { "Csave", 21, 1 }, 6741 { "CPktOut", 20, 1 }, 6742 { "RxPagePoolFull", 18, 2 }, 6743 { "RxLpbkPkt", 17, 1 }, 6744 { "TxLpbkPkt", 16, 1 }, 6745 { "RxVfValid", 15, 1 }, 6746 { "SynLearned", 14, 1 }, 6747 { "SetDelEntry", 13, 1 }, 6748 { "SetInvEntry", 12, 1 }, 6749 { "CpcmdDvld", 11, 1 }, 6750 { "CpcmdSave", 10, 1 }, 6751 { "RxPstructsFull", 8, 2 }, 6752 { "EpcmdDvld", 7, 1 }, 6753 { "EpcmdFlush", 6, 1 }, 6754 { "EpcmdTrimPrefix", 5, 1 }, 6755 { "EpcmdTrimPostfix", 4, 1 }, 6756 { "ERssIp4Pkt", 3, 1 }, 6757 { "ERssIp6Pkt", 2, 1 }, 6758 { "ERssTcpUdpPkt", 1, 1 }, 6759 { "ERssFceFipPkt", 0, 1 }, 6760 { NULL } 6761 }; 6762 6763 static struct field_desc tp_la2[] = { 6764 { "CplCmdIn", 56, 8 }, 6765 { "MpsVfVld", 55, 1 }, 6766 { "MpsPf", 52, 3 }, 6767 { "MpsVf", 44, 8 }, 6768 { "SynIn", 43, 1 }, 6769 { "AckIn", 42, 1 }, 6770 { "FinIn", 41, 1 }, 6771 { "RstIn", 40, 1 }, 6772 { "DataIn", 39, 1 }, 6773 { "DataInVld", 38, 1 }, 6774 { "PadIn", 37, 1 }, 6775 { "RxBufEmpty", 36, 1 }, 6776 { "RxDdp", 35, 1 }, 6777 { "RxFbCongestion", 34, 1 }, 6778 { "TxFbCongestion", 33, 1 }, 6779 { "TxPktSumSrdy", 32, 1 }, 6780 { "RcfUlpType", 28, 4 }, 6781 { "Eread", 27, 1 }, 6782 { "Ebypass", 26, 1 }, 6783 { "Esave", 25, 1 }, 6784 { "Static0", 24, 1 }, 6785 { "Cread", 23, 1 }, 6786 { "Cbypass", 22, 1 }, 6787 { "Csave", 21, 1 }, 6788 { "CPktOut", 20, 1 }, 6789 { "RxPagePoolFull", 18, 2 }, 6790 { "RxLpbkPkt", 17, 1 }, 6791 { "TxLpbkPkt", 16, 1 }, 6792 { "RxVfValid", 15, 1 }, 6793 { "SynLearned", 14, 1 }, 6794 { "SetDelEntry", 13, 1 }, 6795 { "SetInvEntry", 12, 1 }, 6796 { "CpcmdDvld", 11, 1 }, 6797 { "CpcmdSave", 10, 1 }, 6798 { "RxPstructsFull", 8, 2 }, 6799 { "EpcmdDvld", 7, 1 }, 6800 { "EpcmdFlush", 6, 1 }, 6801 { "EpcmdTrimPrefix", 5, 1 }, 6802 { "EpcmdTrimPostfix", 4, 1 }, 6803 { "ERssIp4Pkt", 3, 1 }, 6804 { "ERssIp6Pkt", 2, 1 }, 6805 { "ERssTcpUdpPkt", 1, 1 }, 6806 { "ERssFceFipPkt", 0, 1 }, 6807 { NULL } 6808 }; 6809 6810 static void 6811 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 6812 { 6813 6814 field_desc_show(sb, *p, tp_la0); 6815 } 6816 6817 static void 6818 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 6819 { 6820 6821 if (idx) 6822 sbuf_printf(sb, "\n"); 6823 field_desc_show(sb, p[0], tp_la0); 6824 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 6825 field_desc_show(sb, p[1], tp_la0); 6826 } 6827 6828 static void 6829 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 6830 { 6831 6832 if (idx) 6833 sbuf_printf(sb, "\n"); 6834 field_desc_show(sb, p[0], tp_la0); 6835 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 6836 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 6837 } 6838 6839 static int 6840 sysctl_tp_la(SYSCTL_HANDLER_ARGS) 6841 { 6842 struct adapter *sc = arg1; 6843 struct sbuf *sb; 6844 uint64_t *buf, *p; 6845 int rc; 6846 u_int i, inc; 6847 void (*show_func)(struct sbuf *, uint64_t *, int); 6848 6849 rc = sysctl_wire_old_buffer(req, 0); 6850 if (rc != 0) 6851 return (rc); 6852 6853 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6854 if (sb == NULL) 6855 return (ENOMEM); 6856 6857 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 6858 6859 t4_tp_read_la(sc, buf, NULL); 6860 p = buf; 6861 6862 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 6863 case 2: 6864 inc = 2; 6865 show_func = tp_la_show2; 6866 break; 6867 case 3: 6868 inc = 2; 6869 show_func = tp_la_show3; 6870 break; 6871 default: 6872 inc = 1; 6873 show_func = tp_la_show; 6874 } 6875 6876 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 6877 (*show_func)(sb, p, i); 6878 6879 rc = sbuf_finish(sb); 6880 sbuf_delete(sb); 6881 free(buf, M_CXGBE); 6882 return (rc); 6883 } 6884 6885 static int 6886 sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 6887 { 6888 struct adapter *sc = arg1; 6889 struct sbuf *sb; 6890 int rc; 6891 u64 nrate[NCHAN], orate[NCHAN]; 6892 6893 rc = sysctl_wire_old_buffer(req, 0); 6894 if (rc != 0) 6895 return (rc); 6896 6897 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6898 if (sb == NULL) 6899 return (ENOMEM); 6900 6901 t4_get_chan_txrate(sc, nrate, orate); 6902 sbuf_printf(sb, " channel 0 channel 1 channel 2 " 6903 "channel 3\n"); 6904 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 6905 nrate[0], nrate[1], nrate[2], nrate[3]); 6906 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 6907 orate[0], orate[1], orate[2], orate[3]); 6908 6909 rc = sbuf_finish(sb); 6910 sbuf_delete(sb); 6911 6912 return (rc); 6913 } 6914 6915 static int 6916 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 6917 { 6918 struct adapter *sc = arg1; 6919 struct sbuf *sb; 6920 uint32_t *buf, *p; 6921 int rc, i; 6922 6923 rc = sysctl_wire_old_buffer(req, 0); 6924 if (rc != 0) 6925 return (rc); 6926 6927 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6928 if (sb == NULL) 6929 return (ENOMEM); 6930 6931 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 6932 M_ZERO | M_WAITOK); 6933 6934 t4_ulprx_read_la(sc, buf); 6935 p = buf; 6936 6937 sbuf_printf(sb, " Pcmd Type Message" 6938 " Data"); 6939 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 6940 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 6941 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 6942 } 6943 6944 rc = sbuf_finish(sb); 6945 sbuf_delete(sb); 6946 free(buf, M_CXGBE); 6947 return (rc); 6948 } 6949 6950 static int 6951 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 6952 { 6953 struct adapter *sc = arg1; 6954 struct sbuf *sb; 6955 int rc, v; 6956 6957 rc = sysctl_wire_old_buffer(req, 0); 6958 if (rc != 0) 6959 return (rc); 6960 6961 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6962 if (sb == NULL) 6963 return (ENOMEM); 6964 6965 v = t4_read_reg(sc, A_SGE_STAT_CFG); 6966 if (G_STATSOURCE_T5(v) == 7) { 6967 if (G_STATMODE(v) == 0) { 6968 sbuf_printf(sb, "total %d, incomplete %d", 6969 t4_read_reg(sc, A_SGE_STAT_TOTAL), 6970 t4_read_reg(sc, A_SGE_STAT_MATCH)); 6971 } else if (G_STATMODE(v) == 1) { 6972 sbuf_printf(sb, "total %d, data overflow %d", 6973 t4_read_reg(sc, A_SGE_STAT_TOTAL), 6974 t4_read_reg(sc, A_SGE_STAT_MATCH)); 6975 } 6976 } 6977 rc = sbuf_finish(sb); 6978 sbuf_delete(sb); 6979 6980 return (rc); 6981 } 6982 #endif 6983 6984 static uint32_t 6985 fconf_to_mode(uint32_t 
fconf) 6986 { 6987 uint32_t mode; 6988 6989 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 6990 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 6991 6992 if (fconf & F_FRAGMENTATION) 6993 mode |= T4_FILTER_IP_FRAGMENT; 6994 6995 if (fconf & F_MPSHITTYPE) 6996 mode |= T4_FILTER_MPS_HIT_TYPE; 6997 6998 if (fconf & F_MACMATCH) 6999 mode |= T4_FILTER_MAC_IDX; 7000 7001 if (fconf & F_ETHERTYPE) 7002 mode |= T4_FILTER_ETH_TYPE; 7003 7004 if (fconf & F_PROTOCOL) 7005 mode |= T4_FILTER_IP_PROTO; 7006 7007 if (fconf & F_TOS) 7008 mode |= T4_FILTER_IP_TOS; 7009 7010 if (fconf & F_VLAN) 7011 mode |= T4_FILTER_VLAN; 7012 7013 if (fconf & F_VNIC_ID) 7014 mode |= T4_FILTER_VNIC; 7015 7016 if (fconf & F_PORT) 7017 mode |= T4_FILTER_PORT; 7018 7019 if (fconf & F_FCOE) 7020 mode |= T4_FILTER_FCoE; 7021 7022 return (mode); 7023 } 7024 7025 static uint32_t 7026 mode_to_fconf(uint32_t mode) 7027 { 7028 uint32_t fconf = 0; 7029 7030 if (mode & T4_FILTER_IP_FRAGMENT) 7031 fconf |= F_FRAGMENTATION; 7032 7033 if (mode & T4_FILTER_MPS_HIT_TYPE) 7034 fconf |= F_MPSHITTYPE; 7035 7036 if (mode & T4_FILTER_MAC_IDX) 7037 fconf |= F_MACMATCH; 7038 7039 if (mode & T4_FILTER_ETH_TYPE) 7040 fconf |= F_ETHERTYPE; 7041 7042 if (mode & T4_FILTER_IP_PROTO) 7043 fconf |= F_PROTOCOL; 7044 7045 if (mode & T4_FILTER_IP_TOS) 7046 fconf |= F_TOS; 7047 7048 if (mode & T4_FILTER_VLAN) 7049 fconf |= F_VLAN; 7050 7051 if (mode & T4_FILTER_VNIC) 7052 fconf |= F_VNIC_ID; 7053 7054 if (mode & T4_FILTER_PORT) 7055 fconf |= F_PORT; 7056 7057 if (mode & T4_FILTER_FCoE) 7058 fconf |= F_FCOE; 7059 7060 return (fconf); 7061 } 7062 7063 static uint32_t 7064 fspec_to_fconf(struct t4_filter_specification *fs) 7065 { 7066 uint32_t fconf = 0; 7067 7068 if (fs->val.frag || fs->mask.frag) 7069 fconf |= F_FRAGMENTATION; 7070 7071 if (fs->val.matchtype || fs->mask.matchtype) 7072 fconf |= F_MPSHITTYPE; 7073 7074 if (fs->val.macidx || fs->mask.macidx) 7075 fconf |= F_MACMATCH; 7076 7077 if (fs->val.ethtype || fs->mask.ethtype) 7078 fconf |= F_ETHERTYPE; 7079 7080 if (fs->val.proto || fs->mask.proto) 7081 fconf |= F_PROTOCOL; 7082 7083 if (fs->val.tos || fs->mask.tos) 7084 fconf |= F_TOS; 7085 7086 if (fs->val.vlan_vld || fs->mask.vlan_vld) 7087 fconf |= F_VLAN; 7088 7089 if (fs->val.vnic_vld || fs->mask.vnic_vld) 7090 fconf |= F_VNIC_ID; 7091 7092 if (fs->val.iport || fs->mask.iport) 7093 fconf |= F_PORT; 7094 7095 if (fs->val.fcoe || fs->mask.fcoe) 7096 fconf |= F_FCOE; 7097 7098 return (fconf); 7099 } 7100 7101 static int 7102 get_filter_mode(struct adapter *sc, uint32_t *mode) 7103 { 7104 int rc; 7105 uint32_t fconf; 7106 7107 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7108 "t4getfm"); 7109 if (rc) 7110 return (rc); 7111 7112 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1, 7113 A_TP_VLAN_PRI_MAP); 7114 7115 if (sc->params.tp.vlan_pri_map != fconf) { 7116 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n", 7117 device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map, 7118 fconf); 7119 } 7120 7121 *mode = fconf_to_mode(fconf); 7122 7123 end_synchronized_op(sc, LOCK_HELD); 7124 return (0); 7125 } 7126 7127 static int 7128 set_filter_mode(struct adapter *sc, uint32_t mode) 7129 { 7130 uint32_t fconf; 7131 int rc; 7132 7133 fconf = mode_to_fconf(mode); 7134 7135 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7136 "t4setfm"); 7137 if (rc) 7138 return (rc); 7139 7140 if (sc->tids.ftids_in_use > 0) { 7141 rc = EBUSY; 7142 goto done; 7143 } 7144 7145 #ifdef 
TCP_OFFLOAD 7146 if (uld_active(sc, ULD_TOM)) { 7147 rc = EBUSY; 7148 goto done; 7149 } 7150 #endif 7151 7152 rc = -t4_set_filter_mode(sc, fconf); 7153 done: 7154 end_synchronized_op(sc, LOCK_HELD); 7155 return (rc); 7156 } 7157 7158 static inline uint64_t 7159 get_filter_hits(struct adapter *sc, uint32_t fid) 7160 { 7161 uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 7162 uint64_t hits; 7163 7164 memwin_info(sc, 0, &mw_base, NULL); 7165 off = position_memwin(sc, 0, 7166 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE); 7167 if (is_t4(sc)) { 7168 hits = t4_read_reg64(sc, mw_base + off + 16); 7169 hits = be64toh(hits); 7170 } else { 7171 hits = t4_read_reg(sc, mw_base + off + 24); 7172 hits = be32toh(hits); 7173 } 7174 7175 return (hits); 7176 } 7177 7178 static int 7179 get_filter(struct adapter *sc, struct t4_filter *t) 7180 { 7181 int i, rc, nfilters = sc->tids.nftids; 7182 struct filter_entry *f; 7183 7184 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7185 "t4getf"); 7186 if (rc) 7187 return (rc); 7188 7189 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 7190 t->idx >= nfilters) { 7191 t->idx = 0xffffffff; 7192 goto done; 7193 } 7194 7195 f = &sc->tids.ftid_tab[t->idx]; 7196 for (i = t->idx; i < nfilters; i++, f++) { 7197 if (f->valid) { 7198 t->idx = i; 7199 t->l2tidx = f->l2t ? f->l2t->idx : 0; 7200 t->smtidx = f->smtidx; 7201 if (f->fs.hitcnts) 7202 t->hits = get_filter_hits(sc, t->idx); 7203 else 7204 t->hits = UINT64_MAX; 7205 t->fs = f->fs; 7206 7207 goto done; 7208 } 7209 } 7210 7211 t->idx = 0xffffffff; 7212 done: 7213 end_synchronized_op(sc, LOCK_HELD); 7214 return (0); 7215 } 7216 7217 static int 7218 set_filter(struct adapter *sc, struct t4_filter *t) 7219 { 7220 unsigned int nfilters, nports; 7221 struct filter_entry *f; 7222 int i, rc; 7223 7224 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 7225 if (rc) 7226 return (rc); 7227 7228 nfilters = sc->tids.nftids; 7229 nports = sc->params.nports; 7230 7231 if (nfilters == 0) { 7232 rc = ENOTSUP; 7233 goto done; 7234 } 7235 7236 if (!(sc->flags & FULL_INIT_DONE)) { 7237 rc = EAGAIN; 7238 goto done; 7239 } 7240 7241 if (t->idx >= nfilters) { 7242 rc = EINVAL; 7243 goto done; 7244 } 7245 7246 /* Validate against the global filter mode */ 7247 if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) != 7248 sc->params.tp.vlan_pri_map) { 7249 rc = E2BIG; 7250 goto done; 7251 } 7252 7253 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 7254 rc = EINVAL; 7255 goto done; 7256 } 7257 7258 if (t->fs.val.iport >= nports) { 7259 rc = EINVAL; 7260 goto done; 7261 } 7262 7263 /* Can't specify an iq if not steering to it */ 7264 if (!t->fs.dirsteer && t->fs.iq) { 7265 rc = EINVAL; 7266 goto done; 7267 } 7268 7269 /* IPv6 filter idx must be 4 aligned */ 7270 if (t->fs.type == 1 && 7271 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 7272 rc = EINVAL; 7273 goto done; 7274 } 7275 7276 if (sc->tids.ftid_tab == NULL) { 7277 KASSERT(sc->tids.ftids_in_use == 0, 7278 ("%s: no memory allocated but filters_in_use > 0", 7279 __func__)); 7280 7281 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 7282 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 7283 if (sc->tids.ftid_tab == NULL) { 7284 rc = ENOMEM; 7285 goto done; 7286 } 7287 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 7288 } 7289 7290 for (i = 0; i < 4; i++) { 7291 f = &sc->tids.ftid_tab[t->idx + i]; 7292 7293 if (f->pending || f->valid) { 7294 rc = EBUSY; 7295 goto done; 7296 } 7297 if 
(f->locked) { 7298 rc = EPERM; 7299 goto done; 7300 } 7301 7302 if (t->fs.type == 0) 7303 break; 7304 } 7305 7306 f = &sc->tids.ftid_tab[t->idx]; 7307 f->fs = t->fs; 7308 7309 rc = set_filter_wr(sc, t->idx); 7310 done: 7311 end_synchronized_op(sc, 0); 7312 7313 if (rc == 0) { 7314 mtx_lock(&sc->tids.ftid_lock); 7315 for (;;) { 7316 if (f->pending == 0) { 7317 rc = f->valid ? 0 : EIO; 7318 break; 7319 } 7320 7321 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7322 PCATCH, "t4setfw", 0)) { 7323 rc = EINPROGRESS; 7324 break; 7325 } 7326 } 7327 mtx_unlock(&sc->tids.ftid_lock); 7328 } 7329 return (rc); 7330 } 7331 7332 static int 7333 del_filter(struct adapter *sc, struct t4_filter *t) 7334 { 7335 unsigned int nfilters; 7336 struct filter_entry *f; 7337 int rc; 7338 7339 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 7340 if (rc) 7341 return (rc); 7342 7343 nfilters = sc->tids.nftids; 7344 7345 if (nfilters == 0) { 7346 rc = ENOTSUP; 7347 goto done; 7348 } 7349 7350 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 7351 t->idx >= nfilters) { 7352 rc = EINVAL; 7353 goto done; 7354 } 7355 7356 if (!(sc->flags & FULL_INIT_DONE)) { 7357 rc = EAGAIN; 7358 goto done; 7359 } 7360 7361 f = &sc->tids.ftid_tab[t->idx]; 7362 7363 if (f->pending) { 7364 rc = EBUSY; 7365 goto done; 7366 } 7367 if (f->locked) { 7368 rc = EPERM; 7369 goto done; 7370 } 7371 7372 if (f->valid) { 7373 t->fs = f->fs; /* extra info for the caller */ 7374 rc = del_filter_wr(sc, t->idx); 7375 } 7376 7377 done: 7378 end_synchronized_op(sc, 0); 7379 7380 if (rc == 0) { 7381 mtx_lock(&sc->tids.ftid_lock); 7382 for (;;) { 7383 if (f->pending == 0) { 7384 rc = f->valid ? EIO : 0; 7385 break; 7386 } 7387 7388 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7389 PCATCH, "t4delfw", 0)) { 7390 rc = EINPROGRESS; 7391 break; 7392 } 7393 } 7394 mtx_unlock(&sc->tids.ftid_lock); 7395 } 7396 7397 return (rc); 7398 } 7399 7400 static void 7401 clear_filter(struct filter_entry *f) 7402 { 7403 if (f->l2t) 7404 t4_l2t_release(f->l2t); 7405 7406 bzero(f, sizeof (*f)); 7407 } 7408 7409 static int 7410 set_filter_wr(struct adapter *sc, int fidx) 7411 { 7412 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 7413 struct fw_filter_wr *fwr; 7414 unsigned int ftid; 7415 struct wrq_cookie cookie; 7416 7417 ASSERT_SYNCHRONIZED_OP(sc); 7418 7419 if (f->fs.newdmac || f->fs.newvlan) { 7420 /* This filter needs an L2T entry; allocate one. 
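The switching entry carries the new destination MAC and the outgoing port/VLAN that t4_l2t_set_switching() programs below, and it is released again in clear_filter() when the filter is destroyed.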
*/ 7421 f->l2t = t4_l2t_alloc_switching(sc->l2t); 7422 if (f->l2t == NULL) 7423 return (EAGAIN); 7424 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 7425 f->fs.dmac)) { 7426 t4_l2t_release(f->l2t); 7427 f->l2t = NULL; 7428 return (ENOMEM); 7429 } 7430 } 7431 7432 ftid = sc->tids.ftid_base + fidx; 7433 7434 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 7435 if (fwr == NULL) 7436 return (ENOMEM); 7437 bzero(fwr, sizeof(*fwr)); 7438 7439 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 7440 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 7441 fwr->tid_to_iq = 7442 htobe32(V_FW_FILTER_WR_TID(ftid) | 7443 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 7444 V_FW_FILTER_WR_NOREPLY(0) | 7445 V_FW_FILTER_WR_IQ(f->fs.iq)); 7446 fwr->del_filter_to_l2tix = 7447 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 7448 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 7449 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 7450 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 7451 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 7452 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 7453 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 7454 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 7455 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 7456 f->fs.newvlan == VLAN_REWRITE) | 7457 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 7458 f->fs.newvlan == VLAN_REWRITE) | 7459 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 7460 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 7461 V_FW_FILTER_WR_PRIO(f->fs.prio) | 7462 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); 7463 fwr->ethtype = htobe16(f->fs.val.ethtype); 7464 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 7465 fwr->frag_to_ovlan_vldm = 7466 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 7467 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 7468 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 7469 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) | 7470 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 7471 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld)); 7472 fwr->smac_sel = 0; 7473 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 7474 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 7475 fwr->maci_to_matchtypem = 7476 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 7477 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 7478 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 7479 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 7480 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 7481 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 7482 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 7483 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 7484 fwr->ptcl = f->fs.val.proto; 7485 fwr->ptclm = f->fs.mask.proto; 7486 fwr->ttyp = f->fs.val.tos; 7487 fwr->ttypm = f->fs.mask.tos; 7488 fwr->ivlan = htobe16(f->fs.val.vlan); 7489 fwr->ivlanm = htobe16(f->fs.mask.vlan); 7490 fwr->ovlan = htobe16(f->fs.val.vnic); 7491 fwr->ovlanm = htobe16(f->fs.mask.vnic); 7492 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 7493 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 7494 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 7495 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 7496 fwr->lp = htobe16(f->fs.val.dport); 7497 fwr->lpm = htobe16(f->fs.mask.dport); 7498 fwr->fp = htobe16(f->fs.val.sport); 7499 fwr->fpm = htobe16(f->fs.mask.sport); 7500 if (f->fs.newsmac) 7501 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 7502 7503 f->pending = 1; 7504 sc->tids.ftids_in_use++; 7505 7506 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 7507 return (0); 7508 } 7509 7510 static int 7511 del_filter_wr(struct adapter *sc, int fidx) 7512 { 7513 struct 
filter_entry *f = &sc->tids.ftid_tab[fidx]; 7514 struct fw_filter_wr *fwr; 7515 unsigned int ftid; 7516 struct wrq_cookie cookie; 7517 7518 ftid = sc->tids.ftid_base + fidx; 7519 7520 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 7521 if (fwr == NULL) 7522 return (ENOMEM); 7523 bzero(fwr, sizeof (*fwr)); 7524 7525 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 7526 7527 f->pending = 1; 7528 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 7529 return (0); 7530 } 7531 7532 int 7533 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 7534 { 7535 struct adapter *sc = iq->adapter; 7536 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 7537 unsigned int idx = GET_TID(rpl); 7538 unsigned int rc; 7539 struct filter_entry *f; 7540 7541 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 7542 rss->opcode)); 7543 7544 if (is_ftid(sc, idx)) { 7545 7546 idx -= sc->tids.ftid_base; 7547 f = &sc->tids.ftid_tab[idx]; 7548 rc = G_COOKIE(rpl->cookie); 7549 7550 mtx_lock(&sc->tids.ftid_lock); 7551 if (rc == FW_FILTER_WR_FLT_ADDED) { 7552 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 7553 __func__, idx)); 7554 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 7555 f->pending = 0; /* asynchronous setup completed */ 7556 f->valid = 1; 7557 } else { 7558 if (rc != FW_FILTER_WR_FLT_DELETED) { 7559 /* Add or delete failed, display an error */ 7560 log(LOG_ERR, 7561 "filter %u setup failed with error %u\n", 7562 idx, rc); 7563 } 7564 7565 clear_filter(f); 7566 sc->tids.ftids_in_use--; 7567 } 7568 wakeup(&sc->tids.ftid_tab); 7569 mtx_unlock(&sc->tids.ftid_lock); 7570 } 7571 7572 return (0); 7573 } 7574 7575 static int 7576 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 7577 { 7578 int rc; 7579 7580 if (cntxt->cid > M_CTXTQID) 7581 return (EINVAL); 7582 7583 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 7584 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 7585 return (EINVAL); 7586 7587 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 7588 if (rc) 7589 return (rc); 7590 7591 if (sc->flags & FW_OK) { 7592 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 7593 &cntxt->data[0]); 7594 if (rc == 0) 7595 goto done; 7596 } 7597 7598 /* 7599 * Read via firmware failed or wasn't even attempted. Read directly via 7600 * the backdoor. 
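* (t4_sge_ctxt_rd_bd reads the context out of the SGE registers itself * rather than through a firmware mailbox command, so it also works when * the firmware is hung or unavailable.)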
7601 */ 7602 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 7603 done: 7604 end_synchronized_op(sc, 0); 7605 return (rc); 7606 } 7607 7608 static int 7609 load_fw(struct adapter *sc, struct t4_data *fw) 7610 { 7611 int rc; 7612 uint8_t *fw_data; 7613 7614 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 7615 if (rc) 7616 return (rc); 7617 7618 if (sc->flags & FULL_INIT_DONE) { 7619 rc = EBUSY; 7620 goto done; 7621 } 7622 7623 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 7624 if (fw_data == NULL) { 7625 rc = ENOMEM; 7626 goto done; 7627 } 7628 7629 rc = copyin(fw->data, fw_data, fw->len); 7630 if (rc == 0) 7631 rc = -t4_load_fw(sc, fw_data, fw->len); 7632 7633 free(fw_data, M_CXGBE); 7634 done: 7635 end_synchronized_op(sc, 0); 7636 return (rc); 7637 } 7638 7639 static int 7640 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 7641 { 7642 uint32_t addr, off, remaining, i, n; 7643 uint32_t *buf, *b; 7644 uint32_t mw_base, mw_aperture; 7645 int rc; 7646 uint8_t *dst; 7647 7648 rc = validate_mem_range(sc, mr->addr, mr->len); 7649 if (rc != 0) 7650 return (rc); 7651 7652 memwin_info(sc, win, &mw_base, &mw_aperture); 7653 buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK); 7654 addr = mr->addr; 7655 remaining = mr->len; 7656 dst = (void *)mr->data; 7657 7658 while (remaining) { 7659 off = position_memwin(sc, win, addr); 7660 7661 /* number of bytes that we'll copy in the inner loop */ 7662 n = min(remaining, mw_aperture - off); 7663 for (i = 0; i < n; i += 4) 7664 *b++ = t4_read_reg(sc, mw_base + off + i); 7665 7666 rc = copyout(buf, dst, n); 7667 if (rc != 0) 7668 break; 7669 7670 b = buf; 7671 dst += n; 7672 remaining -= n; 7673 addr += n; 7674 } 7675 7676 free(buf, M_CXGBE); 7677 return (rc); 7678 } 7679 7680 static int 7681 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 7682 { 7683 int rc; 7684 7685 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 7686 return (EINVAL); 7687 7688 if (i2cd->len > sizeof(i2cd->data)) 7689 return (EFBIG); 7690 7691 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 7692 if (rc) 7693 return (rc); 7694 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 7695 i2cd->offset, i2cd->len, &i2cd->data[0]); 7696 end_synchronized_op(sc, 0); 7697 7698 return (rc); 7699 } 7700 7701 static int 7702 in_range(int val, int lo, int hi) 7703 { 7704 7705 return (val < 0 || (val <= hi && val >= lo)); 7706 } 7707 7708 static int 7709 set_sched_class(struct adapter *sc, struct t4_sched_params *p) 7710 { 7711 int fw_subcmd, fw_type, rc; 7712 7713 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc"); 7714 if (rc) 7715 return (rc); 7716 7717 if (!(sc->flags & FULL_INIT_DONE)) { 7718 rc = EAGAIN; 7719 goto done; 7720 } 7721 7722 /* 7723 * Translate the cxgbetool parameters into T4 firmware parameters. (The 7724 * sub-command and type are in common locations.) 
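* Each SCHED_CLASS_* value from the ioctl request is mapped onto its * FW_SCHED_* counterpart below; a value with no mapping fails the * request with EINVAL.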
7725 */ 7726 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG) 7727 fw_subcmd = FW_SCHED_SC_CONFIG; 7728 else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS) 7729 fw_subcmd = FW_SCHED_SC_PARAMS; 7730 else { 7731 rc = EINVAL; 7732 goto done; 7733 } 7734 if (p->type == SCHED_CLASS_TYPE_PACKET) 7735 fw_type = FW_SCHED_TYPE_PKTSCHED; 7736 else { 7737 rc = EINVAL; 7738 goto done; 7739 } 7740 7741 if (fw_subcmd == FW_SCHED_SC_CONFIG) { 7742 /* Vet our parameters ..*/ 7743 if (p->u.config.minmax < 0) { 7744 rc = EINVAL; 7745 goto done; 7746 } 7747 7748 /* And pass the request to the firmware ...*/ 7749 rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1); 7750 goto done; 7751 } 7752 7753 if (fw_subcmd == FW_SCHED_SC_PARAMS) { 7754 int fw_level; 7755 int fw_mode; 7756 int fw_rateunit; 7757 int fw_ratemode; 7758 7759 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL) 7760 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL; 7761 else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) 7762 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR; 7763 else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) 7764 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL; 7765 else { 7766 rc = EINVAL; 7767 goto done; 7768 } 7769 7770 if (p->u.params.mode == SCHED_CLASS_MODE_CLASS) 7771 fw_mode = FW_SCHED_PARAMS_MODE_CLASS; 7772 else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW) 7773 fw_mode = FW_SCHED_PARAMS_MODE_FLOW; 7774 else { 7775 rc = EINVAL; 7776 goto done; 7777 } 7778 7779 if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS) 7780 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 7781 else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS) 7782 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE; 7783 else { 7784 rc = EINVAL; 7785 goto done; 7786 } 7787 7788 if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL) 7789 fw_ratemode = FW_SCHED_PARAMS_RATE_REL; 7790 else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS) 7791 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS; 7792 else { 7793 rc = EINVAL; 7794 goto done; 7795 } 7796 7797 /* Vet our parameters ... */ 7798 if (!in_range(p->u.params.channel, 0, 3) || 7799 !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) || 7800 !in_range(p->u.params.minrate, 0, 10000000) || 7801 !in_range(p->u.params.maxrate, 0, 10000000) || 7802 !in_range(p->u.params.weight, 0, 100)) { 7803 rc = ERANGE; 7804 goto done; 7805 } 7806 7807 /* 7808 * Translate any unset parameters into the firmware's 7809 * nomenclature and/or fail the call if the parameters 7810 * are required ... 7811 */ 7812 if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 || 7813 p->u.params.channel < 0 || p->u.params.cl < 0) { 7814 rc = EINVAL; 7815 goto done; 7816 } 7817 if (p->u.params.minrate < 0) 7818 p->u.params.minrate = 0; 7819 if (p->u.params.maxrate < 0) { 7820 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL || 7821 p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) { 7822 rc = EINVAL; 7823 goto done; 7824 } else 7825 p->u.params.maxrate = 0; 7826 } 7827 if (p->u.params.weight < 0) { 7828 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) { 7829 rc = EINVAL; 7830 goto done; 7831 } else 7832 p->u.params.weight = 0; 7833 } 7834 if (p->u.params.pktsize < 0) { 7835 if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL || 7836 p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) { 7837 rc = EINVAL; 7838 goto done; 7839 } else 7840 p->u.params.pktsize = 0; 7841 } 7842 7843 /* See what the firmware thinks of the request ... 
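(as with the other t4_* calls here, a negative firmware return value is flipped into a positive errno)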
*/ 7844 rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode, 7845 fw_rateunit, fw_ratemode, p->u.params.channel, 7846 p->u.params.cl, p->u.params.minrate, p->u.params.maxrate, 7847 p->u.params.weight, p->u.params.pktsize, 1); 7848 goto done; 7849 } 7850 7851 rc = EINVAL; 7852 done: 7853 end_synchronized_op(sc, 0); 7854 return (rc); 7855 } 7856 7857 static int 7858 set_sched_queue(struct adapter *sc, struct t4_sched_queue *p) 7859 { 7860 struct port_info *pi = NULL; 7861 struct sge_txq *txq; 7862 uint32_t fw_mnem, fw_queue, fw_class; 7863 int i, rc; 7864 7865 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq"); 7866 if (rc) 7867 return (rc); 7868 7869 if (!(sc->flags & FULL_INIT_DONE)) { 7870 rc = EAGAIN; 7871 goto done; 7872 } 7873 7874 if (p->port >= sc->params.nports) { 7875 rc = EINVAL; 7876 goto done; 7877 } 7878 7879 pi = sc->port[p->port]; 7880 if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) { 7881 rc = EINVAL; 7882 goto done; 7883 } 7884 7885 /* 7886 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX 7887 * Scheduling Class in this case). 7888 */ 7889 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 7890 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); 7891 fw_class = p->cl < 0 ? 0xffffffff : p->cl; 7892 7893 /* 7894 * If p->queue is non-negative, then we're only changing the scheduling 7895 * on a single specified TX queue. 7896 */ 7897 if (p->queue >= 0) { 7898 txq = &sc->sge.txq[pi->first_txq + p->queue]; 7899 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 7900 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 7901 &fw_class); 7902 goto done; 7903 } 7904 7905 /* 7906 * Change the scheduling on all the TX queues for the 7907 * interface. 7908 */ 7909 for_each_txq(pi, i, txq) { 7910 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 7911 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 7912 &fw_class); 7913 if (rc) 7914 goto done; 7915 } 7916 7917 rc = 0; 7918 done: 7919 end_synchronized_op(sc, 0); 7920 return (rc); 7921 } 7922 7923 int 7924 t4_os_find_pci_capability(struct adapter *sc, int cap) 7925 { 7926 int i; 7927 7928 return (pci_find_cap(sc->dev, cap, &i) == 0 ?
i : 0); 7929 } 7930 7931 int 7932 t4_os_pci_save_state(struct adapter *sc) 7933 { 7934 device_t dev; 7935 struct pci_devinfo *dinfo; 7936 7937 dev = sc->dev; 7938 dinfo = device_get_ivars(dev); 7939 7940 pci_cfg_save(dev, dinfo, 0); 7941 return (0); 7942 } 7943 7944 int 7945 t4_os_pci_restore_state(struct adapter *sc) 7946 { 7947 device_t dev; 7948 struct pci_devinfo *dinfo; 7949 7950 dev = sc->dev; 7951 dinfo = device_get_ivars(dev); 7952 7953 pci_cfg_restore(dev, dinfo); 7954 return (0); 7955 } 7956 7957 void 7958 t4_os_portmod_changed(const struct adapter *sc, int idx) 7959 { 7960 struct port_info *pi = sc->port[idx]; 7961 static const char *mod_str[] = { 7962 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 7963 }; 7964 7965 build_medialist(pi, &pi->media); 7966 #ifdef DEV_NETMAP 7967 build_medialist(pi, &pi->nm_media); 7968 #endif 7969 7970 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 7971 if_printf(pi->ifp, "transceiver unplugged.\n"); 7972 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 7973 if_printf(pi->ifp, "unknown transceiver inserted.\n"); 7974 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 7975 if_printf(pi->ifp, "unsupported transceiver inserted.\n"); 7976 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 7977 if_printf(pi->ifp, "%s transceiver inserted.\n", 7978 mod_str[pi->mod_type]); 7979 } else { 7980 if_printf(pi->ifp, "transceiver (type %d) inserted.\n", 7981 pi->mod_type); 7982 } 7983 } 7984 7985 void 7986 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason) 7987 { 7988 struct port_info *pi = sc->port[idx]; 7989 struct ifnet *ifp = pi->ifp; 7990 7991 if (link_stat) { 7992 pi->linkdnrc = -1; 7993 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 7994 if_link_state_change(ifp, LINK_STATE_UP); 7995 } else { 7996 if (reason >= 0) 7997 pi->linkdnrc = reason; 7998 if_link_state_change(ifp, LINK_STATE_DOWN); 7999 } 8000 } 8001 8002 void 8003 t4_iterate(void (*func)(struct adapter *, void *), void *arg) 8004 { 8005 struct adapter *sc; 8006 8007 sx_slock(&t4_list_lock); 8008 SLIST_FOREACH(sc, &t4_list, link) { 8009 /* 8010 * func should not make any assumptions about what state sc is 8011 * in - the only guarantee is that sc->sc_lock is a valid lock. 
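* In particular the adapter may be anywhere between probe and detach; * func must take the lock and check sc->flags before relying on any * other state.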
8012 */ 8013 func(sc, arg); 8014 } 8015 sx_sunlock(&t4_list_lock); 8016 } 8017 8018 static int 8019 t4_open(struct cdev *dev, int flags, int type, struct thread *td) 8020 { 8021 return (0); 8022 } 8023 8024 static int 8025 t4_close(struct cdev *dev, int flags, int type, struct thread *td) 8026 { 8027 return (0); 8028 } 8029 8030 static int 8031 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 8032 struct thread *td) 8033 { 8034 int rc; 8035 struct adapter *sc = dev->si_drv1; 8036 8037 rc = priv_check(td, PRIV_DRIVER); 8038 if (rc != 0) 8039 return (rc); 8040 8041 switch (cmd) { 8042 case CHELSIO_T4_GETREG: { 8043 struct t4_reg *edata = (struct t4_reg *)data; 8044 8045 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8046 return (EFAULT); 8047 8048 if (edata->size == 4) 8049 edata->val = t4_read_reg(sc, edata->addr); 8050 else if (edata->size == 8) 8051 edata->val = t4_read_reg64(sc, edata->addr); 8052 else 8053 return (EINVAL); 8054 8055 break; 8056 } 8057 case CHELSIO_T4_SETREG: { 8058 struct t4_reg *edata = (struct t4_reg *)data; 8059 8060 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8061 return (EFAULT); 8062 8063 if (edata->size == 4) { 8064 if (edata->val & 0xffffffff00000000) 8065 return (EINVAL); 8066 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 8067 } else if (edata->size == 8) 8068 t4_write_reg64(sc, edata->addr, edata->val); 8069 else 8070 return (EINVAL); 8071 break; 8072 } 8073 case CHELSIO_T4_REGDUMP: { 8074 struct t4_regdump *regs = (struct t4_regdump *)data; 8075 int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE; 8076 uint8_t *buf; 8077 8078 if (regs->len < reglen) { 8079 regs->len = reglen; /* hint to the caller */ 8080 return (ENOBUFS); 8081 } 8082 8083 regs->len = reglen; 8084 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 8085 t4_get_regs(sc, regs, buf); 8086 rc = copyout(buf, regs->data, reglen); 8087 free(buf, M_CXGBE); 8088 break; 8089 } 8090 case CHELSIO_T4_GET_FILTER_MODE: 8091 rc = get_filter_mode(sc, (uint32_t *)data); 8092 break; 8093 case CHELSIO_T4_SET_FILTER_MODE: 8094 rc = set_filter_mode(sc, *(uint32_t *)data); 8095 break; 8096 case CHELSIO_T4_GET_FILTER: 8097 rc = get_filter(sc, (struct t4_filter *)data); 8098 break; 8099 case CHELSIO_T4_SET_FILTER: 8100 rc = set_filter(sc, (struct t4_filter *)data); 8101 break; 8102 case CHELSIO_T4_DEL_FILTER: 8103 rc = del_filter(sc, (struct t4_filter *)data); 8104 break; 8105 case CHELSIO_T4_GET_SGE_CONTEXT: 8106 rc = get_sge_context(sc, (struct t4_sge_context *)data); 8107 break; 8108 case CHELSIO_T4_LOAD_FW: 8109 rc = load_fw(sc, (struct t4_data *)data); 8110 break; 8111 case CHELSIO_T4_GET_MEM: 8112 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 8113 break; 8114 case CHELSIO_T4_GET_I2C: 8115 rc = read_i2c(sc, (struct t4_i2c_data *)data); 8116 break; 8117 case CHELSIO_T4_CLEAR_STATS: { 8118 int i; 8119 u_int port_id = *(uint32_t *)data; 8120 struct port_info *pi; 8121 8122 if (port_id >= sc->params.nports) 8123 return (EINVAL); 8124 pi = sc->port[port_id]; 8125 8126 /* MAC stats */ 8127 t4_clr_port_stats(sc, pi->tx_chan); 8128 pi->tx_parse_error = 0; 8129 8130 if (pi->flags & PORT_INIT_DONE) { 8131 struct sge_rxq *rxq; 8132 struct sge_txq *txq; 8133 struct sge_wrq *wrq; 8134 8135 for_each_rxq(pi, i, rxq) { 8136 #if defined(INET) || defined(INET6) 8137 rxq->lro.lro_queued = 0; 8138 rxq->lro.lro_flushed = 0; 8139 #endif 8140 rxq->rxcsum = 0; 8141 rxq->vlan_extraction = 0; 8142 } 8143 8144 for_each_txq(pi, i, txq) { 8145 txq->txcsum = 0; 8146 
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;

		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts0_wrs = 0;
				txq->txpkts1_wrs = 0;
				txq->txpkts0_pkts = 0;
				txq->txpkts1_pkts = 0;
				mp_ring_reset_stats(txq->r);
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs_direct = 0;
				wrq->tx_wrs_copied = 0;
			}
#endif
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs_direct = 0;
			wrq->tx_wrs_copied = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}

#ifdef TCP_OFFLOAD
void
t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
    const unsigned int *pgsz_order)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;

	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
	    V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
	    V_HPZ3(pgsz_order[3]));
}

static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(pi->flags & PORT_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
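/*
 * Usage note (informational, not taken from this file): TOE is toggled per
 * interface from userland, e.g. "ifconfig cxgbe0 toe", which eventually
 * lands in toe_capability() above.  As the warning above says, the TOM
 * module must already be loaded ("kldload t4_tom") or the activation fails
 * with EAGAIN.
 */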
/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
#endif
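/*
 * Sketch of how an upper layer driver plugs into the hooks above.  The
 * uld_info fields and the register/unregister calls are the real interface
 * used in this file; the "my_*" names and the module boilerplate around
 * them are illustrative only:
 *
 *	static int
 *	my_activate(struct adapter *sc)
 *	{
 *		... allocate per-adapter ULD state, return 0 on success ...
 *	}
 *
 *	static int
 *	my_deactivate(struct adapter *sc)
 *	{
 *		... tear everything down, return 0 on success ...
 *	}
 *
 *	static struct uld_info my_uld = {
 *		.uld_id = ULD_TOM,
 *		.activate = my_activate,
 *		.deactivate = my_deactivate,
 *	};
 *
 * The ULD module calls t4_register_uld(&my_uld) at MOD_LOAD and
 * t4_unregister_uld(&my_uld) at MOD_UNLOAD.  t4_activate_uld() then invokes
 * ->activate() the first time a port wants the service (see
 * toe_capability() above), bumping the refcount that keeps the module from
 * unregistering while in use.
 */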
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	if (t4_nnmtxq10g < 1)
		t4_nnmtxq10g = min(nc, NNMTXQ_10G);

	if (t4_nnmtxq1g < 1)
		t4_nnmtxq1g = min(nc, NNMTXQ_1G);

	if (t4_nnmrxq10g < 1)
		t4_nnmrxq10g = min(nc, NNMRXQ_10G);

	if (t4_nnmrxq1g < 1)
		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
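/*
 * The t4_* variables adjusted above are loader tunables; tweak_tunables()
 * only fills in values the administrator left unset.  A loader.conf(5)
 * sketch for overriding a few of them, assuming the usual hw.cxgbe prefix
 * this driver registers its tunables under (the specific values are
 * illustrative; note t4_qsize_rxq is rounded up to a multiple of 8 above):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.qsize_txq="2048"
 */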
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
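/*
 * Usage note: with the module glue above, the driver is normally pulled in
 * at boot via loader.conf(5) (if_cxgbe_load="YES") or at runtime with
 * "kldload if_cxgbe".  t4nex/t5nex then attach to the PCI device and create
 * the cxgbe/cxl port devices.  (The if_cxgbe module name is the
 * conventional one for this driver and is not derived from this file.)
 */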