/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
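
/*
 * These are loader tunables, so as a usage sketch (with hypothetical example
 * values) they would be set in /boot/loader.conf before the module loads:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.config_file="uwire"
 */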

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);

#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);

#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);

#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
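
/*
 * For example, the default PAUSE_TX | PAUSE_RX above is 3 (both directions);
 * setting hw.cxgbe.pause_settings="1" would heed incoming PAUSE frames but
 * never emit any.
 */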

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
	uint16_t nnmtxq10g;	/* # of netmap txq's for each 10G port */
	uint16_t nnmrxq10g;	/* # of netmap rxq's for each 10G port */
	uint16_t nnmtxq1g;	/* # of netmap txq's for each 1G port */
	uint16_t nnmrxq1g;	/* # of netmap rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 x 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
};
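
/*
 * t4_probe() and t5_probe() below match pci_get_device() against these
 * tables; e.g. a device id of 0x5401 attaches to t5nex as "Chelsio T520-CR".
 */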

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	const char *pcie_ts;

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	sc->traceq = -1;
	/* The lock name must be formatted before mtx_init() gets to use it. */
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within
	 * PCI passthrough environments too, where pci_get_function() could
	 * return a different PF# depending on the passthrough configuration.
	 * We need to use the real PF# in all our communication with the
	 * firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.requested_fc |= t4_pause_settings;
		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.fc |= t4_pause_settings;

		rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
		if (rc != 0) {
			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
	s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->flags |= iaq.intr_flags_10g;
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->flags |= iaq.intr_flags_1g;
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		if (pi->ntxq > 1)
			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
		else
			pi->rsrv_noflowq = 0;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;
#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
#ifdef DEV_NETMAP
		pi->first_nm_rxq = nm_rqidx;
		pi->first_nm_txq = nm_tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nnmrxq = iaq.nnmrxq10g;
			pi->nnmtxq = iaq.nnmtxq10g;
		} else {
			pi->nnmrxq = iaq.nnmrxq1g;
			pi->nnmtxq = iaq.nnmtxq1g;
		}
		nm_rqidx += pi->nnmrxq;
		nm_tqidx += pi->nnmtxq;
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	switch (sc->params.pci.speed) {
	case 0x1:
		pcie_ts = "2.5";
		break;
	case 0x2:
		pcie_ts = "5.0";
		break;
	case 0x3:
		pcie_ts = "8.0";
		break;
	default:
		pcie_ts = "??";
		break;
	}
	device_printf(dev,
	    "PCIe x%d (%s GT/s) (%d), %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, pcie_ts, sc->params.pci.speed,
	    sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);
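
	/*
	 * With hypothetical values (they vary with the card and the vector
	 * allocation) the banner above reads, e.g.:
	 *	t5nex0: PCIe x8 (8.0 GT/s) (3), 2 ports, 18 MSI-X interrupts,
	 *	43 eq, 21 iq
	 */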

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->regwin_lock))
		mtx_destroy(&sc->regwin_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
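
/*
 * Everything in T4_CAP is also enabled by default (T4_CAP_ENABLE); IFCAP_TOE
 * is advertised separately in cxgbe_attach() below, and only on
 * offload-capable adapters.
 */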

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;
	char *s;
	int n, o;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi, &pi->media);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

	n = 128;
	s = malloc(n, M_CXGBE, M_WAITOK);
	o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
	MPASS(n > o);
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
		    pi->nofldtxq, pi->nofldrxq);
		MPASS(n > o);
	}
#endif
#ifdef DEV_NETMAP
	o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
	    pi->nnmrxq);
	MPASS(n > o);
#endif
	device_printf(dev, "%s\n", s);
	free(s, M_CXGBE);

#ifdef DEV_NETMAP
	/* nm_media handled here to keep implementation private to this file */
	ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi, &pi->nm_media);
	create_netmap_ifnet(pi);	/* logs errors if something fails */
#endif
	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

#ifdef DEV_NETMAP
	/* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
	destroy_netmap_ifnet(pi);
#endif

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

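	/*
	 * The SIOCSIFFLAGS handler below toggles between a non-sleeping
	 * attempt (HOLD_LOCK) and a sleeping one: update_mac_settings() runs
	 * with the lock held, while cxgbe_init/uninit_synchronized() need to
	 * sleep, so the handler restarts itself via redo_sifflags whenever it
	 * finds itself in the wrong mode.
	 */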
	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, pi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(pi);
			}
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(pi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

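	/*
	 * The SIOCGI2C handler below only allows reads from the two standard
	 * SFF-8472 module addresses: 0xA0 (identification/EEPROM) and 0xA2
	 * (diagnostics).
	 */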
	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(&m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	txq = &sc->sge.txq[pi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq)) +
		    pi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}
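/*
 * A worked example of the txq selection above: with ntxq = 4 and
 * rsrv_noflowq = 1, packets that carry a flowid hash onto the port's txq
 * 1..3, and everything without a hash goes to the reserved txq 0.
 */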
static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_ENABLED;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
		}
	}
	if_qflush(ifp);
}

static uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames - s->rx_pause);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames - s->tx_pause);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets - s->rx_pause * 64);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets - s->tx_pause * 64);

	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames - s->rx_pause);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames - s->tx_pause);

	case IFCOUNTER_IQDROPS:
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (pi->flags & PORT_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(pi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);
	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}
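/*
 * Note on the byte counters above: the MAC's octet counters include PAUSE
 * frames, and a PAUSE frame is 64 bytes on the wire, which is why 64 bytes
 * per PAUSE frame are backed out of the interface byte counts.
 */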
static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia *media = NULL;
	struct ifmedia_entry *cur;
	int speed = pi->link_cfg.speed;
#ifdef INVARIANTS
	int data = (pi->port_type << 8) | pi->mod_type;
#endif

	if (ifp == pi->ifp)
		media = &pi->media;
#ifdef DEV_NETMAP
	else if (ifp == pi->nm_ifp)
		media = &pi->nm_media;
#endif
	MPASS(media != NULL);

	cur = media->ifm_cur;
	MPASS(cur->ifm_data == data);

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}
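/*
 * Example of the range check above: with F_EDRAM0_ENABLE set and an EDRAM0
 * BAR of base 0 and size 128 (both in MB units, hence the << 20), any
 * addr/len pair that lies entirely inside [0, 128MB) validates against that
 * region; a range that straddles a region boundary falls through to EFAULT.
 */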
static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}

static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}
1906 */ 1907 static uint32_t 1908 position_memwin(struct adapter *sc, int n, uint32_t addr) 1909 { 1910 uint32_t start, pf; 1911 uint32_t reg; 1912 1913 KASSERT(n >= 0 && n <= 3, 1914 ("%s: invalid window %d.", __func__, n)); 1915 KASSERT((addr & 3) == 0, 1916 ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr)); 1917 1918 if (is_t4(sc)) { 1919 pf = 0; 1920 start = addr & ~0xf; /* start must be 16B aligned */ 1921 } else { 1922 pf = V_PFNUM(sc->pf); 1923 start = addr & ~0x7f; /* start must be 128B aligned */ 1924 } 1925 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n); 1926 1927 t4_write_reg(sc, reg, start | pf); 1928 t4_read_reg(sc, reg); 1929 1930 return (addr - start); 1931 } 1932 1933 static int 1934 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 1935 struct intrs_and_queues *iaq) 1936 { 1937 int rc, itype, navail, nrxq10g, nrxq1g, n; 1938 int nofldrxq10g = 0, nofldrxq1g = 0; 1939 int nnmrxq10g = 0, nnmrxq1g = 0; 1940 1941 bzero(iaq, sizeof(*iaq)); 1942 1943 iaq->ntxq10g = t4_ntxq10g; 1944 iaq->ntxq1g = t4_ntxq1g; 1945 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 1946 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 1947 iaq->rsrv_noflowq = t4_rsrv_noflowq; 1948 #ifdef TCP_OFFLOAD 1949 if (is_offload(sc)) { 1950 iaq->nofldtxq10g = t4_nofldtxq10g; 1951 iaq->nofldtxq1g = t4_nofldtxq1g; 1952 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 1953 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 1954 } 1955 #endif 1956 #ifdef DEV_NETMAP 1957 iaq->nnmtxq10g = t4_nnmtxq10g; 1958 iaq->nnmtxq1g = t4_nnmtxq1g; 1959 iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g; 1960 iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g; 1961 #endif 1962 1963 for (itype = INTR_MSIX; itype; itype >>= 1) { 1964 1965 if ((itype & t4_intr_types) == 0) 1966 continue; /* not allowed */ 1967 1968 if (itype == INTR_MSIX) 1969 navail = pci_msix_count(sc->dev); 1970 else if (itype == INTR_MSI) 1971 navail = pci_msi_count(sc->dev); 1972 else 1973 navail = 1; 1974 restart: 1975 if (navail == 0) 1976 continue; 1977 1978 iaq->intr_type = itype; 1979 iaq->intr_flags_10g = 0; 1980 iaq->intr_flags_1g = 0; 1981 1982 /* 1983 * Best option: an interrupt vector for errors, one for the 1984 * firmware event queue, and one for every rxq (NIC, TOE, and 1985 * netmap). 1986 */ 1987 iaq->nirq = T4_EXTRA_INTR; 1988 iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g); 1989 iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g); 1990 if (iaq->nirq <= navail && 1991 (itype != INTR_MSI || powerof2(iaq->nirq))) { 1992 iaq->intr_flags_10g = INTR_ALL; 1993 iaq->intr_flags_1g = INTR_ALL; 1994 goto allocate; 1995 } 1996 1997 /* 1998 * Second best option: a vector for errors, one for the firmware 1999 * event queue, and vectors for either all the NIC rx queues or 2000 * all the TOE rx queues. The queues that don't get vectors 2001 * will forward their interrupts to those that do. 2002 * 2003 * Note: netmap rx queues cannot be created early and so they 2004 * can't be setup to receive forwarded interrupts for others. 
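 *
 * To make the accounting concrete (hypothetical counts): with two 10G
 * ports, nrxq10g = 8, and nofldrxq10g = 2, the best option above needs
 * T4_EXTRA_INTR + 2 * 10 vectors while this one needs T4_EXTRA_INTR +
 * 2 * 8, with each port's two TOE queues forwarding to NIC queues that
 * own vectors.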
*/
		iaq->nirq = T4_EXTRA_INTR;
		if (nrxq10g >= nofldrxq10g) {
			iaq->intr_flags_10g = INTR_RXQ;
			iaq->nirq += n10g * nrxq10g;
#ifdef DEV_NETMAP
			iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
#endif
		} else {
			iaq->intr_flags_10g = INTR_OFLD_RXQ;
			iaq->nirq += n10g * nofldrxq10g;
#ifdef DEV_NETMAP
			iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
#endif
		}
		if (nrxq1g >= nofldrxq1g) {
			iaq->intr_flags_1g = INTR_RXQ;
			iaq->nirq += n1g * nrxq1g;
#ifdef DEV_NETMAP
			iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
#endif
		} else {
			iaq->intr_flags_1g = INTR_OFLD_RXQ;
			iaq->nirq += n1g * nofldrxq1g;
#ifdef DEV_NETMAP
			iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
#endif
		}
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for the
		 * firmware event queue, and at least one per port.  At this
		 * point we know we'll have to downsize nrxq and/or nofldrxq
		 * and/or nnmrxq to fit what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
				    INTR_RXQ : INTR_OFLD_RXQ;

				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
#ifdef DEV_NETMAP
				iaq->nnmrxq10g = min(n, nnmrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
				    INTR_RXQ : INTR_OFLD_RXQ;

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
#ifdef DEV_NETMAP
				iaq->nnmrxq1g = min(n, nnmrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
		iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif
#ifdef DEV_NETMAP
		iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

		device_printf(sc->dev,
		    "failed to allocate vectors: type=%d, rc=%d, req=%d, "
		    "rcvd=%d\n", itype, rc, iaq->nirq, navail);
	}

	device_printf(sc->dev,
	    "failed to find a usable interrupt type. 
" 2137 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2138 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2139 2140 return (ENXIO); 2141 } 2142 2143 #define FW_VERSION(chip) ( \ 2144 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2145 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2146 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2147 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2148 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2149 2150 struct fw_info { 2151 uint8_t chip; 2152 char *kld_name; 2153 char *fw_mod_name; 2154 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2155 } fw_info[] = { 2156 { 2157 .chip = CHELSIO_T4, 2158 .kld_name = "t4fw_cfg", 2159 .fw_mod_name = "t4fw", 2160 .fw_hdr = { 2161 .chip = FW_HDR_CHIP_T4, 2162 .fw_ver = htobe32_const(FW_VERSION(T4)), 2163 .intfver_nic = FW_INTFVER(T4, NIC), 2164 .intfver_vnic = FW_INTFVER(T4, VNIC), 2165 .intfver_ofld = FW_INTFVER(T4, OFLD), 2166 .intfver_ri = FW_INTFVER(T4, RI), 2167 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2168 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2169 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2170 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2171 }, 2172 }, { 2173 .chip = CHELSIO_T5, 2174 .kld_name = "t5fw_cfg", 2175 .fw_mod_name = "t5fw", 2176 .fw_hdr = { 2177 .chip = FW_HDR_CHIP_T5, 2178 .fw_ver = htobe32_const(FW_VERSION(T5)), 2179 .intfver_nic = FW_INTFVER(T5, NIC), 2180 .intfver_vnic = FW_INTFVER(T5, VNIC), 2181 .intfver_ofld = FW_INTFVER(T5, OFLD), 2182 .intfver_ri = FW_INTFVER(T5, RI), 2183 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2184 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2185 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2186 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2187 }, 2188 } 2189 }; 2190 2191 static struct fw_info * 2192 find_fw_info(int chip) 2193 { 2194 int i; 2195 2196 for (i = 0; i < nitems(fw_info); i++) { 2197 if (fw_info[i].chip == chip) 2198 return (&fw_info[i]); 2199 } 2200 return (NULL); 2201 } 2202 2203 /* 2204 * Is the given firmware API compatible with the one the driver was compiled 2205 * with? 2206 */ 2207 static int 2208 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2209 { 2210 2211 /* short circuit if it's the exact same firmware version */ 2212 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2213 return (1); 2214 2215 /* 2216 * XXX: Is this too conservative? Perhaps I should limit this to the 2217 * features that are supported in the driver. 2218 */ 2219 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2220 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2221 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2222 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2223 return (1); 2224 #undef SAME_INTF 2225 2226 return (0); 2227 } 2228 2229 /* 2230 * The firmware in the KLD is usable, but should it be installed? This routine 2231 * explains itself in detail if it indicates the KLD firmware should be 2232 * installed. 
2233 */ 2234 static int 2235 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2236 { 2237 const char *reason; 2238 2239 if (!card_fw_usable) { 2240 reason = "incompatible or unusable"; 2241 goto install; 2242 } 2243 2244 if (k > c) { 2245 reason = "older than the version bundled with this driver"; 2246 goto install; 2247 } 2248 2249 if (t4_fw_install == 2 && k != c) { 2250 reason = "different than the version bundled with this driver"; 2251 goto install; 2252 } 2253 2254 return (0); 2255 2256 install: 2257 if (t4_fw_install == 0) { 2258 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2259 "but the driver is prohibited from installing a different " 2260 "firmware on the card.\n", 2261 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2262 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2263 2264 return (0); 2265 } 2266 2267 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2268 "installing firmware %u.%u.%u.%u on card.\n", 2269 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2270 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2271 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2272 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2273 2274 return (1); 2275 } 2276 /* 2277 * Establish contact with the firmware and determine if we are the master driver 2278 * or not, and whether we are responsible for chip initialization. 2279 */ 2280 static int 2281 prep_firmware(struct adapter *sc) 2282 { 2283 const struct firmware *fw = NULL, *default_cfg; 2284 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2285 enum dev_state state; 2286 struct fw_info *fw_info; 2287 struct fw_hdr *card_fw; /* fw on the card */ 2288 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2289 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2290 against */ 2291 2292 /* Contact firmware. */ 2293 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2294 if (rc < 0 || state == DEV_STATE_ERR) { 2295 rc = -rc; 2296 device_printf(sc->dev, 2297 "failed to connect to the firmware: %d, %d.\n", rc, state); 2298 return (rc); 2299 } 2300 pf = rc; 2301 if (pf == sc->mbox) 2302 sc->flags |= MASTER_PF; 2303 else if (state == DEV_STATE_UNINIT) { 2304 /* 2305 * We didn't get to be the master so we definitely won't be 2306 * configuring the chip. It's a bug if someone else hasn't 2307 * configured it already. 2308 */ 2309 device_printf(sc->dev, "couldn't be master(%d), " 2310 "device not already initialized either(%d).\n", rc, state); 2311 return (EDOOFUS); 2312 } 2313 2314 /* This is the firmware whose headers the driver was compiled against */ 2315 fw_info = find_fw_info(chip_id(sc)); 2316 if (fw_info == NULL) { 2317 device_printf(sc->dev, 2318 "unable to look up firmware information for chip %d.\n", 2319 chip_id(sc)); 2320 return (EINVAL); 2321 } 2322 drv_fw = &fw_info->fw_hdr; 2323 2324 /* 2325 * The firmware KLD contains many modules. The KLD name is also the 2326 * name of the module that contains the default config file. 
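 * For a T4 card, for instance, the KLD is t4fw_cfg: it provides the
 * default config module "t4fw_cfg" as well as the per-profile modules
 * (named t4fw_cfg_<profile>) that partition_resources() may ask for later.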
2327 */ 2328 default_cfg = firmware_get(fw_info->kld_name); 2329 2330 /* Read the header of the firmware on the card */ 2331 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2332 rc = -t4_read_flash(sc, FLASH_FW_START, 2333 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2334 if (rc == 0) 2335 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2336 else { 2337 device_printf(sc->dev, 2338 "Unable to read card's firmware header: %d\n", rc); 2339 card_fw_usable = 0; 2340 } 2341 2342 /* This is the firmware in the KLD */ 2343 fw = firmware_get(fw_info->fw_mod_name); 2344 if (fw != NULL) { 2345 kld_fw = (const void *)fw->data; 2346 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2347 } else { 2348 kld_fw = NULL; 2349 kld_fw_usable = 0; 2350 } 2351 2352 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2353 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2354 /* 2355 * Common case: the firmware on the card is an exact match and 2356 * the KLD is an exact match too, or the KLD is 2357 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2358 * here -- use cxgbetool loadfw if you want to reinstall the 2359 * same firmware as the one on the card. 2360 */ 2361 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2362 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2363 be32toh(card_fw->fw_ver))) { 2364 2365 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2366 if (rc != 0) { 2367 device_printf(sc->dev, 2368 "failed to install firmware: %d\n", rc); 2369 goto done; 2370 } 2371 2372 /* Installed successfully, update the cached header too. */ 2373 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2374 card_fw_usable = 1; 2375 need_fw_reset = 0; /* already reset as part of load_fw */ 2376 } 2377 2378 if (!card_fw_usable) { 2379 uint32_t d, c, k; 2380 2381 d = ntohl(drv_fw->fw_ver); 2382 c = ntohl(card_fw->fw_ver); 2383 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 2384 2385 device_printf(sc->dev, "Cannot find a usable firmware: " 2386 "fw_install %d, chip state %d, " 2387 "driver compiled with %d.%d.%d.%d, " 2388 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2389 t4_fw_install, state, 2390 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2391 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2392 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2393 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2394 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2395 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2396 rc = EINVAL; 2397 goto done; 2398 } 2399 2400 /* We're using whatever's on the card and it's known to be good. */ 2401 sc->params.fw_vers = ntohl(card_fw->fw_ver); 2402 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 2403 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2404 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2405 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2406 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2407 t4_get_tp_version(sc, &sc->params.tp_vers); 2408 2409 /* Reset device */ 2410 if (need_fw_reset && 2411 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2412 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2413 if (rc != ETIMEDOUT && rc != EIO) 2414 t4_fw_bye(sc, sc->mbox); 2415 goto done; 2416 } 2417 sc->flags |= FW_OK; 2418 2419 rc = get_params__pre_init(sc); 2420 if (rc != 0) 2421 goto done; /* error message displayed already */ 2422 2423 /* Partition adapter resources as specified in the config file. 
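 * The partitioning is done only by the master PF and only when the chip
 * is still uninitialized; everyone else just records the config in use.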
*/ 2424 if (state == DEV_STATE_UNINIT) { 2425 2426 KASSERT(sc->flags & MASTER_PF, 2427 ("%s: trying to change chip settings when not master.", 2428 __func__)); 2429 2430 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2431 if (rc != 0) 2432 goto done; /* error message displayed already */ 2433 2434 t4_tweak_chip_settings(sc); 2435 2436 /* get basic stuff going */ 2437 rc = -t4_fw_initialize(sc, sc->mbox); 2438 if (rc != 0) { 2439 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2440 goto done; 2441 } 2442 } else { 2443 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2444 sc->cfcsum = 0; 2445 } 2446 2447 done: 2448 free(card_fw, M_CXGBE); 2449 if (fw != NULL) 2450 firmware_put(fw, FIRMWARE_UNLOAD); 2451 if (default_cfg != NULL) 2452 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2453 2454 return (rc); 2455 } 2456 2457 #define FW_PARAM_DEV(param) \ 2458 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2459 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2460 #define FW_PARAM_PFVF(param) \ 2461 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2462 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2463 2464 /* 2465 * Partition chip resources for use between various PFs, VFs, etc. 2466 */ 2467 static int 2468 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2469 const char *name_prefix) 2470 { 2471 const struct firmware *cfg = NULL; 2472 int rc = 0; 2473 struct fw_caps_config_cmd caps; 2474 uint32_t mtype, moff, finicsum, cfcsum; 2475 2476 /* 2477 * Figure out what configuration file to use. Pick the default config 2478 * file for the card if the user hasn't specified one explicitly. 2479 */ 2480 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2481 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2482 /* Card specific overrides go here. */ 2483 if (pci_get_device(sc->dev) == 0x440a) 2484 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2485 if (is_fpga(sc)) 2486 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2487 } 2488 2489 /* 2490 * We need to load another module if the profile is anything except 2491 * "default" or "flash". 
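 * For example (assuming the usual hw.cxgbe.config_file tunable backs
 * t4_cfg_file): selecting profile "uwire" on a T5 makes the code below
 * attempt firmware_get("t5fw_cfg_uwire").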
*/
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 2577 "Will try to use the config on the card, if any.\n", 2578 __func__, mtype, moff, cflen, rc); 2579 goto use_config_on_flash; 2580 } 2581 2582 memwin_info(sc, 2, &mw_base, &mw_aperture); 2583 while (cflen) { 2584 off = position_memwin(sc, 2, addr); 2585 n = min(cflen, mw_aperture - off); 2586 for (i = 0; i < n; i += 4) 2587 t4_write_reg(sc, mw_base + off + i, *cfdata++); 2588 cflen -= n; 2589 addr += n; 2590 } 2591 } else { 2592 use_config_on_flash: 2593 mtype = FW_MEMTYPE_FLASH; 2594 moff = t4_flash_cfg_addr(sc); 2595 } 2596 2597 bzero(&caps, sizeof(caps)); 2598 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2599 F_FW_CMD_REQUEST | F_FW_CMD_READ); 2600 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 2601 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 2602 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 2603 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 2604 if (rc != 0) { 2605 device_printf(sc->dev, 2606 "failed to pre-process config file: %d " 2607 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 2608 goto done; 2609 } 2610 2611 finicsum = be32toh(caps.finicsum); 2612 cfcsum = be32toh(caps.cfcsum); 2613 if (finicsum != cfcsum) { 2614 device_printf(sc->dev, 2615 "WARNING: config file checksum mismatch: %08x %08x\n", 2616 finicsum, cfcsum); 2617 } 2618 sc->cfcsum = cfcsum; 2619 2620 #define LIMIT_CAPS(x) do { \ 2621 caps.x &= htobe16(t4_##x##_allowed); \ 2622 } while (0) 2623 2624 /* 2625 * Let the firmware know what features will (not) be used so it can tune 2626 * things accordingly. 2627 */ 2628 LIMIT_CAPS(linkcaps); 2629 LIMIT_CAPS(niccaps); 2630 LIMIT_CAPS(toecaps); 2631 LIMIT_CAPS(rdmacaps); 2632 LIMIT_CAPS(iscsicaps); 2633 LIMIT_CAPS(fcoecaps); 2634 #undef LIMIT_CAPS 2635 2636 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2637 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 2638 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 2639 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 2640 if (rc != 0) { 2641 device_printf(sc->dev, 2642 "failed to process config file: %d.\n", rc); 2643 } 2644 done: 2645 if (cfg != NULL) 2646 firmware_put(cfg, FIRMWARE_UNLOAD); 2647 return (rc); 2648 } 2649 2650 /* 2651 * Retrieve parameters that are needed (or nice to have) very early. 2652 */ 2653 static int 2654 get_params__pre_init(struct adapter *sc) 2655 { 2656 int rc; 2657 uint32_t param[2], val[2]; 2658 struct fw_devlog_cmd cmd; 2659 struct devlog_params *dlog = &sc->params.devlog; 2660 2661 param[0] = FW_PARAM_DEV(PORTVEC); 2662 param[1] = FW_PARAM_DEV(CCLK); 2663 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 2664 if (rc != 0) { 2665 device_printf(sc->dev, 2666 "failed to query parameters (pre_init): %d.\n", rc); 2667 return (rc); 2668 } 2669 2670 sc->params.portvec = val[0]; 2671 sc->params.nports = bitcount32(val[0]); 2672 sc->params.vpd.cclk = val[1]; 2673 2674 /* Read device log parameters. 
*/
	bzero(&cmd, sizeof(cmd));
	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = be32toh(cmd.memsize_devlog);
	}

	return (rc);
}

/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;
	sc->params.ftid_min = val[2];
	sc->params.ftid_max = val[3];
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;
	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
	    ("%s: L2 table size (%u) larger than expected (%u)",
	    __func__, sc->vres.l2t.size, L2T_SIZE));

	/* get capabilities */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

#define READ_CAPS(x) do { \
	sc->x = htobe16(caps.x); \
} while (0)
	READ_CAPS(linkcaps);
	READ_CAPS(niccaps);
	READ_CAPS(toecaps);
	READ_CAPS(rdmacaps);
	READ_CAPS(iscsicaps);
	READ_CAPS(fcoecaps);

	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query NIC parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.etid_base = val[0];
		sc->params.etid_min = val[0];
		sc->tids.netids = val[1] - val[0] + 1;
		sc->params.netids = sc->tids.netids;
		sc->params.eo_wr_cred = val[2];
		sc->params.ethoffload = 1;
	}

	if (sc->toecaps) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (sc->rdmacaps) {
		param[0] = FW_PARAM_PFVF(STAG_START);
		param[1] = FW_PARAM_PFVF(STAG_END);
		param[2] = FW_PARAM_PFVF(RQ_START);
		param[3] = FW_PARAM_PFVF(RQ_END);
		param[4] = FW_PARAM_PFVF(PBL_START);
		param[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(1): %d.\n", rc);
			return (rc);
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;

		param[0] = FW_PARAM_PFVF(SQRQ_START);
		param[1] = FW_PARAM_PFVF(SQRQ_END);
		param[2] = FW_PARAM_PFVF(CQ_START);
		param[3] = FW_PARAM_PFVF(CQ_END);
		param[4] = FW_PARAM_PFVF(OCQ_START);
		param[5] = FW_PARAM_PFVF(OCQ_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(2): %d.\n", rc);
			return (rc);
		}
		sc->vres.qp.start = val[0];
		sc->vres.qp.size = val[1] - val[0] + 1;
		sc->vres.cq.start = val[2];
		sc->vres.cq.size = val[3] - val[2] + 1;
		sc->vres.ocq.start = val[4];
		sc->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (sc->iscsicaps) {
		param[0] = FW_PARAM_PFVF(ISCSI_START);
		param[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			return (rc);
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}

	/*
	 * We've got the params we wanted to query via the firmware.  Now grab
	 * some others directly from the chip.
	 */
	rc = t4_read_chip_settings(sc);

	return (rc);
}

static int
set_params__post_init(struct adapter *sc)
{
	uint32_t param, val;

	/* ask for encapsulated CPLs */
	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val = 1;
	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);

	return (0);
}

#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

static void
t4_set_desc(struct adapter *sc)
{
	char buf[128];
	struct adapter_params *p = &sc->params;

	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ?
"R" : "", 2879 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec); 2880 2881 device_set_desc_copy(sc->dev, buf); 2882 } 2883 2884 static void 2885 build_medialist(struct port_info *pi, struct ifmedia *media) 2886 { 2887 int data, m; 2888 2889 PORT_LOCK(pi); 2890 2891 ifmedia_removeall(media); 2892 2893 m = IFM_ETHER | IFM_FDX; 2894 data = (pi->port_type << 8) | pi->mod_type; 2895 2896 switch(pi->port_type) { 2897 case FW_PORT_TYPE_BT_XFI: 2898 case FW_PORT_TYPE_BT_XAUI: 2899 ifmedia_add(media, m | IFM_10G_T, data, NULL); 2900 /* fall through */ 2901 2902 case FW_PORT_TYPE_BT_SGMII: 2903 ifmedia_add(media, m | IFM_1000_T, data, NULL); 2904 ifmedia_add(media, m | IFM_100_TX, data, NULL); 2905 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL); 2906 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 2907 break; 2908 2909 case FW_PORT_TYPE_CX4: 2910 ifmedia_add(media, m | IFM_10G_CX4, data, NULL); 2911 ifmedia_set(media, m | IFM_10G_CX4); 2912 break; 2913 2914 case FW_PORT_TYPE_QSFP_10G: 2915 case FW_PORT_TYPE_SFP: 2916 case FW_PORT_TYPE_FIBER_XFI: 2917 case FW_PORT_TYPE_FIBER_XAUI: 2918 switch (pi->mod_type) { 2919 2920 case FW_PORT_MOD_TYPE_LR: 2921 ifmedia_add(media, m | IFM_10G_LR, data, NULL); 2922 ifmedia_set(media, m | IFM_10G_LR); 2923 break; 2924 2925 case FW_PORT_MOD_TYPE_SR: 2926 ifmedia_add(media, m | IFM_10G_SR, data, NULL); 2927 ifmedia_set(media, m | IFM_10G_SR); 2928 break; 2929 2930 case FW_PORT_MOD_TYPE_LRM: 2931 ifmedia_add(media, m | IFM_10G_LRM, data, NULL); 2932 ifmedia_set(media, m | IFM_10G_LRM); 2933 break; 2934 2935 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 2936 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 2937 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL); 2938 ifmedia_set(media, m | IFM_10G_TWINAX); 2939 break; 2940 2941 case FW_PORT_MOD_TYPE_NONE: 2942 m &= ~IFM_FDX; 2943 ifmedia_add(media, m | IFM_NONE, data, NULL); 2944 ifmedia_set(media, m | IFM_NONE); 2945 break; 2946 2947 case FW_PORT_MOD_TYPE_NA: 2948 case FW_PORT_MOD_TYPE_ER: 2949 default: 2950 device_printf(pi->dev, 2951 "unknown port_type (%d), mod_type (%d)\n", 2952 pi->port_type, pi->mod_type); 2953 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2954 ifmedia_set(media, m | IFM_UNKNOWN); 2955 break; 2956 } 2957 break; 2958 2959 case FW_PORT_TYPE_QSFP: 2960 switch (pi->mod_type) { 2961 2962 case FW_PORT_MOD_TYPE_LR: 2963 ifmedia_add(media, m | IFM_40G_LR4, data, NULL); 2964 ifmedia_set(media, m | IFM_40G_LR4); 2965 break; 2966 2967 case FW_PORT_MOD_TYPE_SR: 2968 ifmedia_add(media, m | IFM_40G_SR4, data, NULL); 2969 ifmedia_set(media, m | IFM_40G_SR4); 2970 break; 2971 2972 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 2973 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 2974 ifmedia_add(media, m | IFM_40G_CR4, data, NULL); 2975 ifmedia_set(media, m | IFM_40G_CR4); 2976 break; 2977 2978 case FW_PORT_MOD_TYPE_NONE: 2979 m &= ~IFM_FDX; 2980 ifmedia_add(media, m | IFM_NONE, data, NULL); 2981 ifmedia_set(media, m | IFM_NONE); 2982 break; 2983 2984 default: 2985 device_printf(pi->dev, 2986 "unknown port_type (%d), mod_type (%d)\n", 2987 pi->port_type, pi->mod_type); 2988 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2989 ifmedia_set(media, m | IFM_UNKNOWN); 2990 break; 2991 } 2992 break; 2993 2994 default: 2995 device_printf(pi->dev, 2996 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 2997 pi->mod_type); 2998 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2999 ifmedia_set(media, m | IFM_UNKNOWN); 3000 break; 3001 } 3002 3003 PORT_UNLOCK(pi); 3004 } 3005 3006 #define FW_MAC_EXACT_CHUNK 7 3007 3008 /* 3009 * Program the port's XGMAC 
based on parameters in ifnet. The caller also 3010 * indicates which parameters should be programmed (the rest are left alone). 3011 */ 3012 int 3013 update_mac_settings(struct ifnet *ifp, int flags) 3014 { 3015 int rc = 0; 3016 struct port_info *pi = ifp->if_softc; 3017 struct adapter *sc = pi->adapter; 3018 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3019 uint16_t viid = 0xffff; 3020 int16_t *xact_addr_filt = NULL; 3021 3022 ASSERT_SYNCHRONIZED_OP(sc); 3023 KASSERT(flags, ("%s: not told what to update.", __func__)); 3024 3025 if (ifp == pi->ifp) { 3026 viid = pi->viid; 3027 xact_addr_filt = &pi->xact_addr_filt; 3028 } 3029 #ifdef DEV_NETMAP 3030 else if (ifp == pi->nm_ifp) { 3031 viid = pi->nm_viid; 3032 xact_addr_filt = &pi->nm_xact_addr_filt; 3033 } 3034 #endif 3035 if (flags & XGMAC_MTU) 3036 mtu = ifp->if_mtu; 3037 3038 if (flags & XGMAC_PROMISC) 3039 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3040 3041 if (flags & XGMAC_ALLMULTI) 3042 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3043 3044 if (flags & XGMAC_VLANEX) 3045 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 3046 3047 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3048 rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti, 3049 1, vlanex, false); 3050 if (rc) { 3051 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3052 rc); 3053 return (rc); 3054 } 3055 } 3056 3057 if (flags & XGMAC_UCADDR) { 3058 uint8_t ucaddr[ETHER_ADDR_LEN]; 3059 3060 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3061 rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr, 3062 true, true); 3063 if (rc < 0) { 3064 rc = -rc; 3065 if_printf(ifp, "change_mac failed: %d\n", rc); 3066 return (rc); 3067 } else { 3068 *xact_addr_filt = rc; 3069 rc = 0; 3070 } 3071 } 3072 3073 if (flags & XGMAC_MCADDRS) { 3074 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3075 int del = 1; 3076 uint64_t hash = 0; 3077 struct ifmultiaddr *ifma; 3078 int i = 0, j; 3079 3080 if_maddr_rlock(ifp); 3081 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3082 if (ifma->ifma_addr->sa_family != AF_LINK) 3083 continue; 3084 mcaddr[i] = 3085 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3086 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3087 i++; 3088 3089 if (i == FW_MAC_EXACT_CHUNK) { 3090 rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, 3091 i, mcaddr, NULL, &hash, 0); 3092 if (rc < 0) { 3093 rc = -rc; 3094 for (j = 0; j < i; j++) { 3095 if_printf(ifp, 3096 "failed to add mc address" 3097 " %02x:%02x:%02x:" 3098 "%02x:%02x:%02x rc=%d\n", 3099 mcaddr[j][0], mcaddr[j][1], 3100 mcaddr[j][2], mcaddr[j][3], 3101 mcaddr[j][4], mcaddr[j][5], 3102 rc); 3103 } 3104 goto mcfail; 3105 } 3106 del = 0; 3107 i = 0; 3108 } 3109 } 3110 if (i > 0) { 3111 rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i, 3112 mcaddr, NULL, &hash, 0); 3113 if (rc < 0) { 3114 rc = -rc; 3115 for (j = 0; j < i; j++) { 3116 if_printf(ifp, 3117 "failed to add mc address" 3118 " %02x:%02x:%02x:" 3119 "%02x:%02x:%02x rc=%d\n", 3120 mcaddr[j][0], mcaddr[j][1], 3121 mcaddr[j][2], mcaddr[j][3], 3122 mcaddr[j][4], mcaddr[j][5], 3123 rc); 3124 } 3125 goto mcfail; 3126 } 3127 } 3128 3129 rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0); 3130 if (rc != 0) 3131 if_printf(ifp, "failed to set mc address hash: %d", rc); 3132 mcfail: 3133 if_maddr_runlock(ifp); 3134 } 3135 3136 return (rc); 3137 } 3138 3139 /* 3140 * {begin|end}_synchronized_op must be called from the same thread. 
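 *
 * A typical usage sketch ("t4abc" is just an arbitrary wmesg; the flags
 * are the ones interpreted below):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4abc");
 *	if (rc != 0)
 *		return (rc);
 *	... operate on the adapter/port ...
 *	end_synchronized_op(sc, 0);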
*/
int
begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
    char *wmesg)
{
	int rc, pri;

#ifdef WITNESS
	/* the caller thinks it's ok to sleep, but is it really? */
	if (flags & SLEEP_OK)
		pause("t4slptst", 1);
#endif

	if (flags & INTR_OK)
		pri = PCATCH;
	else
		pri = 0;

	ADAPTER_LOCK(sc);
	for (;;) {

		if (pi && IS_DOOMED(pi)) {
			rc = ENXIO;
			goto done;
		}

		if (!IS_BUSY(sc)) {
			rc = 0;
			break;
		}

		if (!(flags & SLEEP_OK)) {
			rc = EBUSY;
			goto done;
		}

		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
			rc = EINTR;
			goto done;
		}
	}

	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = wmesg;
	sc->last_op_thr = curthread;
#endif

done:
	if (!(flags & HOLD_LOCK) || rc)
		ADAPTER_UNLOCK(sc);

	return (rc);
}

/*
 * {begin|end}_synchronized_op must be called from the same thread.
 */
void
end_synchronized_op(struct adapter *sc, int flags)
{

	if (flags & LOCK_HELD)
		ADAPTER_LOCK_ASSERT_OWNED(sc);
	else
		ADAPTER_LOCK(sc);

	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);
}

static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0, i;
	struct sge_txq *txq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
	 * if this changes.
	 */

	for_each_txq(pi, i, txq) {
		TXQ_LOCK(txq);
		txq->eq.flags |= EQ_ENABLED;
		TXQ_UNLOCK(txq);
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0) {
		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
		t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
		    V_QUEUENUMBER(sc->traceq));
		pi->flags |= HAS_TRACEQ;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);

	return (rc);
}

/*
 * Idempotent.
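 * Safe to call even if the port never finished port_full_init() or has
 * already been brought down.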
3287 */ 3288 static int 3289 cxgbe_uninit_synchronized(struct port_info *pi) 3290 { 3291 struct adapter *sc = pi->adapter; 3292 struct ifnet *ifp = pi->ifp; 3293 int rc, i; 3294 struct sge_txq *txq; 3295 3296 ASSERT_SYNCHRONIZED_OP(sc); 3297 3298 if (!(pi->flags & PORT_INIT_DONE)) { 3299 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3300 ("uninited port is running")); 3301 return (0); 3302 } 3303 3304 /* 3305 * Disable the VI so that all its data in either direction is discarded 3306 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 3307 * tick) intact as the TP can deliver negative advice or data that it's 3308 * holding in its RAM (for an offloaded connection) even after the VI is 3309 * disabled. 3310 */ 3311 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false); 3312 if (rc) { 3313 if_printf(ifp, "disable_vi failed: %d\n", rc); 3314 return (rc); 3315 } 3316 3317 for_each_txq(pi, i, txq) { 3318 TXQ_LOCK(txq); 3319 txq->eq.flags &= ~EQ_ENABLED; 3320 TXQ_UNLOCK(txq); 3321 } 3322 3323 clrbit(&sc->open_device_map, pi->port_id); 3324 PORT_LOCK(pi); 3325 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3326 PORT_UNLOCK(pi); 3327 3328 pi->link_cfg.link_ok = 0; 3329 pi->link_cfg.speed = 0; 3330 pi->linkdnrc = -1; 3331 t4_os_link_changed(sc, pi->port_id, 0, -1); 3332 3333 return (0); 3334 } 3335 3336 /* 3337 * It is ok for this function to fail midway and return right away. t4_detach 3338 * will walk the entire sc->irq list and clean up whatever is valid. 3339 */ 3340 static int 3341 setup_intr_handlers(struct adapter *sc) 3342 { 3343 int rc, rid, p, q; 3344 char s[8]; 3345 struct irq *irq; 3346 struct port_info *pi; 3347 struct sge_rxq *rxq; 3348 #ifdef TCP_OFFLOAD 3349 struct sge_ofld_rxq *ofld_rxq; 3350 #endif 3351 #ifdef DEV_NETMAP 3352 struct sge_nm_rxq *nm_rxq; 3353 #endif 3354 3355 /* 3356 * Setup interrupts. 3357 */ 3358 irq = &sc->irq[0]; 3359 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3360 if (sc->intr_count == 1) 3361 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3362 3363 /* Multiple interrupts. 
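 * The layout mirrors what cfg_itype_and_nqueues() requested: the first
 * vector is the error interrupt, the second the firmware event queue, and
 * the rest go to the NIC/TOE/netmap rx queues of each port in turn.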
*/ 3364 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3365 ("%s: too few intr.", __func__)); 3366 3367 /* The first one is always error intr */ 3368 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3369 if (rc != 0) 3370 return (rc); 3371 irq++; 3372 rid++; 3373 3374 /* The second one is always the firmware event queue */ 3375 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt"); 3376 if (rc != 0) 3377 return (rc); 3378 irq++; 3379 rid++; 3380 3381 for_each_port(sc, p) { 3382 pi = sc->port[p]; 3383 3384 if (pi->flags & INTR_RXQ) { 3385 for_each_rxq(pi, q, rxq) { 3386 snprintf(s, sizeof(s), "%d.%d", p, q); 3387 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq, 3388 s); 3389 if (rc != 0) 3390 return (rc); 3391 irq++; 3392 rid++; 3393 } 3394 } 3395 #ifdef TCP_OFFLOAD 3396 if (pi->flags & INTR_OFLD_RXQ) { 3397 for_each_ofld_rxq(pi, q, ofld_rxq) { 3398 snprintf(s, sizeof(s), "%d,%d", p, q); 3399 rc = t4_alloc_irq(sc, irq, rid, t4_intr, 3400 ofld_rxq, s); 3401 if (rc != 0) 3402 return (rc); 3403 irq++; 3404 rid++; 3405 } 3406 } 3407 #endif 3408 #ifdef DEV_NETMAP 3409 if (pi->flags & INTR_NM_RXQ) { 3410 for_each_nm_rxq(pi, q, nm_rxq) { 3411 snprintf(s, sizeof(s), "%d-%d", p, q); 3412 rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr, 3413 nm_rxq, s); 3414 if (rc != 0) 3415 return (rc); 3416 irq++; 3417 rid++; 3418 } 3419 } 3420 #endif 3421 } 3422 MPASS(irq == &sc->irq[sc->intr_count]); 3423 3424 return (0); 3425 } 3426 3427 int 3428 adapter_full_init(struct adapter *sc) 3429 { 3430 int rc, i; 3431 3432 ASSERT_SYNCHRONIZED_OP(sc); 3433 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3434 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 3435 ("%s: FULL_INIT_DONE already", __func__)); 3436 3437 /* 3438 * queues that belong to the adapter (not any particular port). 3439 */ 3440 rc = t4_setup_adapter_queues(sc); 3441 if (rc != 0) 3442 goto done; 3443 3444 for (i = 0; i < nitems(sc->tq); i++) { 3445 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 3446 taskqueue_thread_enqueue, &sc->tq[i]); 3447 if (sc->tq[i] == NULL) { 3448 device_printf(sc->dev, 3449 "failed to allocate task queue %d\n", i); 3450 rc = ENOMEM; 3451 goto done; 3452 } 3453 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 3454 device_get_nameunit(sc->dev), i); 3455 } 3456 3457 t4_intr_enable(sc); 3458 sc->flags |= FULL_INIT_DONE; 3459 done: 3460 if (rc != 0) 3461 adapter_full_uninit(sc); 3462 3463 return (rc); 3464 } 3465 3466 int 3467 adapter_full_uninit(struct adapter *sc) 3468 { 3469 int i; 3470 3471 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3472 3473 t4_teardown_adapter_queues(sc); 3474 3475 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 3476 taskqueue_free(sc->tq[i]); 3477 sc->tq[i] = NULL; 3478 } 3479 3480 sc->flags &= ~FULL_INIT_DONE; 3481 3482 return (0); 3483 } 3484 3485 int 3486 port_full_init(struct port_info *pi) 3487 { 3488 struct adapter *sc = pi->adapter; 3489 struct ifnet *ifp = pi->ifp; 3490 uint16_t *rss; 3491 struct sge_rxq *rxq; 3492 int rc, i, j; 3493 3494 ASSERT_SYNCHRONIZED_OP(sc); 3495 KASSERT((pi->flags & PORT_INIT_DONE) == 0, 3496 ("%s: PORT_INIT_DONE already", __func__)); 3497 3498 sysctl_ctx_init(&pi->ctx); 3499 pi->flags |= PORT_SYSCTL_CTX; 3500 3501 /* 3502 * Allocate tx/rx/fl queues for this port. 3503 */ 3504 rc = t4_setup_port_queues(pi); 3505 if (rc != 0) 3506 goto done; /* error message displayed already */ 3507 3508 /* 3509 * Setup RSS for this port. Save a copy of the RSS table for later use. 
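 * The indirection table is filled round-robin with the queues' absolute
 * ids, so with (say) rss_size 64 and 4 rx queues each id appears 16 times.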
3510 */ 3511 rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 3512 for (i = 0; i < pi->rss_size;) { 3513 for_each_rxq(pi, j, rxq) { 3514 rss[i++] = rxq->iq.abs_id; 3515 if (i == pi->rss_size) 3516 break; 3517 } 3518 } 3519 3520 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss, 3521 pi->rss_size); 3522 if (rc != 0) { 3523 if_printf(ifp, "rss_config failed: %d\n", rc); 3524 goto done; 3525 } 3526 3527 pi->rss = rss; 3528 pi->flags |= PORT_INIT_DONE; 3529 done: 3530 if (rc != 0) 3531 port_full_uninit(pi); 3532 3533 return (rc); 3534 } 3535 3536 /* 3537 * Idempotent. 3538 */ 3539 int 3540 port_full_uninit(struct port_info *pi) 3541 { 3542 struct adapter *sc = pi->adapter; 3543 int i; 3544 struct sge_rxq *rxq; 3545 struct sge_txq *txq; 3546 #ifdef TCP_OFFLOAD 3547 struct sge_ofld_rxq *ofld_rxq; 3548 struct sge_wrq *ofld_txq; 3549 #endif 3550 3551 if (pi->flags & PORT_INIT_DONE) { 3552 3553 /* Need to quiesce queues. */ 3554 3555 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 3556 3557 for_each_txq(pi, i, txq) { 3558 quiesce_txq(sc, txq); 3559 } 3560 3561 #ifdef TCP_OFFLOAD 3562 for_each_ofld_txq(pi, i, ofld_txq) { 3563 quiesce_wrq(sc, ofld_txq); 3564 } 3565 #endif 3566 3567 for_each_rxq(pi, i, rxq) { 3568 quiesce_iq(sc, &rxq->iq); 3569 quiesce_fl(sc, &rxq->fl); 3570 } 3571 3572 #ifdef TCP_OFFLOAD 3573 for_each_ofld_rxq(pi, i, ofld_rxq) { 3574 quiesce_iq(sc, &ofld_rxq->iq); 3575 quiesce_fl(sc, &ofld_rxq->fl); 3576 } 3577 #endif 3578 free(pi->rss, M_CXGBE); 3579 } 3580 3581 t4_teardown_port_queues(pi); 3582 pi->flags &= ~PORT_INIT_DONE; 3583 3584 return (0); 3585 } 3586 3587 static void 3588 quiesce_txq(struct adapter *sc, struct sge_txq *txq) 3589 { 3590 struct sge_eq *eq = &txq->eq; 3591 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 3592 3593 (void) sc; /* unused */ 3594 3595 #ifdef INVARIANTS 3596 TXQ_LOCK(txq); 3597 MPASS((eq->flags & EQ_ENABLED) == 0); 3598 TXQ_UNLOCK(txq); 3599 #endif 3600 3601 /* Wait for the mp_ring to empty. */ 3602 while (!mp_ring_is_idle(txq->r)) { 3603 mp_ring_check_drainage(txq->r, 0); 3604 pause("rquiesce", 1); 3605 } 3606 3607 /* Then wait for the hardware to finish. */ 3608 while (spg->cidx != htobe16(eq->pidx)) 3609 pause("equiesce", 1); 3610 3611 /* Finally, wait for the driver to reclaim all descriptors. 
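 * (i.e. until eq->cidx catches up with eq->pidx below)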
*/ 3612 while (eq->cidx != eq->pidx) 3613 pause("dquiesce", 1); 3614 } 3615 3616 static void 3617 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 3618 { 3619 3620 /* XXXTX */ 3621 } 3622 3623 static void 3624 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 3625 { 3626 (void) sc; /* unused */ 3627 3628 /* Synchronize with the interrupt handler */ 3629 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 3630 pause("iqfree", 1); 3631 } 3632 3633 static void 3634 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 3635 { 3636 mtx_lock(&sc->sfl_lock); 3637 FL_LOCK(fl); 3638 fl->flags |= FL_DOOMED; 3639 FL_UNLOCK(fl); 3640 mtx_unlock(&sc->sfl_lock); 3641 3642 callout_drain(&sc->sfl_callout); 3643 KASSERT((fl->flags & FL_STARVING) == 0, 3644 ("%s: still starving", __func__)); 3645 } 3646 3647 static int 3648 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 3649 driver_intr_t *handler, void *arg, char *name) 3650 { 3651 int rc; 3652 3653 irq->rid = rid; 3654 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 3655 RF_SHAREABLE | RF_ACTIVE); 3656 if (irq->res == NULL) { 3657 device_printf(sc->dev, 3658 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 3659 return (ENOMEM); 3660 } 3661 3662 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 3663 NULL, handler, arg, &irq->tag); 3664 if (rc != 0) { 3665 device_printf(sc->dev, 3666 "failed to setup interrupt for rid %d, name %s: %d\n", 3667 rid, name, rc); 3668 } else if (name) 3669 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 3670 3671 return (rc); 3672 } 3673 3674 static int 3675 t4_free_irq(struct adapter *sc, struct irq *irq) 3676 { 3677 if (irq->tag) 3678 bus_teardown_intr(sc->dev, irq->res, irq->tag); 3679 if (irq->res) 3680 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 3681 3682 bzero(irq, sizeof(*irq)); 3683 3684 return (0); 3685 } 3686 3687 static void 3688 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start, 3689 unsigned int end) 3690 { 3691 uint32_t *p = (uint32_t *)(buf + start); 3692 3693 for ( ; start <= end; start += sizeof(uint32_t)) 3694 *p++ = t4_read_reg(sc, start); 3695 } 3696 3697 static void 3698 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 3699 { 3700 int i, n; 3701 const unsigned int *reg_ranges; 3702 static const unsigned int t4_reg_ranges[] = { 3703 0x1008, 0x1108, 3704 0x1180, 0x11b4, 3705 0x11fc, 0x123c, 3706 0x1300, 0x173c, 3707 0x1800, 0x18fc, 3708 0x3000, 0x30d8, 3709 0x30e0, 0x5924, 3710 0x5960, 0x59d4, 3711 0x5a00, 0x5af8, 3712 0x6000, 0x6098, 3713 0x6100, 0x6150, 3714 0x6200, 0x6208, 3715 0x6240, 0x6248, 3716 0x6280, 0x6338, 3717 0x6370, 0x638c, 3718 0x6400, 0x643c, 3719 0x6500, 0x6524, 3720 0x6a00, 0x6a38, 3721 0x6a60, 0x6a78, 3722 0x6b00, 0x6b84, 3723 0x6bf0, 0x6c84, 3724 0x6cf0, 0x6d84, 3725 0x6df0, 0x6e84, 3726 0x6ef0, 0x6f84, 3727 0x6ff0, 0x7084, 3728 0x70f0, 0x7184, 3729 0x71f0, 0x7284, 3730 0x72f0, 0x7384, 3731 0x73f0, 0x7450, 3732 0x7500, 0x7530, 3733 0x7600, 0x761c, 3734 0x7680, 0x76cc, 3735 0x7700, 0x7798, 3736 0x77c0, 0x77fc, 3737 0x7900, 0x79fc, 3738 0x7b00, 0x7c38, 3739 0x7d00, 0x7efc, 3740 0x8dc0, 0x8e1c, 3741 0x8e30, 0x8e78, 3742 0x8ea0, 0x8f6c, 3743 0x8fc0, 0x9074, 3744 0x90fc, 0x90fc, 3745 0x9400, 0x9458, 3746 0x9600, 0x96bc, 3747 0x9800, 0x9808, 3748 0x9820, 0x983c, 3749 0x9850, 0x9864, 3750 0x9c00, 0x9c6c, 3751 0x9c80, 0x9cec, 3752 0x9d00, 0x9d6c, 3753 0x9d80, 0x9dec, 3754 0x9e00, 0x9e6c, 3755 0x9e80, 0x9eec, 3756 0x9f00, 0x9f6c, 3757 0x9f80, 0x9fec, 3758 0xd004, 
0xd03c, 3759 0xdfc0, 0xdfe0, 3760 0xe000, 0xea7c, 3761 0xf000, 0x11110, 3762 0x11118, 0x11190, 3763 0x19040, 0x1906c, 3764 0x19078, 0x19080, 3765 0x1908c, 0x19124, 3766 0x19150, 0x191b0, 3767 0x191d0, 0x191e8, 3768 0x19238, 0x1924c, 3769 0x193f8, 0x19474, 3770 0x19490, 0x194f8, 3771 0x19800, 0x19f30, 3772 0x1a000, 0x1a06c, 3773 0x1a0b0, 0x1a120, 3774 0x1a128, 0x1a138, 3775 0x1a190, 0x1a1c4, 3776 0x1a1fc, 0x1a1fc, 3777 0x1e040, 0x1e04c, 3778 0x1e284, 0x1e28c, 3779 0x1e2c0, 0x1e2c0, 3780 0x1e2e0, 0x1e2e0, 3781 0x1e300, 0x1e384, 3782 0x1e3c0, 0x1e3c8, 3783 0x1e440, 0x1e44c, 3784 0x1e684, 0x1e68c, 3785 0x1e6c0, 0x1e6c0, 3786 0x1e6e0, 0x1e6e0, 3787 0x1e700, 0x1e784, 3788 0x1e7c0, 0x1e7c8, 3789 0x1e840, 0x1e84c, 3790 0x1ea84, 0x1ea8c, 3791 0x1eac0, 0x1eac0, 3792 0x1eae0, 0x1eae0, 3793 0x1eb00, 0x1eb84, 3794 0x1ebc0, 0x1ebc8, 3795 0x1ec40, 0x1ec4c, 3796 0x1ee84, 0x1ee8c, 3797 0x1eec0, 0x1eec0, 3798 0x1eee0, 0x1eee0, 3799 0x1ef00, 0x1ef84, 3800 0x1efc0, 0x1efc8, 3801 0x1f040, 0x1f04c, 3802 0x1f284, 0x1f28c, 3803 0x1f2c0, 0x1f2c0, 3804 0x1f2e0, 0x1f2e0, 3805 0x1f300, 0x1f384, 3806 0x1f3c0, 0x1f3c8, 3807 0x1f440, 0x1f44c, 3808 0x1f684, 0x1f68c, 3809 0x1f6c0, 0x1f6c0, 3810 0x1f6e0, 0x1f6e0, 3811 0x1f700, 0x1f784, 3812 0x1f7c0, 0x1f7c8, 3813 0x1f840, 0x1f84c, 3814 0x1fa84, 0x1fa8c, 3815 0x1fac0, 0x1fac0, 3816 0x1fae0, 0x1fae0, 3817 0x1fb00, 0x1fb84, 3818 0x1fbc0, 0x1fbc8, 3819 0x1fc40, 0x1fc4c, 3820 0x1fe84, 0x1fe8c, 3821 0x1fec0, 0x1fec0, 3822 0x1fee0, 0x1fee0, 3823 0x1ff00, 0x1ff84, 3824 0x1ffc0, 0x1ffc8, 3825 0x20000, 0x2002c, 3826 0x20100, 0x2013c, 3827 0x20190, 0x201c8, 3828 0x20200, 0x20318, 3829 0x20400, 0x20528, 3830 0x20540, 0x20614, 3831 0x21000, 0x21040, 3832 0x2104c, 0x21060, 3833 0x210c0, 0x210ec, 3834 0x21200, 0x21268, 3835 0x21270, 0x21284, 3836 0x212fc, 0x21388, 3837 0x21400, 0x21404, 3838 0x21500, 0x21518, 3839 0x2152c, 0x2153c, 3840 0x21550, 0x21554, 3841 0x21600, 0x21600, 3842 0x21608, 0x21628, 3843 0x21630, 0x2163c, 3844 0x21700, 0x2171c, 3845 0x21780, 0x2178c, 3846 0x21800, 0x21c38, 3847 0x21c80, 0x21d7c, 3848 0x21e00, 0x21e04, 3849 0x22000, 0x2202c, 3850 0x22100, 0x2213c, 3851 0x22190, 0x221c8, 3852 0x22200, 0x22318, 3853 0x22400, 0x22528, 3854 0x22540, 0x22614, 3855 0x23000, 0x23040, 3856 0x2304c, 0x23060, 3857 0x230c0, 0x230ec, 3858 0x23200, 0x23268, 3859 0x23270, 0x23284, 3860 0x232fc, 0x23388, 3861 0x23400, 0x23404, 3862 0x23500, 0x23518, 3863 0x2352c, 0x2353c, 3864 0x23550, 0x23554, 3865 0x23600, 0x23600, 3866 0x23608, 0x23628, 3867 0x23630, 0x2363c, 3868 0x23700, 0x2371c, 3869 0x23780, 0x2378c, 3870 0x23800, 0x23c38, 3871 0x23c80, 0x23d7c, 3872 0x23e00, 0x23e04, 3873 0x24000, 0x2402c, 3874 0x24100, 0x2413c, 3875 0x24190, 0x241c8, 3876 0x24200, 0x24318, 3877 0x24400, 0x24528, 3878 0x24540, 0x24614, 3879 0x25000, 0x25040, 3880 0x2504c, 0x25060, 3881 0x250c0, 0x250ec, 3882 0x25200, 0x25268, 3883 0x25270, 0x25284, 3884 0x252fc, 0x25388, 3885 0x25400, 0x25404, 3886 0x25500, 0x25518, 3887 0x2552c, 0x2553c, 3888 0x25550, 0x25554, 3889 0x25600, 0x25600, 3890 0x25608, 0x25628, 3891 0x25630, 0x2563c, 3892 0x25700, 0x2571c, 3893 0x25780, 0x2578c, 3894 0x25800, 0x25c38, 3895 0x25c80, 0x25d7c, 3896 0x25e00, 0x25e04, 3897 0x26000, 0x2602c, 3898 0x26100, 0x2613c, 3899 0x26190, 0x261c8, 3900 0x26200, 0x26318, 3901 0x26400, 0x26528, 3902 0x26540, 0x26614, 3903 0x27000, 0x27040, 3904 0x2704c, 0x27060, 3905 0x270c0, 0x270ec, 3906 0x27200, 0x27268, 3907 0x27270, 0x27284, 3908 0x272fc, 0x27388, 3909 0x27400, 0x27404, 3910 0x27500, 0x27518, 3911 0x2752c, 0x2753c, 3912 0x27550, 0x27554, 3913 
0x27600, 0x27600, 3914 0x27608, 0x27628, 3915 0x27630, 0x2763c, 3916 0x27700, 0x2771c, 3917 0x27780, 0x2778c, 3918 0x27800, 0x27c38, 3919 0x27c80, 0x27d7c, 3920 0x27e00, 0x27e04 3921 }; 3922 static const unsigned int t5_reg_ranges[] = { 3923 0x1008, 0x1148, 3924 0x1180, 0x11b4, 3925 0x11fc, 0x123c, 3926 0x1280, 0x173c, 3927 0x1800, 0x18fc, 3928 0x3000, 0x3028, 3929 0x3060, 0x30d8, 3930 0x30e0, 0x30fc, 3931 0x3140, 0x357c, 3932 0x35a8, 0x35cc, 3933 0x35ec, 0x35ec, 3934 0x3600, 0x5624, 3935 0x56cc, 0x575c, 3936 0x580c, 0x5814, 3937 0x5890, 0x58bc, 3938 0x5940, 0x59dc, 3939 0x59fc, 0x5a18, 3940 0x5a60, 0x5a9c, 3941 0x5b94, 0x5bfc, 3942 0x6000, 0x6040, 3943 0x6058, 0x614c, 3944 0x7700, 0x7798, 3945 0x77c0, 0x78fc, 3946 0x7b00, 0x7c54, 3947 0x7d00, 0x7efc, 3948 0x8dc0, 0x8de0, 3949 0x8df8, 0x8e84, 3950 0x8ea0, 0x8f84, 3951 0x8fc0, 0x90f8, 3952 0x9400, 0x9470, 3953 0x9600, 0x96f4, 3954 0x9800, 0x9808, 3955 0x9820, 0x983c, 3956 0x9850, 0x9864, 3957 0x9c00, 0x9c6c, 3958 0x9c80, 0x9cec, 3959 0x9d00, 0x9d6c, 3960 0x9d80, 0x9dec, 3961 0x9e00, 0x9e6c, 3962 0x9e80, 0x9eec, 3963 0x9f00, 0x9f6c, 3964 0x9f80, 0xa020, 3965 0xd004, 0xd03c, 3966 0xdfc0, 0xdfe0, 3967 0xe000, 0x11088, 3968 0x1109c, 0x11110, 3969 0x11118, 0x1117c, 3970 0x11190, 0x11204, 3971 0x19040, 0x1906c, 3972 0x19078, 0x19080, 3973 0x1908c, 0x19124, 3974 0x19150, 0x191b0, 3975 0x191d0, 0x191e8, 3976 0x19238, 0x19290, 3977 0x193f8, 0x19474, 3978 0x19490, 0x194cc, 3979 0x194f0, 0x194f8, 3980 0x19c00, 0x19c60, 3981 0x19c94, 0x19e10, 3982 0x19e50, 0x19f34, 3983 0x19f40, 0x19f50, 3984 0x19f90, 0x19fe4, 3985 0x1a000, 0x1a06c, 3986 0x1a0b0, 0x1a120, 3987 0x1a128, 0x1a138, 3988 0x1a190, 0x1a1c4, 3989 0x1a1fc, 0x1a1fc, 3990 0x1e008, 0x1e00c, 3991 0x1e040, 0x1e04c, 3992 0x1e284, 0x1e290, 3993 0x1e2c0, 0x1e2c0, 3994 0x1e2e0, 0x1e2e0, 3995 0x1e300, 0x1e384, 3996 0x1e3c0, 0x1e3c8, 3997 0x1e408, 0x1e40c, 3998 0x1e440, 0x1e44c, 3999 0x1e684, 0x1e690, 4000 0x1e6c0, 0x1e6c0, 4001 0x1e6e0, 0x1e6e0, 4002 0x1e700, 0x1e784, 4003 0x1e7c0, 0x1e7c8, 4004 0x1e808, 0x1e80c, 4005 0x1e840, 0x1e84c, 4006 0x1ea84, 0x1ea90, 4007 0x1eac0, 0x1eac0, 4008 0x1eae0, 0x1eae0, 4009 0x1eb00, 0x1eb84, 4010 0x1ebc0, 0x1ebc8, 4011 0x1ec08, 0x1ec0c, 4012 0x1ec40, 0x1ec4c, 4013 0x1ee84, 0x1ee90, 4014 0x1eec0, 0x1eec0, 4015 0x1eee0, 0x1eee0, 4016 0x1ef00, 0x1ef84, 4017 0x1efc0, 0x1efc8, 4018 0x1f008, 0x1f00c, 4019 0x1f040, 0x1f04c, 4020 0x1f284, 0x1f290, 4021 0x1f2c0, 0x1f2c0, 4022 0x1f2e0, 0x1f2e0, 4023 0x1f300, 0x1f384, 4024 0x1f3c0, 0x1f3c8, 4025 0x1f408, 0x1f40c, 4026 0x1f440, 0x1f44c, 4027 0x1f684, 0x1f690, 4028 0x1f6c0, 0x1f6c0, 4029 0x1f6e0, 0x1f6e0, 4030 0x1f700, 0x1f784, 4031 0x1f7c0, 0x1f7c8, 4032 0x1f808, 0x1f80c, 4033 0x1f840, 0x1f84c, 4034 0x1fa84, 0x1fa90, 4035 0x1fac0, 0x1fac0, 4036 0x1fae0, 0x1fae0, 4037 0x1fb00, 0x1fb84, 4038 0x1fbc0, 0x1fbc8, 4039 0x1fc08, 0x1fc0c, 4040 0x1fc40, 0x1fc4c, 4041 0x1fe84, 0x1fe90, 4042 0x1fec0, 0x1fec0, 4043 0x1fee0, 0x1fee0, 4044 0x1ff00, 0x1ff84, 4045 0x1ffc0, 0x1ffc8, 4046 0x30000, 0x30030, 4047 0x30100, 0x30144, 4048 0x30190, 0x301d0, 4049 0x30200, 0x30318, 4050 0x30400, 0x3052c, 4051 0x30540, 0x3061c, 4052 0x30800, 0x30834, 4053 0x308c0, 0x30908, 4054 0x30910, 0x309ac, 4055 0x30a00, 0x30a2c, 4056 0x30a44, 0x30a50, 4057 0x30a74, 0x30c24, 4058 0x30d00, 0x30d00, 4059 0x30d08, 0x30d14, 4060 0x30d1c, 0x30d20, 4061 0x30d3c, 0x30d50, 4062 0x31200, 0x3120c, 4063 0x31220, 0x31220, 4064 0x31240, 0x31240, 4065 0x31600, 0x3160c, 4066 0x31a00, 0x31a1c, 4067 0x31e00, 0x31e20, 4068 0x31e38, 0x31e3c, 4069 0x31e80, 0x31e80, 4070 0x31e88, 0x31ea8, 
4071 0x31eb0, 0x31eb4, 4072 0x31ec8, 0x31ed4, 4073 0x31fb8, 0x32004, 4074 0x32200, 0x32200, 4075 0x32208, 0x32240, 4076 0x32248, 0x32280, 4077 0x32288, 0x322c0, 4078 0x322c8, 0x322fc, 4079 0x32600, 0x32630, 4080 0x32a00, 0x32abc, 4081 0x32b00, 0x32b70, 4082 0x33000, 0x33048, 4083 0x33060, 0x3309c, 4084 0x330f0, 0x33148, 4085 0x33160, 0x3319c, 4086 0x331f0, 0x332e4, 4087 0x332f8, 0x333e4, 4088 0x333f8, 0x33448, 4089 0x33460, 0x3349c, 4090 0x334f0, 0x33548, 4091 0x33560, 0x3359c, 4092 0x335f0, 0x336e4, 4093 0x336f8, 0x337e4, 4094 0x337f8, 0x337fc, 4095 0x33814, 0x33814, 4096 0x3382c, 0x3382c, 4097 0x33880, 0x3388c, 4098 0x338e8, 0x338ec, 4099 0x33900, 0x33948, 4100 0x33960, 0x3399c, 4101 0x339f0, 0x33ae4, 4102 0x33af8, 0x33b10, 4103 0x33b28, 0x33b28, 4104 0x33b3c, 0x33b50, 4105 0x33bf0, 0x33c10, 4106 0x33c28, 0x33c28, 4107 0x33c3c, 0x33c50, 4108 0x33cf0, 0x33cfc, 4109 0x34000, 0x34030, 4110 0x34100, 0x34144, 4111 0x34190, 0x341d0, 4112 0x34200, 0x34318, 4113 0x34400, 0x3452c, 4114 0x34540, 0x3461c, 4115 0x34800, 0x34834, 4116 0x348c0, 0x34908, 4117 0x34910, 0x349ac, 4118 0x34a00, 0x34a2c, 4119 0x34a44, 0x34a50, 4120 0x34a74, 0x34c24, 4121 0x34d00, 0x34d00, 4122 0x34d08, 0x34d14, 4123 0x34d1c, 0x34d20, 4124 0x34d3c, 0x34d50, 4125 0x35200, 0x3520c, 4126 0x35220, 0x35220, 4127 0x35240, 0x35240, 4128 0x35600, 0x3560c, 4129 0x35a00, 0x35a1c, 4130 0x35e00, 0x35e20, 4131 0x35e38, 0x35e3c, 4132 0x35e80, 0x35e80, 4133 0x35e88, 0x35ea8, 4134 0x35eb0, 0x35eb4, 4135 0x35ec8, 0x35ed4, 4136 0x35fb8, 0x36004, 4137 0x36200, 0x36200, 4138 0x36208, 0x36240, 4139 0x36248, 0x36280, 4140 0x36288, 0x362c0, 4141 0x362c8, 0x362fc, 4142 0x36600, 0x36630, 4143 0x36a00, 0x36abc, 4144 0x36b00, 0x36b70, 4145 0x37000, 0x37048, 4146 0x37060, 0x3709c, 4147 0x370f0, 0x37148, 4148 0x37160, 0x3719c, 4149 0x371f0, 0x372e4, 4150 0x372f8, 0x373e4, 4151 0x373f8, 0x37448, 4152 0x37460, 0x3749c, 4153 0x374f0, 0x37548, 4154 0x37560, 0x3759c, 4155 0x375f0, 0x376e4, 4156 0x376f8, 0x377e4, 4157 0x377f8, 0x377fc, 4158 0x37814, 0x37814, 4159 0x3782c, 0x3782c, 4160 0x37880, 0x3788c, 4161 0x378e8, 0x378ec, 4162 0x37900, 0x37948, 4163 0x37960, 0x3799c, 4164 0x379f0, 0x37ae4, 4165 0x37af8, 0x37b10, 4166 0x37b28, 0x37b28, 4167 0x37b3c, 0x37b50, 4168 0x37bf0, 0x37c10, 4169 0x37c28, 0x37c28, 4170 0x37c3c, 0x37c50, 4171 0x37cf0, 0x37cfc, 4172 0x38000, 0x38030, 4173 0x38100, 0x38144, 4174 0x38190, 0x381d0, 4175 0x38200, 0x38318, 4176 0x38400, 0x3852c, 4177 0x38540, 0x3861c, 4178 0x38800, 0x38834, 4179 0x388c0, 0x38908, 4180 0x38910, 0x389ac, 4181 0x38a00, 0x38a2c, 4182 0x38a44, 0x38a50, 4183 0x38a74, 0x38c24, 4184 0x38d00, 0x38d00, 4185 0x38d08, 0x38d14, 4186 0x38d1c, 0x38d20, 4187 0x38d3c, 0x38d50, 4188 0x39200, 0x3920c, 4189 0x39220, 0x39220, 4190 0x39240, 0x39240, 4191 0x39600, 0x3960c, 4192 0x39a00, 0x39a1c, 4193 0x39e00, 0x39e20, 4194 0x39e38, 0x39e3c, 4195 0x39e80, 0x39e80, 4196 0x39e88, 0x39ea8, 4197 0x39eb0, 0x39eb4, 4198 0x39ec8, 0x39ed4, 4199 0x39fb8, 0x3a004, 4200 0x3a200, 0x3a200, 4201 0x3a208, 0x3a240, 4202 0x3a248, 0x3a280, 4203 0x3a288, 0x3a2c0, 4204 0x3a2c8, 0x3a2fc, 4205 0x3a600, 0x3a630, 4206 0x3aa00, 0x3aabc, 4207 0x3ab00, 0x3ab70, 4208 0x3b000, 0x3b048, 4209 0x3b060, 0x3b09c, 4210 0x3b0f0, 0x3b148, 4211 0x3b160, 0x3b19c, 4212 0x3b1f0, 0x3b2e4, 4213 0x3b2f8, 0x3b3e4, 4214 0x3b3f8, 0x3b448, 4215 0x3b460, 0x3b49c, 4216 0x3b4f0, 0x3b548, 4217 0x3b560, 0x3b59c, 4218 0x3b5f0, 0x3b6e4, 4219 0x3b6f8, 0x3b7e4, 4220 0x3b7f8, 0x3b7fc, 4221 0x3b814, 0x3b814, 4222 0x3b82c, 0x3b82c, 4223 0x3b880, 0x3b88c, 4224 0x3b8e8, 0x3b8ec, 4225 
0x3b900, 0x3b948, 4226 0x3b960, 0x3b99c, 4227 0x3b9f0, 0x3bae4, 4228 0x3baf8, 0x3bb10, 4229 0x3bb28, 0x3bb28, 4230 0x3bb3c, 0x3bb50, 4231 0x3bbf0, 0x3bc10, 4232 0x3bc28, 0x3bc28, 4233 0x3bc3c, 0x3bc50, 4234 0x3bcf0, 0x3bcfc, 4235 0x3c000, 0x3c030, 4236 0x3c100, 0x3c144, 4237 0x3c190, 0x3c1d0, 4238 0x3c200, 0x3c318, 4239 0x3c400, 0x3c52c, 4240 0x3c540, 0x3c61c, 4241 0x3c800, 0x3c834, 4242 0x3c8c0, 0x3c908, 4243 0x3c910, 0x3c9ac, 4244 0x3ca00, 0x3ca2c, 4245 0x3ca44, 0x3ca50, 4246 0x3ca74, 0x3cc24, 4247 0x3cd00, 0x3cd00, 4248 0x3cd08, 0x3cd14, 4249 0x3cd1c, 0x3cd20, 4250 0x3cd3c, 0x3cd50, 4251 0x3d200, 0x3d20c, 4252 0x3d220, 0x3d220, 4253 0x3d240, 0x3d240, 4254 0x3d600, 0x3d60c, 4255 0x3da00, 0x3da1c, 4256 0x3de00, 0x3de20, 4257 0x3de38, 0x3de3c, 4258 0x3de80, 0x3de80, 4259 0x3de88, 0x3dea8, 4260 0x3deb0, 0x3deb4, 4261 0x3dec8, 0x3ded4, 4262 0x3dfb8, 0x3e004, 4263 0x3e200, 0x3e200, 4264 0x3e208, 0x3e240, 4265 0x3e248, 0x3e280, 4266 0x3e288, 0x3e2c0, 4267 0x3e2c8, 0x3e2fc, 4268 0x3e600, 0x3e630, 4269 0x3ea00, 0x3eabc, 4270 0x3eb00, 0x3eb70, 4271 0x3f000, 0x3f048, 4272 0x3f060, 0x3f09c, 4273 0x3f0f0, 0x3f148, 4274 0x3f160, 0x3f19c, 4275 0x3f1f0, 0x3f2e4, 4276 0x3f2f8, 0x3f3e4, 4277 0x3f3f8, 0x3f448, 4278 0x3f460, 0x3f49c, 4279 0x3f4f0, 0x3f548, 4280 0x3f560, 0x3f59c, 4281 0x3f5f0, 0x3f6e4, 4282 0x3f6f8, 0x3f7e4, 4283 0x3f7f8, 0x3f7fc, 4284 0x3f814, 0x3f814, 4285 0x3f82c, 0x3f82c, 4286 0x3f880, 0x3f88c, 4287 0x3f8e8, 0x3f8ec, 4288 0x3f900, 0x3f948, 4289 0x3f960, 0x3f99c, 4290 0x3f9f0, 0x3fae4, 4291 0x3faf8, 0x3fb10, 4292 0x3fb28, 0x3fb28, 4293 0x3fb3c, 0x3fb50, 4294 0x3fbf0, 0x3fc10, 4295 0x3fc28, 0x3fc28, 4296 0x3fc3c, 0x3fc50, 4297 0x3fcf0, 0x3fcfc, 4298 0x40000, 0x4000c, 4299 0x40040, 0x40068, 4300 0x4007c, 0x40144, 4301 0x40180, 0x4018c, 4302 0x40200, 0x40298, 4303 0x402ac, 0x4033c, 4304 0x403f8, 0x403fc, 4305 0x41304, 0x413c4, 4306 0x41400, 0x4141c, 4307 0x41480, 0x414d0, 4308 0x44000, 0x44078, 4309 0x440c0, 0x44278, 4310 0x442c0, 0x44478, 4311 0x444c0, 0x44678, 4312 0x446c0, 0x44878, 4313 0x448c0, 0x449fc, 4314 0x45000, 0x45068, 4315 0x45080, 0x45084, 4316 0x450a0, 0x450b0, 4317 0x45200, 0x45268, 4318 0x45280, 0x45284, 4319 0x452a0, 0x452b0, 4320 0x460c0, 0x460e4, 4321 0x47000, 0x4708c, 4322 0x47200, 0x47250, 4323 0x47400, 0x47420, 4324 0x47600, 0x47618, 4325 0x47800, 0x47814, 4326 0x48000, 0x4800c, 4327 0x48040, 0x48068, 4328 0x4807c, 0x48144, 4329 0x48180, 0x4818c, 4330 0x48200, 0x48298, 4331 0x482ac, 0x4833c, 4332 0x483f8, 0x483fc, 4333 0x49304, 0x493c4, 4334 0x49400, 0x4941c, 4335 0x49480, 0x494d0, 4336 0x4c000, 0x4c078, 4337 0x4c0c0, 0x4c278, 4338 0x4c2c0, 0x4c478, 4339 0x4c4c0, 0x4c678, 4340 0x4c6c0, 0x4c878, 4341 0x4c8c0, 0x4c9fc, 4342 0x4d000, 0x4d068, 4343 0x4d080, 0x4d084, 4344 0x4d0a0, 0x4d0b0, 4345 0x4d200, 0x4d268, 4346 0x4d280, 0x4d284, 4347 0x4d2a0, 0x4d2b0, 4348 0x4e0c0, 0x4e0e4, 4349 0x4f000, 0x4f08c, 4350 0x4f200, 0x4f250, 4351 0x4f400, 0x4f420, 4352 0x4f600, 0x4f618, 4353 0x4f800, 0x4f814, 4354 0x50000, 0x500cc, 4355 0x50400, 0x50400, 4356 0x50800, 0x508cc, 4357 0x50c00, 0x50c00, 4358 0x51000, 0x5101c, 4359 0x51300, 0x51308, 4360 }; 4361 4362 if (is_t4(sc)) { 4363 reg_ranges = &t4_reg_ranges[0]; 4364 n = nitems(t4_reg_ranges); 4365 } else { 4366 reg_ranges = &t5_reg_ranges[0]; 4367 n = nitems(t5_reg_ranges); 4368 } 4369 4370 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4371 for (i = 0; i < n; i += 2) 4372 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]); 4373 } 4374 4375 static void 4376 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4377 { 
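
	/*
	 * Added commentary (not in the original source): the timeval check
	 * at the top of this function rate-limits stats refreshes to one
	 * every 250ms per port:
	 *
	 *	getmicrotime(&tv);			now (cached timestamp)
	 *	timevalsub(&tv, &interval);		now - 250ms
	 *	if (timevalcmp(&tv, &pi->last_refreshed, <))
	 *		return;				refreshed recently
	 *
	 * getmicrotime(9) is preferred over microtime(9) here because a
	 * low-precision timestamp is sufficient and much cheaper to obtain.
	 */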
4378 int i; 4379 u_int v, tnl_cong_drops; 4380 struct timeval tv; 4381 const struct timeval interval = {0, 250000}; /* 250ms */ 4382 4383 getmicrotime(&tv); 4384 timevalsub(&tv, &interval); 4385 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4386 return; 4387 4388 tnl_cong_drops = 0; 4389 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4390 for (i = 0; i < NCHAN; i++) { 4391 if (pi->rx_chan_map & (1 << i)) { 4392 mtx_lock(&sc->regwin_lock); 4393 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4394 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4395 mtx_unlock(&sc->regwin_lock); 4396 tnl_cong_drops += v; 4397 } 4398 } 4399 pi->tnl_cong_drops = tnl_cong_drops; 4400 getmicrotime(&pi->last_refreshed); 4401 } 4402 4403 static void 4404 cxgbe_tick(void *arg) 4405 { 4406 struct port_info *pi = arg; 4407 struct adapter *sc = pi->adapter; 4408 struct ifnet *ifp = pi->ifp; 4409 4410 PORT_LOCK(pi); 4411 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4412 PORT_UNLOCK(pi); 4413 return; /* without scheduling another callout */ 4414 } 4415 4416 cxgbe_refresh_stats(sc, pi); 4417 4418 callout_schedule(&pi->tick, hz); 4419 PORT_UNLOCK(pi); 4420 } 4421 4422 static void 4423 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4424 { 4425 struct ifnet *vlan; 4426 4427 if (arg != ifp || ifp->if_type != IFT_ETHER) 4428 return; 4429 4430 vlan = VLAN_DEVAT(ifp, vid); 4431 VLAN_SETCOOKIE(vlan, ifp); 4432 } 4433 4434 static int 4435 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4436 { 4437 4438 #ifdef INVARIANTS 4439 panic("%s: opcode 0x%02x on iq %p with payload %p", 4440 __func__, rss->opcode, iq, m); 4441 #else 4442 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n", 4443 __func__, rss->opcode, iq, m); 4444 m_freem(m); 4445 #endif 4446 return (EDOOFUS); 4447 } 4448 4449 int 4450 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 4451 { 4452 uintptr_t *loc, new; 4453 4454 if (opcode >= nitems(sc->cpl_handler)) 4455 return (EINVAL); 4456 4457 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; 4458 loc = (uintptr_t *) &sc->cpl_handler[opcode]; 4459 atomic_store_rel_ptr(loc, new); 4460 4461 return (0); 4462 } 4463 4464 static int 4465 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) 4466 { 4467 4468 #ifdef INVARIANTS 4469 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); 4470 #else 4471 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", 4472 __func__, iq, ctrl); 4473 #endif 4474 return (EDOOFUS); 4475 } 4476 4477 int 4478 t4_register_an_handler(struct adapter *sc, an_handler_t h) 4479 { 4480 uintptr_t *loc, new; 4481 4482 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled; 4483 loc = (uintptr_t *) &sc->an_handler; 4484 atomic_store_rel_ptr(loc, new); 4485 4486 return (0); 4487 } 4488 4489 static int 4490 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) 4491 { 4492 const struct cpl_fw6_msg *cpl = 4493 __containerof(rpl, struct cpl_fw6_msg, data[0]); 4494 4495 #ifdef INVARIANTS 4496 panic("%s: fw_msg type %d", __func__, cpl->type); 4497 #else 4498 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); 4499 #endif 4500 return (EDOOFUS); 4501 } 4502 4503 int 4504 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 4505 { 4506 uintptr_t *loc, new; 4507 4508 if (type >= nitems(sc->fw_msg_handler)) 4509 return (EINVAL); 4510 4511 /* 4512 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 4513 * handler dispatch table. 
Reject any attempt to install a handler for
	 * this subtype.
	 */
	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
		return (EINVAL);

	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
	loc = (uintptr_t *) &sc->fw_msg_handler[type];
	atomic_store_rel_ptr(loc, new);

	return (0);
}

static int
t4_sysctls(struct adapter *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children, *c0;
	static char *caps[] = {
		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"	/* caps[1] niccaps */
		    "\6HASHFILTER\7ETHOFLD",
		"\20\1TOE",				/* caps[2] toecaps */
		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
		    "\4PO_INITIATOR\5PO_TARGET"
	};
	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";

	ctx = device_get_sysctl_ctx(sc->dev);

	/*
	 * dev.t4nex.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	c0 = children = SYSCTL_CHILDREN(oid);

	sc->sc_do_rxcopy = 1;
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
	    sc->params.nports, "# of ports");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
	    NULL, chip_rev(sc), "chip hardware revision");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
	    sc->cfcsum, "config file checksum");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
	    sysctl_bitfield, "A", "available doorbells");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
	    sysctl_bitfield, "A", "available link capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
	    sysctl_bitfield, "A", "available NIC capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
	    sysctl_bitfield, "A", "available TCP offload capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
	    sysctl_bitfield, "A", "available RDMA capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
	    sysctl_bitfield, "A", "available iSCSI capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
	    sysctl_bitfield, "A", "available FCoE capabilities");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
	    sc->params.vpd.cclk, "core clock frequency (in KHz)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
sizeof(sc->sge.timer_val), sysctl_int_array, "A", 4607 "interrupt holdoff timer values (us)"); 4608 4609 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4610 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val, 4611 sizeof(sc->sge.counter_val), sysctl_int_array, "A", 4612 "interrupt holdoff packet counter values"); 4613 4614 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4615 NULL, sc->tids.nftids, "number of filters"); 4616 4617 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4618 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4619 "chip temperature (in Celsius)"); 4620 4621 t4_sge_sysctls(sc, ctx, children); 4622 4623 sc->lro_timeout = 100; 4624 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4625 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4626 4627 #ifdef SBUF_DRAIN 4628 /* 4629 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4630 */ 4631 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4632 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4633 "logs and miscellaneous information"); 4634 children = SYSCTL_CHILDREN(oid); 4635 4636 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4637 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4638 sysctl_cctrl, "A", "congestion control"); 4639 4640 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4641 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4642 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4643 4644 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4645 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4646 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4647 4648 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4649 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4650 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4651 4652 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4653 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4654 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4655 4656 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4657 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4658 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4659 4660 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4661 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4662 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4663 4664 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4665 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4666 sysctl_cim_la, "A", "CIM logic analyzer"); 4667 4668 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4669 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4670 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4671 4672 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4673 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4674 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4675 4676 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4677 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4678 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4679 4680 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4681 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4682 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4683 4684 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4685 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4686 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4687 4688 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4689 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4690 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4691 4692 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4693 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4694 sysctl_cim_ibq_obq, "A", "CIM 
OBQ 5 (NCSI)"); 4695 4696 if (is_t5(sc)) { 4697 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4698 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4699 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4700 4701 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4702 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4703 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4704 } 4705 4706 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4707 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4708 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4709 4710 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4711 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4712 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4713 4714 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4715 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4716 sysctl_cpl_stats, "A", "CPL statistics"); 4717 4718 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4719 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4720 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4721 4722 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4723 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4724 sysctl_devlog, "A", "firmware's device log"); 4725 4726 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4727 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4728 sysctl_fcoe_stats, "A", "FCoE statistics"); 4729 4730 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4731 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4732 sysctl_hw_sched, "A", "hardware scheduler "); 4733 4734 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4735 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4736 sysctl_l2t, "A", "hardware L2 table"); 4737 4738 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4739 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4740 sysctl_lb_stats, "A", "loopback statistics"); 4741 4742 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4743 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4744 sysctl_meminfo, "A", "memory regions"); 4745 4746 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4747 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4748 sysctl_mps_tcam, "A", "MPS TCAM entries"); 4749 4750 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4751 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4752 sysctl_path_mtus, "A", "path MTUs"); 4753 4754 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4755 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4756 sysctl_pm_stats, "A", "PM statistics"); 4757 4758 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4759 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4760 sysctl_rdma_stats, "A", "RDMA statistics"); 4761 4762 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4763 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4764 sysctl_tcp_stats, "A", "TCP statistics"); 4765 4766 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4767 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4768 sysctl_tids, "A", "TID information"); 4769 4770 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4771 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4772 sysctl_tp_err_stats, "A", "TP error statistics"); 4773 4774 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4775 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4776 sysctl_tp_la, "A", "TP logic analyzer"); 4777 4778 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4779 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4780 sysctl_tx_rate, "A", "Tx rate"); 4781 4782 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4783 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4784 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4785 4786 if (is_t5(sc)) { 4787 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4788 
CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4789 sysctl_wcwr_stats, "A", "write combined work requests"); 4790 } 4791 #endif 4792 4793 #ifdef TCP_OFFLOAD 4794 if (is_offload(sc)) { 4795 /* 4796 * dev.t4nex.X.toe. 4797 */ 4798 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4799 NULL, "TOE parameters"); 4800 children = SYSCTL_CHILDREN(oid); 4801 4802 sc->tt.sndbuf = 256 * 1024; 4803 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4804 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4805 4806 sc->tt.ddp = 0; 4807 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4808 &sc->tt.ddp, 0, "DDP allowed"); 4809 4810 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 4811 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 4812 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 4813 4814 sc->tt.ddp_thres = 4815 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 4816 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 4817 &sc->tt.ddp_thres, 0, "DDP threshold"); 4818 4819 sc->tt.rx_coalesce = 1; 4820 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 4821 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 4822 4823 sc->tt.tx_align = 1; 4824 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 4825 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 4826 } 4827 #endif 4828 4829 4830 return (0); 4831 } 4832 4833 static int 4834 cxgbe_sysctls(struct port_info *pi) 4835 { 4836 struct sysctl_ctx_list *ctx; 4837 struct sysctl_oid *oid; 4838 struct sysctl_oid_list *children; 4839 struct adapter *sc = pi->adapter; 4840 4841 ctx = device_get_sysctl_ctx(pi->dev); 4842 4843 /* 4844 * dev.cxgbe.X. 4845 */ 4846 oid = device_get_sysctl_tree(pi->dev); 4847 children = SYSCTL_CHILDREN(oid); 4848 4849 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 4850 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 4851 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 4852 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 4853 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 4854 "PHY temperature (in Celsius)"); 4855 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 4856 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 4857 "PHY firmware version"); 4858 } 4859 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 4860 &pi->nrxq, 0, "# of rx queues"); 4861 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 4862 &pi->ntxq, 0, "# of tx queues"); 4863 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 4864 &pi->first_rxq, 0, "index of first rx queue"); 4865 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 4866 &pi->first_txq, 0, "index of first tx queue"); 4867 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT | 4868 CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU", 4869 "Reserve queue 0 for non-flowid packets"); 4870 4871 #ifdef TCP_OFFLOAD 4872 if (is_offload(sc)) { 4873 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 4874 &pi->nofldrxq, 0, 4875 "# of rx queues for offloaded TCP connections"); 4876 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 4877 &pi->nofldtxq, 0, 4878 "# of tx queues for offloaded TCP connections"); 4879 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 4880 CTLFLAG_RD, &pi->first_ofld_rxq, 0, 4881 "index of first TOE rx queue"); 4882 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 4883 CTLFLAG_RD, &pi->first_ofld_txq, 0, 4884 "index of first TOE tx queue"); 4885 } 4886 
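
	/*
	 * Usage sketch (illustrative, not from the original source): on a
	 * TOE-capable adapter the knobs registered just above appear under
	 * dev.t4nex.<unit>.toe and can be changed at runtime, e.g.:
	 *
	 *	# sysctl dev.t4nex.0.toe.ddp=1
	 *	# sysctl dev.t4nex.0.toe.sndbuf=262144
	 *
	 * sndbuf defaults to 256KB and ddp defaults to off, as set above.
	 */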
#endif 4887 #ifdef DEV_NETMAP 4888 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 4889 &pi->nnmrxq, 0, "# of rx queues for netmap"); 4890 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 4891 &pi->nnmtxq, 0, "# of tx queues for netmap"); 4892 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 4893 CTLFLAG_RD, &pi->first_nm_rxq, 0, 4894 "index of first netmap rx queue"); 4895 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 4896 CTLFLAG_RD, &pi->first_nm_txq, 0, 4897 "index of first netmap tx queue"); 4898 #endif 4899 4900 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 4901 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I", 4902 "holdoff timer index"); 4903 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 4904 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I", 4905 "holdoff packet counter index"); 4906 4907 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 4908 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I", 4909 "rx queue size"); 4910 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 4911 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I", 4912 "tx queue size"); 4913 4914 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 4915 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings, 4916 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 4917 4918 /* 4919 * dev.cxgbe.X.stats. 4920 */ 4921 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 4922 NULL, "port statistics"); 4923 children = SYSCTL_CHILDREN(oid); 4924 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 4925 &pi->tx_parse_error, 0, 4926 "# of tx packets with invalid length or # of segments"); 4927 4928 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 4929 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 4930 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 4931 sysctl_handle_t4_reg64, "QU", desc) 4932 4933 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 4934 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 4935 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 4936 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 4937 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 4938 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 4939 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 4940 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 4941 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 4942 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 4943 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 4944 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 4945 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 4946 "# of tx frames in this range", 4947 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 4948 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 4949 "# of tx frames in this range", 4950 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 4951 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 4952 "# of tx frames in this range", 4953 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 4954 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 4955 "# of tx frames in this range", 4956 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 4957 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 4958 "# of tx frames in this range", 4959 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 4960 SYSCTL_ADD_T4_REG64(pi, 
"tx_frames_1024_1518", 4961 "# of tx frames in this range", 4962 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 4963 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 4964 "# of tx frames in this range", 4965 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 4966 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 4967 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 4968 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 4969 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 4970 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 4971 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 4972 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 4973 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 4974 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 4975 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 4976 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 4977 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 4978 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 4979 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 4980 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 4981 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 4982 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 4983 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 4984 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 4985 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 4986 4987 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 4988 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 4989 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 4990 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 4991 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 4992 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 4993 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 4994 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 4995 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 4996 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 4997 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 4998 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 4999 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5000 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5001 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5002 "# of frames received with bad FCS", 5003 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5004 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5005 "# of frames received with length error", 5006 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5007 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5008 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5009 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5010 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5011 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5012 "# of rx frames in this range", 5013 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5014 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5015 "# of rx frames in this range", 5016 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5017 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5018 "# of rx frames in this range", 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}

static int
sysctl_int_array(SYSCTL_HANDLER_ARGS)
{
	int rc, *i, space = 0;
	struct sbuf sb;

	sbuf_new_for_sysctl(&sb, NULL, 64, req);
	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
		if (space)
			sbuf_printf(&sb, " ");
		sbuf_printf(&sb, "%d", *i);
		space = 1;
	}
	rc = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (rc);
}

static int
sysctl_bitfield(SYSCTL_HANDLER_ARGS)
{
	int rc;
	struct sbuf *sb;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb =
sbuf_new_for_sysctl(NULL, NULL, 128, req); 5109 if (sb == NULL) 5110 return (ENOMEM); 5111 5112 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5113 rc = sbuf_finish(sb); 5114 sbuf_delete(sb); 5115 5116 return (rc); 5117 } 5118 5119 static int 5120 sysctl_btphy(SYSCTL_HANDLER_ARGS) 5121 { 5122 struct port_info *pi = arg1; 5123 int op = arg2; 5124 struct adapter *sc = pi->adapter; 5125 u_int v; 5126 int rc; 5127 5128 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt"); 5129 if (rc) 5130 return (rc); 5131 /* XXX: magic numbers */ 5132 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, 5133 &v); 5134 end_synchronized_op(sc, 0); 5135 if (rc) 5136 return (rc); 5137 if (op == 0) 5138 v /= 256; 5139 5140 rc = sysctl_handle_int(oidp, &v, 0, req); 5141 return (rc); 5142 } 5143 5144 static int 5145 sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5146 { 5147 struct port_info *pi = arg1; 5148 int rc, val; 5149 5150 val = pi->rsrv_noflowq; 5151 rc = sysctl_handle_int(oidp, &val, 0, req); 5152 if (rc != 0 || req->newptr == NULL) 5153 return (rc); 5154 5155 if ((val >= 1) && (pi->ntxq > 1)) 5156 pi->rsrv_noflowq = 1; 5157 else 5158 pi->rsrv_noflowq = 0; 5159 5160 return (rc); 5161 } 5162 5163 static int 5164 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5165 { 5166 struct port_info *pi = arg1; 5167 struct adapter *sc = pi->adapter; 5168 int idx, rc, i; 5169 struct sge_rxq *rxq; 5170 #ifdef TCP_OFFLOAD 5171 struct sge_ofld_rxq *ofld_rxq; 5172 #endif 5173 uint8_t v; 5174 5175 idx = pi->tmr_idx; 5176 5177 rc = sysctl_handle_int(oidp, &idx, 0, req); 5178 if (rc != 0 || req->newptr == NULL) 5179 return (rc); 5180 5181 if (idx < 0 || idx >= SGE_NTIMERS) 5182 return (EINVAL); 5183 5184 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5185 "t4tmr"); 5186 if (rc) 5187 return (rc); 5188 5189 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1); 5190 for_each_rxq(pi, i, rxq) { 5191 #ifdef atomic_store_rel_8 5192 atomic_store_rel_8(&rxq->iq.intr_params, v); 5193 #else 5194 rxq->iq.intr_params = v; 5195 #endif 5196 } 5197 #ifdef TCP_OFFLOAD 5198 for_each_ofld_rxq(pi, i, ofld_rxq) { 5199 #ifdef atomic_store_rel_8 5200 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5201 #else 5202 ofld_rxq->iq.intr_params = v; 5203 #endif 5204 } 5205 #endif 5206 pi->tmr_idx = idx; 5207 5208 end_synchronized_op(sc, LOCK_HELD); 5209 return (0); 5210 } 5211 5212 static int 5213 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5214 { 5215 struct port_info *pi = arg1; 5216 struct adapter *sc = pi->adapter; 5217 int idx, rc; 5218 5219 idx = pi->pktc_idx; 5220 5221 rc = sysctl_handle_int(oidp, &idx, 0, req); 5222 if (rc != 0 || req->newptr == NULL) 5223 return (rc); 5224 5225 if (idx < -1 || idx >= SGE_NCOUNTERS) 5226 return (EINVAL); 5227 5228 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5229 "t4pktc"); 5230 if (rc) 5231 return (rc); 5232 5233 if (pi->flags & PORT_INIT_DONE) 5234 rc = EBUSY; /* cannot be changed once the queues are created */ 5235 else 5236 pi->pktc_idx = idx; 5237 5238 end_synchronized_op(sc, LOCK_HELD); 5239 return (rc); 5240 } 5241 5242 static int 5243 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5244 { 5245 struct port_info *pi = arg1; 5246 struct adapter *sc = pi->adapter; 5247 int qsize, rc; 5248 5249 qsize = pi->qsize_rxq; 5250 5251 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5252 if (rc != 0 || req->newptr == NULL) 5253 return (rc); 5254 5255 if (qsize < 128 || (qsize & 7)) 5256 return (EINVAL); 5257 5258 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | 
SLEEP_OK | INTR_OK,
	    "t4rxqs");
	if (rc)
		return (rc);

	if (pi->flags & PORT_INIT_DONE)
		rc = EBUSY; /* cannot be changed once the queues are created */
	else
		pi->qsize_rxq = qsize;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

static int
sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int qsize, rc;

	qsize = pi->qsize_txq;

	rc = sysctl_handle_int(oidp, &qsize, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);

	if (qsize < 128 || qsize > 65536)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4txqs");
	if (rc)
		return (rc);

	if (pi->flags & PORT_INIT_DONE)
		rc = EBUSY; /* cannot be changed once the queues are created */
	else
		pi->qsize_txq = qsize;

	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

static int
sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	struct link_config *lc = &pi->link_cfg;
	int rc;

	if (req->newptr == NULL) {
		/* read */
		struct sbuf *sb;
		static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";

		rc = sysctl_wire_old_buffer(req, 0);
		if (rc != 0)
			return (rc);

		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
		if (sb == NULL)
			return (ENOMEM);

		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
		rc = sbuf_finish(sb);
		sbuf_delete(sb);
	} else {
		/* write */
		char s[2];
		int n;

		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
		s[1] = 0;

		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
		if (rc != 0)
			return (rc);

		if (s[1] != 0)
			return (EINVAL);
		if (s[0] < '0' || s[0] > '9')
			return (EINVAL);	/* not a number */
		n = s[0] - '0';
		if (n & ~(PAUSE_TX | PAUSE_RX))
			return (EINVAL);	/* some other bit is set too */

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4PAUSE");
		if (rc)
			return (rc);
		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
			int link_ok = lc->link_ok;

			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
			lc->requested_fc |= n;
			rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, lc);
			lc->link_ok = link_ok;	/* restore */
		}
		end_synchronized_op(sc, 0);
	}

	return (rc);
}

static int
sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int reg = arg2;
	uint64_t val;

	val = t4_read_reg64(sc, reg);

	return (sysctl_handle_64(oidp, &val, 0, req));
}
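
/*
 * Usage sketch (illustrative, not from the original source):
 * sysctl_pause_settings() above encodes the two flow-control bits as a
 * single digit (bit 0 = PAUSE_RX, bit 1 = PAUSE_TX), so from userland:
 *
 *	# sysctl dev.cxgbe.0.pause_settings	(read back, e.g. 3<PAUSE_RX,PAUSE_TX>)
 *	# sysctl dev.cxgbe.0.pause_settings=1	(rx pause only)
 *	# sysctl dev.cxgbe.0.pause_settings=0	(no pause frames)
 *
 * Writing a value with any other bit set is rejected with EINVAL.
 */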

static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, t;
	uint32_t param, val;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	rc = sysctl_handle_int(oidp, &t, 0, req);
	return (rc);
}

#ifdef SBUF_DRAIN
static int
sysctl_cctrl(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t incr[NMTUS][NCCTRL_WIN];
	static const char *dec_fac[] = {
		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
		"0.9375"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_cong_tbl(sc, incr);

	for (i = 0; i < NCCTRL_WIN; ++i) {
		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
		    incr[5][i], incr[6][i], incr[7][i]);
		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
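
/*
 * Added commentary: sysctl_cim_ibq_obq() below serves every cim_ibq_* and
 * cim_obq_* node registered in t4_sysctls().  arg2 is the flat queue index:
 * 0 .. CIM_NUM_IBQ-1 name an inbound queue and CIM_NUM_IBQ onwards name an
 * outbound queue (the handler subtracts CIM_NUM_IBQ), which is why the OBQ
 * nodes are registered with "n + CIM_NUM_IBQ" and why qname[] above lists
 * the IBQ names first.
 */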

static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int cfg;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
	if (rc != 0)
		return (rc);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	rc = -t4_cim_read_la(sc, buf, NULL);
	if (rc != 0)
		goto done;

	sbuf_printf(sb, "Status Data PC%s",
	    cfg & F_UPDBGLACAPTPCONLY ? "" :
	    " LS0Stat LS0Addr LS0Data");

	KASSERT((sc->params.cim_la_size & 7) == 0,
	    ("%s: p will walk off the end of buf", __func__));

	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
		if (cfg & F_UPDBGLACAPTPCONLY) {
			sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
			    p[6], p[7]);
			sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
			    p[4] & 0xff, p[5] >> 8);
			sbuf_printf(sb, "\n %02x %x%07x %x%07x",
			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
			    p[1] & 0xf, p[2] >> 4);
		} else {
			sbuf_printf(sb,
			    "\n %02x %x%07x %x%07x %08x %08x "
			    "%08x%08x%08x%08x",
			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
			    p[6], p[7]);
		}
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
	p = buf;

	/* both loops are bounded by CIM_PIFLA_SIZE, which sized buf above */
	sbuf_printf(sb, "Cntl ID DataBE Addr Data");
	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
		sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
		    p[4], p[3], p[2], p[1], p[0]);
	}

	sbuf_printf(sb, "\n\nCntl ID Data");
	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
		sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t
size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5655 uint16_t thres[CIM_NUM_IBQ]; 5656 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 5657 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 5658 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 5659 5660 if (is_t4(sc)) { 5661 cim_num_obq = CIM_NUM_OBQ; 5662 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 5663 obq_rdaddr = A_UP_OBQ_0_REALADDR; 5664 } else { 5665 cim_num_obq = CIM_NUM_OBQ_T5; 5666 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 5667 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 5668 } 5669 nq = CIM_NUM_IBQ + cim_num_obq; 5670 5671 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 5672 if (rc == 0) 5673 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 5674 if (rc != 0) 5675 return (rc); 5676 5677 t4_read_cimq_cfg(sc, base, size, thres); 5678 5679 rc = sysctl_wire_old_buffer(req, 0); 5680 if (rc != 0) 5681 return (rc); 5682 5683 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5684 if (sb == NULL) 5685 return (ENOMEM); 5686 5687 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 5688 5689 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 5690 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 5691 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 5692 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5693 G_QUEREMFLITS(p[2]) * 16); 5694 for ( ; i < nq; i++, p += 4, wr += 2) 5695 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 5696 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 5697 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5698 G_QUEREMFLITS(p[2]) * 16); 5699 5700 rc = sbuf_finish(sb); 5701 sbuf_delete(sb); 5702 5703 return (rc); 5704 } 5705 5706 static int 5707 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 5708 { 5709 struct adapter *sc = arg1; 5710 struct sbuf *sb; 5711 int rc; 5712 struct tp_cpl_stats stats; 5713 5714 rc = sysctl_wire_old_buffer(req, 0); 5715 if (rc != 0) 5716 return (rc); 5717 5718 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5719 if (sb == NULL) 5720 return (ENOMEM); 5721 5722 t4_tp_get_cpl_stats(sc, &stats); 5723 5724 sbuf_printf(sb, " channel 0 channel 1 channel 2 " 5725 "channel 3\n"); 5726 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n", 5727 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 5728 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u", 5729 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 5730 5731 rc = sbuf_finish(sb); 5732 sbuf_delete(sb); 5733 5734 return (rc); 5735 } 5736 5737 static int 5738 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 5739 { 5740 struct adapter *sc = arg1; 5741 struct sbuf *sb; 5742 int rc; 5743 struct tp_usm_stats stats; 5744 5745 rc = sysctl_wire_old_buffer(req, 0); 5746 if (rc != 0) 5747 return(rc); 5748 5749 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5750 if (sb == NULL) 5751 return (ENOMEM); 5752 5753 t4_get_usm_stats(sc, &stats); 5754 5755 sbuf_printf(sb, "Frames: %u\n", stats.frames); 5756 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 5757 sbuf_printf(sb, "Drops: %u", stats.drops); 5758 5759 rc = sbuf_finish(sb); 5760 sbuf_delete(sb); 5761 5762 return (rc); 5763 } 5764 5765 const char *devlog_level_strings[] = { 5766 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 5767 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 5768 [FW_DEVLOG_LEVEL_ERR] = "ERR", 5769 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 5770 [FW_DEVLOG_LEVEL_INFO] = "INFO", 5771 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 5772 }; 5773 5774 const char *devlog_facility_strings[] = { 5775 [FW_DEVLOG_FACILITY_CORE] = "CORE", 5776 [FW_DEVLOG_FACILITY_CF] = "CF", 5777 
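	/*
	 * Note (added commentary): these are C99 designated initializers
	 * keyed by the FW_DEVLOG_FACILITY_* values; unlisted indices are
	 * left NULL.  sysctl_devlog() below range-checks each entry's
	 * facility against nitems() and prints "UNKNOWN" when it is out of
	 * range.
	 */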
[FW_DEVLOG_FACILITY_SCHED] = "SCHED", 5778 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 5779 [FW_DEVLOG_FACILITY_RES] = "RES", 5780 [FW_DEVLOG_FACILITY_HW] = "HW", 5781 [FW_DEVLOG_FACILITY_FLR] = "FLR", 5782 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 5783 [FW_DEVLOG_FACILITY_PHY] = "PHY", 5784 [FW_DEVLOG_FACILITY_MAC] = "MAC", 5785 [FW_DEVLOG_FACILITY_PORT] = "PORT", 5786 [FW_DEVLOG_FACILITY_VI] = "VI", 5787 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 5788 [FW_DEVLOG_FACILITY_ACL] = "ACL", 5789 [FW_DEVLOG_FACILITY_TM] = "TM", 5790 [FW_DEVLOG_FACILITY_QFC] = "QFC", 5791 [FW_DEVLOG_FACILITY_DCB] = "DCB", 5792 [FW_DEVLOG_FACILITY_ETH] = "ETH", 5793 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 5794 [FW_DEVLOG_FACILITY_RI] = "RI", 5795 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 5796 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 5797 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 5798 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" 5799 }; 5800 5801 static int 5802 sysctl_devlog(SYSCTL_HANDLER_ARGS) 5803 { 5804 struct adapter *sc = arg1; 5805 struct devlog_params *dparams = &sc->params.devlog; 5806 struct fw_devlog_e *buf, *e; 5807 int i, j, rc, nentries, first = 0, m; 5808 struct sbuf *sb; 5809 uint64_t ftstamp = UINT64_MAX; 5810 5811 if (dparams->start == 0) { 5812 dparams->memtype = FW_MEMTYPE_EDC0; 5813 dparams->start = 0x84000; 5814 dparams->size = 32768; 5815 } 5816 5817 nentries = dparams->size / sizeof(struct fw_devlog_e); 5818 5819 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 5820 if (buf == NULL) 5821 return (ENOMEM); 5822 5823 m = fwmtype_to_hwmtype(dparams->memtype); 5824 rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf); 5825 if (rc != 0) 5826 goto done; 5827 5828 for (i = 0; i < nentries; i++) { 5829 e = &buf[i]; 5830 5831 if (e->timestamp == 0) 5832 break; /* end */ 5833 5834 e->timestamp = be64toh(e->timestamp); 5835 e->seqno = be32toh(e->seqno); 5836 for (j = 0; j < 8; j++) 5837 e->params[j] = be32toh(e->params[j]); 5838 5839 if (e->timestamp < ftstamp) { 5840 ftstamp = e->timestamp; 5841 first = i; 5842 } 5843 } 5844 5845 if (buf[first].timestamp == 0) 5846 goto done; /* nothing in the log */ 5847 5848 rc = sysctl_wire_old_buffer(req, 0); 5849 if (rc != 0) 5850 goto done; 5851 5852 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5853 if (sb == NULL) { 5854 rc = ENOMEM; 5855 goto done; 5856 } 5857 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 5858 "Seq#", "Tstamp", "Level", "Facility", "Message"); 5859 5860 i = first; 5861 do { 5862 e = &buf[i]; 5863 if (e->timestamp == 0) 5864 break; /* end */ 5865 5866 sbuf_printf(sb, "%10d %15ju %8s %8s ", 5867 e->seqno, e->timestamp, 5868 (e->level < nitems(devlog_level_strings) ? 5869 devlog_level_strings[e->level] : "UNKNOWN"), 5870 (e->facility < nitems(devlog_facility_strings) ? 
5871 devlog_facility_strings[e->facility] : "UNKNOWN")); 5872 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 5873 e->params[2], e->params[3], e->params[4], 5874 e->params[5], e->params[6], e->params[7]); 5875 5876 if (++i == nentries) 5877 i = 0; 5878 } while (i != first); 5879 5880 rc = sbuf_finish(sb); 5881 sbuf_delete(sb); 5882 done: 5883 free(buf, M_CXGBE); 5884 return (rc); 5885 } 5886 5887 static int 5888 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 5889 { 5890 struct adapter *sc = arg1; 5891 struct sbuf *sb; 5892 int rc; 5893 struct tp_fcoe_stats stats[4]; 5894 5895 rc = sysctl_wire_old_buffer(req, 0); 5896 if (rc != 0) 5897 return (rc); 5898 5899 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5900 if (sb == NULL) 5901 return (ENOMEM); 5902 5903 t4_get_fcoe_stats(sc, 0, &stats[0]); 5904 t4_get_fcoe_stats(sc, 1, &stats[1]); 5905 t4_get_fcoe_stats(sc, 2, &stats[2]); 5906 t4_get_fcoe_stats(sc, 3, &stats[3]); 5907 5908 sbuf_printf(sb, " channel 0 channel 1 " 5909 "channel 2 channel 3\n"); 5910 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n", 5911 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP, 5912 stats[3].octetsDDP); 5913 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP, 5914 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP); 5915 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u", 5916 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop, 5917 stats[3].framesDrop); 5918 5919 rc = sbuf_finish(sb); 5920 sbuf_delete(sb); 5921 5922 return (rc); 5923 } 5924 5925 static int 5926 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 5927 { 5928 struct adapter *sc = arg1; 5929 struct sbuf *sb; 5930 int rc, i; 5931 unsigned int map, kbps, ipg, mode; 5932 unsigned int pace_tab[NTX_SCHED]; 5933 5934 rc = sysctl_wire_old_buffer(req, 0); 5935 if (rc != 0) 5936 return (rc); 5937 5938 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5939 if (sb == NULL) 5940 return (ENOMEM); 5941 5942 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 5943 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 5944 t4_read_pace_tbl(sc, pace_tab); 5945 5946 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 5947 "Class IPG (0.1 ns) Flow IPG (us)"); 5948 5949 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 5950 t4_get_tx_sched(sc, i, &kbps, &ipg); 5951 sbuf_printf(sb, "\n %u %-5s %u ", i, 5952 (mode & (1 << i)) ? 
"flow" : "class", map & 3); 5953 if (kbps) 5954 sbuf_printf(sb, "%9u ", kbps); 5955 else 5956 sbuf_printf(sb, " disabled "); 5957 5958 if (ipg) 5959 sbuf_printf(sb, "%13u ", ipg); 5960 else 5961 sbuf_printf(sb, " disabled "); 5962 5963 if (pace_tab[i]) 5964 sbuf_printf(sb, "%10u", pace_tab[i]); 5965 else 5966 sbuf_printf(sb, " disabled"); 5967 } 5968 5969 rc = sbuf_finish(sb); 5970 sbuf_delete(sb); 5971 5972 return (rc); 5973 } 5974 5975 static int 5976 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 5977 { 5978 struct adapter *sc = arg1; 5979 struct sbuf *sb; 5980 int rc, i, j; 5981 uint64_t *p0, *p1; 5982 struct lb_port_stats s[2]; 5983 static const char *stat_name[] = { 5984 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 5985 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 5986 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 5987 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 5988 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 5989 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 5990 "BG2FramesTrunc:", "BG3FramesTrunc:" 5991 }; 5992 5993 rc = sysctl_wire_old_buffer(req, 0); 5994 if (rc != 0) 5995 return (rc); 5996 5997 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5998 if (sb == NULL) 5999 return (ENOMEM); 6000 6001 memset(s, 0, sizeof(s)); 6002 6003 for (i = 0; i < 4; i += 2) { 6004 t4_get_lb_stats(sc, i, &s[0]); 6005 t4_get_lb_stats(sc, i + 1, &s[1]); 6006 6007 p0 = &s[0].octets; 6008 p1 = &s[1].octets; 6009 sbuf_printf(sb, "%s Loopback %u" 6010 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 6011 6012 for (j = 0; j < nitems(stat_name); j++) 6013 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6014 *p0++, *p1++); 6015 } 6016 6017 rc = sbuf_finish(sb); 6018 sbuf_delete(sb); 6019 6020 return (rc); 6021 } 6022 6023 static int 6024 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6025 { 6026 int rc = 0; 6027 struct port_info *pi = arg1; 6028 struct sbuf *sb; 6029 static const char *linkdnreasons[] = { 6030 "non-specific", "remote fault", "autoneg failed", "reserved3", 6031 "PHY overheated", "unknown", "rx los", "reserved7" 6032 }; 6033 6034 rc = sysctl_wire_old_buffer(req, 0); 6035 if (rc != 0) 6036 return(rc); 6037 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6038 if (sb == NULL) 6039 return (ENOMEM); 6040 6041 if (pi->linkdnrc < 0) 6042 sbuf_printf(sb, "n/a"); 6043 else if (pi->linkdnrc < nitems(linkdnreasons)) 6044 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]); 6045 else 6046 sbuf_printf(sb, "%d", pi->linkdnrc); 6047 6048 rc = sbuf_finish(sb); 6049 sbuf_delete(sb); 6050 6051 return (rc); 6052 } 6053 6054 struct mem_desc { 6055 unsigned int base; 6056 unsigned int limit; 6057 unsigned int idx; 6058 }; 6059 6060 static int 6061 mem_desc_cmp(const void *a, const void *b) 6062 { 6063 return ((const struct mem_desc *)a)->base - 6064 ((const struct mem_desc *)b)->base; 6065 } 6066 6067 static void 6068 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6069 unsigned int to) 6070 { 6071 unsigned int size; 6072 6073 size = to - from + 1; 6074 if (size == 0) 6075 return; 6076 6077 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6078 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6079 } 6080 6081 static int 6082 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6083 { 6084 struct adapter *sc = arg1; 6085 struct sbuf *sb; 6086 int rc, i, n; 6087 uint32_t lo, hi, used, alloc; 6088 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6089 static const char *region[] = 
static int
sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
{
	int rc = 0;
	struct port_info *pi = arg1;
	struct sbuf *sb;
	static const char *linkdnreasons[] = {
		"non-specific", "remote fault", "autoneg failed", "reserved3",
		"PHY overheated", "unknown", "rx los", "reserved7"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);
	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
	if (sb == NULL)
		return (ENOMEM);

	if (pi->linkdnrc < 0)
		sbuf_printf(sb, "n/a");
	else if (pi->linkdnrc < nitems(linkdnreasons))
		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
	else
		sbuf_printf(sb, "%d", pi->linkdnrc);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

struct mem_desc {
	unsigned int base;
	unsigned int limit;
	unsigned int idx;
};

static int
mem_desc_cmp(const void *a, const void *b)
{
	return ((const struct mem_desc *)a)->base -
	       ((const struct mem_desc *)b)->base;
}

static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size;

	size = to - from + 1;
	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
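/*
 * Describe the layout of the adapter's memory.  The approach, roughly:
 * read MA_TARGET_MEM_ENABLE to see which memories (EDC0/EDC1 and MC, or
 * MC0/MC1 on T5) are populated and record their address ranges in avail[],
 * then collect the base address of every hardware region from its
 * registers into mem[].  Most regions only have a base; their limit is
 * inferred from the base of the next region once both arrays are sorted by
 * address (mem_desc_cmp above).  Entries that don't apply get idx set to
 * nitems(region) so the print loop skips them, and up to three holes
 * between populated memories are appended as anonymous regions.
 */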
static int
sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;
	uint32_t lo, hi, used, alloc;
	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
	static const char *region[] = {
		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
		"RQUDP region:", "PBL region:", "TXPBL region:",
		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
		"On-chip queues:"
	};
	struct mem_desc avail[4];
	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
	struct mem_desc *md = mem;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	for (i = 0; i < nitems(mem); i++) {
		mem[i].limit = 0;
		mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		avail[i].base = G_EDRAM0_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
		avail[i].idx = 0;
		i++;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		avail[i].base = G_EDRAM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
		avail[i].idx = 1;
		i++;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
		avail[i].limit = avail[i].base +
		    (G_EXT_MEM_SIZE(hi) << 20);
		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
		i++;
	}
	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base +
		    (G_EXT_MEM1_SIZE(hi) << 20);
		avail[i].idx = 4;
		i++;
	}
	if (i == 0) {
		/* No memory available; don't leak the sbuf. */
		sbuf_delete(sb);
		return (0);
	}
	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);

	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
	    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
	md++;

	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
	    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
	md++;

	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
	} else {
		md->base = 0;
		md->idx = nitems(region);	/* hide it */
	}
	md++;

#define ulp_region(reg) \
	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)

	ulp_region(RX_ISCSI);
	ulp_region(RX_TDDP);
	ulp_region(TX_TPT);
	ulp_region(RX_STAG);
	ulp_region(RX_RQ);
	ulp_region(RX_RQUDP);
	ulp_region(RX_PBL);
	ulp_region(TX_PBL);
#undef ulp_region

	md->base = 0;
	md->idx = nitems(region);
	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
	}
	md++;

	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
	md->limit = md->base + sc->tids.ntids - 1;
	md++;
	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
	md->limit = md->base + sc->tids.ntids - 1;
	md++;

	md->base = sc->vres.ocq.start;
	if (sc->vres.ocq.size)
		md->limit = md->base + sc->vres.ocq.size - 1;
	else
		md->idx = nitems(region);	/* hide it */
	md++;

	/* add any address-space holes, there can be up to 3 */
	for (n = 0; n < i - 1; n++)
		if (avail[n].limit < avail[n + 1].base)
			(md++)->base = avail[n].limit;
	if (avail[n].limit)
		(md++)->base = avail[n].limit;

	n = md - mem;
	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);

	for (lo = 0; lo < i; lo++)
		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
		    avail[lo].limit - 1);

	sbuf_printf(sb, "\n");
	for (i = 0; i < n; i++) {
		if (mem[i].idx >= nitems(region))
			continue;	/* skip holes */
		if (!mem[i].limit)
			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
		mem_region_show(sb, region[mem[i].idx], mem[i].base,
		    mem[i].limit);
	}

	sbuf_printf(sb, "\n");
	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP RAM:", lo, hi);

	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
	mem_region_show(sb, "uP Extmem2:", lo, hi);

	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
	    G_PMRXMAXPAGE(lo),
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
	    (lo & F_PMRXNUMCHN) ? 2 : 1);

	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
	    G_PMTXMAXPAGE(lo),
	    hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
	    hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
	sbuf_printf(sb, "%u p-structs\n",
	    t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));

	for (i = 0; i < 4; i++) {
		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
		if (is_t4(sc)) {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		} else {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		}
		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
		    i, used, alloc);
	}
	for (i = 0; i < 4; i++) {
		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
		if (is_t4(sc)) {
			used = G_USED(lo);
			alloc = G_ALLOC(lo);
		} else {
			used = G_T5_USED(lo);
			alloc = G_T5_ALLOC(lo);
		}
		sbuf_printf(sb,
		    "\nLoopback %d using %u pages out of %u allocated",
		    i, used, alloc);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
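/*
 * The MPS TCAM stores each entry as an (x, y) bit-pair vector rather than
 * value/mask: each bit of the lookup key matches 0, matches 1, or is a
 * don't-care depending on the (x, y) combination.  tcamxy2valmask()
 * recovers the conventional view: x | y yields the mask of bits that must
 * match, and the MAC address itself sits in the low 48 bits of y (converted
 * to big endian so it can be copied out byte by byte).  An entry with
 * x & y nonzero is not valid, which is why the loop in sysctl_mps_tcam
 * skips those rows.
 */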
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
	*mask = x | y;
	y = htobe64(y);
	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}

static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx  Ethernet address     Mask     Vld Ports PF"
	    "  VF              Replication             P0 P1 P2 P3  ML");
	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	for (i = 0; i < n; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		if (tcamx & tcamy)
			continue;

		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
		    "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
		    addr[3], addr[4], addr[5], (uintmax_t)mask,
		    (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
		    G_PORTMAP(cls_hi), G_PF(cls_lo),
		    (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
				F_FW_CMD_REQUEST | F_FW_CMD_READ |
				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.fid_ctl =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
				V_FW_LDST_CMD_CTL(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				sbuf_printf(sb,
				    " ------------ error %3u ------------", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	uint16_t mtus[NMTUS];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_read_mtu_tbl(sc, mtus, NULL);

	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
	    mtus[14], mtus[15]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint32_t cnt[PM_NSTATS];
	uint64_t cyc[PM_NSTATS];
	static const char *rx_stats[] = {
		"Read:", "Write bypass:", "Write mem:", "Flush:"
	};
	static const char *tx_stats[] = {
		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_pmtx_get_stats(sc, cnt, cyc);
	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
		    cyc[i]);

	t4_pmrx_get_stats(sc, cnt, cyc);
	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
		    cyc[i]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_rdma_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_rdma_stats(sc, &stats);
	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_tcp_stats v4, v6;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_tcp_stats(sc, &v4, &v6);
	sbuf_printf(sb,
	    "                                IP                 IPv6\n");
	sbuf_printf(sb, "OutRsts:     %20u %20u\n",
	    v4.tcpOutRsts, v6.tcpOutRsts);
	sbuf_printf(sb, "InSegs:      %20ju %20ju\n",
	    v4.tcpInSegs, v6.tcpInSegs);
	sbuf_printf(sb, "OutSegs:     %20ju %20ju\n",
	    v4.tcpOutSegs, v6.tcpOutSegs);
	sbuf_printf(sb, "RetransSegs: %20ju %20ju",
	    v4.tcpRetransSegs, v6.tcpRetransSegs);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
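/*
 * Summarize how the adapter's connection IDs are carved up.  The hardware
 * has several distinct ID namespaces: ATIDs for connections in the middle
 * of an active open, TIDs for established offloaded connections (with a
 * separate hash region when LE_DB_CONFIG.HASHEN is set, which is why the
 * TID range may be reported in two pieces), STIDs for offloaded listeners,
 * FTIDs for the filter table, and ETIDs (Ethernet offload).  The last line
 * reports the Lookup Engine's live IPv4/IPv6 counts straight from the
 * hardware registers.
 */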
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	if (t->netids) {
		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
		    t->etid_base + t->netids - 1);
	}

	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
	    "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
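/*
 * Generic decoder for packed hardware debug records.  A field_desc table
 * describes one record layout: each entry names a field and gives the bit
 * position of its LSB and its width in bits.  field_desc_show() then
 * extracts ((v >> start) & ((1 << width) - 1)) for every field and
 * pretty-prints the results, wrapping at roughly 79 columns.  For example,
 * the { "Tid", 32, 10 } entry in tp_la0 below decodes a 10-bit TID from
 * bits 41:32 of the 64-bit record.
 */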
struct field_desc {
	const char *name;
	u_int start;
	u_int width;
};

static void
field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
{
	char buf[32];
	int line_size = 0;

	while (f->name) {
		uint64_t mask = (1ULL << f->width) - 1;
		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
		    ((uintmax_t)v >> f->start) & mask);

		if (line_size + len >= 79) {
			line_size = 8;
			sbuf_printf(sb, "\n        ");
		}
		sbuf_printf(sb, "%s ", buf);
		line_size += len + 1;
		f++;
	}
	sbuf_printf(sb, "\n");
}

static struct field_desc tp_la0[] = {
	{ "RcfOpCodeOut", 60, 4 },
	{ "State", 56, 4 },
	{ "WcfState", 52, 4 },
	{ "RcfOpcSrcOut", 50, 2 },
	{ "CRxError", 49, 1 },
	{ "ERxError", 48, 1 },
	{ "SanityFailed", 47, 1 },
	{ "SpuriousMsg", 46, 1 },
	{ "FlushInputMsg", 45, 1 },
	{ "FlushInputCpl", 44, 1 },
	{ "RssUpBit", 43, 1 },
	{ "RssFilterHit", 42, 1 },
	{ "Tid", 32, 10 },
	{ "InitTcb", 31, 1 },
	{ "LineNumber", 24, 7 },
	{ "Emsg", 23, 1 },
	{ "EdataOut", 22, 1 },
	{ "Cmsg", 21, 1 },
	{ "CdataOut", 20, 1 },
	{ "EreadPdu", 19, 1 },
	{ "CreadPdu", 18, 1 },
	{ "TunnelPkt", 17, 1 },
	{ "RcfPeerFin", 16, 1 },
	{ "RcfReasonOut", 12, 4 },
	{ "TxCchannel", 10, 2 },
	{ "RcfTxChannel", 8, 2 },
	{ "RxEchannel", 6, 2 },
	{ "RcfRxChannel", 5, 1 },
	{ "RcfDataOutSrdy", 4, 1 },
	{ "RxDvld", 3, 1 },
	{ "RxOoDvld", 2, 1 },
	{ "RxCongestion", 1, 1 },
	{ "TxCongestion", 0, 1 },
	{ NULL }
};

static struct field_desc tp_la1[] = {
	{ "CplCmdIn", 56, 8 },
	{ "CplCmdOut", 48, 8 },
	{ "ESynOut", 47, 1 },
	{ "EAckOut", 46, 1 },
	{ "EFinOut", 45, 1 },
	{ "ERstOut", 44, 1 },
	{ "SynIn", 43, 1 },
	{ "AckIn", 42, 1 },
	{ "FinIn", 41, 1 },
	{ "RstIn", 40, 1 },
	{ "DataIn", 39, 1 },
	{ "DataInVld", 38, 1 },
	{ "PadIn", 37, 1 },
	{ "RxBufEmpty", 36, 1 },
	{ "RxDdp", 35, 1 },
	{ "RxFbCongestion", 34, 1 },
	{ "TxFbCongestion", 33, 1 },
	{ "TxPktSumSrdy", 32, 1 },
	{ "RcfUlpType", 28, 4 },
	{ "Eread", 27, 1 },
	{ "Ebypass", 26, 1 },
	{ "Esave", 25, 1 },
	{ "Static0", 24, 1 },
	{ "Cread", 23, 1 },
	{ "Cbypass", 22, 1 },
	{ "Csave", 21, 1 },
	{ "CPktOut", 20, 1 },
	{ "RxPagePoolFull", 18, 2 },
	{ "RxLpbkPkt", 17, 1 },
	{ "TxLpbkPkt", 16, 1 },
	{ "RxVfValid", 15, 1 },
	{ "SynLearned", 14, 1 },
	{ "SetDelEntry", 13, 1 },
	{ "SetInvEntry", 12, 1 },
	{ "CpcmdDvld", 11, 1 },
	{ "CpcmdSave", 10, 1 },
	{ "RxPstructsFull", 8, 2 },
	{ "EpcmdDvld", 7, 1 },
	{ "EpcmdFlush", 6, 1 },
	{ "EpcmdTrimPrefix", 5, 1 },
	{ "EpcmdTrimPostfix", 4, 1 },
	{ "ERssIp4Pkt", 3, 1 },
	{ "ERssIp6Pkt", 2, 1 },
	{ "ERssTcpUdpPkt", 1, 1 },
	{ "ERssFceFipPkt", 0, 1 },
	{ NULL }
};
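/*
 * The TP logic analyzer captures in several modes (DBGLAMODE in
 * TP_DBG_LA_CONFIG).  In modes 2 and 3 each capture occupies a pair of
 * 64-bit words, so sysctl_tp_la below walks the buffer two entries at a
 * time; in mode 3 the second word's layout depends on whether the first
 * word described a tunnelled packet (bit 17, TunnelPkt, selects tp_la2
 * over tp_la1).  An all-ones second word in the very last slot marks an
 * unused entry and is suppressed.
 */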
"Static0", 24, 1 }, 6737 { "Cread", 23, 1 }, 6738 { "Cbypass", 22, 1 }, 6739 { "Csave", 21, 1 }, 6740 { "CPktOut", 20, 1 }, 6741 { "RxPagePoolFull", 18, 2 }, 6742 { "RxLpbkPkt", 17, 1 }, 6743 { "TxLpbkPkt", 16, 1 }, 6744 { "RxVfValid", 15, 1 }, 6745 { "SynLearned", 14, 1 }, 6746 { "SetDelEntry", 13, 1 }, 6747 { "SetInvEntry", 12, 1 }, 6748 { "CpcmdDvld", 11, 1 }, 6749 { "CpcmdSave", 10, 1 }, 6750 { "RxPstructsFull", 8, 2 }, 6751 { "EpcmdDvld", 7, 1 }, 6752 { "EpcmdFlush", 6, 1 }, 6753 { "EpcmdTrimPrefix", 5, 1 }, 6754 { "EpcmdTrimPostfix", 4, 1 }, 6755 { "ERssIp4Pkt", 3, 1 }, 6756 { "ERssIp6Pkt", 2, 1 }, 6757 { "ERssTcpUdpPkt", 1, 1 }, 6758 { "ERssFceFipPkt", 0, 1 }, 6759 { NULL } 6760 }; 6761 6762 static struct field_desc tp_la2[] = { 6763 { "CplCmdIn", 56, 8 }, 6764 { "MpsVfVld", 55, 1 }, 6765 { "MpsPf", 52, 3 }, 6766 { "MpsVf", 44, 8 }, 6767 { "SynIn", 43, 1 }, 6768 { "AckIn", 42, 1 }, 6769 { "FinIn", 41, 1 }, 6770 { "RstIn", 40, 1 }, 6771 { "DataIn", 39, 1 }, 6772 { "DataInVld", 38, 1 }, 6773 { "PadIn", 37, 1 }, 6774 { "RxBufEmpty", 36, 1 }, 6775 { "RxDdp", 35, 1 }, 6776 { "RxFbCongestion", 34, 1 }, 6777 { "TxFbCongestion", 33, 1 }, 6778 { "TxPktSumSrdy", 32, 1 }, 6779 { "RcfUlpType", 28, 4 }, 6780 { "Eread", 27, 1 }, 6781 { "Ebypass", 26, 1 }, 6782 { "Esave", 25, 1 }, 6783 { "Static0", 24, 1 }, 6784 { "Cread", 23, 1 }, 6785 { "Cbypass", 22, 1 }, 6786 { "Csave", 21, 1 }, 6787 { "CPktOut", 20, 1 }, 6788 { "RxPagePoolFull", 18, 2 }, 6789 { "RxLpbkPkt", 17, 1 }, 6790 { "TxLpbkPkt", 16, 1 }, 6791 { "RxVfValid", 15, 1 }, 6792 { "SynLearned", 14, 1 }, 6793 { "SetDelEntry", 13, 1 }, 6794 { "SetInvEntry", 12, 1 }, 6795 { "CpcmdDvld", 11, 1 }, 6796 { "CpcmdSave", 10, 1 }, 6797 { "RxPstructsFull", 8, 2 }, 6798 { "EpcmdDvld", 7, 1 }, 6799 { "EpcmdFlush", 6, 1 }, 6800 { "EpcmdTrimPrefix", 5, 1 }, 6801 { "EpcmdTrimPostfix", 4, 1 }, 6802 { "ERssIp4Pkt", 3, 1 }, 6803 { "ERssIp6Pkt", 2, 1 }, 6804 { "ERssTcpUdpPkt", 1, 1 }, 6805 { "ERssFceFipPkt", 0, 1 }, 6806 { NULL } 6807 }; 6808 6809 static void 6810 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 6811 { 6812 6813 field_desc_show(sb, *p, tp_la0); 6814 } 6815 6816 static void 6817 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 6818 { 6819 6820 if (idx) 6821 sbuf_printf(sb, "\n"); 6822 field_desc_show(sb, p[0], tp_la0); 6823 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 6824 field_desc_show(sb, p[1], tp_la0); 6825 } 6826 6827 static void 6828 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 6829 { 6830 6831 if (idx) 6832 sbuf_printf(sb, "\n"); 6833 field_desc_show(sb, p[0], tp_la0); 6834 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 6835 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
static int
sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc, i;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_ulprx_read_la(sc, buf);
	p = buf;

	sbuf_printf(sb, "      Pcmd        Type   Message"
	    "                Data");
	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}

static int
sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, v;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	v = t4_read_reg(sc, A_SGE_STAT_CFG);
	if (G_STATSOURCE_T5(v) == 7) {
		if (G_STATMODE(v) == 0) {
			sbuf_printf(sb, "total %d, incomplete %d",
			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
			    t4_read_reg(sc, A_SGE_STAT_MATCH));
		} else if (G_STATMODE(v) == 1) {
			sbuf_printf(sb, "total %d, data overflow %d",
			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
			    t4_read_reg(sc, A_SGE_STAT_MATCH));
		}
	}
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
#endif
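/*
 * Filter mode handling.  TP_VLAN_PRI_MAP selects, globally, which optional
 * packet fields are compressed into the hardware filter tuple; the ioctl
 * API expresses the same choice as T4_FILTER_* bits.  fconf_to_mode() and
 * mode_to_fconf() translate between the two representations (the IPv4/IPv6
 * addresses and ports are always available, which is why they are
 * unconditional in the mode).  fspec_to_fconf() computes which optional
 * fields a particular filter specification actually uses, so that
 * set_filter() can verify the request fits within the current global
 * filter mode.
 */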
static uint32_t
fconf_to_mode(uint32_t fconf)
{
	uint32_t mode;

	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;

	if (fconf & F_FRAGMENTATION)
		mode |= T4_FILTER_IP_FRAGMENT;

	if (fconf & F_MPSHITTYPE)
		mode |= T4_FILTER_MPS_HIT_TYPE;

	if (fconf & F_MACMATCH)
		mode |= T4_FILTER_MAC_IDX;

	if (fconf & F_ETHERTYPE)
		mode |= T4_FILTER_ETH_TYPE;

	if (fconf & F_PROTOCOL)
		mode |= T4_FILTER_IP_PROTO;

	if (fconf & F_TOS)
		mode |= T4_FILTER_IP_TOS;

	if (fconf & F_VLAN)
		mode |= T4_FILTER_VLAN;

	if (fconf & F_VNIC_ID)
		mode |= T4_FILTER_VNIC;

	if (fconf & F_PORT)
		mode |= T4_FILTER_PORT;

	if (fconf & F_FCOE)
		mode |= T4_FILTER_FCoE;

	return (mode);
}

static uint32_t
mode_to_fconf(uint32_t mode)
{
	uint32_t fconf = 0;

	if (mode & T4_FILTER_IP_FRAGMENT)
		fconf |= F_FRAGMENTATION;

	if (mode & T4_FILTER_MPS_HIT_TYPE)
		fconf |= F_MPSHITTYPE;

	if (mode & T4_FILTER_MAC_IDX)
		fconf |= F_MACMATCH;

	if (mode & T4_FILTER_ETH_TYPE)
		fconf |= F_ETHERTYPE;

	if (mode & T4_FILTER_IP_PROTO)
		fconf |= F_PROTOCOL;

	if (mode & T4_FILTER_IP_TOS)
		fconf |= F_TOS;

	if (mode & T4_FILTER_VLAN)
		fconf |= F_VLAN;

	if (mode & T4_FILTER_VNIC)
		fconf |= F_VNIC_ID;

	if (mode & T4_FILTER_PORT)
		fconf |= F_PORT;

	if (mode & T4_FILTER_FCoE)
		fconf |= F_FCOE;

	return (fconf);
}

static uint32_t
fspec_to_fconf(struct t4_filter_specification *fs)
{
	uint32_t fconf = 0;

	if (fs->val.frag || fs->mask.frag)
		fconf |= F_FRAGMENTATION;

	if (fs->val.matchtype || fs->mask.matchtype)
		fconf |= F_MPSHITTYPE;

	if (fs->val.macidx || fs->mask.macidx)
		fconf |= F_MACMATCH;

	if (fs->val.ethtype || fs->mask.ethtype)
		fconf |= F_ETHERTYPE;

	if (fs->val.proto || fs->mask.proto)
		fconf |= F_PROTOCOL;

	if (fs->val.tos || fs->mask.tos)
		fconf |= F_TOS;

	if (fs->val.vlan_vld || fs->mask.vlan_vld)
		fconf |= F_VLAN;

	if (fs->val.vnic_vld || fs->mask.vnic_vld)
		fconf |= F_VNIC_ID;

	if (fs->val.iport || fs->mask.iport)
		fconf |= F_PORT;

	if (fs->val.fcoe || fs->mask.fcoe)
		fconf |= F_FCOE;

	return (fconf);
}

static int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
	int rc;
	uint32_t fconf;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getfm");
	if (rc)
		return (rc);

	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
	    A_TP_VLAN_PRI_MAP);

	if (sc->params.tp.vlan_pri_map != fconf) {
		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
		    fconf);
	}

	*mode = fconf_to_mode(fconf);

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
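/*
 * Changing TP_VLAN_PRI_MAP reinterprets the match fields of every filter
 * already in the hardware, so set_filter_mode() refuses to proceed while
 * any filters are in use, or while the TOE (TOM) is active; both
 * conditions are reported as EBUSY.
 */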
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	if (uld_active(sc, ULD_TOM)) {
		rc = EBUSY;
		goto done;
	}
#endif

	rc = -t4_set_filter_mode(sc, fconf);
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}

static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	memwin_info(sc, 0, &mw_base, NULL);
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	if (is_t4(sc)) {
		hits = t4_read_reg64(sc, mw_base + off + 16);
		hits = be64toh(hits);
	} else {
		hits = t4_read_reg(sc, mw_base + off + 24);
		hits = be32toh(hits);
	}

	return (hits);
}

static int
get_filter(struct adapter *sc, struct t4_filter *t)
{
	int i, rc, nfilters = sc->tids.nftids;
	struct filter_entry *f;

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4getf");
	if (rc)
		return (rc);

	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
	    t->idx >= nfilters) {
		t->idx = 0xffffffff;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];
	for (i = t->idx; i < nfilters; i++, f++) {
		if (f->valid) {
			t->idx = i;
			t->l2tidx = f->l2t ? f->l2t->idx : 0;
			t->smtidx = f->smtidx;
			if (f->fs.hitcnts)
				t->hits = get_filter_hits(sc, t->idx);
			else
				t->hits = UINT64_MAX;
			t->fs = f->fs;

			goto done;
		}
	}

	t->idx = 0xffffffff;
done:
	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
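/*
 * Validate and install a filter.  Noteworthy checks: the requested match
 * fields must be a subset of the global filter mode (see fspec_to_fconf
 * above), and an IPv6 filter occupies four consecutive slots, so its index
 * must be 4-aligned and all four slots must be free.  Installation is
 * asynchronous: set_filter_wr() marks the entry pending and posts a work
 * request, and the caller then sleeps until t4_filter_rpl() sees the
 * firmware's reply and wakes up anyone sleeping on ftid_tab.  An
 * interrupted sleep returns EINPROGRESS; the filter may still be written
 * after that.
 */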
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}

static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
static void
clear_filter(struct filter_entry *f)
{
	if (f->l2t)
		t4_l2t_release(f->l2t);

	bzero(f, sizeof (*f));
}

static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	unsigned int ftid;
	struct wrq_cookie cookie;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	ftid = sc->tids.ftid_base + fidx;

	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL)
		return (ENOMEM);
	bzero(fwr, sizeof(*fwr));

	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	f->pending = 1;
	sc->tids.ftids_in_use++;

	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	return (0);
}
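/*
 * Filter work requests are answered with a CPL_SET_TCB_RPL whose cookie
 * encodes the outcome: FW_FILTER_WR_FLT_ADDED for a successful add (the
 * reply's oldval also carries the SMT index assigned to the filter),
 * FW_FILTER_WR_FLT_DELETED for a successful delete, and anything else is
 * an error.  t4_filter_rpl() below runs from the firmware event queue,
 * updates the entry's pending/valid state accordingly, and wakes up the
 * thread blocked in set_filter()/del_filter().
 */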
static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	unsigned int ftid;
	struct wrq_cookie cookie;

	ftid = sc->tids.ftid_base + fidx;

	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL)
		return (ENOMEM);
	bzero(fwr, sizeof (*fwr));

	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	return (0);
}

int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_ftid(sc, idx)) {

		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;	/* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}

static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly
	 * via the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
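/*
 * read_card_mem() below copies adapter memory out through a PCIe memory
 * window: position_memwin() points the window at the requested address
 * (returning the residual offset within the window), the inner loop pulls
 * in up to one window's worth of data with 32-bit register reads, and each
 * chunk is copied out to userspace before the window is moved again.
 * validate_mem_range() has already ensured that the whole range falls
 * within populated adapter memory.
 */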
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}

static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
	int rc;

	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
		return (EINVAL);

	if (i2cd->len > sizeof(i2cd->data))
		return (EFBIG);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
	if (rc)
		return (rc);
	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, i2cd->len, &i2cd->data[0]);
	end_synchronized_op(sc, 0);

	return (rc);
}

static int
in_range(int val, int lo, int hi)
{
	/* A negative value means "unset" and is always acceptable. */
	return (val < 0 || (val <= hi && val >= lo));
}
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.
	 * (The sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ... */
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ... */
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Vet our parameters ... */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * are required ...
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize, 1);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
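/*
 * Bind a Tx queue to a scheduling class via FW_PARAMS_CMD: the DMAQ
 * mnemonic with the EQ_SCHEDCLASS_ETH parameter takes the egress queue's
 * context id in YZ and the class number as the value.  A negative class is
 * sent as 0xffffffff, which presumably tells the firmware to unbind the
 * queue.  A negative queue index applies the change to every Tx queue of
 * the port.
 */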
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	pi = sc->port[p->port];
	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If p->queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[pi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the
	 * interface.
	 */
	for_each_txq(pi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}
int
t4_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t4_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
	struct port_info *pi = sc->port[idx];
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	build_medialist(pi, &pi->media);
#ifdef DEV_NETMAP
	build_medialist(pi, &pi->nm_media);
#endif

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(pi->ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(pi->ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(pi->ifp, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	} else {
		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}

void
t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
{
	struct port_info *pi = sc->port[idx];
	struct ifnet *ifp = pi->ifp;

	if (link_stat) {
		pi->linkdnrc = -1;
		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
		if_link_state_change(ifp, LINK_STATE_UP);
	} else {
		if (reason >= 0)
			pi->linkdnrc = reason;
		if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}

static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen;	/* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;

		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts0_wrs = 0;
				txq->txpkts1_wrs = 0;
				txq->txpkts0_pkts = 0;
				txq->txpkts1_pkts = 0;
				mp_ring_reset_stats(txq->r);
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs_direct = 0;
				wrq->tx_wrs_copied = 0;
			}
#endif
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs_direct = 0;
			wrq->tx_wrs_copied = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}

#ifdef TCP_OFFLOAD
void
t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
    const unsigned int *pgsz_order)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;

	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
	    V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
	    V_HPZ3(pgsz_order[3]));
}
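/*
 * Enabling TOE on a port requires the port's queues to exist (so CPLs can
 * flow even if the ifnet was never brought up) and the TOM upper-layer
 * driver to be active; toe_capability() below takes care of both, and also
 * opportunistically activates the iWARP and iSCSI ULDs if their modules
 * are loaded.  A bit in sc->offload_map tracks which ports have TOE
 * enabled.
 */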

#ifdef TCP_OFFLOAD
void
t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
    const unsigned int *pgsz_order)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;

	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
	    V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
	    V_HPZ3(pgsz_order[3]));
}

static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(pi->flags & PORT_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
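
/*
 * Usage note for toe_capability(): it runs when the TOE capability of a
 * port is toggled.  A hedged sketch of how an administrator would reach
 * it (exact commands come from cxgbe(4), not this file):
 *
 *	# kldload t4_tom
 *	# ifconfig cxgbe0 toe
 *
 * If t4_tom.ko isn't loaded, t4_activate_uld(sc, ULD_TOM) fails with
 * EAGAIN and the LOG_WARNING above tells the user to kldload it first.
 */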

/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
#endif
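
/*
 * Provider-side sketch of the ULD hooks above, under the assumption that
 * struct uld_info exposes the fields used in this file (uld_id, activate,
 * deactivate); the authoritative layout is in the offload headers.  A ULD
 * module such as t4_tom would do roughly:
 *
 *	static int my_activate(struct adapter *sc)   { ... }
 *	static int my_deactivate(struct adapter *sc) { ... }
 *
 *	static struct uld_info my_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = my_activate,
 *		.deactivate = my_deactivate,
 *	};
 *
 * with t4_register_uld(&my_uld_info) called from its MOD_LOAD handler and
 * t4_unregister_uld(&my_uld_info) from MOD_UNLOAD; the latter returns
 * EBUSY while any adapter still holds a reference (refcount > 0).
 */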

/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	if (t4_nnmtxq10g < 1)
		t4_nnmtxq10g = min(nc, NNMTXQ_10G);

	if (t4_nnmtxq1g < 1)
		t4_nnmtxq1g = min(nc, NNMTXQ_1G);

	if (t4_nnmrxq10g < 1)
		t4_nnmrxq10g = min(nc, NNMRXQ_10G);

	if (t4_nnmrxq1g < 1)
		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)	/* round up to a multiple of 8 */
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
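
/*
 * These defaults apply only to tunables the administrator left unset.  A
 * hedged loader.conf(5) example, assuming the hw.cxgbe tunable names this
 * driver registers elsewhere in the file:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.qsize_rxq="2048"
 *
 * Values set this way are used as-is, except that qsize_rxq is still
 * rounded up to a multiple of 8 and clamped to at least 128 above.
 */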

static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
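
/*
 * The t4nex/t5nex nexus drivers attach to the PCI bus and the cxgbe/cxl
 * port drivers attach to their respective nexus, so loading the single
 * module brings up the whole stack.  A hedged loader.conf(5) example,
 * assuming the module names documented in cxgbe(4):
 *
 *	t4fw_cfg_load="YES"
 *	if_cxgbe_load="YES"
 */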