/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe, cxgbe_probe),
	DEVMETHOD(device_attach, cxgbe_attach),
	DEVMETHOD(device_detach, cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe, t5_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
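/*
 * Example usage (values are hypothetical, not recommendations): the
 * TUNABLE_INT()/TUNABLE_STR() hooks below read these from the loader, so an
 * administrator would set them in /boot/loader.conf before the module loads:
 *
 *	hw.cxgbe.ntxq10g="8"		# NIC tx queues per 10G port
 *	hw.cxgbe.nrxq10g="4"		# NIC rx queues per 10G port
 *	hw.cxgbe.config_file="uwire"	# select the "uwire" config file
 *
 * Anything still at -1 when the driver loads is resolved by tweak_tunables().
 */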
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);

#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);

#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);

#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
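/*
 * Worked example for the bit-encoded tunables above (values hypothetical):
 * hw.cxgbe.interrupt_types uses bits 0/1/2 for INTx/MSI/MSI-X, so the
 * default of 7 allows all three while a value of 4 restricts the driver to
 * MSI-X only.  hw.cxgbe.pause_settings uses bits 0/1 for rx_pause/tx_pause,
 * so the default of 3 enables both directions and 0 disables PAUSE entirely.
 */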
/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
	uint16_t nnmtxq10g;	/* # of netmap txq's for each 10G port */
	uint16_t nnmrxq10g;	/* # of netmap rxq's for each 10G port */
	uint16_t nnmtxq1g;	/* # of netmap txq's for each 1G port */
	uint16_t nnmrxq1g;	/* # of netmap rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	const char *pcie_ts;

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	mtx_init(&sc->regwin_lock, "register and memory window", 0, MTX_DEF);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.requested_fc |= t4_pause_settings;
		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.fc |= t4_pause_settings;

		rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
		if (rc != 0) {
			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done;	/* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
	s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
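	/*
	 * Worked example of the accounting above (hypothetical 2 x 10G
	 * adapter, NIC queues only, using the NRXQ_10G/NTXQ_10G compile-time
	 * values of 8 and 16): nrxq = 2 * 8 = 16 and ntxq = 2 * 16 = 32, so
	 * neq = 32 + 16 (one eq per txq, one per rxq freelist) + 2 + 1 (ctrl
	 * queues) = 51, and niq = 16 + 1 (firmware event queue) = 17.
	 */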
	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->flags |= iaq.intr_flags_10g;
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->flags |= iaq.intr_flags_1g;
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		if (pi->ntxq > 1)
			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
		else
			pi->rsrv_noflowq = 0;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;
#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
#ifdef DEV_NETMAP
		pi->first_nm_rxq = nm_rqidx;
		pi->first_nm_txq = nm_tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nnmrxq = iaq.nnmrxq10g;
			pi->nnmtxq = iaq.nnmtxq10g;
		} else {
			pi->nnmrxq = iaq.nnmrxq1g;
			pi->nnmtxq = iaq.nnmtxq1g;
		}
		nm_rqidx += pi->nnmrxq;
		nm_tqidx += pi->nnmtxq;
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	switch (sc->params.pci.speed) {
	case 0x1:
		pcie_ts = "2.5";
		break;
	case 0x2:
		pcie_ts = "5.0";
		break;
	case 0x3:
		pcie_ts = "8.0";
		break;
	default:
		pcie_ts = "??";
		break;
	}
	device_printf(dev,
	    "PCIe x%d (%s GT/s) (%d), %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, pcie_ts, sc->params.pci.speed,
	    sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->regwin_lock))
		mtx_destroy(&sc->regwin_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;
	char *s;
	int n, o;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi, &pi->media);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

	n = 128;
	s = malloc(n, M_CXGBE, M_WAITOK);
	o = snprintf(s, n, "%d txq, %d rxq (NIC)", pi->ntxq, pi->nrxq);
	MPASS(n > o);
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		o += snprintf(s + o, n - o, "; %d txq, %d rxq (TOE)",
		    pi->nofldtxq, pi->nofldrxq);
		MPASS(n > o);
	}
#endif
#ifdef DEV_NETMAP
	o += snprintf(s + o, n - o, "; %d txq, %d rxq (netmap)", pi->nnmtxq,
	    pi->nnmrxq);
	MPASS(n > o);
#endif
	device_printf(dev, "%s\n", s);
	free(s, M_CXGBE);

#ifdef DEV_NETMAP
	/* nm_media handled here to keep implementation private to this file */
	ifmedia_init(&pi->nm_media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi, &pi->nm_media);
	create_netmap_ifnet(pi);	/* logs errors if something fails */
#endif
	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

#ifdef DEV_NETMAP
	/* XXXNM: equivalent of cxgbe_uninit_synchronized to ifdown nm_ifp */
	destroy_netmap_ifnet(pi);
#endif

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, pi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(pi);
			}
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(pi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(&m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);	/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	txq = &sc->sge.txq[pi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq)) +
		    pi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}
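/*
 * Worked example of the txq selection in cxgbe_transmit() (hypothetical
 * values): with ntxq = 4 and rsrv_noflowq = 1, packets that carry a flowid
 * hash to txq[1..3] of the port via (flowid % 3) + 1, while packets without
 * a flowid (M_HASHTYPE_NONE) stay on txq[0], the reserved no-flow queue.
 */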
static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_ENABLED;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
		}
	}
	if_qflush(ifp);
}

static uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames - s->rx_pause);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames - s->tx_pause);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets - s->rx_pause * 64);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets - s->tx_pause * 64);

	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames - s->rx_pause);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames - s->tx_pause);

	case IFCOUNTER_IQDROPS:
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (pi->flags & PORT_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(pi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia *media = NULL;
	struct ifmedia_entry *cur;
	int speed = pi->link_cfg.speed;
#ifdef INVARIANTS
	int data = (pi->port_type << 8) | pi->mod_type;
#endif

	if (ifp == pi->ifp)
		media = &pi->media;
#ifdef DEV_NETMAP
	else if (ifp == pi->nm_ifp)
		media = &pi->nm_media;
#endif
	MPASS(media != NULL);

	cur = media->ifm_cur;
	MPASS(cur->ifm_data == data);

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}

static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
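/*
 * Example of the window encoding used in setup_memwin() (aperture sizes
 * hypothetical): V_WINDOW() takes ilog2(aperture) - 10, so a 1KB aperture
 * encodes as 0, a 4KB aperture as 2, and a 64KB aperture as 6.  The base
 * programmed into each decoder is the window's base plus bar0 (0 on T5,
 * which uses BAR-relative offsets).
 */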
/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}

static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}
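/*
 * Example of the MA BAR decoding in validate_mem_range() above (register
 * values hypothetical): base and size are stored in MB units, hence the
 * << 20.  An EDRAM0_BAR with base 0 and size 128 describes EDC0 spanning
 * [0, 128MB) of the chip's address space, and an addr/len pair is accepted
 * only if it lies entirely within one enabled region.
 */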
/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}

static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}

/*
 * Positions the memory window such that it can be used to access the specified
 * address in the chip's address space.  The return value is the offset of addr
 * from the start of the window.
 */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);

	return (addr - start);
}
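/*
 * Worked example for position_memwin() (addr hypothetical): on T4 the start
 * must be 16B aligned, so addr 0x12344 programs the window at 0x12340 and
 * the function returns 4; on T5 the start must be 128B aligned, so the same
 * addr programs 0x12300 and the function returns 0x44.
 */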
1906 */ 1907 static uint32_t 1908 position_memwin(struct adapter *sc, int n, uint32_t addr) 1909 { 1910 uint32_t start, pf; 1911 uint32_t reg; 1912 1913 KASSERT(n >= 0 && n <= 3, 1914 ("%s: invalid window %d.", __func__, n)); 1915 KASSERT((addr & 3) == 0, 1916 ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr)); 1917 1918 if (is_t4(sc)) { 1919 pf = 0; 1920 start = addr & ~0xf; /* start must be 16B aligned */ 1921 } else { 1922 pf = V_PFNUM(sc->pf); 1923 start = addr & ~0x7f; /* start must be 128B aligned */ 1924 } 1925 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n); 1926 1927 t4_write_reg(sc, reg, start | pf); 1928 t4_read_reg(sc, reg); 1929 1930 return (addr - start); 1931 } 1932 1933 static int 1934 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 1935 struct intrs_and_queues *iaq) 1936 { 1937 int rc, itype, navail, nrxq10g, nrxq1g, n; 1938 int nofldrxq10g = 0, nofldrxq1g = 0; 1939 int nnmrxq10g = 0, nnmrxq1g = 0; 1940 1941 bzero(iaq, sizeof(*iaq)); 1942 1943 iaq->ntxq10g = t4_ntxq10g; 1944 iaq->ntxq1g = t4_ntxq1g; 1945 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 1946 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 1947 iaq->rsrv_noflowq = t4_rsrv_noflowq; 1948 #ifdef TCP_OFFLOAD 1949 if (is_offload(sc)) { 1950 iaq->nofldtxq10g = t4_nofldtxq10g; 1951 iaq->nofldtxq1g = t4_nofldtxq1g; 1952 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 1953 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 1954 } 1955 #endif 1956 #ifdef DEV_NETMAP 1957 iaq->nnmtxq10g = t4_nnmtxq10g; 1958 iaq->nnmtxq1g = t4_nnmtxq1g; 1959 iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g; 1960 iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g; 1961 #endif 1962 1963 for (itype = INTR_MSIX; itype; itype >>= 1) { 1964 1965 if ((itype & t4_intr_types) == 0) 1966 continue; /* not allowed */ 1967 1968 if (itype == INTR_MSIX) 1969 navail = pci_msix_count(sc->dev); 1970 else if (itype == INTR_MSI) 1971 navail = pci_msi_count(sc->dev); 1972 else 1973 navail = 1; 1974 restart: 1975 if (navail == 0) 1976 continue; 1977 1978 iaq->intr_type = itype; 1979 iaq->intr_flags_10g = 0; 1980 iaq->intr_flags_1g = 0; 1981 1982 /* 1983 * Best option: an interrupt vector for errors, one for the 1984 * firmware event queue, and one for every rxq (NIC, TOE, and 1985 * netmap). 1986 */ 1987 iaq->nirq = T4_EXTRA_INTR; 1988 iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g); 1989 iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g); 1990 if (iaq->nirq <= navail && 1991 (itype != INTR_MSI || powerof2(iaq->nirq))) { 1992 iaq->intr_flags_10g = INTR_ALL; 1993 iaq->intr_flags_1g = INTR_ALL; 1994 goto allocate; 1995 } 1996 1997 /* 1998 * Second best option: a vector for errors, one for the firmware 1999 * event queue, and vectors for either all the NIC rx queues or 2000 * all the TOE rx queues. The queues that don't get vectors 2001 * will forward their interrupts to those that do. 2002 * 2003 * Note: netmap rx queues cannot be created early and so they 2004 * can't be setup to receive forwarded interrupts for others. 
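 *
 * For instance (illustrative queue counts only): with n10g = 2,
 * nrxq10g = 8, and nofldrxq10g = 2, this option requests
 * T4_EXTRA_INTR + 2 * 8 vectors, and the two TOE rx queues on each port
 * forward their interrupts to that port's NIC rx queues.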
 */
		iaq->nirq = T4_EXTRA_INTR;
		if (nrxq10g >= nofldrxq10g) {
			iaq->intr_flags_10g = INTR_RXQ;
			iaq->nirq += n10g * nrxq10g;
#ifdef DEV_NETMAP
			iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g);
#endif
		} else {
			iaq->intr_flags_10g = INTR_OFLD_RXQ;
			iaq->nirq += n10g * nofldrxq10g;
#ifdef DEV_NETMAP
			iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g);
#endif
		}
		if (nrxq1g >= nofldrxq1g) {
			iaq->intr_flags_1g = INTR_RXQ;
			iaq->nirq += n1g * nrxq1g;
#ifdef DEV_NETMAP
			iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g);
#endif
		} else {
			iaq->intr_flags_1g = INTR_OFLD_RXQ;
			iaq->nirq += n1g * nofldrxq1g;
#ifdef DEV_NETMAP
			iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g);
#endif
		}
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for
		 * the firmware event queue, and at least one per port.  At
		 * this point we know we'll have to downsize nrxq and/or
		 * nofldrxq and/or nnmrxq to fit what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
				    INTR_RXQ : INTR_OFLD_RXQ;

				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
#ifdef DEV_NETMAP
				iaq->nnmrxq10g = min(n, nnmrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
				    INTR_RXQ : INTR_OFLD_RXQ;

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
#ifdef DEV_NETMAP
				iaq->nnmrxq1g = min(n, nnmrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
		iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif
#ifdef DEV_NETMAP
		iaq->nnmrxq10g = iaq->nnmrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

		device_printf(sc->dev,
		    "failed to allocate vectors: %d, type=%d, req=%d, rcvd=%d\n",
		    rc, itype, iaq->nirq, navail);
	}

	device_printf(sc->dev,
	    "failed to find a usable interrupt type. 
" 2137 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2138 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2139 2140 return (ENXIO); 2141 } 2142 2143 #define FW_VERSION(chip) ( \ 2144 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2145 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2146 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2147 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2148 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2149 2150 struct fw_info { 2151 uint8_t chip; 2152 char *kld_name; 2153 char *fw_mod_name; 2154 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2155 } fw_info[] = { 2156 { 2157 .chip = CHELSIO_T4, 2158 .kld_name = "t4fw_cfg", 2159 .fw_mod_name = "t4fw", 2160 .fw_hdr = { 2161 .chip = FW_HDR_CHIP_T4, 2162 .fw_ver = htobe32_const(FW_VERSION(T4)), 2163 .intfver_nic = FW_INTFVER(T4, NIC), 2164 .intfver_vnic = FW_INTFVER(T4, VNIC), 2165 .intfver_ofld = FW_INTFVER(T4, OFLD), 2166 .intfver_ri = FW_INTFVER(T4, RI), 2167 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2168 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2169 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2170 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2171 }, 2172 }, { 2173 .chip = CHELSIO_T5, 2174 .kld_name = "t5fw_cfg", 2175 .fw_mod_name = "t5fw", 2176 .fw_hdr = { 2177 .chip = FW_HDR_CHIP_T5, 2178 .fw_ver = htobe32_const(FW_VERSION(T5)), 2179 .intfver_nic = FW_INTFVER(T5, NIC), 2180 .intfver_vnic = FW_INTFVER(T5, VNIC), 2181 .intfver_ofld = FW_INTFVER(T5, OFLD), 2182 .intfver_ri = FW_INTFVER(T5, RI), 2183 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2184 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2185 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2186 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2187 }, 2188 } 2189 }; 2190 2191 static struct fw_info * 2192 find_fw_info(int chip) 2193 { 2194 int i; 2195 2196 for (i = 0; i < nitems(fw_info); i++) { 2197 if (fw_info[i].chip == chip) 2198 return (&fw_info[i]); 2199 } 2200 return (NULL); 2201 } 2202 2203 /* 2204 * Is the given firmware API compatible with the one the driver was compiled 2205 * with? 2206 */ 2207 static int 2208 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2209 { 2210 2211 /* short circuit if it's the exact same firmware version */ 2212 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2213 return (1); 2214 2215 /* 2216 * XXX: Is this too conservative? Perhaps I should limit this to the 2217 * features that are supported in the driver. 2218 */ 2219 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2220 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2221 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2222 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2223 return (1); 2224 #undef SAME_INTF 2225 2226 return (0); 2227 } 2228 2229 /* 2230 * The firmware in the KLD is usable, but should it be installed? This routine 2231 * explains itself in detail if it indicates the KLD firmware should be 2232 * installed. 
2233 */ 2234 static int 2235 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2236 { 2237 const char *reason; 2238 2239 if (!card_fw_usable) { 2240 reason = "incompatible or unusable"; 2241 goto install; 2242 } 2243 2244 if (k > c) { 2245 reason = "older than the version bundled with this driver"; 2246 goto install; 2247 } 2248 2249 if (t4_fw_install == 2 && k != c) { 2250 reason = "different than the version bundled with this driver"; 2251 goto install; 2252 } 2253 2254 return (0); 2255 2256 install: 2257 if (t4_fw_install == 0) { 2258 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2259 "but the driver is prohibited from installing a different " 2260 "firmware on the card.\n", 2261 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2262 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2263 2264 return (0); 2265 } 2266 2267 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2268 "installing firmware %u.%u.%u.%u on card.\n", 2269 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2270 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2271 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2272 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2273 2274 return (1); 2275 } 2276 /* 2277 * Establish contact with the firmware and determine if we are the master driver 2278 * or not, and whether we are responsible for chip initialization. 2279 */ 2280 static int 2281 prep_firmware(struct adapter *sc) 2282 { 2283 const struct firmware *fw = NULL, *default_cfg; 2284 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2285 enum dev_state state; 2286 struct fw_info *fw_info; 2287 struct fw_hdr *card_fw; /* fw on the card */ 2288 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2289 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2290 against */ 2291 2292 /* Contact firmware. */ 2293 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2294 if (rc < 0 || state == DEV_STATE_ERR) { 2295 rc = -rc; 2296 device_printf(sc->dev, 2297 "failed to connect to the firmware: %d, %d.\n", rc, state); 2298 return (rc); 2299 } 2300 pf = rc; 2301 if (pf == sc->mbox) 2302 sc->flags |= MASTER_PF; 2303 else if (state == DEV_STATE_UNINIT) { 2304 /* 2305 * We didn't get to be the master so we definitely won't be 2306 * configuring the chip. It's a bug if someone else hasn't 2307 * configured it already. 2308 */ 2309 device_printf(sc->dev, "couldn't be master(%d), " 2310 "device not already initialized either(%d).\n", rc, state); 2311 return (EDOOFUS); 2312 } 2313 2314 /* This is the firmware whose headers the driver was compiled against */ 2315 fw_info = find_fw_info(chip_id(sc)); 2316 if (fw_info == NULL) { 2317 device_printf(sc->dev, 2318 "unable to look up firmware information for chip %d.\n", 2319 chip_id(sc)); 2320 return (EINVAL); 2321 } 2322 drv_fw = &fw_info->fw_hdr; 2323 2324 /* 2325 * The firmware KLD contains many modules. The KLD name is also the 2326 * name of the module that contains the default config file. 
2327 */ 2328 default_cfg = firmware_get(fw_info->kld_name); 2329 2330 /* Read the header of the firmware on the card */ 2331 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2332 rc = -t4_read_flash(sc, FLASH_FW_START, 2333 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2334 if (rc == 0) 2335 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2336 else { 2337 device_printf(sc->dev, 2338 "Unable to read card's firmware header: %d\n", rc); 2339 card_fw_usable = 0; 2340 } 2341 2342 /* This is the firmware in the KLD */ 2343 fw = firmware_get(fw_info->fw_mod_name); 2344 if (fw != NULL) { 2345 kld_fw = (const void *)fw->data; 2346 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2347 } else { 2348 kld_fw = NULL; 2349 kld_fw_usable = 0; 2350 } 2351 2352 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2353 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2354 /* 2355 * Common case: the firmware on the card is an exact match and 2356 * the KLD is an exact match too, or the KLD is 2357 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2358 * here -- use cxgbetool loadfw if you want to reinstall the 2359 * same firmware as the one on the card. 2360 */ 2361 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2362 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2363 be32toh(card_fw->fw_ver))) { 2364 2365 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2366 if (rc != 0) { 2367 device_printf(sc->dev, 2368 "failed to install firmware: %d\n", rc); 2369 goto done; 2370 } 2371 2372 /* Installed successfully, update the cached header too. */ 2373 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2374 card_fw_usable = 1; 2375 need_fw_reset = 0; /* already reset as part of load_fw */ 2376 } 2377 2378 if (!card_fw_usable) { 2379 uint32_t d, c, k; 2380 2381 d = ntohl(drv_fw->fw_ver); 2382 c = ntohl(card_fw->fw_ver); 2383 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 2384 2385 device_printf(sc->dev, "Cannot find a usable firmware: " 2386 "fw_install %d, chip state %d, " 2387 "driver compiled with %d.%d.%d.%d, " 2388 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2389 t4_fw_install, state, 2390 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2391 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2392 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2393 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2394 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2395 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2396 rc = EINVAL; 2397 goto done; 2398 } 2399 2400 /* We're using whatever's on the card and it's known to be good. */ 2401 sc->params.fw_vers = ntohl(card_fw->fw_ver); 2402 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 2403 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2404 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2405 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2406 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2407 t4_get_tp_version(sc, &sc->params.tp_vers); 2408 2409 /* Reset device */ 2410 if (need_fw_reset && 2411 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2412 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2413 if (rc != ETIMEDOUT && rc != EIO) 2414 t4_fw_bye(sc, sc->mbox); 2415 goto done; 2416 } 2417 sc->flags |= FW_OK; 2418 2419 rc = get_params__pre_init(sc); 2420 if (rc != 0) 2421 goto done; /* error message displayed already */ 2422 2423 /* Partition adapter resources as specified in the config file. 
*/ 2424 if (state == DEV_STATE_UNINIT) { 2425 2426 KASSERT(sc->flags & MASTER_PF, 2427 ("%s: trying to change chip settings when not master.", 2428 __func__)); 2429 2430 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2431 if (rc != 0) 2432 goto done; /* error message displayed already */ 2433 2434 t4_tweak_chip_settings(sc); 2435 2436 /* get basic stuff going */ 2437 rc = -t4_fw_initialize(sc, sc->mbox); 2438 if (rc != 0) { 2439 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2440 goto done; 2441 } 2442 } else { 2443 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2444 sc->cfcsum = 0; 2445 } 2446 2447 done: 2448 free(card_fw, M_CXGBE); 2449 if (fw != NULL) 2450 firmware_put(fw, FIRMWARE_UNLOAD); 2451 if (default_cfg != NULL) 2452 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2453 2454 return (rc); 2455 } 2456 2457 #define FW_PARAM_DEV(param) \ 2458 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2459 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2460 #define FW_PARAM_PFVF(param) \ 2461 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2462 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2463 2464 /* 2465 * Partition chip resources for use between various PFs, VFs, etc. 2466 */ 2467 static int 2468 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2469 const char *name_prefix) 2470 { 2471 const struct firmware *cfg = NULL; 2472 int rc = 0; 2473 struct fw_caps_config_cmd caps; 2474 uint32_t mtype, moff, finicsum, cfcsum; 2475 2476 /* 2477 * Figure out what configuration file to use. Pick the default config 2478 * file for the card if the user hasn't specified one explicitly. 2479 */ 2480 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2481 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2482 /* Card specific overrides go here. */ 2483 if (pci_get_device(sc->dev) == 0x440a) 2484 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2485 if (is_fpga(sc)) 2486 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2487 } 2488 2489 /* 2490 * We need to load another module if the profile is anything except 2491 * "default" or "flash". 
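 * The module name is "<kld_name>_<profile>"; selecting a (hypothetical)
 * profile "uwire" on a T5 card, for instance, makes the driver look for a
 * module named t5fw_cfg_uwire.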
 */
	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		char s[32];

		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
		cfg = firmware_get(s);
		if (cfg == NULL) {
			if (default_cfg != NULL) {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the default config file instead.\n",
				    s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", DEFAULT_CF);
			} else {
				device_printf(sc->dev,
				    "unable to load module \"%s\" for "
				    "configuration profile \"%s\", will use "
				    "the config file on the card's flash "
				    "instead.\n", s, sc->cfg_file);
				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
				    "%s", FLASH_CF);
			}
		}
	}

	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
	    default_cfg == NULL) {
		device_printf(sc->dev,
		    "default config file not available, will use the config "
		    "file on the card's flash instead.\n");
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
	}

	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
		u_int cflen, i, n;
		const uint32_t *cfdata;
		uint32_t param, val, addr, off, mw_base, mw_aperture;

		KASSERT(cfg != NULL || default_cfg != NULL,
		    ("%s: no config to upload", __func__));

		/*
		 * Ask the firmware where it wants us to upload the config file.
		 */
		param = FW_PARAM_DEV(CF);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
		if (rc != 0) {
			/* No support for config file?  Shouldn't happen. */
			device_printf(sc->dev,
			    "failed to query config file location: %d.\n", rc);
			goto done;
		}
		mtype = G_FW_PARAMS_PARAM_Y(val);
		moff = G_FW_PARAMS_PARAM_Z(val) << 16;

		/*
		 * XXX: sheer laziness.  We deliberately added 4 bytes of
		 * useless stuffing/comments at the end of the config file so
		 * it's ok to simply throw away the last remaining bytes when
		 * the config file is not an exact multiple of 4.  This also
		 * helps with the validate_mt_off_len check.
		 */
		if (cfg != NULL) {
			cflen = cfg->datasize & ~3;
			cfdata = cfg->data;
		} else {
			cflen = default_cfg->datasize & ~3;
			cfdata = default_cfg->data;
		}

		if (cflen > FLASH_CFG_MAX_SIZE) {
			device_printf(sc->dev,
			    "config file too long (%d, max allowed is %d).  "
			    "Will try to use the config on the card, if any.\n",
			    cflen, FLASH_CFG_MAX_SIZE);
			goto use_config_on_flash;
		}

		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
		if (rc != 0) {
			device_printf(sc->dev,
			    "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 2577 "Will try to use the config on the card, if any.\n", 2578 __func__, mtype, moff, cflen, rc); 2579 goto use_config_on_flash; 2580 } 2581 2582 memwin_info(sc, 2, &mw_base, &mw_aperture); 2583 while (cflen) { 2584 off = position_memwin(sc, 2, addr); 2585 n = min(cflen, mw_aperture - off); 2586 for (i = 0; i < n; i += 4) 2587 t4_write_reg(sc, mw_base + off + i, *cfdata++); 2588 cflen -= n; 2589 addr += n; 2590 } 2591 } else { 2592 use_config_on_flash: 2593 mtype = FW_MEMTYPE_FLASH; 2594 moff = t4_flash_cfg_addr(sc); 2595 } 2596 2597 bzero(&caps, sizeof(caps)); 2598 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2599 F_FW_CMD_REQUEST | F_FW_CMD_READ); 2600 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 2601 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 2602 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 2603 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 2604 if (rc != 0) { 2605 device_printf(sc->dev, 2606 "failed to pre-process config file: %d " 2607 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 2608 goto done; 2609 } 2610 2611 finicsum = be32toh(caps.finicsum); 2612 cfcsum = be32toh(caps.cfcsum); 2613 if (finicsum != cfcsum) { 2614 device_printf(sc->dev, 2615 "WARNING: config file checksum mismatch: %08x %08x\n", 2616 finicsum, cfcsum); 2617 } 2618 sc->cfcsum = cfcsum; 2619 2620 #define LIMIT_CAPS(x) do { \ 2621 caps.x &= htobe16(t4_##x##_allowed); \ 2622 } while (0) 2623 2624 /* 2625 * Let the firmware know what features will (not) be used so it can tune 2626 * things accordingly. 2627 */ 2628 LIMIT_CAPS(linkcaps); 2629 LIMIT_CAPS(niccaps); 2630 LIMIT_CAPS(toecaps); 2631 LIMIT_CAPS(rdmacaps); 2632 LIMIT_CAPS(iscsicaps); 2633 LIMIT_CAPS(fcoecaps); 2634 #undef LIMIT_CAPS 2635 2636 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2637 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 2638 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 2639 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 2640 if (rc != 0) { 2641 device_printf(sc->dev, 2642 "failed to process config file: %d.\n", rc); 2643 } 2644 done: 2645 if (cfg != NULL) 2646 firmware_put(cfg, FIRMWARE_UNLOAD); 2647 return (rc); 2648 } 2649 2650 /* 2651 * Retrieve parameters that are needed (or nice to have) very early. 2652 */ 2653 static int 2654 get_params__pre_init(struct adapter *sc) 2655 { 2656 int rc; 2657 uint32_t param[2], val[2]; 2658 struct fw_devlog_cmd cmd; 2659 struct devlog_params *dlog = &sc->params.devlog; 2660 2661 param[0] = FW_PARAM_DEV(PORTVEC); 2662 param[1] = FW_PARAM_DEV(CCLK); 2663 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 2664 if (rc != 0) { 2665 device_printf(sc->dev, 2666 "failed to query parameters (pre_init): %d.\n", rc); 2667 return (rc); 2668 } 2669 2670 sc->params.portvec = val[0]; 2671 sc->params.nports = bitcount32(val[0]); 2672 sc->params.vpd.cclk = val[1]; 2673 2674 /* Read device log parameters. 
 */
	bzero(&cmd, sizeof(cmd));
	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = be32toh(cmd.memsize_devlog);
	}

	return (rc);
}

/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;
	sc->params.ftid_min = val[2];
	sc->params.ftid_max = val[3];
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;
	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
	    ("%s: L2 table size (%u) larger than expected (%u)",
	    __func__, sc->vres.l2t.size, L2T_SIZE));

	/* get capabilities (the caps fields arrive big-endian) */
	bzero(&caps, sizeof(caps));
	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

#define READ_CAPS(x) do { \
	sc->x = be16toh(caps.x); \
} while (0)
	READ_CAPS(linkcaps);
	READ_CAPS(niccaps);
	READ_CAPS(toecaps);
	READ_CAPS(rdmacaps);
	READ_CAPS(iscsicaps);
	READ_CAPS(fcoecaps);

	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query NIC parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.etid_base = val[0];
		sc->params.etid_min = val[0];
		sc->tids.netids = val[1] - val[0] + 1;
		sc->params.netids = sc->tids.netids;
		sc->params.eo_wr_cred = val[2];
		sc->params.ethoffload = 1;
	}

	if (sc->toecaps) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
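
		/*
		 * All six parameters are fetched with a single query; val[]
		 * is filled in the same order as param[].
		 */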
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}
	if (sc->rdmacaps) {
		param[0] = FW_PARAM_PFVF(STAG_START);
		param[1] = FW_PARAM_PFVF(STAG_END);
		param[2] = FW_PARAM_PFVF(RQ_START);
		param[3] = FW_PARAM_PFVF(RQ_END);
		param[4] = FW_PARAM_PFVF(PBL_START);
		param[5] = FW_PARAM_PFVF(PBL_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(1): %d.\n", rc);
			return (rc);
		}
		sc->vres.stag.start = val[0];
		sc->vres.stag.size = val[1] - val[0] + 1;
		sc->vres.rq.start = val[2];
		sc->vres.rq.size = val[3] - val[2] + 1;
		sc->vres.pbl.start = val[4];
		sc->vres.pbl.size = val[5] - val[4] + 1;

		param[0] = FW_PARAM_PFVF(SQRQ_START);
		param[1] = FW_PARAM_PFVF(SQRQ_END);
		param[2] = FW_PARAM_PFVF(CQ_START);
		param[3] = FW_PARAM_PFVF(CQ_END);
		param[4] = FW_PARAM_PFVF(OCQ_START);
		param[5] = FW_PARAM_PFVF(OCQ_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query RDMA parameters(2): %d.\n", rc);
			return (rc);
		}
		sc->vres.qp.start = val[0];
		sc->vres.qp.size = val[1] - val[0] + 1;
		sc->vres.cq.start = val[2];
		sc->vres.cq.size = val[3] - val[2] + 1;
		sc->vres.ocq.start = val[4];
		sc->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (sc->iscsicaps) {
		param[0] = FW_PARAM_PFVF(ISCSI_START);
		param[1] = FW_PARAM_PFVF(ISCSI_END);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to query iSCSI parameters: %d.\n", rc);
			return (rc);
		}
		sc->vres.iscsi.start = val[0];
		sc->vres.iscsi.size = val[1] - val[0] + 1;
	}

	/*
	 * We've got the params we wanted to query via the firmware.  Now grab
	 * some others directly from the chip.
	 */
	rc = t4_read_chip_settings(sc);

	return (rc);
}

static int
set_params__post_init(struct adapter *sc)
{
	uint32_t param, val;

	/* ask for encapsulated CPLs */
	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val = 1;
	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);

	return (0);
}

#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

static void
t4_set_desc(struct adapter *sc)
{
	char buf[128];
	struct adapter_params *p = &sc->params;

	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ?
"R" : "", 2879 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec); 2880 2881 device_set_desc_copy(sc->dev, buf); 2882 } 2883 2884 static void 2885 build_medialist(struct port_info *pi, struct ifmedia *media) 2886 { 2887 int data, m; 2888 2889 PORT_LOCK(pi); 2890 2891 ifmedia_removeall(media); 2892 2893 m = IFM_ETHER | IFM_FDX; 2894 data = (pi->port_type << 8) | pi->mod_type; 2895 2896 switch(pi->port_type) { 2897 case FW_PORT_TYPE_BT_XFI: 2898 ifmedia_add(media, m | IFM_10G_T, data, NULL); 2899 break; 2900 2901 case FW_PORT_TYPE_BT_XAUI: 2902 ifmedia_add(media, m | IFM_10G_T, data, NULL); 2903 /* fall through */ 2904 2905 case FW_PORT_TYPE_BT_SGMII: 2906 ifmedia_add(media, m | IFM_1000_T, data, NULL); 2907 ifmedia_add(media, m | IFM_100_TX, data, NULL); 2908 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL); 2909 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 2910 break; 2911 2912 case FW_PORT_TYPE_CX4: 2913 ifmedia_add(media, m | IFM_10G_CX4, data, NULL); 2914 ifmedia_set(media, m | IFM_10G_CX4); 2915 break; 2916 2917 case FW_PORT_TYPE_QSFP_10G: 2918 case FW_PORT_TYPE_SFP: 2919 case FW_PORT_TYPE_FIBER_XFI: 2920 case FW_PORT_TYPE_FIBER_XAUI: 2921 switch (pi->mod_type) { 2922 2923 case FW_PORT_MOD_TYPE_LR: 2924 ifmedia_add(media, m | IFM_10G_LR, data, NULL); 2925 ifmedia_set(media, m | IFM_10G_LR); 2926 break; 2927 2928 case FW_PORT_MOD_TYPE_SR: 2929 ifmedia_add(media, m | IFM_10G_SR, data, NULL); 2930 ifmedia_set(media, m | IFM_10G_SR); 2931 break; 2932 2933 case FW_PORT_MOD_TYPE_LRM: 2934 ifmedia_add(media, m | IFM_10G_LRM, data, NULL); 2935 ifmedia_set(media, m | IFM_10G_LRM); 2936 break; 2937 2938 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 2939 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 2940 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL); 2941 ifmedia_set(media, m | IFM_10G_TWINAX); 2942 break; 2943 2944 case FW_PORT_MOD_TYPE_NONE: 2945 m &= ~IFM_FDX; 2946 ifmedia_add(media, m | IFM_NONE, data, NULL); 2947 ifmedia_set(media, m | IFM_NONE); 2948 break; 2949 2950 case FW_PORT_MOD_TYPE_NA: 2951 case FW_PORT_MOD_TYPE_ER: 2952 default: 2953 device_printf(pi->dev, 2954 "unknown port_type (%d), mod_type (%d)\n", 2955 pi->port_type, pi->mod_type); 2956 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2957 ifmedia_set(media, m | IFM_UNKNOWN); 2958 break; 2959 } 2960 break; 2961 2962 case FW_PORT_TYPE_QSFP: 2963 switch (pi->mod_type) { 2964 2965 case FW_PORT_MOD_TYPE_LR: 2966 ifmedia_add(media, m | IFM_40G_LR4, data, NULL); 2967 ifmedia_set(media, m | IFM_40G_LR4); 2968 break; 2969 2970 case FW_PORT_MOD_TYPE_SR: 2971 ifmedia_add(media, m | IFM_40G_SR4, data, NULL); 2972 ifmedia_set(media, m | IFM_40G_SR4); 2973 break; 2974 2975 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 2976 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 2977 ifmedia_add(media, m | IFM_40G_CR4, data, NULL); 2978 ifmedia_set(media, m | IFM_40G_CR4); 2979 break; 2980 2981 case FW_PORT_MOD_TYPE_NONE: 2982 m &= ~IFM_FDX; 2983 ifmedia_add(media, m | IFM_NONE, data, NULL); 2984 ifmedia_set(media, m | IFM_NONE); 2985 break; 2986 2987 default: 2988 device_printf(pi->dev, 2989 "unknown port_type (%d), mod_type (%d)\n", 2990 pi->port_type, pi->mod_type); 2991 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 2992 ifmedia_set(media, m | IFM_UNKNOWN); 2993 break; 2994 } 2995 break; 2996 2997 default: 2998 device_printf(pi->dev, 2999 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3000 pi->mod_type); 3001 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL); 3002 ifmedia_set(media, m | IFM_UNKNOWN); 3003 break; 3004 } 3005 3006 PORT_UNLOCK(pi); 3007 } 3008 3009 
#define FW_MAC_EXACT_CHUNK	7

/*
 * Program the port's XGMAC based on parameters in ifnet.  The caller also
 * indicates which parameters should be programmed (the rest are left alone).
 */
int
update_mac_settings(struct ifnet *ifp, int flags)
{
	int rc = 0;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
	uint16_t viid = 0xffff;
	int16_t *xact_addr_filt = NULL;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT(flags, ("%s: not told what to update.", __func__));

	if (ifp == pi->ifp) {
		viid = pi->viid;
		xact_addr_filt = &pi->xact_addr_filt;
	}
#ifdef DEV_NETMAP
	else if (ifp == pi->nm_ifp) {
		viid = pi->nm_viid;
		xact_addr_filt = &pi->nm_xact_addr_filt;
	}
#endif
	if (flags & XGMAC_MTU)
		mtu = ifp->if_mtu;

	if (flags & XGMAC_PROMISC)
		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;

	if (flags & XGMAC_ALLMULTI)
		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;

	if (flags & XGMAC_VLANEX)
		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;

	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
		rc = -t4_set_rxmode(sc, sc->mbox, viid, mtu, promisc, allmulti,
		    1, vlanex, false);
		if (rc) {
			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
			    rc);
			return (rc);
		}
	}

	if (flags & XGMAC_UCADDR) {
		uint8_t ucaddr[ETHER_ADDR_LEN];

		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
		rc = t4_change_mac(sc, sc->mbox, viid, *xact_addr_filt, ucaddr,
		    true, true);
		if (rc < 0) {
			rc = -rc;
			if_printf(ifp, "change_mac failed: %d\n", rc);
			return (rc);
		} else {
			*xact_addr_filt = rc;
			rc = 0;
		}
	}

	if (flags & XGMAC_MCADDRS) {
		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
		int del = 1;
		uint64_t hash = 0;
		struct ifmultiaddr *ifma;
		int i = 0, j;

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mcaddr[i] =
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
			i++;

			if (i == FW_MAC_EXACT_CHUNK) {
				rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del,
				    i, mcaddr, NULL, &hash, 0);
				if (rc < 0) {
					rc = -rc;
					for (j = 0; j < i; j++) {
						if_printf(ifp,
						    "failed to add mc address"
						    " %02x:%02x:%02x:"
						    "%02x:%02x:%02x rc=%d\n",
						    mcaddr[j][0], mcaddr[j][1],
						    mcaddr[j][2], mcaddr[j][3],
						    mcaddr[j][4], mcaddr[j][5],
						    rc);
					}
					goto mcfail;
				}
				del = 0;
				i = 0;
			}
		}
		if (i > 0) {
			rc = t4_alloc_mac_filt(sc, sc->mbox, viid, del, i,
			    mcaddr, NULL, &hash, 0);
			if (rc < 0) {
				rc = -rc;
				for (j = 0; j < i; j++) {
					if_printf(ifp,
					    "failed to add mc address"
					    " %02x:%02x:%02x:"
					    "%02x:%02x:%02x rc=%d\n",
					    mcaddr[j][0], mcaddr[j][1],
					    mcaddr[j][2], mcaddr[j][3],
					    mcaddr[j][4], mcaddr[j][5],
					    rc);
				}
				goto mcfail;
			}
		}

		rc = -t4_set_addr_hash(sc, sc->mbox, viid, 0, hash, 0);
		if (rc != 0)
			if_printf(ifp, "failed to set mc address hash: %d\n",
			    rc);
mcfail:
		if_maddr_runlock(ifp);
	}

	return (rc);
}

int
begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
    char *wmesg)
{
	int rc, pri;

#ifdef WITNESS
	/* the caller thinks it's ok to sleep, but is it really? */
	if (flags & SLEEP_OK)
		pause("t4slptst", 1);
#endif

	if (flags & INTR_OK)
		pri = PCATCH;
	else
		pri = 0;

	ADAPTER_LOCK(sc);
	for (;;) {

		if (pi && IS_DOOMED(pi)) {
			rc = ENXIO;
			goto done;
		}

		if (!IS_BUSY(sc)) {
			rc = 0;
			break;
		}

		if (!(flags & SLEEP_OK)) {
			rc = EBUSY;
			goto done;
		}

		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
			rc = EINTR;
			goto done;
		}
	}

	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = wmesg;
	sc->last_op_thr = curthread;
#endif

done:
	if (!(flags & HOLD_LOCK) || rc)
		ADAPTER_UNLOCK(sc);

	return (rc);
}

void
end_synchronized_op(struct adapter *sc, int flags)
{

	if (flags & LOCK_HELD)
		ADAPTER_LOCK_ASSERT_OWNED(sc);
	else
		ADAPTER_LOCK(sc);

	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);
}

static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0, i;
	struct sge_txq *txq;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(ifp, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
	 * if this changes.
	 */

	for_each_txq(pi, i, txq) {
		TXQ_LOCK(txq);
		txq->eq.flags |= EQ_ENABLED;
		TXQ_UNLOCK(txq);
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0) {
		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
		t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
		    V_QUEUENUMBER(sc->traceq));
		pi->flags |= HAS_TRACEQ;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);

	return (rc);
}

/*
 * Idempotent.
3284 */ 3285 static int 3286 cxgbe_uninit_synchronized(struct port_info *pi) 3287 { 3288 struct adapter *sc = pi->adapter; 3289 struct ifnet *ifp = pi->ifp; 3290 int rc, i; 3291 struct sge_txq *txq; 3292 3293 ASSERT_SYNCHRONIZED_OP(sc); 3294 3295 /* 3296 * Disable the VI so that all its data in either direction is discarded 3297 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 3298 * tick) intact as the TP can deliver negative advice or data that it's 3299 * holding in its RAM (for an offloaded connection) even after the VI is 3300 * disabled. 3301 */ 3302 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false); 3303 if (rc) { 3304 if_printf(ifp, "disable_vi failed: %d\n", rc); 3305 return (rc); 3306 } 3307 3308 for_each_txq(pi, i, txq) { 3309 TXQ_LOCK(txq); 3310 txq->eq.flags &= ~EQ_ENABLED; 3311 TXQ_UNLOCK(txq); 3312 } 3313 3314 clrbit(&sc->open_device_map, pi->port_id); 3315 PORT_LOCK(pi); 3316 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3317 PORT_UNLOCK(pi); 3318 3319 pi->link_cfg.link_ok = 0; 3320 pi->link_cfg.speed = 0; 3321 pi->linkdnrc = -1; 3322 t4_os_link_changed(sc, pi->port_id, 0, -1); 3323 3324 return (0); 3325 } 3326 3327 /* 3328 * It is ok for this function to fail midway and return right away. t4_detach 3329 * will walk the entire sc->irq list and clean up whatever is valid. 3330 */ 3331 static int 3332 setup_intr_handlers(struct adapter *sc) 3333 { 3334 int rc, rid, p, q; 3335 char s[8]; 3336 struct irq *irq; 3337 struct port_info *pi; 3338 struct sge_rxq *rxq; 3339 #ifdef TCP_OFFLOAD 3340 struct sge_ofld_rxq *ofld_rxq; 3341 #endif 3342 #ifdef DEV_NETMAP 3343 struct sge_nm_rxq *nm_rxq; 3344 #endif 3345 3346 /* 3347 * Setup interrupts. 3348 */ 3349 irq = &sc->irq[0]; 3350 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3351 if (sc->intr_count == 1) 3352 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3353 3354 /* Multiple interrupts. 
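 * The layout mirrors what cfg_itype_and_nqueues() decided on: vector 0
 * for errors, vector 1 for the firmware event queue, and the rest spread
 * across the NIC/TOE/netmap rx queues of each port.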
*/ 3355 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3356 ("%s: too few intr.", __func__)); 3357 3358 /* The first one is always error intr */ 3359 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3360 if (rc != 0) 3361 return (rc); 3362 irq++; 3363 rid++; 3364 3365 /* The second one is always the firmware event queue */ 3366 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt"); 3367 if (rc != 0) 3368 return (rc); 3369 irq++; 3370 rid++; 3371 3372 for_each_port(sc, p) { 3373 pi = sc->port[p]; 3374 3375 if (pi->flags & INTR_RXQ) { 3376 for_each_rxq(pi, q, rxq) { 3377 snprintf(s, sizeof(s), "%d.%d", p, q); 3378 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq, 3379 s); 3380 if (rc != 0) 3381 return (rc); 3382 irq++; 3383 rid++; 3384 } 3385 } 3386 #ifdef TCP_OFFLOAD 3387 if (pi->flags & INTR_OFLD_RXQ) { 3388 for_each_ofld_rxq(pi, q, ofld_rxq) { 3389 snprintf(s, sizeof(s), "%d,%d", p, q); 3390 rc = t4_alloc_irq(sc, irq, rid, t4_intr, 3391 ofld_rxq, s); 3392 if (rc != 0) 3393 return (rc); 3394 irq++; 3395 rid++; 3396 } 3397 } 3398 #endif 3399 #ifdef DEV_NETMAP 3400 if (pi->flags & INTR_NM_RXQ) { 3401 for_each_nm_rxq(pi, q, nm_rxq) { 3402 snprintf(s, sizeof(s), "%d-%d", p, q); 3403 rc = t4_alloc_irq(sc, irq, rid, t4_nm_intr, 3404 nm_rxq, s); 3405 if (rc != 0) 3406 return (rc); 3407 irq++; 3408 rid++; 3409 } 3410 } 3411 #endif 3412 } 3413 MPASS(irq == &sc->irq[sc->intr_count]); 3414 3415 return (0); 3416 } 3417 3418 int 3419 adapter_full_init(struct adapter *sc) 3420 { 3421 int rc, i; 3422 3423 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3424 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 3425 ("%s: FULL_INIT_DONE already", __func__)); 3426 3427 /* 3428 * queues that belong to the adapter (not any particular port). 3429 */ 3430 rc = t4_setup_adapter_queues(sc); 3431 if (rc != 0) 3432 goto done; 3433 3434 for (i = 0; i < nitems(sc->tq); i++) { 3435 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 3436 taskqueue_thread_enqueue, &sc->tq[i]); 3437 if (sc->tq[i] == NULL) { 3438 device_printf(sc->dev, 3439 "failed to allocate task queue %d\n", i); 3440 rc = ENOMEM; 3441 goto done; 3442 } 3443 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 3444 device_get_nameunit(sc->dev), i); 3445 } 3446 3447 t4_intr_enable(sc); 3448 sc->flags |= FULL_INIT_DONE; 3449 done: 3450 if (rc != 0) 3451 adapter_full_uninit(sc); 3452 3453 return (rc); 3454 } 3455 3456 int 3457 adapter_full_uninit(struct adapter *sc) 3458 { 3459 int i; 3460 3461 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3462 3463 t4_teardown_adapter_queues(sc); 3464 3465 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 3466 taskqueue_free(sc->tq[i]); 3467 sc->tq[i] = NULL; 3468 } 3469 3470 sc->flags &= ~FULL_INIT_DONE; 3471 3472 return (0); 3473 } 3474 3475 int 3476 port_full_init(struct port_info *pi) 3477 { 3478 struct adapter *sc = pi->adapter; 3479 struct ifnet *ifp = pi->ifp; 3480 uint16_t *rss; 3481 struct sge_rxq *rxq; 3482 int rc, i, j; 3483 3484 ASSERT_SYNCHRONIZED_OP(sc); 3485 KASSERT((pi->flags & PORT_INIT_DONE) == 0, 3486 ("%s: PORT_INIT_DONE already", __func__)); 3487 3488 sysctl_ctx_init(&pi->ctx); 3489 pi->flags |= PORT_SYSCTL_CTX; 3490 3491 /* 3492 * Allocate tx/rx/fl queues for this port. 3493 */ 3494 rc = t4_setup_port_queues(pi); 3495 if (rc != 0) 3496 goto done; /* error message displayed already */ 3497 3498 /* 3499 * Setup RSS for this port. Save a copy of the RSS table for later use. 
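 * The table cycles through the absolute IDs of this port's rx queues;
 * e.g. with rss_size 128 and 4 rx queues, each queue appears 32 times.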
3500 */ 3501 rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 3502 for (i = 0; i < pi->rss_size;) { 3503 for_each_rxq(pi, j, rxq) { 3504 rss[i++] = rxq->iq.abs_id; 3505 if (i == pi->rss_size) 3506 break; 3507 } 3508 } 3509 3510 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss, 3511 pi->rss_size); 3512 if (rc != 0) { 3513 if_printf(ifp, "rss_config failed: %d\n", rc); 3514 goto done; 3515 } 3516 3517 pi->rss = rss; 3518 pi->flags |= PORT_INIT_DONE; 3519 done: 3520 if (rc != 0) 3521 port_full_uninit(pi); 3522 3523 return (rc); 3524 } 3525 3526 /* 3527 * Idempotent. 3528 */ 3529 int 3530 port_full_uninit(struct port_info *pi) 3531 { 3532 struct adapter *sc = pi->adapter; 3533 int i; 3534 struct sge_rxq *rxq; 3535 struct sge_txq *txq; 3536 #ifdef TCP_OFFLOAD 3537 struct sge_ofld_rxq *ofld_rxq; 3538 struct sge_wrq *ofld_txq; 3539 #endif 3540 3541 if (pi->flags & PORT_INIT_DONE) { 3542 3543 /* Need to quiesce queues. */ 3544 3545 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 3546 3547 for_each_txq(pi, i, txq) { 3548 quiesce_txq(sc, txq); 3549 } 3550 3551 #ifdef TCP_OFFLOAD 3552 for_each_ofld_txq(pi, i, ofld_txq) { 3553 quiesce_wrq(sc, ofld_txq); 3554 } 3555 #endif 3556 3557 for_each_rxq(pi, i, rxq) { 3558 quiesce_iq(sc, &rxq->iq); 3559 quiesce_fl(sc, &rxq->fl); 3560 } 3561 3562 #ifdef TCP_OFFLOAD 3563 for_each_ofld_rxq(pi, i, ofld_rxq) { 3564 quiesce_iq(sc, &ofld_rxq->iq); 3565 quiesce_fl(sc, &ofld_rxq->fl); 3566 } 3567 #endif 3568 free(pi->rss, M_CXGBE); 3569 } 3570 3571 t4_teardown_port_queues(pi); 3572 pi->flags &= ~PORT_INIT_DONE; 3573 3574 return (0); 3575 } 3576 3577 static void 3578 quiesce_txq(struct adapter *sc, struct sge_txq *txq) 3579 { 3580 struct sge_eq *eq = &txq->eq; 3581 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 3582 3583 (void) sc; /* unused */ 3584 3585 #ifdef INVARIANTS 3586 TXQ_LOCK(txq); 3587 MPASS((eq->flags & EQ_ENABLED) == 0); 3588 TXQ_UNLOCK(txq); 3589 #endif 3590 3591 /* Wait for the mp_ring to empty. */ 3592 while (!mp_ring_is_idle(txq->r)) { 3593 mp_ring_check_drainage(txq->r, 0); 3594 pause("rquiesce", 1); 3595 } 3596 3597 /* Then wait for the hardware to finish. */ 3598 while (spg->cidx != htobe16(eq->pidx)) 3599 pause("equiesce", 1); 3600 3601 /* Finally, wait for the driver to reclaim all descriptors. 
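 * (i.e. until the eq's consumer index catches up with its producer index)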
*/ 3602 while (eq->cidx != eq->pidx) 3603 pause("dquiesce", 1); 3604 } 3605 3606 static void 3607 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 3608 { 3609 3610 /* XXXTX */ 3611 } 3612 3613 static void 3614 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 3615 { 3616 (void) sc; /* unused */ 3617 3618 /* Synchronize with the interrupt handler */ 3619 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 3620 pause("iqfree", 1); 3621 } 3622 3623 static void 3624 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 3625 { 3626 mtx_lock(&sc->sfl_lock); 3627 FL_LOCK(fl); 3628 fl->flags |= FL_DOOMED; 3629 FL_UNLOCK(fl); 3630 mtx_unlock(&sc->sfl_lock); 3631 3632 callout_drain(&sc->sfl_callout); 3633 KASSERT((fl->flags & FL_STARVING) == 0, 3634 ("%s: still starving", __func__)); 3635 } 3636 3637 static int 3638 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 3639 driver_intr_t *handler, void *arg, char *name) 3640 { 3641 int rc; 3642 3643 irq->rid = rid; 3644 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 3645 RF_SHAREABLE | RF_ACTIVE); 3646 if (irq->res == NULL) { 3647 device_printf(sc->dev, 3648 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 3649 return (ENOMEM); 3650 } 3651 3652 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 3653 NULL, handler, arg, &irq->tag); 3654 if (rc != 0) { 3655 device_printf(sc->dev, 3656 "failed to setup interrupt for rid %d, name %s: %d\n", 3657 rid, name, rc); 3658 } else if (name) 3659 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 3660 3661 return (rc); 3662 } 3663 3664 static int 3665 t4_free_irq(struct adapter *sc, struct irq *irq) 3666 { 3667 if (irq->tag) 3668 bus_teardown_intr(sc->dev, irq->res, irq->tag); 3669 if (irq->res) 3670 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 3671 3672 bzero(irq, sizeof(*irq)); 3673 3674 return (0); 3675 } 3676 3677 static void 3678 reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start, 3679 unsigned int end) 3680 { 3681 uint32_t *p = (uint32_t *)(buf + start); 3682 3683 for ( ; start <= end; start += sizeof(uint32_t)) 3684 *p++ = t4_read_reg(sc, start); 3685 } 3686 3687 static void 3688 t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 3689 { 3690 int i, n; 3691 const unsigned int *reg_ranges; 3692 static const unsigned int t4_reg_ranges[] = { 3693 0x1008, 0x1108, 3694 0x1180, 0x11b4, 3695 0x11fc, 0x123c, 3696 0x1300, 0x173c, 3697 0x1800, 0x18fc, 3698 0x3000, 0x30d8, 3699 0x30e0, 0x5924, 3700 0x5960, 0x59d4, 3701 0x5a00, 0x5af8, 3702 0x6000, 0x6098, 3703 0x6100, 0x6150, 3704 0x6200, 0x6208, 3705 0x6240, 0x6248, 3706 0x6280, 0x6338, 3707 0x6370, 0x638c, 3708 0x6400, 0x643c, 3709 0x6500, 0x6524, 3710 0x6a00, 0x6a38, 3711 0x6a60, 0x6a78, 3712 0x6b00, 0x6b84, 3713 0x6bf0, 0x6c84, 3714 0x6cf0, 0x6d84, 3715 0x6df0, 0x6e84, 3716 0x6ef0, 0x6f84, 3717 0x6ff0, 0x7084, 3718 0x70f0, 0x7184, 3719 0x71f0, 0x7284, 3720 0x72f0, 0x7384, 3721 0x73f0, 0x7450, 3722 0x7500, 0x7530, 3723 0x7600, 0x761c, 3724 0x7680, 0x76cc, 3725 0x7700, 0x7798, 3726 0x77c0, 0x77fc, 3727 0x7900, 0x79fc, 3728 0x7b00, 0x7c38, 3729 0x7d00, 0x7efc, 3730 0x8dc0, 0x8e1c, 3731 0x8e30, 0x8e78, 3732 0x8ea0, 0x8f6c, 3733 0x8fc0, 0x9074, 3734 0x90fc, 0x90fc, 3735 0x9400, 0x9458, 3736 0x9600, 0x96bc, 3737 0x9800, 0x9808, 3738 0x9820, 0x983c, 3739 0x9850, 0x9864, 3740 0x9c00, 0x9c6c, 3741 0x9c80, 0x9cec, 3742 0x9d00, 0x9d6c, 3743 0x9d80, 0x9dec, 3744 0x9e00, 0x9e6c, 3745 0x9e80, 0x9eec, 3746 0x9f00, 0x9f6c, 3747 0x9f80, 0x9fec, 3748 0xd004, 
0xd03c, 3749 0xdfc0, 0xdfe0, 3750 0xe000, 0xea7c, 3751 0xf000, 0x11110, 3752 0x11118, 0x11190, 3753 0x19040, 0x1906c, 3754 0x19078, 0x19080, 3755 0x1908c, 0x19124, 3756 0x19150, 0x191b0, 3757 0x191d0, 0x191e8, 3758 0x19238, 0x1924c, 3759 0x193f8, 0x19474, 3760 0x19490, 0x194f8, 3761 0x19800, 0x19f30, 3762 0x1a000, 0x1a06c, 3763 0x1a0b0, 0x1a120, 3764 0x1a128, 0x1a138, 3765 0x1a190, 0x1a1c4, 3766 0x1a1fc, 0x1a1fc, 3767 0x1e040, 0x1e04c, 3768 0x1e284, 0x1e28c, 3769 0x1e2c0, 0x1e2c0, 3770 0x1e2e0, 0x1e2e0, 3771 0x1e300, 0x1e384, 3772 0x1e3c0, 0x1e3c8, 3773 0x1e440, 0x1e44c, 3774 0x1e684, 0x1e68c, 3775 0x1e6c0, 0x1e6c0, 3776 0x1e6e0, 0x1e6e0, 3777 0x1e700, 0x1e784, 3778 0x1e7c0, 0x1e7c8, 3779 0x1e840, 0x1e84c, 3780 0x1ea84, 0x1ea8c, 3781 0x1eac0, 0x1eac0, 3782 0x1eae0, 0x1eae0, 3783 0x1eb00, 0x1eb84, 3784 0x1ebc0, 0x1ebc8, 3785 0x1ec40, 0x1ec4c, 3786 0x1ee84, 0x1ee8c, 3787 0x1eec0, 0x1eec0, 3788 0x1eee0, 0x1eee0, 3789 0x1ef00, 0x1ef84, 3790 0x1efc0, 0x1efc8, 3791 0x1f040, 0x1f04c, 3792 0x1f284, 0x1f28c, 3793 0x1f2c0, 0x1f2c0, 3794 0x1f2e0, 0x1f2e0, 3795 0x1f300, 0x1f384, 3796 0x1f3c0, 0x1f3c8, 3797 0x1f440, 0x1f44c, 3798 0x1f684, 0x1f68c, 3799 0x1f6c0, 0x1f6c0, 3800 0x1f6e0, 0x1f6e0, 3801 0x1f700, 0x1f784, 3802 0x1f7c0, 0x1f7c8, 3803 0x1f840, 0x1f84c, 3804 0x1fa84, 0x1fa8c, 3805 0x1fac0, 0x1fac0, 3806 0x1fae0, 0x1fae0, 3807 0x1fb00, 0x1fb84, 3808 0x1fbc0, 0x1fbc8, 3809 0x1fc40, 0x1fc4c, 3810 0x1fe84, 0x1fe8c, 3811 0x1fec0, 0x1fec0, 3812 0x1fee0, 0x1fee0, 3813 0x1ff00, 0x1ff84, 3814 0x1ffc0, 0x1ffc8, 3815 0x20000, 0x2002c, 3816 0x20100, 0x2013c, 3817 0x20190, 0x201c8, 3818 0x20200, 0x20318, 3819 0x20400, 0x20528, 3820 0x20540, 0x20614, 3821 0x21000, 0x21040, 3822 0x2104c, 0x21060, 3823 0x210c0, 0x210ec, 3824 0x21200, 0x21268, 3825 0x21270, 0x21284, 3826 0x212fc, 0x21388, 3827 0x21400, 0x21404, 3828 0x21500, 0x21518, 3829 0x2152c, 0x2153c, 3830 0x21550, 0x21554, 3831 0x21600, 0x21600, 3832 0x21608, 0x21628, 3833 0x21630, 0x2163c, 3834 0x21700, 0x2171c, 3835 0x21780, 0x2178c, 3836 0x21800, 0x21c38, 3837 0x21c80, 0x21d7c, 3838 0x21e00, 0x21e04, 3839 0x22000, 0x2202c, 3840 0x22100, 0x2213c, 3841 0x22190, 0x221c8, 3842 0x22200, 0x22318, 3843 0x22400, 0x22528, 3844 0x22540, 0x22614, 3845 0x23000, 0x23040, 3846 0x2304c, 0x23060, 3847 0x230c0, 0x230ec, 3848 0x23200, 0x23268, 3849 0x23270, 0x23284, 3850 0x232fc, 0x23388, 3851 0x23400, 0x23404, 3852 0x23500, 0x23518, 3853 0x2352c, 0x2353c, 3854 0x23550, 0x23554, 3855 0x23600, 0x23600, 3856 0x23608, 0x23628, 3857 0x23630, 0x2363c, 3858 0x23700, 0x2371c, 3859 0x23780, 0x2378c, 3860 0x23800, 0x23c38, 3861 0x23c80, 0x23d7c, 3862 0x23e00, 0x23e04, 3863 0x24000, 0x2402c, 3864 0x24100, 0x2413c, 3865 0x24190, 0x241c8, 3866 0x24200, 0x24318, 3867 0x24400, 0x24528, 3868 0x24540, 0x24614, 3869 0x25000, 0x25040, 3870 0x2504c, 0x25060, 3871 0x250c0, 0x250ec, 3872 0x25200, 0x25268, 3873 0x25270, 0x25284, 3874 0x252fc, 0x25388, 3875 0x25400, 0x25404, 3876 0x25500, 0x25518, 3877 0x2552c, 0x2553c, 3878 0x25550, 0x25554, 3879 0x25600, 0x25600, 3880 0x25608, 0x25628, 3881 0x25630, 0x2563c, 3882 0x25700, 0x2571c, 3883 0x25780, 0x2578c, 3884 0x25800, 0x25c38, 3885 0x25c80, 0x25d7c, 3886 0x25e00, 0x25e04, 3887 0x26000, 0x2602c, 3888 0x26100, 0x2613c, 3889 0x26190, 0x261c8, 3890 0x26200, 0x26318, 3891 0x26400, 0x26528, 3892 0x26540, 0x26614, 3893 0x27000, 0x27040, 3894 0x2704c, 0x27060, 3895 0x270c0, 0x270ec, 3896 0x27200, 0x27268, 3897 0x27270, 0x27284, 3898 0x272fc, 0x27388, 3899 0x27400, 0x27404, 3900 0x27500, 0x27518, 3901 0x2752c, 0x2753c, 3902 0x27550, 0x27554, 3903 
0x27600, 0x27600, 3904 0x27608, 0x27628, 3905 0x27630, 0x2763c, 3906 0x27700, 0x2771c, 3907 0x27780, 0x2778c, 3908 0x27800, 0x27c38, 3909 0x27c80, 0x27d7c, 3910 0x27e00, 0x27e04 3911 }; 3912 static const unsigned int t5_reg_ranges[] = { 3913 0x1008, 0x1148, 3914 0x1180, 0x11b4, 3915 0x11fc, 0x123c, 3916 0x1280, 0x173c, 3917 0x1800, 0x18fc, 3918 0x3000, 0x3028, 3919 0x3060, 0x30d8, 3920 0x30e0, 0x30fc, 3921 0x3140, 0x357c, 3922 0x35a8, 0x35cc, 3923 0x35ec, 0x35ec, 3924 0x3600, 0x5624, 3925 0x56cc, 0x575c, 3926 0x580c, 0x5814, 3927 0x5890, 0x58bc, 3928 0x5940, 0x59dc, 3929 0x59fc, 0x5a18, 3930 0x5a60, 0x5a9c, 3931 0x5b94, 0x5bfc, 3932 0x6000, 0x6040, 3933 0x6058, 0x614c, 3934 0x7700, 0x7798, 3935 0x77c0, 0x78fc, 3936 0x7b00, 0x7c54, 3937 0x7d00, 0x7efc, 3938 0x8dc0, 0x8de0, 3939 0x8df8, 0x8e84, 3940 0x8ea0, 0x8f84, 3941 0x8fc0, 0x90f8, 3942 0x9400, 0x9470, 3943 0x9600, 0x96f4, 3944 0x9800, 0x9808, 3945 0x9820, 0x983c, 3946 0x9850, 0x9864, 3947 0x9c00, 0x9c6c, 3948 0x9c80, 0x9cec, 3949 0x9d00, 0x9d6c, 3950 0x9d80, 0x9dec, 3951 0x9e00, 0x9e6c, 3952 0x9e80, 0x9eec, 3953 0x9f00, 0x9f6c, 3954 0x9f80, 0xa020, 3955 0xd004, 0xd03c, 3956 0xdfc0, 0xdfe0, 3957 0xe000, 0x11088, 3958 0x1109c, 0x11110, 3959 0x11118, 0x1117c, 3960 0x11190, 0x11204, 3961 0x19040, 0x1906c, 3962 0x19078, 0x19080, 3963 0x1908c, 0x19124, 3964 0x19150, 0x191b0, 3965 0x191d0, 0x191e8, 3966 0x19238, 0x19290, 3967 0x193f8, 0x19474, 3968 0x19490, 0x194cc, 3969 0x194f0, 0x194f8, 3970 0x19c00, 0x19c60, 3971 0x19c94, 0x19e10, 3972 0x19e50, 0x19f34, 3973 0x19f40, 0x19f50, 3974 0x19f90, 0x19fe4, 3975 0x1a000, 0x1a06c, 3976 0x1a0b0, 0x1a120, 3977 0x1a128, 0x1a138, 3978 0x1a190, 0x1a1c4, 3979 0x1a1fc, 0x1a1fc, 3980 0x1e008, 0x1e00c, 3981 0x1e040, 0x1e04c, 3982 0x1e284, 0x1e290, 3983 0x1e2c0, 0x1e2c0, 3984 0x1e2e0, 0x1e2e0, 3985 0x1e300, 0x1e384, 3986 0x1e3c0, 0x1e3c8, 3987 0x1e408, 0x1e40c, 3988 0x1e440, 0x1e44c, 3989 0x1e684, 0x1e690, 3990 0x1e6c0, 0x1e6c0, 3991 0x1e6e0, 0x1e6e0, 3992 0x1e700, 0x1e784, 3993 0x1e7c0, 0x1e7c8, 3994 0x1e808, 0x1e80c, 3995 0x1e840, 0x1e84c, 3996 0x1ea84, 0x1ea90, 3997 0x1eac0, 0x1eac0, 3998 0x1eae0, 0x1eae0, 3999 0x1eb00, 0x1eb84, 4000 0x1ebc0, 0x1ebc8, 4001 0x1ec08, 0x1ec0c, 4002 0x1ec40, 0x1ec4c, 4003 0x1ee84, 0x1ee90, 4004 0x1eec0, 0x1eec0, 4005 0x1eee0, 0x1eee0, 4006 0x1ef00, 0x1ef84, 4007 0x1efc0, 0x1efc8, 4008 0x1f008, 0x1f00c, 4009 0x1f040, 0x1f04c, 4010 0x1f284, 0x1f290, 4011 0x1f2c0, 0x1f2c0, 4012 0x1f2e0, 0x1f2e0, 4013 0x1f300, 0x1f384, 4014 0x1f3c0, 0x1f3c8, 4015 0x1f408, 0x1f40c, 4016 0x1f440, 0x1f44c, 4017 0x1f684, 0x1f690, 4018 0x1f6c0, 0x1f6c0, 4019 0x1f6e0, 0x1f6e0, 4020 0x1f700, 0x1f784, 4021 0x1f7c0, 0x1f7c8, 4022 0x1f808, 0x1f80c, 4023 0x1f840, 0x1f84c, 4024 0x1fa84, 0x1fa90, 4025 0x1fac0, 0x1fac0, 4026 0x1fae0, 0x1fae0, 4027 0x1fb00, 0x1fb84, 4028 0x1fbc0, 0x1fbc8, 4029 0x1fc08, 0x1fc0c, 4030 0x1fc40, 0x1fc4c, 4031 0x1fe84, 0x1fe90, 4032 0x1fec0, 0x1fec0, 4033 0x1fee0, 0x1fee0, 4034 0x1ff00, 0x1ff84, 4035 0x1ffc0, 0x1ffc8, 4036 0x30000, 0x30030, 4037 0x30100, 0x30144, 4038 0x30190, 0x301d0, 4039 0x30200, 0x30318, 4040 0x30400, 0x3052c, 4041 0x30540, 0x3061c, 4042 0x30800, 0x30834, 4043 0x308c0, 0x30908, 4044 0x30910, 0x309ac, 4045 0x30a00, 0x30a2c, 4046 0x30a44, 0x30a50, 4047 0x30a74, 0x30c24, 4048 0x30d00, 0x30d00, 4049 0x30d08, 0x30d14, 4050 0x30d1c, 0x30d20, 4051 0x30d3c, 0x30d50, 4052 0x31200, 0x3120c, 4053 0x31220, 0x31220, 4054 0x31240, 0x31240, 4055 0x31600, 0x3160c, 4056 0x31a00, 0x31a1c, 4057 0x31e00, 0x31e20, 4058 0x31e38, 0x31e3c, 4059 0x31e80, 0x31e80, 4060 0x31e88, 0x31ea8, 
4061 0x31eb0, 0x31eb4, 4062 0x31ec8, 0x31ed4, 4063 0x31fb8, 0x32004, 4064 0x32200, 0x32200, 4065 0x32208, 0x32240, 4066 0x32248, 0x32280, 4067 0x32288, 0x322c0, 4068 0x322c8, 0x322fc, 4069 0x32600, 0x32630, 4070 0x32a00, 0x32abc, 4071 0x32b00, 0x32b70, 4072 0x33000, 0x33048, 4073 0x33060, 0x3309c, 4074 0x330f0, 0x33148, 4075 0x33160, 0x3319c, 4076 0x331f0, 0x332e4, 4077 0x332f8, 0x333e4, 4078 0x333f8, 0x33448, 4079 0x33460, 0x3349c, 4080 0x334f0, 0x33548, 4081 0x33560, 0x3359c, 4082 0x335f0, 0x336e4, 4083 0x336f8, 0x337e4, 4084 0x337f8, 0x337fc, 4085 0x33814, 0x33814, 4086 0x3382c, 0x3382c, 4087 0x33880, 0x3388c, 4088 0x338e8, 0x338ec, 4089 0x33900, 0x33948, 4090 0x33960, 0x3399c, 4091 0x339f0, 0x33ae4, 4092 0x33af8, 0x33b10, 4093 0x33b28, 0x33b28, 4094 0x33b3c, 0x33b50, 4095 0x33bf0, 0x33c10, 4096 0x33c28, 0x33c28, 4097 0x33c3c, 0x33c50, 4098 0x33cf0, 0x33cfc, 4099 0x34000, 0x34030, 4100 0x34100, 0x34144, 4101 0x34190, 0x341d0, 4102 0x34200, 0x34318, 4103 0x34400, 0x3452c, 4104 0x34540, 0x3461c, 4105 0x34800, 0x34834, 4106 0x348c0, 0x34908, 4107 0x34910, 0x349ac, 4108 0x34a00, 0x34a2c, 4109 0x34a44, 0x34a50, 4110 0x34a74, 0x34c24, 4111 0x34d00, 0x34d00, 4112 0x34d08, 0x34d14, 4113 0x34d1c, 0x34d20, 4114 0x34d3c, 0x34d50, 4115 0x35200, 0x3520c, 4116 0x35220, 0x35220, 4117 0x35240, 0x35240, 4118 0x35600, 0x3560c, 4119 0x35a00, 0x35a1c, 4120 0x35e00, 0x35e20, 4121 0x35e38, 0x35e3c, 4122 0x35e80, 0x35e80, 4123 0x35e88, 0x35ea8, 4124 0x35eb0, 0x35eb4, 4125 0x35ec8, 0x35ed4, 4126 0x35fb8, 0x36004, 4127 0x36200, 0x36200, 4128 0x36208, 0x36240, 4129 0x36248, 0x36280, 4130 0x36288, 0x362c0, 4131 0x362c8, 0x362fc, 4132 0x36600, 0x36630, 4133 0x36a00, 0x36abc, 4134 0x36b00, 0x36b70, 4135 0x37000, 0x37048, 4136 0x37060, 0x3709c, 4137 0x370f0, 0x37148, 4138 0x37160, 0x3719c, 4139 0x371f0, 0x372e4, 4140 0x372f8, 0x373e4, 4141 0x373f8, 0x37448, 4142 0x37460, 0x3749c, 4143 0x374f0, 0x37548, 4144 0x37560, 0x3759c, 4145 0x375f0, 0x376e4, 4146 0x376f8, 0x377e4, 4147 0x377f8, 0x377fc, 4148 0x37814, 0x37814, 4149 0x3782c, 0x3782c, 4150 0x37880, 0x3788c, 4151 0x378e8, 0x378ec, 4152 0x37900, 0x37948, 4153 0x37960, 0x3799c, 4154 0x379f0, 0x37ae4, 4155 0x37af8, 0x37b10, 4156 0x37b28, 0x37b28, 4157 0x37b3c, 0x37b50, 4158 0x37bf0, 0x37c10, 4159 0x37c28, 0x37c28, 4160 0x37c3c, 0x37c50, 4161 0x37cf0, 0x37cfc, 4162 0x38000, 0x38030, 4163 0x38100, 0x38144, 4164 0x38190, 0x381d0, 4165 0x38200, 0x38318, 4166 0x38400, 0x3852c, 4167 0x38540, 0x3861c, 4168 0x38800, 0x38834, 4169 0x388c0, 0x38908, 4170 0x38910, 0x389ac, 4171 0x38a00, 0x38a2c, 4172 0x38a44, 0x38a50, 4173 0x38a74, 0x38c24, 4174 0x38d00, 0x38d00, 4175 0x38d08, 0x38d14, 4176 0x38d1c, 0x38d20, 4177 0x38d3c, 0x38d50, 4178 0x39200, 0x3920c, 4179 0x39220, 0x39220, 4180 0x39240, 0x39240, 4181 0x39600, 0x3960c, 4182 0x39a00, 0x39a1c, 4183 0x39e00, 0x39e20, 4184 0x39e38, 0x39e3c, 4185 0x39e80, 0x39e80, 4186 0x39e88, 0x39ea8, 4187 0x39eb0, 0x39eb4, 4188 0x39ec8, 0x39ed4, 4189 0x39fb8, 0x3a004, 4190 0x3a200, 0x3a200, 4191 0x3a208, 0x3a240, 4192 0x3a248, 0x3a280, 4193 0x3a288, 0x3a2c0, 4194 0x3a2c8, 0x3a2fc, 4195 0x3a600, 0x3a630, 4196 0x3aa00, 0x3aabc, 4197 0x3ab00, 0x3ab70, 4198 0x3b000, 0x3b048, 4199 0x3b060, 0x3b09c, 4200 0x3b0f0, 0x3b148, 4201 0x3b160, 0x3b19c, 4202 0x3b1f0, 0x3b2e4, 4203 0x3b2f8, 0x3b3e4, 4204 0x3b3f8, 0x3b448, 4205 0x3b460, 0x3b49c, 4206 0x3b4f0, 0x3b548, 4207 0x3b560, 0x3b59c, 4208 0x3b5f0, 0x3b6e4, 4209 0x3b6f8, 0x3b7e4, 4210 0x3b7f8, 0x3b7fc, 4211 0x3b814, 0x3b814, 4212 0x3b82c, 0x3b82c, 4213 0x3b880, 0x3b88c, 4214 0x3b8e8, 0x3b8ec, 4215 
0x3b900, 0x3b948, 4216 0x3b960, 0x3b99c, 4217 0x3b9f0, 0x3bae4, 4218 0x3baf8, 0x3bb10, 4219 0x3bb28, 0x3bb28, 4220 0x3bb3c, 0x3bb50, 4221 0x3bbf0, 0x3bc10, 4222 0x3bc28, 0x3bc28, 4223 0x3bc3c, 0x3bc50, 4224 0x3bcf0, 0x3bcfc, 4225 0x3c000, 0x3c030, 4226 0x3c100, 0x3c144, 4227 0x3c190, 0x3c1d0, 4228 0x3c200, 0x3c318, 4229 0x3c400, 0x3c52c, 4230 0x3c540, 0x3c61c, 4231 0x3c800, 0x3c834, 4232 0x3c8c0, 0x3c908, 4233 0x3c910, 0x3c9ac, 4234 0x3ca00, 0x3ca2c, 4235 0x3ca44, 0x3ca50, 4236 0x3ca74, 0x3cc24, 4237 0x3cd00, 0x3cd00, 4238 0x3cd08, 0x3cd14, 4239 0x3cd1c, 0x3cd20, 4240 0x3cd3c, 0x3cd50, 4241 0x3d200, 0x3d20c, 4242 0x3d220, 0x3d220, 4243 0x3d240, 0x3d240, 4244 0x3d600, 0x3d60c, 4245 0x3da00, 0x3da1c, 4246 0x3de00, 0x3de20, 4247 0x3de38, 0x3de3c, 4248 0x3de80, 0x3de80, 4249 0x3de88, 0x3dea8, 4250 0x3deb0, 0x3deb4, 4251 0x3dec8, 0x3ded4, 4252 0x3dfb8, 0x3e004, 4253 0x3e200, 0x3e200, 4254 0x3e208, 0x3e240, 4255 0x3e248, 0x3e280, 4256 0x3e288, 0x3e2c0, 4257 0x3e2c8, 0x3e2fc, 4258 0x3e600, 0x3e630, 4259 0x3ea00, 0x3eabc, 4260 0x3eb00, 0x3eb70, 4261 0x3f000, 0x3f048, 4262 0x3f060, 0x3f09c, 4263 0x3f0f0, 0x3f148, 4264 0x3f160, 0x3f19c, 4265 0x3f1f0, 0x3f2e4, 4266 0x3f2f8, 0x3f3e4, 4267 0x3f3f8, 0x3f448, 4268 0x3f460, 0x3f49c, 4269 0x3f4f0, 0x3f548, 4270 0x3f560, 0x3f59c, 4271 0x3f5f0, 0x3f6e4, 4272 0x3f6f8, 0x3f7e4, 4273 0x3f7f8, 0x3f7fc, 4274 0x3f814, 0x3f814, 4275 0x3f82c, 0x3f82c, 4276 0x3f880, 0x3f88c, 4277 0x3f8e8, 0x3f8ec, 4278 0x3f900, 0x3f948, 4279 0x3f960, 0x3f99c, 4280 0x3f9f0, 0x3fae4, 4281 0x3faf8, 0x3fb10, 4282 0x3fb28, 0x3fb28, 4283 0x3fb3c, 0x3fb50, 4284 0x3fbf0, 0x3fc10, 4285 0x3fc28, 0x3fc28, 4286 0x3fc3c, 0x3fc50, 4287 0x3fcf0, 0x3fcfc, 4288 0x40000, 0x4000c, 4289 0x40040, 0x40068, 4290 0x4007c, 0x40144, 4291 0x40180, 0x4018c, 4292 0x40200, 0x40298, 4293 0x402ac, 0x4033c, 4294 0x403f8, 0x403fc, 4295 0x41304, 0x413c4, 4296 0x41400, 0x4141c, 4297 0x41480, 0x414d0, 4298 0x44000, 0x44078, 4299 0x440c0, 0x44278, 4300 0x442c0, 0x44478, 4301 0x444c0, 0x44678, 4302 0x446c0, 0x44878, 4303 0x448c0, 0x449fc, 4304 0x45000, 0x45068, 4305 0x45080, 0x45084, 4306 0x450a0, 0x450b0, 4307 0x45200, 0x45268, 4308 0x45280, 0x45284, 4309 0x452a0, 0x452b0, 4310 0x460c0, 0x460e4, 4311 0x47000, 0x4708c, 4312 0x47200, 0x47250, 4313 0x47400, 0x47420, 4314 0x47600, 0x47618, 4315 0x47800, 0x47814, 4316 0x48000, 0x4800c, 4317 0x48040, 0x48068, 4318 0x4807c, 0x48144, 4319 0x48180, 0x4818c, 4320 0x48200, 0x48298, 4321 0x482ac, 0x4833c, 4322 0x483f8, 0x483fc, 4323 0x49304, 0x493c4, 4324 0x49400, 0x4941c, 4325 0x49480, 0x494d0, 4326 0x4c000, 0x4c078, 4327 0x4c0c0, 0x4c278, 4328 0x4c2c0, 0x4c478, 4329 0x4c4c0, 0x4c678, 4330 0x4c6c0, 0x4c878, 4331 0x4c8c0, 0x4c9fc, 4332 0x4d000, 0x4d068, 4333 0x4d080, 0x4d084, 4334 0x4d0a0, 0x4d0b0, 4335 0x4d200, 0x4d268, 4336 0x4d280, 0x4d284, 4337 0x4d2a0, 0x4d2b0, 4338 0x4e0c0, 0x4e0e4, 4339 0x4f000, 0x4f08c, 4340 0x4f200, 0x4f250, 4341 0x4f400, 0x4f420, 4342 0x4f600, 0x4f618, 4343 0x4f800, 0x4f814, 4344 0x50000, 0x500cc, 4345 0x50400, 0x50400, 4346 0x50800, 0x508cc, 4347 0x50c00, 0x50c00, 4348 0x51000, 0x5101c, 4349 0x51300, 0x51308, 4350 }; 4351 4352 if (is_t4(sc)) { 4353 reg_ranges = &t4_reg_ranges[0]; 4354 n = nitems(t4_reg_ranges); 4355 } else { 4356 reg_ranges = &t5_reg_ranges[0]; 4357 n = nitems(t5_reg_ranges); 4358 } 4359 4360 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4361 for (i = 0; i < n; i += 2) 4362 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]); 4363 } 4364 4365 static void 4366 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4367 { 
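/*
 * Refreshing these stats is not free: t4_get_port_stats() reads a block
 * of MPS counters, and the loop below pulls the per-channel tunnel
 * congestion drops out of the TP MIB through the A_TP_MIB_INDEX /
 * A_TP_MIB_DATA indirect register pair (hence regwin_lock).  Rate-limit
 * refreshes to one every 250ms; a caller that polls more often simply
 * gets the previously cached values.  getmicrotime(9) is used instead of
 * microtime(9) as the cheap, low-resolution timestamp is precise enough
 * for this throttle.
 */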
4368 int i; 4369 u_int v, tnl_cong_drops; 4370 struct timeval tv; 4371 const struct timeval interval = {0, 250000}; /* 250ms */ 4372 4373 getmicrotime(&tv); 4374 timevalsub(&tv, &interval); 4375 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4376 return; 4377 4378 tnl_cong_drops = 0; 4379 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4380 for (i = 0; i < NCHAN; i++) { 4381 if (pi->rx_chan_map & (1 << i)) { 4382 mtx_lock(&sc->regwin_lock); 4383 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4384 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4385 mtx_unlock(&sc->regwin_lock); 4386 tnl_cong_drops += v; 4387 } 4388 } 4389 pi->tnl_cong_drops = tnl_cong_drops; 4390 getmicrotime(&pi->last_refreshed); 4391 } 4392 4393 static void 4394 cxgbe_tick(void *arg) 4395 { 4396 struct port_info *pi = arg; 4397 struct adapter *sc = pi->adapter; 4398 struct ifnet *ifp = pi->ifp; 4399 4400 PORT_LOCK(pi); 4401 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4402 PORT_UNLOCK(pi); 4403 return; /* without scheduling another callout */ 4404 } 4405 4406 cxgbe_refresh_stats(sc, pi); 4407 4408 callout_schedule(&pi->tick, hz); 4409 PORT_UNLOCK(pi); 4410 } 4411 4412 static void 4413 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4414 { 4415 struct ifnet *vlan; 4416 4417 if (arg != ifp || ifp->if_type != IFT_ETHER) 4418 return; 4419 4420 vlan = VLAN_DEVAT(ifp, vid); 4421 VLAN_SETCOOKIE(vlan, ifp); 4422 } 4423 4424 static int 4425 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4426 { 4427 4428 #ifdef INVARIANTS 4429 panic("%s: opcode 0x%02x on iq %p with payload %p", 4430 __func__, rss->opcode, iq, m); 4431 #else 4432 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n", 4433 __func__, rss->opcode, iq, m); 4434 m_freem(m); 4435 #endif 4436 return (EDOOFUS); 4437 } 4438 4439 int 4440 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 4441 { 4442 uintptr_t *loc, new; 4443 4444 if (opcode >= nitems(sc->cpl_handler)) 4445 return (EINVAL); 4446 4447 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; 4448 loc = (uintptr_t *) &sc->cpl_handler[opcode]; 4449 atomic_store_rel_ptr(loc, new); 4450 4451 return (0); 4452 } 4453 4454 static int 4455 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) 4456 { 4457 4458 #ifdef INVARIANTS 4459 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); 4460 #else 4461 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", 4462 __func__, iq, ctrl); 4463 #endif 4464 return (EDOOFUS); 4465 } 4466 4467 int 4468 t4_register_an_handler(struct adapter *sc, an_handler_t h) 4469 { 4470 uintptr_t *loc, new; 4471 4472 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled; 4473 loc = (uintptr_t *) &sc->an_handler; 4474 atomic_store_rel_ptr(loc, new); 4475 4476 return (0); 4477 } 4478 4479 static int 4480 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) 4481 { 4482 const struct cpl_fw6_msg *cpl = 4483 __containerof(rpl, struct cpl_fw6_msg, data[0]); 4484 4485 #ifdef INVARIANTS 4486 panic("%s: fw_msg type %d", __func__, cpl->type); 4487 #else 4488 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); 4489 #endif 4490 return (EDOOFUS); 4491 } 4492 4493 int 4494 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 4495 { 4496 uintptr_t *loc, new; 4497 4498 if (type >= nitems(sc->fw_msg_handler)) 4499 return (EINVAL); 4500 4501 /* 4502 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 4503 * handler dispatch table. 
Reject any attempt to install a handler for 4504 * this subtype. 4505 */ 4506 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) 4507 return (EINVAL); 4508 4509 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled; 4510 loc = (uintptr_t *) &sc->fw_msg_handler[type]; 4511 atomic_store_rel_ptr(loc, new); 4512 4513 return (0); 4514 } 4515 4516 static int 4517 t4_sysctls(struct adapter *sc) 4518 { 4519 struct sysctl_ctx_list *ctx; 4520 struct sysctl_oid *oid; 4521 struct sysctl_oid_list *children, *c0; 4522 static char *caps[] = { 4523 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */ 4524 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL" /* caps[1] niccaps */ 4525 "\6HASHFILTER\7ETHOFLD", 4526 "\20\1TOE", /* caps[2] toecaps */ 4527 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */ 4528 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */ 4529 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD" 4530 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD", 4531 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */ 4532 "\4PO_INITIATOR\5PO_TARGET" 4533 }; 4534 static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB"; 4535 4536 ctx = device_get_sysctl_ctx(sc->dev); 4537 4538 /* 4539 * dev.t4nex.X. 4540 */ 4541 oid = device_get_sysctl_tree(sc->dev); 4542 c0 = children = SYSCTL_CHILDREN(oid); 4543 4544 sc->sc_do_rxcopy = 1; 4545 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4546 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4547 4548 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4549 sc->params.nports, "# of ports"); 4550 4551 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4552 NULL, chip_rev(sc), "chip hardware revision"); 4553 4554 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4555 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4556 4557 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4558 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4559 4560 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4561 sc->cfcsum, "config file checksum"); 4562 4563 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4564 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4565 sysctl_bitfield, "A", "available doorbells"); 4566 4567 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps", 4568 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps, 4569 sysctl_bitfield, "A", "available link capabilities"); 4570 4571 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps", 4572 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps, 4573 sysctl_bitfield, "A", "available NIC capabilities"); 4574 4575 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps", 4576 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps, 4577 sysctl_bitfield, "A", "available TCP offload capabilities"); 4578 4579 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps", 4580 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps, 4581 sysctl_bitfield, "A", "available RDMA capabilities"); 4582 4583 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps", 4584 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps, 4585 sysctl_bitfield, "A", "available iSCSI capabilities"); 4586 4587 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps", 4588 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps, 4589 sysctl_bitfield, "A", "available FCoE capabilities"); 4590 4591 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4592 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4593 4594 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4595 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val, 4596
sizeof(sc->sge.timer_val), sysctl_int_array, "A", 4597 "interrupt holdoff timer values (us)"); 4598 4599 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4600 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val, 4601 sizeof(sc->sge.counter_val), sysctl_int_array, "A", 4602 "interrupt holdoff packet counter values"); 4603 4604 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4605 NULL, sc->tids.nftids, "number of filters"); 4606 4607 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4608 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4609 "chip temperature (in Celsius)"); 4610 4611 t4_sge_sysctls(sc, ctx, children); 4612 4613 sc->lro_timeout = 100; 4614 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4615 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4616 4617 #ifdef SBUF_DRAIN 4618 /* 4619 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4620 */ 4621 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4622 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4623 "logs and miscellaneous information"); 4624 children = SYSCTL_CHILDREN(oid); 4625 4626 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4627 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4628 sysctl_cctrl, "A", "congestion control"); 4629 4630 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4631 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4632 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4633 4634 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4635 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4636 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4637 4638 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4639 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4640 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4641 4642 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4643 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4644 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4645 4646 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4647 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4648 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4649 4650 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4651 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4652 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4653 4654 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4655 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4656 sysctl_cim_la, "A", "CIM logic analyzer"); 4657 4658 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4659 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4660 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4661 4662 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4663 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4664 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4665 4666 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4667 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4668 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4669 4670 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4671 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4672 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4673 4674 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4675 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4676 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4677 4678 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4679 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4680 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4681 4682 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4683 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4684 sysctl_cim_ibq_obq, "A", "CIM 
OBQ 5 (NCSI)"); 4685 4686 if (is_t5(sc)) { 4687 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4688 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4689 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4690 4691 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4692 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4693 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4694 } 4695 4696 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4697 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4698 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4699 4700 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4701 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4702 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4703 4704 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4705 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4706 sysctl_cpl_stats, "A", "CPL statistics"); 4707 4708 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4709 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4710 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4711 4712 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4713 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4714 sysctl_devlog, "A", "firmware's device log"); 4715 4716 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4717 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4718 sysctl_fcoe_stats, "A", "FCoE statistics"); 4719 4720 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4721 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4722 sysctl_hw_sched, "A", "hardware scheduler "); 4723 4724 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4725 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4726 sysctl_l2t, "A", "hardware L2 table"); 4727 4728 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4729 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4730 sysctl_lb_stats, "A", "loopback statistics"); 4731 4732 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4733 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4734 sysctl_meminfo, "A", "memory regions"); 4735 4736 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4737 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4738 sysctl_mps_tcam, "A", "MPS TCAM entries"); 4739 4740 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4741 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4742 sysctl_path_mtus, "A", "path MTUs"); 4743 4744 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4745 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4746 sysctl_pm_stats, "A", "PM statistics"); 4747 4748 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4749 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4750 sysctl_rdma_stats, "A", "RDMA statistics"); 4751 4752 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4753 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4754 sysctl_tcp_stats, "A", "TCP statistics"); 4755 4756 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4757 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4758 sysctl_tids, "A", "TID information"); 4759 4760 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4761 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4762 sysctl_tp_err_stats, "A", "TP error statistics"); 4763 4764 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4765 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4766 sysctl_tp_la, "A", "TP logic analyzer"); 4767 4768 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4769 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4770 sysctl_tx_rate, "A", "Tx rate"); 4771 4772 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4773 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4774 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4775 4776 if (is_t5(sc)) { 4777 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4778 
CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4779 sysctl_wcwr_stats, "A", "write combined work requests"); 4780 } 4781 #endif 4782 4783 #ifdef TCP_OFFLOAD 4784 if (is_offload(sc)) { 4785 /* 4786 * dev.t4nex.X.toe. 4787 */ 4788 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4789 NULL, "TOE parameters"); 4790 children = SYSCTL_CHILDREN(oid); 4791 4792 sc->tt.sndbuf = 256 * 1024; 4793 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4794 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4795 4796 sc->tt.ddp = 0; 4797 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4798 &sc->tt.ddp, 0, "DDP allowed"); 4799 4800 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 4801 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 4802 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 4803 4804 sc->tt.ddp_thres = 4805 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 4806 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 4807 &sc->tt.ddp_thres, 0, "DDP threshold"); 4808 4809 sc->tt.rx_coalesce = 1; 4810 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 4811 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 4812 4813 sc->tt.tx_align = 1; 4814 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 4815 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 4816 } 4817 #endif 4818 4819 4820 return (0); 4821 } 4822 4823 static int 4824 cxgbe_sysctls(struct port_info *pi) 4825 { 4826 struct sysctl_ctx_list *ctx; 4827 struct sysctl_oid *oid; 4828 struct sysctl_oid_list *children; 4829 struct adapter *sc = pi->adapter; 4830 4831 ctx = device_get_sysctl_ctx(pi->dev); 4832 4833 /* 4834 * dev.cxgbe.X. 4835 */ 4836 oid = device_get_sysctl_tree(pi->dev); 4837 children = SYSCTL_CHILDREN(oid); 4838 4839 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 4840 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 4841 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 4842 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 4843 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 4844 "PHY temperature (in Celsius)"); 4845 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 4846 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 4847 "PHY firmware version"); 4848 } 4849 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 4850 &pi->nrxq, 0, "# of rx queues"); 4851 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 4852 &pi->ntxq, 0, "# of tx queues"); 4853 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 4854 &pi->first_rxq, 0, "index of first rx queue"); 4855 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 4856 &pi->first_txq, 0, "index of first tx queue"); 4857 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT | 4858 CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU", 4859 "Reserve queue 0 for non-flowid packets"); 4860 4861 #ifdef TCP_OFFLOAD 4862 if (is_offload(sc)) { 4863 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 4864 &pi->nofldrxq, 0, 4865 "# of rx queues for offloaded TCP connections"); 4866 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 4867 &pi->nofldtxq, 0, 4868 "# of tx queues for offloaded TCP connections"); 4869 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 4870 CTLFLAG_RD, &pi->first_ofld_rxq, 0, 4871 "index of first TOE rx queue"); 4872 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 4873 CTLFLAG_RD, &pi->first_ofld_txq, 0, 4874 "index of first TOE tx queue"); 4875 } 4876 
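/*
 * The TOE tunables above are plain CTLFLAG_RW integers under the
 * dev.t4nex.X.toe node, so they can be inspected and changed at
 * runtime with sysctl(8).  For example (illustrative only, assuming
 * the adapter attached as unit 0):
 *
 *	sysctl dev.t4nex.0.toe.ddp=1
 *	sysctl dev.t4nex.0.toe.rx_coalesce=0
 */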
#endif 4877 #ifdef DEV_NETMAP 4878 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 4879 &pi->nnmrxq, 0, "# of rx queues for netmap"); 4880 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 4881 &pi->nnmtxq, 0, "# of tx queues for netmap"); 4882 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 4883 CTLFLAG_RD, &pi->first_nm_rxq, 0, 4884 "index of first netmap rx queue"); 4885 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 4886 CTLFLAG_RD, &pi->first_nm_txq, 0, 4887 "index of first netmap tx queue"); 4888 #endif 4889 4890 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 4891 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I", 4892 "holdoff timer index"); 4893 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 4894 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I", 4895 "holdoff packet counter index"); 4896 4897 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 4898 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I", 4899 "rx queue size"); 4900 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 4901 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I", 4902 "tx queue size"); 4903 4904 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 4905 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings, 4906 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 4907 4908 /* 4909 * dev.cxgbe.X.stats. 4910 */ 4911 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 4912 NULL, "port statistics"); 4913 children = SYSCTL_CHILDREN(oid); 4914 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 4915 &pi->tx_parse_error, 0, 4916 "# of tx packets with invalid length or # of segments"); 4917 4918 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 4919 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 4920 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 4921 sysctl_handle_t4_reg64, "QU", desc) 4922 4923 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 4924 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 4925 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 4926 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 4927 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 4928 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 4929 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 4930 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 4931 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 4932 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 4933 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 4934 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 4935 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 4936 "# of tx frames in this range", 4937 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 4938 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 4939 "# of tx frames in this range", 4940 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 4941 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 4942 "# of tx frames in this range", 4943 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 4944 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 4945 "# of tx frames in this range", 4946 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 4947 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 4948 "# of tx frames in this range", 4949 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 4950 SYSCTL_ADD_T4_REG64(pi, 
"tx_frames_1024_1518", 4951 "# of tx frames in this range", 4952 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 4953 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 4954 "# of tx frames in this range", 4955 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 4956 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 4957 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 4958 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 4959 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 4960 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 4961 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 4962 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 4963 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 4964 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 4965 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 4966 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 4967 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 4968 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 4969 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 4970 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 4971 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 4972 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 4973 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 4974 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 4975 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 4976 4977 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 4978 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 4979 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 4980 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 4981 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 4982 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 4983 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 4984 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 4985 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 4986 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 4987 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 4988 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 4989 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 4990 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 4991 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 4992 "# of frames received with bad FCS", 4993 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 4994 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 4995 "# of frames received with length error", 4996 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 4997 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 4998 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 4999 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5000 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5001 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5002 "# of rx frames in this range", 5003 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5004 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5005 "# of rx frames in this range", 5006 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5007 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5008 "# of rx frames in this range", 
5009 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5010 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5011 "# of rx frames in this range", 5012 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5013 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5014 "# of rx frames in this range", 5015 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5016 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5017 "# of rx frames in this range", 5018 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5019 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5020 "# of rx frames in this range", 5021 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5022 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5023 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5024 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5025 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5026 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5027 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5028 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5029 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5030 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5031 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5032 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5033 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5034 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5035 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5036 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5037 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5038 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5039 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5040 5041 #undef SYSCTL_ADD_T4_REG64 5042 5043 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5044 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5045 &pi->stats.name, desc) 5046 5047 /* We get these from port_stats and they may be stale by up to 1s */ 5048 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5049 "# drops due to buffer-group 0 overflows"); 5050 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5051 "# drops due to buffer-group 1 overflows"); 5052 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5053 "# drops due to buffer-group 2 overflows"); 5054 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5055 "# drops due to buffer-group 3 overflows"); 5056 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5057 "# of buffer-group 0 truncated packets"); 5058 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5059 "# of buffer-group 1 truncated packets"); 5060 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5061 "# of buffer-group 2 truncated packets"); 5062 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5063 "# of buffer-group 3 truncated packets"); 5064 5065 #undef SYSCTL_ADD_T4_PORTSTAT 5066 5067 return (0); 5068 } 5069 5070 static int 5071 sysctl_int_array(SYSCTL_HANDLER_ARGS) 5072 { 5073 int rc, *i; 5074 struct sbuf sb; 5075 5076 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 5077 for (i = arg1; arg2; arg2 -= sizeof(int), i++) 5078 sbuf_printf(&sb, "%d ", *i); 5079 sbuf_trim(&sb); 5080 sbuf_finish(&sb); 5081 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 5082 sbuf_delete(&sb); 5083 return (rc); 5084 } 5085 5086 static int 5087 sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5088 { 5089 int rc; 5090 struct sbuf *sb; 5091 5092 rc = sysctl_wire_old_buffer(req, 0); 5093 if (rc != 0) 5094 return (rc); 5095 5096 sb
= sbuf_new_for_sysctl(NULL, NULL, 128, req); 5097 if (sb == NULL) 5098 return (ENOMEM); 5099 5100 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5101 rc = sbuf_finish(sb); 5102 sbuf_delete(sb); 5103 5104 return (rc); 5105 } 5106 5107 static int 5108 sysctl_btphy(SYSCTL_HANDLER_ARGS) 5109 { 5110 struct port_info *pi = arg1; 5111 int op = arg2; 5112 struct adapter *sc = pi->adapter; 5113 u_int v; 5114 int rc; 5115 5116 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt"); 5117 if (rc) 5118 return (rc); 5119 /* XXX: magic numbers */ 5120 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, 5121 &v); 5122 end_synchronized_op(sc, 0); 5123 if (rc) 5124 return (rc); 5125 if (op == 0) 5126 v /= 256; 5127 5128 rc = sysctl_handle_int(oidp, &v, 0, req); 5129 return (rc); 5130 } 5131 5132 static int 5133 sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5134 { 5135 struct port_info *pi = arg1; 5136 int rc, val; 5137 5138 val = pi->rsrv_noflowq; 5139 rc = sysctl_handle_int(oidp, &val, 0, req); 5140 if (rc != 0 || req->newptr == NULL) 5141 return (rc); 5142 5143 if ((val >= 1) && (pi->ntxq > 1)) 5144 pi->rsrv_noflowq = 1; 5145 else 5146 pi->rsrv_noflowq = 0; 5147 5148 return (rc); 5149 } 5150 5151 static int 5152 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5153 { 5154 struct port_info *pi = arg1; 5155 struct adapter *sc = pi->adapter; 5156 int idx, rc, i; 5157 struct sge_rxq *rxq; 5158 #ifdef TCP_OFFLOAD 5159 struct sge_ofld_rxq *ofld_rxq; 5160 #endif 5161 uint8_t v; 5162 5163 idx = pi->tmr_idx; 5164 5165 rc = sysctl_handle_int(oidp, &idx, 0, req); 5166 if (rc != 0 || req->newptr == NULL) 5167 return (rc); 5168 5169 if (idx < 0 || idx >= SGE_NTIMERS) 5170 return (EINVAL); 5171 5172 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5173 "t4tmr"); 5174 if (rc) 5175 return (rc); 5176 5177 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1); 5178 for_each_rxq(pi, i, rxq) { 5179 #ifdef atomic_store_rel_8 5180 atomic_store_rel_8(&rxq->iq.intr_params, v); 5181 #else 5182 rxq->iq.intr_params = v; 5183 #endif 5184 } 5185 #ifdef TCP_OFFLOAD 5186 for_each_ofld_rxq(pi, i, ofld_rxq) { 5187 #ifdef atomic_store_rel_8 5188 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5189 #else 5190 ofld_rxq->iq.intr_params = v; 5191 #endif 5192 } 5193 #endif 5194 pi->tmr_idx = idx; 5195 5196 end_synchronized_op(sc, LOCK_HELD); 5197 return (0); 5198 } 5199 5200 static int 5201 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5202 { 5203 struct port_info *pi = arg1; 5204 struct adapter *sc = pi->adapter; 5205 int idx, rc; 5206 5207 idx = pi->pktc_idx; 5208 5209 rc = sysctl_handle_int(oidp, &idx, 0, req); 5210 if (rc != 0 || req->newptr == NULL) 5211 return (rc); 5212 5213 if (idx < -1 || idx >= SGE_NCOUNTERS) 5214 return (EINVAL); 5215 5216 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5217 "t4pktc"); 5218 if (rc) 5219 return (rc); 5220 5221 if (pi->flags & PORT_INIT_DONE) 5222 rc = EBUSY; /* cannot be changed once the queues are created */ 5223 else 5224 pi->pktc_idx = idx; 5225 5226 end_synchronized_op(sc, LOCK_HELD); 5227 return (rc); 5228 } 5229 5230 static int 5231 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5232 { 5233 struct port_info *pi = arg1; 5234 struct adapter *sc = pi->adapter; 5235 int qsize, rc; 5236 5237 qsize = pi->qsize_rxq; 5238 5239 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5240 if (rc != 0 || req->newptr == NULL) 5241 return (rc); 5242 5243 if (qsize < 128 || (qsize & 7)) 5244 return (EINVAL); 5245 5246 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | 
SLEEP_OK | INTR_OK, 5247 "t4rxqs"); 5248 if (rc) 5249 return (rc); 5250 5251 if (pi->flags & PORT_INIT_DONE) 5252 rc = EBUSY; /* cannot be changed once the queues are created */ 5253 else 5254 pi->qsize_rxq = qsize; 5255 5256 end_synchronized_op(sc, LOCK_HELD); 5257 return (rc); 5258 } 5259 5260 static int 5261 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5262 { 5263 struct port_info *pi = arg1; 5264 struct adapter *sc = pi->adapter; 5265 int qsize, rc; 5266 5267 qsize = pi->qsize_txq; 5268 5269 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5270 if (rc != 0 || req->newptr == NULL) 5271 return (rc); 5272 5273 if (qsize < 128 || qsize > 65536) 5274 return (EINVAL); 5275 5276 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5277 "t4txqs"); 5278 if (rc) 5279 return (rc); 5280 5281 if (pi->flags & PORT_INIT_DONE) 5282 rc = EBUSY; /* cannot be changed once the queues are created */ 5283 else 5284 pi->qsize_txq = qsize; 5285 5286 end_synchronized_op(sc, LOCK_HELD); 5287 return (rc); 5288 } 5289 5290 static int 5291 sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5292 { 5293 struct port_info *pi = arg1; 5294 struct adapter *sc = pi->adapter; 5295 struct link_config *lc = &pi->link_cfg; 5296 int rc; 5297 5298 if (req->newptr == NULL) { 5299 struct sbuf *sb; 5300 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5301 5302 rc = sysctl_wire_old_buffer(req, 0); 5303 if (rc != 0) 5304 return(rc); 5305 5306 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5307 if (sb == NULL) 5308 return (ENOMEM); 5309 5310 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5311 rc = sbuf_finish(sb); 5312 sbuf_delete(sb); 5313 } else { 5314 char s[2]; 5315 int n; 5316 5317 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5318 s[1] = 0; 5319 5320 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5321 if (rc != 0) 5322 return(rc); 5323 5324 if (s[1] != 0) 5325 return (EINVAL); 5326 if (s[0] < '0' || s[0] > '9') 5327 return (EINVAL); /* not a number */ 5328 n = s[0] - '0'; 5329 if (n & ~(PAUSE_TX | PAUSE_RX)) 5330 return (EINVAL); /* some other bit is set too */ 5331 5332 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4PAUSE"); 5333 if (rc) 5334 return (rc); 5335 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5336 int link_ok = lc->link_ok; 5337 5338 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5339 lc->requested_fc |= n; 5340 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, lc); 5341 lc->link_ok = link_ok; /* restore */ 5342 } 5343 end_synchronized_op(sc, 0); 5344 } 5345 5346 return (rc); 5347 } 5348 5349 static int 5350 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5351 { 5352 struct adapter *sc = arg1; 5353 int reg = arg2; 5354 uint64_t val; 5355 5356 val = t4_read_reg64(sc, reg); 5357 5358 return (sysctl_handle_64(oidp, &val, 0, req)); 5359 } 5360 5361 static int 5362 sysctl_temperature(SYSCTL_HANDLER_ARGS) 5363 { 5364 struct adapter *sc = arg1; 5365 int rc, t; 5366 uint32_t param, val; 5367 5368 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5369 if (rc) 5370 return (rc); 5371 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5372 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5373 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5374 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5375 end_synchronized_op(sc, 0); 5376 if (rc) 5377 return (rc); 5378 5379 /* unknown is returned as 0 but we display -1 in that case */ 5380 t = val == 0 ? 
-1 : val; 5381 5382 rc = sysctl_handle_int(oidp, &t, 0, req); 5383 return (rc); 5384 } 5385 5386 #ifdef SBUF_DRAIN 5387 static int 5388 sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5389 { 5390 struct adapter *sc = arg1; 5391 struct sbuf *sb; 5392 int rc, i; 5393 uint16_t incr[NMTUS][NCCTRL_WIN]; 5394 static const char *dec_fac[] = { 5395 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5396 "0.9375" 5397 }; 5398 5399 rc = sysctl_wire_old_buffer(req, 0); 5400 if (rc != 0) 5401 return (rc); 5402 5403 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5404 if (sb == NULL) 5405 return (ENOMEM); 5406 5407 t4_read_cong_tbl(sc, incr); 5408 5409 for (i = 0; i < NCCTRL_WIN; ++i) { 5410 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5411 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5412 incr[5][i], incr[6][i], incr[7][i]); 5413 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5414 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5415 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5416 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5417 } 5418 5419 rc = sbuf_finish(sb); 5420 sbuf_delete(sb); 5421 5422 return (rc); 5423 } 5424 5425 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5426 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5427 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5428 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5429 }; 5430 5431 static int 5432 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5433 { 5434 struct adapter *sc = arg1; 5435 struct sbuf *sb; 5436 int rc, i, n, qid = arg2; 5437 uint32_t *buf, *p; 5438 char *qtype; 5439 u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5; 5440 5441 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5442 ("%s: bad qid %d\n", __func__, qid)); 5443 5444 if (qid < CIM_NUM_IBQ) { 5445 /* inbound queue */ 5446 qtype = "IBQ"; 5447 n = 4 * CIM_IBQ_SIZE; 5448 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5449 rc = t4_read_cim_ibq(sc, qid, buf, n); 5450 } else { 5451 /* outbound queue */ 5452 qtype = "OBQ"; 5453 qid -= CIM_NUM_IBQ; 5454 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5455 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5456 rc = t4_read_cim_obq(sc, qid, buf, n); 5457 } 5458 5459 if (rc < 0) { 5460 rc = -rc; 5461 goto done; 5462 } 5463 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5464 5465 rc = sysctl_wire_old_buffer(req, 0); 5466 if (rc != 0) 5467 goto done; 5468 5469 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5470 if (sb == NULL) { 5471 rc = ENOMEM; 5472 goto done; 5473 } 5474 5475 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 5476 for (i = 0, p = buf; i < n; i += 16, p += 4) 5477 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5478 p[2], p[3]); 5479 5480 rc = sbuf_finish(sb); 5481 sbuf_delete(sb); 5482 done: 5483 free(buf, M_CXGBE); 5484 return (rc); 5485 } 5486 5487 static int 5488 sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5489 { 5490 struct adapter *sc = arg1; 5491 u_int cfg; 5492 struct sbuf *sb; 5493 uint32_t *buf, *p; 5494 int rc; 5495 5496 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5497 if (rc != 0) 5498 return (rc); 5499 5500 rc = sysctl_wire_old_buffer(req, 0); 5501 if (rc != 0) 5502 return (rc); 5503 5504 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5505 if (sb == NULL) 5506 return (ENOMEM); 5507 5508 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5509 M_ZERO | M_WAITOK); 5510 5511 rc = -t4_cim_read_la(sc, buf, NULL); 5512 if 
(rc != 0) 5513 goto done; 5514 5515 sbuf_printf(sb, "Status Data PC%s", 5516 cfg & F_UPDBGLACAPTPCONLY ? "" : 5517 " LS0Stat LS0Addr LS0Data"); 5518 5519 KASSERT((sc->params.cim_la_size & 7) == 0, 5520 ("%s: p will walk off the end of buf", __func__)); 5521 5522 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) { 5523 if (cfg & F_UPDBGLACAPTPCONLY) { 5524 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5525 p[6], p[7]); 5526 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5527 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5528 p[4] & 0xff, p[5] >> 8); 5529 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5530 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5531 p[1] & 0xf, p[2] >> 4); 5532 } else { 5533 sbuf_printf(sb, 5534 "\n %02x %x%07x %x%07x %08x %08x " 5535 "%08x%08x%08x%08x", 5536 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5537 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5538 p[6], p[7]); 5539 } 5540 } 5541 5542 rc = sbuf_finish(sb); 5543 sbuf_delete(sb); 5544 done: 5545 free(buf, M_CXGBE); 5546 return (rc); 5547 } 5548 5549 static int 5550 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 5551 { 5552 struct adapter *sc = arg1; 5553 u_int i; 5554 struct sbuf *sb; 5555 uint32_t *buf, *p; 5556 int rc; 5557 5558 rc = sysctl_wire_old_buffer(req, 0); 5559 if (rc != 0) 5560 return (rc); 5561 5562 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5563 if (sb == NULL) 5564 return (ENOMEM); 5565 5566 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 5567 M_ZERO | M_WAITOK); 5568 5569 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 5570 p = buf; 5571 5572 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5573 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 5574 p[1], p[0]); 5575 } 5576 5577 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 5578 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5579 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 5580 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 5581 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 5582 (p[1] >> 2) | ((p[2] & 3) << 30), 5583 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 5584 p[0] & 1); 5585 } 5586 5587 rc = sbuf_finish(sb); 5588 sbuf_delete(sb); 5589 free(buf, M_CXGBE); 5590 return (rc); 5591 } 5592 5593 static int 5594 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 5595 { 5596 struct adapter *sc = arg1; 5597 u_int i; 5598 struct sbuf *sb; 5599 uint32_t *buf, *p; 5600 int rc; 5601 5602 rc = sysctl_wire_old_buffer(req, 0); 5603 if (rc != 0) 5604 return (rc); 5605 5606 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5607 if (sb == NULL) 5608 return (ENOMEM); 5609 5610 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 5611 M_ZERO | M_WAITOK); 5612 5613 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 5614 p = buf; 5615 5616 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 5617 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5618 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 5619 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 5620 p[4], p[3], p[2], p[1], p[0]); 5621 } 5622 5623 sbuf_printf(sb, "\n\nCntl ID Data"); 5624 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5625 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 5626 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 5627 } 5628 5629 rc = sbuf_finish(sb); 5630 sbuf_delete(sb); 5631 free(buf, M_CXGBE); 5632 return (rc); 5633 } 5634 5635 static int 5636 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 5637 { 5638 struct adapter *sc = arg1; 5639 struct sbuf *sb; 5640 int rc, i; 5641 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5642 uint16_t
size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5643 uint16_t thres[CIM_NUM_IBQ]; 5644 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 5645 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 5646 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 5647 5648 if (is_t4(sc)) { 5649 cim_num_obq = CIM_NUM_OBQ; 5650 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 5651 obq_rdaddr = A_UP_OBQ_0_REALADDR; 5652 } else { 5653 cim_num_obq = CIM_NUM_OBQ_T5; 5654 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 5655 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 5656 } 5657 nq = CIM_NUM_IBQ + cim_num_obq; 5658 5659 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 5660 if (rc == 0) 5661 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 5662 if (rc != 0) 5663 return (rc); 5664 5665 t4_read_cimq_cfg(sc, base, size, thres); 5666 5667 rc = sysctl_wire_old_buffer(req, 0); 5668 if (rc != 0) 5669 return (rc); 5670 5671 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5672 if (sb == NULL) 5673 return (ENOMEM); 5674 5675 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 5676 5677 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 5678 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 5679 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 5680 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5681 G_QUEREMFLITS(p[2]) * 16); 5682 for ( ; i < nq; i++, p += 4, wr += 2) 5683 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 5684 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 5685 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5686 G_QUEREMFLITS(p[2]) * 16); 5687 5688 rc = sbuf_finish(sb); 5689 sbuf_delete(sb); 5690 5691 return (rc); 5692 } 5693 5694 static int 5695 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 5696 { 5697 struct adapter *sc = arg1; 5698 struct sbuf *sb; 5699 int rc; 5700 struct tp_cpl_stats stats; 5701 5702 rc = sysctl_wire_old_buffer(req, 0); 5703 if (rc != 0) 5704 return (rc); 5705 5706 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5707 if (sb == NULL) 5708 return (ENOMEM); 5709 5710 t4_tp_get_cpl_stats(sc, &stats); 5711 5712 sbuf_printf(sb, " channel 0 channel 1 channel 2 " 5713 "channel 3\n"); 5714 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n", 5715 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 5716 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u", 5717 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 5718 5719 rc = sbuf_finish(sb); 5720 sbuf_delete(sb); 5721 5722 return (rc); 5723 } 5724 5725 static int 5726 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 5727 { 5728 struct adapter *sc = arg1; 5729 struct sbuf *sb; 5730 int rc; 5731 struct tp_usm_stats stats; 5732 5733 rc = sysctl_wire_old_buffer(req, 0); 5734 if (rc != 0) 5735 return(rc); 5736 5737 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5738 if (sb == NULL) 5739 return (ENOMEM); 5740 5741 t4_get_usm_stats(sc, &stats); 5742 5743 sbuf_printf(sb, "Frames: %u\n", stats.frames); 5744 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 5745 sbuf_printf(sb, "Drops: %u", stats.drops); 5746 5747 rc = sbuf_finish(sb); 5748 sbuf_delete(sb); 5749 5750 return (rc); 5751 } 5752 5753 const char *devlog_level_strings[] = { 5754 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 5755 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 5756 [FW_DEVLOG_LEVEL_ERR] = "ERR", 5757 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 5758 [FW_DEVLOG_LEVEL_INFO] = "INFO", 5759 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 5760 }; 5761 5762 const char *devlog_facility_strings[] = { 5763 [FW_DEVLOG_FACILITY_CORE] = "CORE", 5764 [FW_DEVLOG_FACILITY_CF] = "CF", 5765 
[FW_DEVLOG_FACILITY_SCHED] = "SCHED", 5766 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 5767 [FW_DEVLOG_FACILITY_RES] = "RES", 5768 [FW_DEVLOG_FACILITY_HW] = "HW", 5769 [FW_DEVLOG_FACILITY_FLR] = "FLR", 5770 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 5771 [FW_DEVLOG_FACILITY_PHY] = "PHY", 5772 [FW_DEVLOG_FACILITY_MAC] = "MAC", 5773 [FW_DEVLOG_FACILITY_PORT] = "PORT", 5774 [FW_DEVLOG_FACILITY_VI] = "VI", 5775 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 5776 [FW_DEVLOG_FACILITY_ACL] = "ACL", 5777 [FW_DEVLOG_FACILITY_TM] = "TM", 5778 [FW_DEVLOG_FACILITY_QFC] = "QFC", 5779 [FW_DEVLOG_FACILITY_DCB] = "DCB", 5780 [FW_DEVLOG_FACILITY_ETH] = "ETH", 5781 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 5782 [FW_DEVLOG_FACILITY_RI] = "RI", 5783 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 5784 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 5785 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 5786 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" 5787 }; 5788 5789 static int 5790 sysctl_devlog(SYSCTL_HANDLER_ARGS) 5791 { 5792 struct adapter *sc = arg1; 5793 struct devlog_params *dparams = &sc->params.devlog; 5794 struct fw_devlog_e *buf, *e; 5795 int i, j, rc, nentries, first = 0, m; 5796 struct sbuf *sb; 5797 uint64_t ftstamp = UINT64_MAX; 5798 5799 if (dparams->start == 0) { 5800 dparams->memtype = FW_MEMTYPE_EDC0; 5801 dparams->start = 0x84000; 5802 dparams->size = 32768; 5803 } 5804 5805 nentries = dparams->size / sizeof(struct fw_devlog_e); 5806 5807 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 5808 if (buf == NULL) 5809 return (ENOMEM); 5810 5811 m = fwmtype_to_hwmtype(dparams->memtype); 5812 rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf); 5813 if (rc != 0) 5814 goto done; 5815 5816 for (i = 0; i < nentries; i++) { 5817 e = &buf[i]; 5818 5819 if (e->timestamp == 0) 5820 break; /* end */ 5821 5822 e->timestamp = be64toh(e->timestamp); 5823 e->seqno = be32toh(e->seqno); 5824 for (j = 0; j < 8; j++) 5825 e->params[j] = be32toh(e->params[j]); 5826 5827 if (e->timestamp < ftstamp) { 5828 ftstamp = e->timestamp; 5829 first = i; 5830 } 5831 } 5832 5833 if (buf[first].timestamp == 0) 5834 goto done; /* nothing in the log */ 5835 5836 rc = sysctl_wire_old_buffer(req, 0); 5837 if (rc != 0) 5838 goto done; 5839 5840 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5841 if (sb == NULL) { 5842 rc = ENOMEM; 5843 goto done; 5844 } 5845 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 5846 "Seq#", "Tstamp", "Level", "Facility", "Message"); 5847 5848 i = first; 5849 do { 5850 e = &buf[i]; 5851 if (e->timestamp == 0) 5852 break; /* end */ 5853 5854 sbuf_printf(sb, "%10d %15ju %8s %8s ", 5855 e->seqno, e->timestamp, 5856 (e->level < nitems(devlog_level_strings) ? 5857 devlog_level_strings[e->level] : "UNKNOWN"), 5858 (e->facility < nitems(devlog_facility_strings) ? 
5859 devlog_facility_strings[e->facility] : "UNKNOWN")); 5860 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 5861 e->params[2], e->params[3], e->params[4], 5862 e->params[5], e->params[6], e->params[7]); 5863 5864 if (++i == nentries) 5865 i = 0; 5866 } while (i != first); 5867 5868 rc = sbuf_finish(sb); 5869 sbuf_delete(sb); 5870 done: 5871 free(buf, M_CXGBE); 5872 return (rc); 5873 } 5874 5875 static int 5876 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 5877 { 5878 struct adapter *sc = arg1; 5879 struct sbuf *sb; 5880 int rc; 5881 struct tp_fcoe_stats stats[4]; 5882 5883 rc = sysctl_wire_old_buffer(req, 0); 5884 if (rc != 0) 5885 return (rc); 5886 5887 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5888 if (sb == NULL) 5889 return (ENOMEM); 5890 5891 t4_get_fcoe_stats(sc, 0, &stats[0]); 5892 t4_get_fcoe_stats(sc, 1, &stats[1]); 5893 t4_get_fcoe_stats(sc, 2, &stats[2]); 5894 t4_get_fcoe_stats(sc, 3, &stats[3]); 5895 5896 sbuf_printf(sb, " channel 0 channel 1 " 5897 "channel 2 channel 3\n"); 5898 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n", 5899 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP, 5900 stats[3].octetsDDP); 5901 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP, 5902 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP); 5903 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u", 5904 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop, 5905 stats[3].framesDrop); 5906 5907 rc = sbuf_finish(sb); 5908 sbuf_delete(sb); 5909 5910 return (rc); 5911 } 5912 5913 static int 5914 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 5915 { 5916 struct adapter *sc = arg1; 5917 struct sbuf *sb; 5918 int rc, i; 5919 unsigned int map, kbps, ipg, mode; 5920 unsigned int pace_tab[NTX_SCHED]; 5921 5922 rc = sysctl_wire_old_buffer(req, 0); 5923 if (rc != 0) 5924 return (rc); 5925 5926 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5927 if (sb == NULL) 5928 return (ENOMEM); 5929 5930 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 5931 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 5932 t4_read_pace_tbl(sc, pace_tab); 5933 5934 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 5935 "Class IPG (0.1 ns) Flow IPG (us)"); 5936 5937 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 5938 t4_get_tx_sched(sc, i, &kbps, &ipg); 5939 sbuf_printf(sb, "\n %u %-5s %u ", i, 5940 (mode & (1 << i)) ? 
"flow" : "class", map & 3); 5941 if (kbps) 5942 sbuf_printf(sb, "%9u ", kbps); 5943 else 5944 sbuf_printf(sb, " disabled "); 5945 5946 if (ipg) 5947 sbuf_printf(sb, "%13u ", ipg); 5948 else 5949 sbuf_printf(sb, " disabled "); 5950 5951 if (pace_tab[i]) 5952 sbuf_printf(sb, "%10u", pace_tab[i]); 5953 else 5954 sbuf_printf(sb, " disabled"); 5955 } 5956 5957 rc = sbuf_finish(sb); 5958 sbuf_delete(sb); 5959 5960 return (rc); 5961 } 5962 5963 static int 5964 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 5965 { 5966 struct adapter *sc = arg1; 5967 struct sbuf *sb; 5968 int rc, i, j; 5969 uint64_t *p0, *p1; 5970 struct lb_port_stats s[2]; 5971 static const char *stat_name[] = { 5972 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 5973 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 5974 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 5975 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 5976 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 5977 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 5978 "BG2FramesTrunc:", "BG3FramesTrunc:" 5979 }; 5980 5981 rc = sysctl_wire_old_buffer(req, 0); 5982 if (rc != 0) 5983 return (rc); 5984 5985 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5986 if (sb == NULL) 5987 return (ENOMEM); 5988 5989 memset(s, 0, sizeof(s)); 5990 5991 for (i = 0; i < 4; i += 2) { 5992 t4_get_lb_stats(sc, i, &s[0]); 5993 t4_get_lb_stats(sc, i + 1, &s[1]); 5994 5995 p0 = &s[0].octets; 5996 p1 = &s[1].octets; 5997 sbuf_printf(sb, "%s Loopback %u" 5998 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 5999 6000 for (j = 0; j < nitems(stat_name); j++) 6001 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6002 *p0++, *p1++); 6003 } 6004 6005 rc = sbuf_finish(sb); 6006 sbuf_delete(sb); 6007 6008 return (rc); 6009 } 6010 6011 static int 6012 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6013 { 6014 int rc = 0; 6015 struct port_info *pi = arg1; 6016 struct sbuf *sb; 6017 static const char *linkdnreasons[] = { 6018 "non-specific", "remote fault", "autoneg failed", "reserved3", 6019 "PHY overheated", "unknown", "rx los", "reserved7" 6020 }; 6021 6022 rc = sysctl_wire_old_buffer(req, 0); 6023 if (rc != 0) 6024 return(rc); 6025 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6026 if (sb == NULL) 6027 return (ENOMEM); 6028 6029 if (pi->linkdnrc < 0) 6030 sbuf_printf(sb, "n/a"); 6031 else if (pi->linkdnrc < nitems(linkdnreasons)) 6032 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]); 6033 else 6034 sbuf_printf(sb, "%d", pi->linkdnrc); 6035 6036 rc = sbuf_finish(sb); 6037 sbuf_delete(sb); 6038 6039 return (rc); 6040 } 6041 6042 struct mem_desc { 6043 unsigned int base; 6044 unsigned int limit; 6045 unsigned int idx; 6046 }; 6047 6048 static int 6049 mem_desc_cmp(const void *a, const void *b) 6050 { 6051 return ((const struct mem_desc *)a)->base - 6052 ((const struct mem_desc *)b)->base; 6053 } 6054 6055 static void 6056 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6057 unsigned int to) 6058 { 6059 unsigned int size; 6060 6061 size = to - from + 1; 6062 if (size == 0) 6063 return; 6064 6065 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6066 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6067 } 6068 6069 static int 6070 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6071 { 6072 struct adapter *sc = arg1; 6073 struct sbuf *sb; 6074 int rc, i, n; 6075 uint32_t lo, hi, used, alloc; 6076 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6077 static const char *region[] = 
{
		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
		"RQUDP region:", "PBL region:", "TXPBL region:",
		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
		"On-chip queues:"
	};
	struct mem_desc avail[4];
	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
	struct mem_desc *md = mem;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	for (i = 0; i < nitems(mem); i++) {
		mem[i].limit = 0;
		mem[i].idx = i;
	}

	/* Find and sort the populated memory ranges */
	i = 0;
	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (lo & F_EDRAM0_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		avail[i].base = G_EDRAM0_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
		avail[i].idx = 0;
		i++;
	}
	if (lo & F_EDRAM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		avail[i].base = G_EDRAM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
		avail[i].idx = 1;
		i++;
	}
	if (lo & F_EXT_MEM_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
		avail[i].limit = avail[i].base +
		    (G_EXT_MEM_SIZE(hi) << 20);
		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
		i++;
	}
	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
		avail[i].limit = avail[i].base +
		    (G_EXT_MEM1_SIZE(hi) << 20);
		avail[i].idx = 4;
		i++;
	}
	if (i == 0) {
		/* no memory available; don't leak the sbuf */
		sbuf_delete(sb);
		return (0);
	}
	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);

	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);

	/* the next few have explicit upper bounds */
	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
	    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
	md++;

	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
	md->limit = md->base - 1 +
	    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
	    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
	md++;

	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
	} else {
		md->base = 0;
		md->idx = nitems(region);	/* hide it */
	}
	md++;

#define ulp_region(reg) \
	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)

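	/*
	 * Each ulp_region(FOO) line below is shorthand for reading the
	 * A_ULP_FOO_LLIMIT/A_ULP_FOO_ULIMIT register pair into the next
	 * mem_desc slot.  For example, ulp_region(RX_ISCSI) expands to:
	 *
	 *	md->base = t4_read_reg(sc, A_ULP_RX_ISCSI_LLIMIT);
	 *	(md++)->limit = t4_read_reg(sc, A_ULP_RX_ISCSI_ULIMIT);
	 */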
ulp_region(RX_ISCSI); 6178 ulp_region(RX_TDDP); 6179 ulp_region(TX_TPT); 6180 ulp_region(RX_STAG); 6181 ulp_region(RX_RQ); 6182 ulp_region(RX_RQUDP); 6183 ulp_region(RX_PBL); 6184 ulp_region(TX_PBL); 6185 #undef ulp_region 6186 6187 md->base = 0; 6188 md->idx = nitems(region); 6189 if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) { 6190 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR)); 6191 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc, 6192 A_SGE_DBVFIFO_SIZE))) << 2) - 1; 6193 } 6194 md++; 6195 6196 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6197 md->limit = md->base + sc->tids.ntids - 1; 6198 md++; 6199 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6200 md->limit = md->base + sc->tids.ntids - 1; 6201 md++; 6202 6203 md->base = sc->vres.ocq.start; 6204 if (sc->vres.ocq.size) 6205 md->limit = md->base + sc->vres.ocq.size - 1; 6206 else 6207 md->idx = nitems(region); /* hide it */ 6208 md++; 6209 6210 /* add any address-space holes, there can be up to 3 */ 6211 for (n = 0; n < i - 1; n++) 6212 if (avail[n].limit < avail[n + 1].base) 6213 (md++)->base = avail[n].limit; 6214 if (avail[n].limit) 6215 (md++)->base = avail[n].limit; 6216 6217 n = md - mem; 6218 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6219 6220 for (lo = 0; lo < i; lo++) 6221 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6222 avail[lo].limit - 1); 6223 6224 sbuf_printf(sb, "\n"); 6225 for (i = 0; i < n; i++) { 6226 if (mem[i].idx >= nitems(region)) 6227 continue; /* skip holes */ 6228 if (!mem[i].limit) 6229 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6230 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6231 mem[i].limit); 6232 } 6233 6234 sbuf_printf(sb, "\n"); 6235 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6236 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6237 mem_region_show(sb, "uP RAM:", lo, hi); 6238 6239 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6240 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6241 mem_region_show(sb, "uP Extmem2:", lo, hi); 6242 6243 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6244 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6245 G_PMRXMAXPAGE(lo), 6246 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6247 (lo & F_PMRXNUMCHN) ? 2 : 1); 6248 6249 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6250 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6251 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6252 G_PMTXMAXPAGE(lo), 6253 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6254 hi >= (1 << 20) ? 
'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6255 sbuf_printf(sb, "%u p-structs\n", 6256 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6257 6258 for (i = 0; i < 4; i++) { 6259 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6260 if (is_t4(sc)) { 6261 used = G_USED(lo); 6262 alloc = G_ALLOC(lo); 6263 } else { 6264 used = G_T5_USED(lo); 6265 alloc = G_T5_ALLOC(lo); 6266 } 6267 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6268 i, used, alloc); 6269 } 6270 for (i = 0; i < 4; i++) { 6271 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6272 if (is_t4(sc)) { 6273 used = G_USED(lo); 6274 alloc = G_ALLOC(lo); 6275 } else { 6276 used = G_T5_USED(lo); 6277 alloc = G_T5_ALLOC(lo); 6278 } 6279 sbuf_printf(sb, 6280 "\nLoopback %d using %u pages out of %u allocated", 6281 i, used, alloc); 6282 } 6283 6284 rc = sbuf_finish(sb); 6285 sbuf_delete(sb); 6286 6287 return (rc); 6288 } 6289 6290 static inline void 6291 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6292 { 6293 *mask = x | y; 6294 y = htobe64(y); 6295 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6296 } 6297 6298 static int 6299 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6300 { 6301 struct adapter *sc = arg1; 6302 struct sbuf *sb; 6303 int rc, i, n; 6304 6305 rc = sysctl_wire_old_buffer(req, 0); 6306 if (rc != 0) 6307 return (rc); 6308 6309 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6310 if (sb == NULL) 6311 return (ENOMEM); 6312 6313 sbuf_printf(sb, 6314 "Idx Ethernet address Mask Vld Ports PF" 6315 " VF Replication P0 P1 P2 P3 ML"); 6316 n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES : 6317 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 6318 for (i = 0; i < n; i++) { 6319 uint64_t tcamx, tcamy, mask; 6320 uint32_t cls_lo, cls_hi; 6321 uint8_t addr[ETHER_ADDR_LEN]; 6322 6323 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6324 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6325 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6326 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6327 6328 if (tcamx & tcamy) 6329 continue; 6330 6331 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6332 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6333 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], 6334 addr[3], addr[4], addr[5], (uintmax_t)mask, 6335 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6336 G_PORTMAP(cls_hi), G_PF(cls_lo), 6337 (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); 6338 6339 if (cls_lo & F_REPLICATE) { 6340 struct fw_ldst_cmd ldst_cmd; 6341 6342 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6343 ldst_cmd.op_to_addrspace = 6344 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6345 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6346 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6347 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6348 ldst_cmd.u.mps.fid_ctl = 6349 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6350 V_FW_LDST_CMD_CTL(i)); 6351 6352 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6353 "t4mps"); 6354 if (rc) 6355 break; 6356 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6357 sizeof(ldst_cmd), &ldst_cmd); 6358 end_synchronized_op(sc, 0); 6359 6360 if (rc != 0) { 6361 sbuf_printf(sb, 6362 " ------------ error %3u ------------", rc); 6363 rc = 0; 6364 } else { 6365 sbuf_printf(sb, " %08x %08x %08x %08x", 6366 be32toh(ldst_cmd.u.mps.rplc127_96), 6367 be32toh(ldst_cmd.u.mps.rplc95_64), 6368 be32toh(ldst_cmd.u.mps.rplc63_32), 6369 be32toh(ldst_cmd.u.mps.rplc31_0)); 6370 } 6371 } else 6372 sbuf_printf(sb, "%36s", ""); 6373 6374 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6375 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6376 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6377 } 6378 6379 if (rc) 6380 (void) sbuf_finish(sb); 6381 else 6382 rc = sbuf_finish(sb); 6383 sbuf_delete(sb); 6384 6385 return (rc); 6386 } 6387 6388 static int 6389 sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 6390 { 6391 struct adapter *sc = arg1; 6392 struct sbuf *sb; 6393 int rc; 6394 uint16_t mtus[NMTUS]; 6395 6396 rc = sysctl_wire_old_buffer(req, 0); 6397 if (rc != 0) 6398 return (rc); 6399 6400 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6401 if (sb == NULL) 6402 return (ENOMEM); 6403 6404 t4_read_mtu_tbl(sc, mtus, NULL); 6405 6406 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 6407 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 6408 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 6409 mtus[14], mtus[15]); 6410 6411 rc = sbuf_finish(sb); 6412 sbuf_delete(sb); 6413 6414 return (rc); 6415 } 6416 6417 static int 6418 sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 6419 { 6420 struct adapter *sc = arg1; 6421 struct sbuf *sb; 6422 int rc, i; 6423 uint32_t cnt[PM_NSTATS]; 6424 uint64_t cyc[PM_NSTATS]; 6425 static const char *rx_stats[] = { 6426 "Read:", "Write bypass:", "Write mem:", "Flush:" 6427 }; 6428 static const char *tx_stats[] = { 6429 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:" 6430 }; 6431 6432 rc = sysctl_wire_old_buffer(req, 0); 6433 if (rc != 0) 6434 return (rc); 6435 6436 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6437 if (sb == NULL) 6438 return (ENOMEM); 6439 6440 t4_pmtx_get_stats(sc, cnt, cyc); 6441 sbuf_printf(sb, " Tx pcmds Tx bytes"); 6442 for (i = 0; i < ARRAY_SIZE(tx_stats); i++) 6443 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i], 6444 cyc[i]); 6445 6446 t4_pmrx_get_stats(sc, cnt, cyc); 6447 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 6448 for (i = 0; i < ARRAY_SIZE(rx_stats); i++) 6449 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i], 6450 cyc[i]); 6451 6452 rc = sbuf_finish(sb); 6453 sbuf_delete(sb); 6454 6455 return (rc); 6456 } 6457 6458 static int 6459 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 6460 { 6461 struct adapter *sc = arg1; 6462 struct sbuf *sb; 6463 int rc; 6464 struct tp_rdma_stats stats; 6465 6466 rc = sysctl_wire_old_buffer(req, 0); 6467 if (rc != 0) 6468 return (rc); 6469 6470 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6471 if (sb == 
NULL)
		return (ENOMEM);

	t4_tp_get_rdma_stats(sc, &stats);
	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_tcp_stats v4, v6;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_tcp_stats(sc, &v4, &v6);
	sbuf_printf(sb,
	    "                                IP                 IPv6\n");
	sbuf_printf(sb, "OutRsts:     %20u %20u\n",
	    v4.tcpOutRsts, v6.tcpOutRsts);
	sbuf_printf(sb, "InSegs:      %20ju %20ju\n",
	    v4.tcpInSegs, v6.tcpInSegs);
	sbuf_printf(sb, "OutSegs:     %20ju %20ju\n",
	    v4.tcpOutSegs, v6.tcpOutSegs);
	sbuf_printf(sb, "RetransSegs: %20ju %20ju",
	    v4.tcpRetransSegs, v6.tcpRetransSegs);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	if (t->netids) {
		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
		    t->etid_base + t->netids - 1);
	}

	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}

static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
	    "channel 3\n");
	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2], 6608 stats.hdrInErrs[3]); 6609 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 6610 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2], 6611 stats.tcpInErrs[3]); 6612 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 6613 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2], 6614 stats.tcp6InErrs[3]); 6615 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 6616 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2], 6617 stats.tnlCongDrops[3]); 6618 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 6619 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2], 6620 stats.tnlTxDrops[3]); 6621 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 6622 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1], 6623 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]); 6624 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 6625 stats.ofldChanDrops[0], stats.ofldChanDrops[1], 6626 stats.ofldChanDrops[2], stats.ofldChanDrops[3]); 6627 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 6628 stats.ofldNoNeigh, stats.ofldCongDefer); 6629 6630 rc = sbuf_finish(sb); 6631 sbuf_delete(sb); 6632 6633 return (rc); 6634 } 6635 6636 struct field_desc { 6637 const char *name; 6638 u_int start; 6639 u_int width; 6640 }; 6641 6642 static void 6643 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 6644 { 6645 char buf[32]; 6646 int line_size = 0; 6647 6648 while (f->name) { 6649 uint64_t mask = (1ULL << f->width) - 1; 6650 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 6651 ((uintmax_t)v >> f->start) & mask); 6652 6653 if (line_size + len >= 79) { 6654 line_size = 8; 6655 sbuf_printf(sb, "\n "); 6656 } 6657 sbuf_printf(sb, "%s ", buf); 6658 line_size += len + 1; 6659 f++; 6660 } 6661 sbuf_printf(sb, "\n"); 6662 } 6663 6664 static struct field_desc tp_la0[] = { 6665 { "RcfOpCodeOut", 60, 4 }, 6666 { "State", 56, 4 }, 6667 { "WcfState", 52, 4 }, 6668 { "RcfOpcSrcOut", 50, 2 }, 6669 { "CRxError", 49, 1 }, 6670 { "ERxError", 48, 1 }, 6671 { "SanityFailed", 47, 1 }, 6672 { "SpuriousMsg", 46, 1 }, 6673 { "FlushInputMsg", 45, 1 }, 6674 { "FlushInputCpl", 44, 1 }, 6675 { "RssUpBit", 43, 1 }, 6676 { "RssFilterHit", 42, 1 }, 6677 { "Tid", 32, 10 }, 6678 { "InitTcb", 31, 1 }, 6679 { "LineNumber", 24, 7 }, 6680 { "Emsg", 23, 1 }, 6681 { "EdataOut", 22, 1 }, 6682 { "Cmsg", 21, 1 }, 6683 { "CdataOut", 20, 1 }, 6684 { "EreadPdu", 19, 1 }, 6685 { "CreadPdu", 18, 1 }, 6686 { "TunnelPkt", 17, 1 }, 6687 { "RcfPeerFin", 16, 1 }, 6688 { "RcfReasonOut", 12, 4 }, 6689 { "TxCchannel", 10, 2 }, 6690 { "RcfTxChannel", 8, 2 }, 6691 { "RxEchannel", 6, 2 }, 6692 { "RcfRxChannel", 5, 1 }, 6693 { "RcfDataOutSrdy", 4, 1 }, 6694 { "RxDvld", 3, 1 }, 6695 { "RxOoDvld", 2, 1 }, 6696 { "RxCongestion", 1, 1 }, 6697 { "TxCongestion", 0, 1 }, 6698 { NULL } 6699 }; 6700 6701 static struct field_desc tp_la1[] = { 6702 { "CplCmdIn", 56, 8 }, 6703 { "CplCmdOut", 48, 8 }, 6704 { "ESynOut", 47, 1 }, 6705 { "EAckOut", 46, 1 }, 6706 { "EFinOut", 45, 1 }, 6707 { "ERstOut", 44, 1 }, 6708 { "SynIn", 43, 1 }, 6709 { "AckIn", 42, 1 }, 6710 { "FinIn", 41, 1 }, 6711 { "RstIn", 40, 1 }, 6712 { "DataIn", 39, 1 }, 6713 { "DataInVld", 38, 1 }, 6714 { "PadIn", 37, 1 }, 6715 { "RxBufEmpty", 36, 1 }, 6716 { "RxDdp", 35, 1 }, 6717 { "RxFbCongestion", 34, 1 }, 6718 { "TxFbCongestion", 33, 1 }, 6719 { "TxPktSumSrdy", 32, 1 }, 6720 { "RcfUlpType", 28, 4 }, 6721 { "Eread", 27, 1 }, 6722 { "Ebypass", 26, 1 }, 6723 { "Esave", 25, 1 }, 6724 { 
"Static0", 24, 1 }, 6725 { "Cread", 23, 1 }, 6726 { "Cbypass", 22, 1 }, 6727 { "Csave", 21, 1 }, 6728 { "CPktOut", 20, 1 }, 6729 { "RxPagePoolFull", 18, 2 }, 6730 { "RxLpbkPkt", 17, 1 }, 6731 { "TxLpbkPkt", 16, 1 }, 6732 { "RxVfValid", 15, 1 }, 6733 { "SynLearned", 14, 1 }, 6734 { "SetDelEntry", 13, 1 }, 6735 { "SetInvEntry", 12, 1 }, 6736 { "CpcmdDvld", 11, 1 }, 6737 { "CpcmdSave", 10, 1 }, 6738 { "RxPstructsFull", 8, 2 }, 6739 { "EpcmdDvld", 7, 1 }, 6740 { "EpcmdFlush", 6, 1 }, 6741 { "EpcmdTrimPrefix", 5, 1 }, 6742 { "EpcmdTrimPostfix", 4, 1 }, 6743 { "ERssIp4Pkt", 3, 1 }, 6744 { "ERssIp6Pkt", 2, 1 }, 6745 { "ERssTcpUdpPkt", 1, 1 }, 6746 { "ERssFceFipPkt", 0, 1 }, 6747 { NULL } 6748 }; 6749 6750 static struct field_desc tp_la2[] = { 6751 { "CplCmdIn", 56, 8 }, 6752 { "MpsVfVld", 55, 1 }, 6753 { "MpsPf", 52, 3 }, 6754 { "MpsVf", 44, 8 }, 6755 { "SynIn", 43, 1 }, 6756 { "AckIn", 42, 1 }, 6757 { "FinIn", 41, 1 }, 6758 { "RstIn", 40, 1 }, 6759 { "DataIn", 39, 1 }, 6760 { "DataInVld", 38, 1 }, 6761 { "PadIn", 37, 1 }, 6762 { "RxBufEmpty", 36, 1 }, 6763 { "RxDdp", 35, 1 }, 6764 { "RxFbCongestion", 34, 1 }, 6765 { "TxFbCongestion", 33, 1 }, 6766 { "TxPktSumSrdy", 32, 1 }, 6767 { "RcfUlpType", 28, 4 }, 6768 { "Eread", 27, 1 }, 6769 { "Ebypass", 26, 1 }, 6770 { "Esave", 25, 1 }, 6771 { "Static0", 24, 1 }, 6772 { "Cread", 23, 1 }, 6773 { "Cbypass", 22, 1 }, 6774 { "Csave", 21, 1 }, 6775 { "CPktOut", 20, 1 }, 6776 { "RxPagePoolFull", 18, 2 }, 6777 { "RxLpbkPkt", 17, 1 }, 6778 { "TxLpbkPkt", 16, 1 }, 6779 { "RxVfValid", 15, 1 }, 6780 { "SynLearned", 14, 1 }, 6781 { "SetDelEntry", 13, 1 }, 6782 { "SetInvEntry", 12, 1 }, 6783 { "CpcmdDvld", 11, 1 }, 6784 { "CpcmdSave", 10, 1 }, 6785 { "RxPstructsFull", 8, 2 }, 6786 { "EpcmdDvld", 7, 1 }, 6787 { "EpcmdFlush", 6, 1 }, 6788 { "EpcmdTrimPrefix", 5, 1 }, 6789 { "EpcmdTrimPostfix", 4, 1 }, 6790 { "ERssIp4Pkt", 3, 1 }, 6791 { "ERssIp6Pkt", 2, 1 }, 6792 { "ERssTcpUdpPkt", 1, 1 }, 6793 { "ERssFceFipPkt", 0, 1 }, 6794 { NULL } 6795 }; 6796 6797 static void 6798 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 6799 { 6800 6801 field_desc_show(sb, *p, tp_la0); 6802 } 6803 6804 static void 6805 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 6806 { 6807 6808 if (idx) 6809 sbuf_printf(sb, "\n"); 6810 field_desc_show(sb, p[0], tp_la0); 6811 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 6812 field_desc_show(sb, p[1], tp_la0); 6813 } 6814 6815 static void 6816 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 6817 { 6818 6819 if (idx) 6820 sbuf_printf(sb, "\n"); 6821 field_desc_show(sb, p[0], tp_la0); 6822 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 6823 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 6824 } 6825 6826 static int 6827 sysctl_tp_la(SYSCTL_HANDLER_ARGS) 6828 { 6829 struct adapter *sc = arg1; 6830 struct sbuf *sb; 6831 uint64_t *buf, *p; 6832 int rc; 6833 u_int i, inc; 6834 void (*show_func)(struct sbuf *, uint64_t *, int); 6835 6836 rc = sysctl_wire_old_buffer(req, 0); 6837 if (rc != 0) 6838 return (rc); 6839 6840 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6841 if (sb == NULL) 6842 return (ENOMEM); 6843 6844 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 6845 6846 t4_tp_read_la(sc, buf, NULL); 6847 p = buf; 6848 6849 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 6850 case 2: 6851 inc = 2; 6852 show_func = tp_la_show2; 6853 break; 6854 case 3: 6855 inc = 2; 6856 show_func = tp_la_show3; 6857 break; 6858 default: 6859 inc = 1; 6860 show_func = tp_la_show; 6861 } 6862 6863 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 6864 (*show_func)(sb, p, i); 6865 6866 rc = sbuf_finish(sb); 6867 sbuf_delete(sb); 6868 free(buf, M_CXGBE); 6869 return (rc); 6870 } 6871 6872 static int 6873 sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 6874 { 6875 struct adapter *sc = arg1; 6876 struct sbuf *sb; 6877 int rc; 6878 u64 nrate[NCHAN], orate[NCHAN]; 6879 6880 rc = sysctl_wire_old_buffer(req, 0); 6881 if (rc != 0) 6882 return (rc); 6883 6884 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6885 if (sb == NULL) 6886 return (ENOMEM); 6887 6888 t4_get_chan_txrate(sc, nrate, orate); 6889 sbuf_printf(sb, " channel 0 channel 1 channel 2 " 6890 "channel 3\n"); 6891 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 6892 nrate[0], nrate[1], nrate[2], nrate[3]); 6893 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 6894 orate[0], orate[1], orate[2], orate[3]); 6895 6896 rc = sbuf_finish(sb); 6897 sbuf_delete(sb); 6898 6899 return (rc); 6900 } 6901 6902 static int 6903 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 6904 { 6905 struct adapter *sc = arg1; 6906 struct sbuf *sb; 6907 uint32_t *buf, *p; 6908 int rc, i; 6909 6910 rc = sysctl_wire_old_buffer(req, 0); 6911 if (rc != 0) 6912 return (rc); 6913 6914 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6915 if (sb == NULL) 6916 return (ENOMEM); 6917 6918 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 6919 M_ZERO | M_WAITOK); 6920 6921 t4_ulprx_read_la(sc, buf); 6922 p = buf; 6923 6924 sbuf_printf(sb, " Pcmd Type Message" 6925 " Data"); 6926 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 6927 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 6928 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 6929 } 6930 6931 rc = sbuf_finish(sb); 6932 sbuf_delete(sb); 6933 free(buf, M_CXGBE); 6934 return (rc); 6935 } 6936 6937 static int 6938 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 6939 { 6940 struct adapter *sc = arg1; 6941 struct sbuf *sb; 6942 int rc, v; 6943 6944 rc = sysctl_wire_old_buffer(req, 0); 6945 if (rc != 0) 6946 return (rc); 6947 6948 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6949 if (sb == NULL) 6950 return (ENOMEM); 6951 6952 v = t4_read_reg(sc, A_SGE_STAT_CFG); 6953 if (G_STATSOURCE_T5(v) == 7) { 6954 if (G_STATMODE(v) == 0) { 6955 sbuf_printf(sb, "total %d, incomplete %d", 6956 t4_read_reg(sc, A_SGE_STAT_TOTAL), 6957 t4_read_reg(sc, A_SGE_STAT_MATCH)); 6958 } else if (G_STATMODE(v) == 1) { 6959 sbuf_printf(sb, "total %d, data overflow %d", 6960 t4_read_reg(sc, A_SGE_STAT_TOTAL), 6961 t4_read_reg(sc, A_SGE_STAT_MATCH)); 6962 } 6963 } 6964 rc = sbuf_finish(sb); 6965 sbuf_delete(sb); 6966 6967 return (rc); 6968 } 6969 #endif 6970 6971 static uint32_t 6972 fconf_to_mode(uint32_t 
fconf) 6973 { 6974 uint32_t mode; 6975 6976 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 6977 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 6978 6979 if (fconf & F_FRAGMENTATION) 6980 mode |= T4_FILTER_IP_FRAGMENT; 6981 6982 if (fconf & F_MPSHITTYPE) 6983 mode |= T4_FILTER_MPS_HIT_TYPE; 6984 6985 if (fconf & F_MACMATCH) 6986 mode |= T4_FILTER_MAC_IDX; 6987 6988 if (fconf & F_ETHERTYPE) 6989 mode |= T4_FILTER_ETH_TYPE; 6990 6991 if (fconf & F_PROTOCOL) 6992 mode |= T4_FILTER_IP_PROTO; 6993 6994 if (fconf & F_TOS) 6995 mode |= T4_FILTER_IP_TOS; 6996 6997 if (fconf & F_VLAN) 6998 mode |= T4_FILTER_VLAN; 6999 7000 if (fconf & F_VNIC_ID) 7001 mode |= T4_FILTER_VNIC; 7002 7003 if (fconf & F_PORT) 7004 mode |= T4_FILTER_PORT; 7005 7006 if (fconf & F_FCOE) 7007 mode |= T4_FILTER_FCoE; 7008 7009 return (mode); 7010 } 7011 7012 static uint32_t 7013 mode_to_fconf(uint32_t mode) 7014 { 7015 uint32_t fconf = 0; 7016 7017 if (mode & T4_FILTER_IP_FRAGMENT) 7018 fconf |= F_FRAGMENTATION; 7019 7020 if (mode & T4_FILTER_MPS_HIT_TYPE) 7021 fconf |= F_MPSHITTYPE; 7022 7023 if (mode & T4_FILTER_MAC_IDX) 7024 fconf |= F_MACMATCH; 7025 7026 if (mode & T4_FILTER_ETH_TYPE) 7027 fconf |= F_ETHERTYPE; 7028 7029 if (mode & T4_FILTER_IP_PROTO) 7030 fconf |= F_PROTOCOL; 7031 7032 if (mode & T4_FILTER_IP_TOS) 7033 fconf |= F_TOS; 7034 7035 if (mode & T4_FILTER_VLAN) 7036 fconf |= F_VLAN; 7037 7038 if (mode & T4_FILTER_VNIC) 7039 fconf |= F_VNIC_ID; 7040 7041 if (mode & T4_FILTER_PORT) 7042 fconf |= F_PORT; 7043 7044 if (mode & T4_FILTER_FCoE) 7045 fconf |= F_FCOE; 7046 7047 return (fconf); 7048 } 7049 7050 static uint32_t 7051 fspec_to_fconf(struct t4_filter_specification *fs) 7052 { 7053 uint32_t fconf = 0; 7054 7055 if (fs->val.frag || fs->mask.frag) 7056 fconf |= F_FRAGMENTATION; 7057 7058 if (fs->val.matchtype || fs->mask.matchtype) 7059 fconf |= F_MPSHITTYPE; 7060 7061 if (fs->val.macidx || fs->mask.macidx) 7062 fconf |= F_MACMATCH; 7063 7064 if (fs->val.ethtype || fs->mask.ethtype) 7065 fconf |= F_ETHERTYPE; 7066 7067 if (fs->val.proto || fs->mask.proto) 7068 fconf |= F_PROTOCOL; 7069 7070 if (fs->val.tos || fs->mask.tos) 7071 fconf |= F_TOS; 7072 7073 if (fs->val.vlan_vld || fs->mask.vlan_vld) 7074 fconf |= F_VLAN; 7075 7076 if (fs->val.vnic_vld || fs->mask.vnic_vld) 7077 fconf |= F_VNIC_ID; 7078 7079 if (fs->val.iport || fs->mask.iport) 7080 fconf |= F_PORT; 7081 7082 if (fs->val.fcoe || fs->mask.fcoe) 7083 fconf |= F_FCOE; 7084 7085 return (fconf); 7086 } 7087 7088 static int 7089 get_filter_mode(struct adapter *sc, uint32_t *mode) 7090 { 7091 int rc; 7092 uint32_t fconf; 7093 7094 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7095 "t4getfm"); 7096 if (rc) 7097 return (rc); 7098 7099 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1, 7100 A_TP_VLAN_PRI_MAP); 7101 7102 if (sc->params.tp.vlan_pri_map != fconf) { 7103 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n", 7104 device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map, 7105 fconf); 7106 sc->params.tp.vlan_pri_map = fconf; 7107 } 7108 7109 *mode = fconf_to_mode(sc->params.tp.vlan_pri_map); 7110 7111 end_synchronized_op(sc, LOCK_HELD); 7112 return (0); 7113 } 7114 7115 static int 7116 set_filter_mode(struct adapter *sc, uint32_t mode) 7117 { 7118 uint32_t fconf; 7119 int rc; 7120 7121 fconf = mode_to_fconf(mode); 7122 7123 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7124 "t4setfm"); 7125 if (rc) 7126 return (rc); 7127 7128 if (sc->tids.ftids_in_use > 
0) { 7129 rc = EBUSY; 7130 goto done; 7131 } 7132 7133 #ifdef TCP_OFFLOAD 7134 if (sc->offload_map) { 7135 rc = EBUSY; 7136 goto done; 7137 } 7138 #endif 7139 7140 #ifdef notyet 7141 rc = -t4_set_filter_mode(sc, fconf); 7142 if (rc == 0) 7143 sc->filter_mode = fconf; 7144 #else 7145 rc = ENOTSUP; 7146 #endif 7147 7148 done: 7149 end_synchronized_op(sc, LOCK_HELD); 7150 return (rc); 7151 } 7152 7153 static inline uint64_t 7154 get_filter_hits(struct adapter *sc, uint32_t fid) 7155 { 7156 uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 7157 uint64_t hits; 7158 7159 memwin_info(sc, 0, &mw_base, NULL); 7160 off = position_memwin(sc, 0, 7161 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE); 7162 if (is_t4(sc)) { 7163 hits = t4_read_reg64(sc, mw_base + off + 16); 7164 hits = be64toh(hits); 7165 } else { 7166 hits = t4_read_reg(sc, mw_base + off + 24); 7167 hits = be32toh(hits); 7168 } 7169 7170 return (hits); 7171 } 7172 7173 static int 7174 get_filter(struct adapter *sc, struct t4_filter *t) 7175 { 7176 int i, rc, nfilters = sc->tids.nftids; 7177 struct filter_entry *f; 7178 7179 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7180 "t4getf"); 7181 if (rc) 7182 return (rc); 7183 7184 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 7185 t->idx >= nfilters) { 7186 t->idx = 0xffffffff; 7187 goto done; 7188 } 7189 7190 f = &sc->tids.ftid_tab[t->idx]; 7191 for (i = t->idx; i < nfilters; i++, f++) { 7192 if (f->valid) { 7193 t->idx = i; 7194 t->l2tidx = f->l2t ? f->l2t->idx : 0; 7195 t->smtidx = f->smtidx; 7196 if (f->fs.hitcnts) 7197 t->hits = get_filter_hits(sc, t->idx); 7198 else 7199 t->hits = UINT64_MAX; 7200 t->fs = f->fs; 7201 7202 goto done; 7203 } 7204 } 7205 7206 t->idx = 0xffffffff; 7207 done: 7208 end_synchronized_op(sc, LOCK_HELD); 7209 return (0); 7210 } 7211 7212 static int 7213 set_filter(struct adapter *sc, struct t4_filter *t) 7214 { 7215 unsigned int nfilters, nports; 7216 struct filter_entry *f; 7217 int i, rc; 7218 7219 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 7220 if (rc) 7221 return (rc); 7222 7223 nfilters = sc->tids.nftids; 7224 nports = sc->params.nports; 7225 7226 if (nfilters == 0) { 7227 rc = ENOTSUP; 7228 goto done; 7229 } 7230 7231 if (!(sc->flags & FULL_INIT_DONE)) { 7232 rc = EAGAIN; 7233 goto done; 7234 } 7235 7236 if (t->idx >= nfilters) { 7237 rc = EINVAL; 7238 goto done; 7239 } 7240 7241 /* Validate against the global filter mode */ 7242 if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) != 7243 sc->params.tp.vlan_pri_map) { 7244 rc = E2BIG; 7245 goto done; 7246 } 7247 7248 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 7249 rc = EINVAL; 7250 goto done; 7251 } 7252 7253 if (t->fs.val.iport >= nports) { 7254 rc = EINVAL; 7255 goto done; 7256 } 7257 7258 /* Can't specify an iq if not steering to it */ 7259 if (!t->fs.dirsteer && t->fs.iq) { 7260 rc = EINVAL; 7261 goto done; 7262 } 7263 7264 /* IPv6 filter idx must be 4 aligned */ 7265 if (t->fs.type == 1 && 7266 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 7267 rc = EINVAL; 7268 goto done; 7269 } 7270 7271 if (sc->tids.ftid_tab == NULL) { 7272 KASSERT(sc->tids.ftids_in_use == 0, 7273 ("%s: no memory allocated but filters_in_use > 0", 7274 __func__)); 7275 7276 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 7277 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 7278 if (sc->tids.ftid_tab == NULL) { 7279 rc = ENOMEM; 7280 goto done; 7281 } 7282 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 7283 } 7284 
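	/*
	 * An IPv6 filter occupies four consecutive slots (hence the 4-aligned
	 * index requirement checked earlier), while an IPv4 filter uses just
	 * one.  Make sure every slot the new filter would occupy is neither
	 * busy nor locked; for IPv4 (fs.type == 0) the loop stops after
	 * checking the single slot at t->idx.
	 */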
7285 for (i = 0; i < 4; i++) { 7286 f = &sc->tids.ftid_tab[t->idx + i]; 7287 7288 if (f->pending || f->valid) { 7289 rc = EBUSY; 7290 goto done; 7291 } 7292 if (f->locked) { 7293 rc = EPERM; 7294 goto done; 7295 } 7296 7297 if (t->fs.type == 0) 7298 break; 7299 } 7300 7301 f = &sc->tids.ftid_tab[t->idx]; 7302 f->fs = t->fs; 7303 7304 rc = set_filter_wr(sc, t->idx); 7305 done: 7306 end_synchronized_op(sc, 0); 7307 7308 if (rc == 0) { 7309 mtx_lock(&sc->tids.ftid_lock); 7310 for (;;) { 7311 if (f->pending == 0) { 7312 rc = f->valid ? 0 : EIO; 7313 break; 7314 } 7315 7316 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7317 PCATCH, "t4setfw", 0)) { 7318 rc = EINPROGRESS; 7319 break; 7320 } 7321 } 7322 mtx_unlock(&sc->tids.ftid_lock); 7323 } 7324 return (rc); 7325 } 7326 7327 static int 7328 del_filter(struct adapter *sc, struct t4_filter *t) 7329 { 7330 unsigned int nfilters; 7331 struct filter_entry *f; 7332 int rc; 7333 7334 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 7335 if (rc) 7336 return (rc); 7337 7338 nfilters = sc->tids.nftids; 7339 7340 if (nfilters == 0) { 7341 rc = ENOTSUP; 7342 goto done; 7343 } 7344 7345 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 7346 t->idx >= nfilters) { 7347 rc = EINVAL; 7348 goto done; 7349 } 7350 7351 if (!(sc->flags & FULL_INIT_DONE)) { 7352 rc = EAGAIN; 7353 goto done; 7354 } 7355 7356 f = &sc->tids.ftid_tab[t->idx]; 7357 7358 if (f->pending) { 7359 rc = EBUSY; 7360 goto done; 7361 } 7362 if (f->locked) { 7363 rc = EPERM; 7364 goto done; 7365 } 7366 7367 if (f->valid) { 7368 t->fs = f->fs; /* extra info for the caller */ 7369 rc = del_filter_wr(sc, t->idx); 7370 } 7371 7372 done: 7373 end_synchronized_op(sc, 0); 7374 7375 if (rc == 0) { 7376 mtx_lock(&sc->tids.ftid_lock); 7377 for (;;) { 7378 if (f->pending == 0) { 7379 rc = f->valid ? EIO : 0; 7380 break; 7381 } 7382 7383 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7384 PCATCH, "t4delfw", 0)) { 7385 rc = EINPROGRESS; 7386 break; 7387 } 7388 } 7389 mtx_unlock(&sc->tids.ftid_lock); 7390 } 7391 7392 return (rc); 7393 } 7394 7395 static void 7396 clear_filter(struct filter_entry *f) 7397 { 7398 if (f->l2t) 7399 t4_l2t_release(f->l2t); 7400 7401 bzero(f, sizeof (*f)); 7402 } 7403 7404 static int 7405 set_filter_wr(struct adapter *sc, int fidx) 7406 { 7407 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 7408 struct fw_filter_wr *fwr; 7409 unsigned int ftid; 7410 struct wrq_cookie cookie; 7411 7412 ASSERT_SYNCHRONIZED_OP(sc); 7413 7414 if (f->fs.newdmac || f->fs.newvlan) { 7415 /* This filter needs an L2T entry; allocate one. 
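		 * Filters that rewrite the destination MAC or insert/rewrite
		 * a VLAN tag act as one-entry L2 switches, and the rewritten
		 * header comes from a switching entry in the L2 table.
		 * Filters that only drop or steer never take this path.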
*/ 7416 f->l2t = t4_l2t_alloc_switching(sc->l2t); 7417 if (f->l2t == NULL) 7418 return (EAGAIN); 7419 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 7420 f->fs.dmac)) { 7421 t4_l2t_release(f->l2t); 7422 f->l2t = NULL; 7423 return (ENOMEM); 7424 } 7425 } 7426 7427 ftid = sc->tids.ftid_base + fidx; 7428 7429 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 7430 if (fwr == NULL) 7431 return (ENOMEM); 7432 bzero(fwr, sizeof(*fwr)); 7433 7434 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 7435 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 7436 fwr->tid_to_iq = 7437 htobe32(V_FW_FILTER_WR_TID(ftid) | 7438 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 7439 V_FW_FILTER_WR_NOREPLY(0) | 7440 V_FW_FILTER_WR_IQ(f->fs.iq)); 7441 fwr->del_filter_to_l2tix = 7442 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 7443 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 7444 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 7445 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 7446 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 7447 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 7448 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 7449 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 7450 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 7451 f->fs.newvlan == VLAN_REWRITE) | 7452 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 7453 f->fs.newvlan == VLAN_REWRITE) | 7454 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 7455 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 7456 V_FW_FILTER_WR_PRIO(f->fs.prio) | 7457 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); 7458 fwr->ethtype = htobe16(f->fs.val.ethtype); 7459 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 7460 fwr->frag_to_ovlan_vldm = 7461 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 7462 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 7463 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 7464 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) | 7465 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 7466 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld)); 7467 fwr->smac_sel = 0; 7468 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 7469 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 7470 fwr->maci_to_matchtypem = 7471 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 7472 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 7473 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 7474 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 7475 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 7476 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 7477 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 7478 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 7479 fwr->ptcl = f->fs.val.proto; 7480 fwr->ptclm = f->fs.mask.proto; 7481 fwr->ttyp = f->fs.val.tos; 7482 fwr->ttypm = f->fs.mask.tos; 7483 fwr->ivlan = htobe16(f->fs.val.vlan); 7484 fwr->ivlanm = htobe16(f->fs.mask.vlan); 7485 fwr->ovlan = htobe16(f->fs.val.vnic); 7486 fwr->ovlanm = htobe16(f->fs.mask.vnic); 7487 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 7488 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 7489 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 7490 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 7491 fwr->lp = htobe16(f->fs.val.dport); 7492 fwr->lpm = htobe16(f->fs.mask.dport); 7493 fwr->fp = htobe16(f->fs.val.sport); 7494 fwr->fpm = htobe16(f->fs.mask.sport); 7495 if (f->fs.newsmac) 7496 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 7497 7498 f->pending = 1; 7499 sc->tids.ftids_in_use++; 7500 7501 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 7502 return (0); 7503 } 7504 7505 static int 7506 del_filter_wr(struct adapter *sc, int fidx) 7507 { 7508 struct 
filter_entry *f = &sc->tids.ftid_tab[fidx]; 7509 struct fw_filter_wr *fwr; 7510 unsigned int ftid; 7511 struct wrq_cookie cookie; 7512 7513 ftid = sc->tids.ftid_base + fidx; 7514 7515 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 7516 if (fwr == NULL) 7517 return (ENOMEM); 7518 bzero(fwr, sizeof (*fwr)); 7519 7520 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 7521 7522 f->pending = 1; 7523 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 7524 return (0); 7525 } 7526 7527 int 7528 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 7529 { 7530 struct adapter *sc = iq->adapter; 7531 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 7532 unsigned int idx = GET_TID(rpl); 7533 unsigned int rc; 7534 struct filter_entry *f; 7535 7536 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 7537 rss->opcode)); 7538 7539 if (is_ftid(sc, idx)) { 7540 7541 idx -= sc->tids.ftid_base; 7542 f = &sc->tids.ftid_tab[idx]; 7543 rc = G_COOKIE(rpl->cookie); 7544 7545 mtx_lock(&sc->tids.ftid_lock); 7546 if (rc == FW_FILTER_WR_FLT_ADDED) { 7547 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 7548 __func__, idx)); 7549 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 7550 f->pending = 0; /* asynchronous setup completed */ 7551 f->valid = 1; 7552 } else { 7553 if (rc != FW_FILTER_WR_FLT_DELETED) { 7554 /* Add or delete failed, display an error */ 7555 log(LOG_ERR, 7556 "filter %u setup failed with error %u\n", 7557 idx, rc); 7558 } 7559 7560 clear_filter(f); 7561 sc->tids.ftids_in_use--; 7562 } 7563 wakeup(&sc->tids.ftid_tab); 7564 mtx_unlock(&sc->tids.ftid_lock); 7565 } 7566 7567 return (0); 7568 } 7569 7570 static int 7571 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 7572 { 7573 int rc; 7574 7575 if (cntxt->cid > M_CTXTQID) 7576 return (EINVAL); 7577 7578 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 7579 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 7580 return (EINVAL); 7581 7582 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 7583 if (rc) 7584 return (rc); 7585 7586 if (sc->flags & FW_OK) { 7587 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 7588 &cntxt->data[0]); 7589 if (rc == 0) 7590 goto done; 7591 } 7592 7593 /* 7594 * Read via firmware failed or wasn't even attempted. Read directly via 7595 * the backdoor. 
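	 *
	 * ("Backdoor" here means reading the context directly from the
	 * hardware with register accesses, bypassing the firmware mailbox,
	 * so it works even when the firmware cannot service the read.)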
7596 */ 7597 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 7598 done: 7599 end_synchronized_op(sc, 0); 7600 return (rc); 7601 } 7602 7603 static int 7604 load_fw(struct adapter *sc, struct t4_data *fw) 7605 { 7606 int rc; 7607 uint8_t *fw_data; 7608 7609 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 7610 if (rc) 7611 return (rc); 7612 7613 if (sc->flags & FULL_INIT_DONE) { 7614 rc = EBUSY; 7615 goto done; 7616 } 7617 7618 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 7619 if (fw_data == NULL) { 7620 rc = ENOMEM; 7621 goto done; 7622 } 7623 7624 rc = copyin(fw->data, fw_data, fw->len); 7625 if (rc == 0) 7626 rc = -t4_load_fw(sc, fw_data, fw->len); 7627 7628 free(fw_data, M_CXGBE); 7629 done: 7630 end_synchronized_op(sc, 0); 7631 return (rc); 7632 } 7633 7634 static int 7635 read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 7636 { 7637 uint32_t addr, off, remaining, i, n; 7638 uint32_t *buf, *b; 7639 uint32_t mw_base, mw_aperture; 7640 int rc; 7641 uint8_t *dst; 7642 7643 rc = validate_mem_range(sc, mr->addr, mr->len); 7644 if (rc != 0) 7645 return (rc); 7646 7647 memwin_info(sc, win, &mw_base, &mw_aperture); 7648 buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK); 7649 addr = mr->addr; 7650 remaining = mr->len; 7651 dst = (void *)mr->data; 7652 7653 while (remaining) { 7654 off = position_memwin(sc, win, addr); 7655 7656 /* number of bytes that we'll copy in the inner loop */ 7657 n = min(remaining, mw_aperture - off); 7658 for (i = 0; i < n; i += 4) 7659 *b++ = t4_read_reg(sc, mw_base + off + i); 7660 7661 rc = copyout(buf, dst, n); 7662 if (rc != 0) 7663 break; 7664 7665 b = buf; 7666 dst += n; 7667 remaining -= n; 7668 addr += n; 7669 } 7670 7671 free(buf, M_CXGBE); 7672 return (rc); 7673 } 7674 7675 static int 7676 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 7677 { 7678 int rc; 7679 7680 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 7681 return (EINVAL); 7682 7683 if (i2cd->len > sizeof(i2cd->data)) 7684 return (EFBIG); 7685 7686 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 7687 if (rc) 7688 return (rc); 7689 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 7690 i2cd->offset, i2cd->len, &i2cd->data[0]); 7691 end_synchronized_op(sc, 0); 7692 7693 return (rc); 7694 } 7695 7696 static int 7697 in_range(int val, int lo, int hi) 7698 { 7699 7700 return (val < 0 || (val <= hi && val >= lo)); 7701 } 7702 7703 static int 7704 set_sched_class(struct adapter *sc, struct t4_sched_params *p) 7705 { 7706 int fw_subcmd, fw_type, rc; 7707 7708 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc"); 7709 if (rc) 7710 return (rc); 7711 7712 if (!(sc->flags & FULL_INIT_DONE)) { 7713 rc = EAGAIN; 7714 goto done; 7715 } 7716 7717 /* 7718 * Translate the cxgbetool parameters into T4 firmware parameters. (The 7719 * sub-command and type are in common locations.) 
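	 *
	 * For example, a request carrying SCHED_CLASS_SUBCMD_PARAMS and
	 * SCHED_CLASS_TYPE_PACKET is passed on to the firmware as
	 * FW_SCHED_SC_PARAMS / FW_SCHED_TYPE_PKTSCHED.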
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ... */
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ... */
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Vet our parameters ... */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature, or fail the call if a parameter that the
		 * selected level requires was left unset.
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize, 1);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	pi = sc->port[p->port];
	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If p->queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[pi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the
	 * interface.
	 */
	for_each_txq(pi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ?
i : 0); 7924 } 7925 7926 int 7927 t4_os_pci_save_state(struct adapter *sc) 7928 { 7929 device_t dev; 7930 struct pci_devinfo *dinfo; 7931 7932 dev = sc->dev; 7933 dinfo = device_get_ivars(dev); 7934 7935 pci_cfg_save(dev, dinfo, 0); 7936 return (0); 7937 } 7938 7939 int 7940 t4_os_pci_restore_state(struct adapter *sc) 7941 { 7942 device_t dev; 7943 struct pci_devinfo *dinfo; 7944 7945 dev = sc->dev; 7946 dinfo = device_get_ivars(dev); 7947 7948 pci_cfg_restore(dev, dinfo); 7949 return (0); 7950 } 7951 7952 void 7953 t4_os_portmod_changed(const struct adapter *sc, int idx) 7954 { 7955 struct port_info *pi = sc->port[idx]; 7956 static const char *mod_str[] = { 7957 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 7958 }; 7959 7960 build_medialist(pi, &pi->media); 7961 #ifdef DEV_NETMAP 7962 build_medialist(pi, &pi->nm_media); 7963 #endif 7964 7965 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 7966 if_printf(pi->ifp, "transceiver unplugged.\n"); 7967 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 7968 if_printf(pi->ifp, "unknown transceiver inserted.\n"); 7969 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 7970 if_printf(pi->ifp, "unsupported transceiver inserted.\n"); 7971 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 7972 if_printf(pi->ifp, "%s transceiver inserted.\n", 7973 mod_str[pi->mod_type]); 7974 } else { 7975 if_printf(pi->ifp, "transceiver (type %d) inserted.\n", 7976 pi->mod_type); 7977 } 7978 } 7979 7980 void 7981 t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason) 7982 { 7983 struct port_info *pi = sc->port[idx]; 7984 struct ifnet *ifp = pi->ifp; 7985 7986 if (link_stat) { 7987 pi->linkdnrc = -1; 7988 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 7989 if_link_state_change(ifp, LINK_STATE_UP); 7990 } else { 7991 if (reason >= 0) 7992 pi->linkdnrc = reason; 7993 if_link_state_change(ifp, LINK_STATE_DOWN); 7994 } 7995 } 7996 7997 void 7998 t4_iterate(void (*func)(struct adapter *, void *), void *arg) 7999 { 8000 struct adapter *sc; 8001 8002 sx_slock(&t4_list_lock); 8003 SLIST_FOREACH(sc, &t4_list, link) { 8004 /* 8005 * func should not make any assumptions about what state sc is 8006 * in - the only guarantee is that sc->sc_lock is a valid lock. 
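		 *
		 * A minimal usage sketch (the callback name is hypothetical,
		 * shown only to illustrate the expected signature):
		 *
		 *	static void
		 *	count_adapters(struct adapter *sc, void *arg)
		 *	{
		 *		(*(int *)arg)++;
		 *	}
		 *
		 *	int n = 0;
		 *	t4_iterate(count_adapters, &n);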
8007 */ 8008 func(sc, arg); 8009 } 8010 sx_sunlock(&t4_list_lock); 8011 } 8012 8013 static int 8014 t4_open(struct cdev *dev, int flags, int type, struct thread *td) 8015 { 8016 return (0); 8017 } 8018 8019 static int 8020 t4_close(struct cdev *dev, int flags, int type, struct thread *td) 8021 { 8022 return (0); 8023 } 8024 8025 static int 8026 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 8027 struct thread *td) 8028 { 8029 int rc; 8030 struct adapter *sc = dev->si_drv1; 8031 8032 rc = priv_check(td, PRIV_DRIVER); 8033 if (rc != 0) 8034 return (rc); 8035 8036 switch (cmd) { 8037 case CHELSIO_T4_GETREG: { 8038 struct t4_reg *edata = (struct t4_reg *)data; 8039 8040 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8041 return (EFAULT); 8042 8043 if (edata->size == 4) 8044 edata->val = t4_read_reg(sc, edata->addr); 8045 else if (edata->size == 8) 8046 edata->val = t4_read_reg64(sc, edata->addr); 8047 else 8048 return (EINVAL); 8049 8050 break; 8051 } 8052 case CHELSIO_T4_SETREG: { 8053 struct t4_reg *edata = (struct t4_reg *)data; 8054 8055 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8056 return (EFAULT); 8057 8058 if (edata->size == 4) { 8059 if (edata->val & 0xffffffff00000000) 8060 return (EINVAL); 8061 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 8062 } else if (edata->size == 8) 8063 t4_write_reg64(sc, edata->addr, edata->val); 8064 else 8065 return (EINVAL); 8066 break; 8067 } 8068 case CHELSIO_T4_REGDUMP: { 8069 struct t4_regdump *regs = (struct t4_regdump *)data; 8070 int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE; 8071 uint8_t *buf; 8072 8073 if (regs->len < reglen) { 8074 regs->len = reglen; /* hint to the caller */ 8075 return (ENOBUFS); 8076 } 8077 8078 regs->len = reglen; 8079 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 8080 t4_get_regs(sc, regs, buf); 8081 rc = copyout(buf, regs->data, reglen); 8082 free(buf, M_CXGBE); 8083 break; 8084 } 8085 case CHELSIO_T4_GET_FILTER_MODE: 8086 rc = get_filter_mode(sc, (uint32_t *)data); 8087 break; 8088 case CHELSIO_T4_SET_FILTER_MODE: 8089 rc = set_filter_mode(sc, *(uint32_t *)data); 8090 break; 8091 case CHELSIO_T4_GET_FILTER: 8092 rc = get_filter(sc, (struct t4_filter *)data); 8093 break; 8094 case CHELSIO_T4_SET_FILTER: 8095 rc = set_filter(sc, (struct t4_filter *)data); 8096 break; 8097 case CHELSIO_T4_DEL_FILTER: 8098 rc = del_filter(sc, (struct t4_filter *)data); 8099 break; 8100 case CHELSIO_T4_GET_SGE_CONTEXT: 8101 rc = get_sge_context(sc, (struct t4_sge_context *)data); 8102 break; 8103 case CHELSIO_T4_LOAD_FW: 8104 rc = load_fw(sc, (struct t4_data *)data); 8105 break; 8106 case CHELSIO_T4_GET_MEM: 8107 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 8108 break; 8109 case CHELSIO_T4_GET_I2C: 8110 rc = read_i2c(sc, (struct t4_i2c_data *)data); 8111 break; 8112 case CHELSIO_T4_CLEAR_STATS: { 8113 int i; 8114 u_int port_id = *(uint32_t *)data; 8115 struct port_info *pi; 8116 8117 if (port_id >= sc->params.nports) 8118 return (EINVAL); 8119 pi = sc->port[port_id]; 8120 8121 /* MAC stats */ 8122 t4_clr_port_stats(sc, pi->tx_chan); 8123 pi->tx_parse_error = 0; 8124 8125 if (pi->flags & PORT_INIT_DONE) { 8126 struct sge_rxq *rxq; 8127 struct sge_txq *txq; 8128 struct sge_wrq *wrq; 8129 8130 for_each_rxq(pi, i, rxq) { 8131 #if defined(INET) || defined(INET6) 8132 rxq->lro.lro_queued = 0; 8133 rxq->lro.lro_flushed = 0; 8134 #endif 8135 rxq->rxcsum = 0; 8136 rxq->vlan_extraction = 0; 8137 } 8138 8139 for_each_txq(pi, i, txq) { 8140 txq->txcsum = 0; 8141 
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;

		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts0_wrs = 0;
				txq->txpkts1_wrs = 0;
				txq->txpkts0_pkts = 0;
				txq->txpkts1_pkts = 0;
				mp_ring_reset_stats(txq->r);
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs_direct = 0;
				wrq->tx_wrs_copied = 0;
			}
#endif
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs_direct = 0;
			wrq->tx_wrs_copied = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}

#ifdef TCP_OFFLOAD
void
t4_iscsi_init(struct ifnet *ifp, unsigned int tag_mask,
    const unsigned int *pgsz_order)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;

	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
	    V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
	    V_HPZ3(pgsz_order[3]));
}

static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
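
/*
 * toe_capability() runs when the TOE capability is toggled on a port.
 * As the warning above indicates, the TOE module must be in place
 * first; the usual sequence from the shell is something like this
 * (illustrative; the flag name assumes ifconfig(8)'s "toe" capability):
 *
 *	kldload t4_tom
 *	ifconfig cxgbe0 toe
 */
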
/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					goto done;
			}

			rc = ui->activate(sc);
			if (rc == 0)
				ui->refcount++;
			goto done;
		}
	}
done:
	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc = EINVAL;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				ui->refcount--;
			goto done;
		}
	}
done:
	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}
#endif
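
/*
 * A ULD such as the TOE module hooks into the list above from its own
 * module event handler.  A hedged sketch, assuming a uld_info declared
 * with the uld_id/activate/deactivate members used above; the mymod_*
 * names are hypothetical:
 *
 *	static int
 *	mymod_activate(struct adapter *sc)
 *	{
 *		return (0);
 *	}
 *
 *	static int
 *	mymod_deactivate(struct adapter *sc)
 *	{
 *		return (0);
 *	}
 *
 *	static struct uld_info mymod_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = mymod_activate,
 *		.deactivate = mymod_deactivate,
 *	};
 *
 * and then t4_register_uld(&mymod_uld_info) on MOD_LOAD, with a matching
 * t4_unregister_uld(&mymod_uld_info) on MOD_UNLOAD.
 */
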
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	if (t4_nnmtxq10g < 1)
		t4_nnmtxq10g = min(nc, NNMTXQ_10G);

	if (t4_nnmtxq1g < 1)
		t4_nnmtxq1g = min(nc, NNMTXQ_1G);

	if (t4_nnmrxq10g < 1)
		t4_nnmrxq10g = min(nc, NNMRXQ_10G);

	if (t4_nnmrxq1g < 1)
		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
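
/*
 * The defaults above apply only when the administrator has not set the
 * corresponding tunable.  Assuming the hw.cxgbe.* names these variables
 * are registered under elsewhere in this file, an override belongs in
 * /boot/loader.conf, e.g.:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.qsize_rxq="1024"
 *
 * Note that qsize_rxq is clamped to a minimum of 128 and rounded up to
 * a multiple of 8 by the code above.
 */
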
static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);
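
/*
 * The declarations above tie the driver together: t4nex and t5nex attach
 * to the PCI bus with mod_event as the module event handler, and the
 * cxgbe/cxl port drivers attach to their respective nexus devices.
 * Assuming a stock build where this file is part of if_cxgbe.ko, the
 * whole stack is loaded with:
 *
 *	kldload if_cxgbe
 */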