/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
        DEVMETHOD(device_probe,         t4_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t4_driver = {
        "t4nex",
        t4_methods,
        sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
        DEVMETHOD(device_probe,         cxgbe_probe),
        DEVMETHOD(device_attach,        cxgbe_attach),
        DEVMETHOD(device_detach,        cxgbe_detach),
        { 0, 0 }
};
static driver_t cxgbe_driver = {
        "cxgbe",
        cxgbe_methods,
        sizeof(struct port_info)
};
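/*
 * Note: the sizeof() in each driver_t above is the softc size.  newbus
 * allocates and zeroes one softc of that size per device instance; the
 * attach routines below pick it up with device_get_softc().
 */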
/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
        DEVMETHOD(device_probe,         vcxgbe_probe),
        DEVMETHOD(device_attach,        vcxgbe_attach),
        DEVMETHOD(device_detach,        vcxgbe_detach),
        { 0, 0 }
};
static driver_t vcxgbe_driver = {
        "vcxgbe",
        vcxgbe_methods,
        sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
        .d_version = D_VERSION,
        .d_flags = 0,
        .d_open = t4_open,
        .d_close = t4_close,
        .d_ioctl = t4_ioctl,
        .d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
        DEVMETHOD(device_probe,         t5_probe),
        DEVMETHOD(device_attach,        t4_attach),
        DEVMETHOD(device_detach,        t4_detach),

        DEVMETHOD_END
};
static driver_t t5_driver = {
        "t5nex",
        t5_methods,
        sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
        "cxl",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
        "vcxl",
        vcxgbe_methods,
        sizeof(struct vi_info)
};

static struct cdevsw t5_cdevsw = {
        .d_version = D_VERSION,
        .d_flags = 0,
        .d_open = t4_open,
        .d_close = t4_close,
        .d_ioctl = t4_ioctl,
        .d_name = "t5nex",
};
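/*
 * The driver_t and cdevsw tables above are tied into newbus/devfs via
 * DRIVER_MODULE() later in this file.  A sketch of what that registration
 * looks like (illustrative, not the exact lines):
 *
 *      DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
 *      DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
 *
 * mod_event() (declared further down) handles module load/unload.
 */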
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);

#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);

#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);

#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF      "default"
#define FLASH_CF        "flash"
#define UWIRE_CF        "uwire"
#define FPGA_CF         "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
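/*
 * All of the tunables above are read from the kernel environment, so they can
 * be set in loader.conf(5) before the driver loads.  Hypothetical example
 * values:
 *
 *      hw.cxgbe.ntxq10g="8"            # 8 NIC txq's per 10G port
 *      hw.cxgbe.interrupt_types="2"    # allow MSI only (bit 1)
 *      hw.cxgbe.config_file="uwire"    # use the uwire config file
 *      hw.cxgbe.pause_settings="1"     # rx_pause only (bit 0)
 */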
/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_tlscaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.tlscaps_allowed", &t4_tlscaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
        FW_VI_FUNC_OFLD,
        FW_VI_FUNC_IWARP,
        FW_VI_FUNC_OPENISCSI,
        FW_VI_FUNC_OPENFCOE,
        FW_VI_FUNC_FOISCSI,
        FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
        uint16_t intr_type;     /* INTx, MSI, or MSI-X */
        uint16_t nirq;          /* Total # of vectors */
        uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
        uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */
        uint16_t ntxq10g;       /* # of NIC txq's for each 10G port */
        uint16_t nrxq10g;       /* # of NIC rxq's for each 10G port */
        uint16_t ntxq1g;        /* # of NIC txq's for each 1G port */
        uint16_t nrxq1g;        /* # of NIC rxq's for each 1G port */
        uint16_t rsrv_noflowq;  /* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
        uint16_t nofldtxq10g;   /* # of TOE txq's for each 10G port */
        uint16_t nofldrxq10g;   /* # of TOE rxq's for each 10G port */
        uint16_t nofldtxq1g;    /* # of TOE txq's for each 1G port */
        uint16_t nofldrxq1g;    /* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
        uint16_t nnmtxq10g;     /* # of netmap txq's for each 10G port */
        uint16_t nnmrxq10g;     /* # of netmap rxq's for each 10G port */
        uint16_t nnmtxq1g;      /* # of netmap txq's for each 1G port */
        uint16_t nnmrxq1g;      /* # of netmap rxq's for each 1G port */
#endif
};

struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;
};
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static void t4_sysctls(struct adapter *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
        {0xb000, "Chelsio Terminator 5 FPGA"},
        {0x5400, "Chelsio T580-dbg"},
        {0x5401, "Chelsio T520-CR"},            /* 2 x 10G */
        {0x5402, "Chelsio T522-CR"},            /* 2 x 10G, 2 x 1G */
        {0x5403, "Chelsio T540-CR"},            /* 4 x 10G */
        {0x5407, "Chelsio T520-SO"},            /* 2 x 10G, nomem */
        {0x5409, "Chelsio T520-BT"},            /* 2 x 10GBaseT */
        {0x540a, "Chelsio T504-BT"},            /* 4 x 1G */
        {0x540d, "Chelsio T580-CR"},            /* 2 x 40G */
        {0x540e, "Chelsio T540-LP-CR"},         /* 4 x 10G */
        {0x5410, "Chelsio T580-LP-CR"},         /* 2 x 40G */
        {0x5411, "Chelsio T520-LL-CR"},         /* 2 x 10G */
        {0x5412, "Chelsio T560-CR"},            /* 1 x 40G, 2 x 10G */
        {0x5414, "Chelsio T580-LP-SO-CR"},      /* 2 x 40G, nomem */
        {0x5415, "Chelsio T502-BT"},            /* 2 x 1G */
#ifdef notyet
        {0x5404, "Chelsio T520-BCH"},
        {0x5405, "Chelsio T540-BCH"},
        {0x5406, "Chelsio T540-CH"},
        {0x5408, "Chelsio T520-CX"},
        {0x540b, "Chelsio B520-SR"},
        {0x540c, "Chelsio B504-BT"},
        {0x540f, "Chelsio Amsterdam"},
        {0x5413, "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
        int i;
        uint16_t v = pci_get_vendor(dev);
        uint16_t d = pci_get_device(dev);
        uint8_t f = pci_get_function(dev);

        if (v != PCI_VENDOR_ID_CHELSIO)
                return (ENXIO);

        /* Attach only to PF0 of the FPGA */
        if (d == 0xa000 && f != 0)
                return (ENXIO);

        for (i = 0; i < nitems(t4_pciids); i++) {
                if (d == t4_pciids[i].device) {
                        device_set_desc(dev, t4_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}

static int
t5_probe(device_t dev)
{
        int i;
        uint16_t v = pci_get_vendor(dev);
        uint16_t d = pci_get_device(dev);
        uint8_t f = pci_get_function(dev);

        if (v != PCI_VENDOR_ID_CHELSIO)
                return (ENXIO);

        /* Attach only to PF0 of the FPGA */
        if (d == 0xb000 && f != 0)
                return (ENXIO);

        for (i = 0; i < nitems(t5_pciids); i++) {
                if (d == t5_pciids[i].device) {
                        device_set_desc(dev, t5_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
        device_t root_port;
        uint32_t v;

        /*
         * The T5 chips do not properly echo the No Snoop and Relaxed
         * Ordering attributes when replying to a TLP from a Root
         * Port.  As a workaround, find the parent Root Port and
         * disable No Snoop and Relaxed Ordering.  Note that this
         * affects all devices under this root port.
         */
        root_port = pci_find_pcie_root_port(dev);
        if (root_port == NULL) {
                device_printf(dev, "Unable to find parent root port\n");
                return;
        }

        v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
            PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
        if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
            0)
                device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
                    device_get_nameunit(root_port));
}

static int
t4_attach(device_t dev)
{
        struct adapter *sc;
        int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
        struct intrs_and_queues iaq;
        struct sge *s;
        uint8_t *buf;
#ifdef TCP_OFFLOAD
        int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
        int nm_rqidx, nm_tqidx;
#endif
        int num_vis;

        sc = device_get_softc(dev);
        sc->dev = dev;
        TUNABLE_INT_FETCH("hw.cxgbe.debug_flags", &sc->debug_flags);

        if ((pci_get_device(dev) & 0xff00) == 0x5400)
                t5_attribute_workaround(dev);
        pci_enable_busmaster(dev);
        if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
                uint32_t v;

                pci_set_max_read_req(dev, 4096);
                v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
                v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
                pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

                sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
        }

        sc->traceq = -1;
        mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
        snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
            device_get_nameunit(dev));

        snprintf(sc->lockname, sizeof(sc->lockname), "%s",
            device_get_nameunit(dev));
        mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
        sx_xlock(&t4_list_lock);
        SLIST_INSERT_HEAD(&t4_list, sc, link);
        sx_xunlock(&t4_list_lock);

        mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
        TAILQ_INIT(&sc->sfl);
        callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

        mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

        rc = map_bars_0_and_4(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * This is the real PF# to which we're attaching.  Works from within
         * PCI passthrough environments too, where pci_get_function() could
         * return a different PF# depending on the passthrough configuration.
         * We need to use the real PF# in all our communication with the
         * firmware.
         */
        sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
        sc->mbox = sc->pf;

        memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
        sc->an_handler = an_not_handled;
        for (i = 0; i < nitems(sc->cpl_handler); i++)
                sc->cpl_handler[i] = cpl_not_handled;
        for (i = 0; i < nitems(sc->fw_msg_handler); i++)
                sc->fw_msg_handler[i] = fw_msg_not_handled;
        t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
        t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
        t4_register_cpl_handler(sc, CPL_T5_TRACE_PKT, t5_trace_pkt);
        t4_init_sge_cpl_handlers(sc);

        /* Prepare the adapter for operation. */
        buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
        rc = -t4_prep_adapter(sc, buf);
        free(buf, M_CXGBE);
        if (rc != 0) {
                device_printf(dev, "failed to prepare adapter: %d.\n", rc);
                goto done;
        }

        /*
         * Do this really early, with the memory windows set up even before the
         * character device.  The userland tool's register i/o and mem read
         * will work even in "recovery mode".
         */
        setup_memwin(sc);
        if (t4_init_devlog_params(sc, 0) == 0)
                fixup_devlog_params(sc);
        sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
            device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
            device_get_nameunit(dev));
        if (sc->cdev == NULL)
                device_printf(dev, "failed to create nexus char device.\n");
        else
                sc->cdev->si_drv1 = sc;

        /* Go no further if recovery mode has been requested. */
        if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
                device_printf(dev, "recovery mode.\n");
                goto done;
        }

#if defined(__i386__)
        if ((cpu_feature & CPUID_CX8) == 0) {
                device_printf(dev, "64 bit atomics not available.\n");
                rc = ENOTSUP;
                goto done;
        }
#endif

        /* Prepare the firmware for operation */
        rc = prep_firmware(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = map_bar_2(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = t4_create_dma_tag(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * Number of VIs to create per-port.  The first VI is the "main"
         * regular VI for the port.  The second VI is used for netmap if
         * present, and any remaining VIs are used for additional virtual
         * interfaces.
         *
         * Limit the number of VIs per port to the number of available
         * MAC addresses per port.
         */
        if (t4_num_vis >= 1)
                num_vis = t4_num_vis;
        else
                num_vis = 1;
#ifdef DEV_NETMAP
        num_vis++;
#endif
        if (num_vis > nitems(vi_mac_funcs)) {
                num_vis = nitems(vi_mac_funcs);
                device_printf(dev, "Number of VIs limited to %d\n", num_vis);
        }
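        /*
         * Worked example of the clamp above (values are illustrative): with
         * hw.cxgbe.num_vis=1 and DEV_NETMAP compiled in, num_vis is 2 here
         * (main VI + netmap VI).  nitems(vi_mac_funcs) is 6, so at most 6 VIs
         * per port can get unique MAC addresses.
         */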
        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.  We also figure
         * out whether a port is 10G or 1G and use that information when
         * calculating how many interrupts to attempt to allocate.
         */
        n10g = n1g = 0;
        for_each_port(sc, i) {
                struct port_info *pi;
                struct vi_info *vi;

                pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                pi->port_id = i;
                pi->nvi = num_vis;
                pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
                    M_ZERO | M_WAITOK);

                /*
                 * Allocate the "main" VI and initialize parameters
                 * like mac addr.
                 */
                rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
                if (rc != 0) {
                        device_printf(dev,
                            "unable to initialize port %d: %d\n", i, rc);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
                pi->link_cfg.requested_fc |= t4_pause_settings;
                pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
                pi->link_cfg.fc |= t4_pause_settings;

                rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
                if (rc != 0) {
                        device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
                    device_get_nameunit(dev), i);
                mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
                sc->chan_map[pi->tx_chan] = i;

                if (is_10G_port(pi) || is_40G_port(pi)) {
                        n10g++;
                        for_each_vi(pi, j, vi) {
                                vi->tmr_idx = t4_tmr_idx_10g;
                                vi->pktc_idx = t4_pktc_idx_10g;
                        }
                } else {
                        n1g++;
                        for_each_vi(pi, j, vi) {
                                vi->tmr_idx = t4_tmr_idx_1g;
                                vi->pktc_idx = t4_pktc_idx_1g;
                        }
                }

                pi->linkdnrc = -1;

                for_each_vi(pi, j, vi) {
                        vi->qsize_rxq = t4_qsize_rxq;
                        vi->qsize_txq = t4_qsize_txq;
                        vi->pi = pi;
                }

                pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
                if (pi->dev == NULL) {
                        device_printf(dev,
                            "failed to add device for port %d.\n", i);
                        rc = ENXIO;
                        goto done;
                }
                pi->vi[0].dev = pi->dev;
                device_set_softc(pi->dev, pi);
        }

        /*
         * Interrupt type, # of interrupts, # of rx/tx queues, etc.
         */
#ifdef DEV_NETMAP
        num_vis--;
#endif
        rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;

        s = &sc->sge;
        s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
        s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
        if (num_vis > 1) {
                s->nrxq += (n10g + n1g) * (num_vis - 1);
                s->ntxq += (n10g + n1g) * (num_vis - 1);
        }
        s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
        s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
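        /*
         * Worked example of the sums above, before the offload/netmap
         * additions below (illustrative numbers, assuming iaq came back with
         * ntxq10g=16 and nrxq10g=8): two 10G ports and num_vis=1 give
         * s->ntxq = 32, s->nrxq = 16, s->niq = 17 (firmware event queue) and
         * s->neq = 32 + 16 + 2 + 1 = 51 (one ctrl queue per port + mgmt).
         */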
#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {
                s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
                s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
                if (num_vis > 1) {
                        s->nofldrxq += (n10g + n1g) * (num_vis - 1);
                        s->nofldtxq += (n10g + n1g) * (num_vis - 1);
                }
                s->neq += s->nofldtxq + s->nofldrxq;
                s->niq += s->nofldrxq;

                s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
                    M_CXGBE, M_ZERO | M_WAITOK);
                s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
                    M_CXGBE, M_ZERO | M_WAITOK);
        }
#endif
#ifdef DEV_NETMAP
        s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
        s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
        s->neq += s->nnmtxq + s->nnmrxq;
        s->niq += s->nnmrxq;

        s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
            M_CXGBE, M_ZERO | M_WAITOK);
        s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
            M_CXGBE, M_ZERO | M_WAITOK);
#endif

        s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_init_l2t(sc, M_WAITOK);

        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
        ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
        nm_rqidx = nm_tqidx = 0;
#endif
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];
                struct vi_info *vi;

                if (pi == NULL)
                        continue;

                for_each_vi(pi, j, vi) {
#ifdef DEV_NETMAP
                        if (j == 1) {
                                vi->flags |= VI_NETMAP | INTR_RXQ;
                                vi->first_rxq = nm_rqidx;
                                vi->first_txq = nm_tqidx;
                                if (is_10G_port(pi) || is_40G_port(pi)) {
                                        vi->nrxq = iaq.nnmrxq10g;
                                        vi->ntxq = iaq.nnmtxq10g;
                                } else {
                                        vi->nrxq = iaq.nnmrxq1g;
                                        vi->ntxq = iaq.nnmtxq1g;
                                }
                                nm_rqidx += vi->nrxq;
                                nm_tqidx += vi->ntxq;
                                continue;
                        }
#endif

                        vi->first_rxq = rqidx;
                        vi->first_txq = tqidx;
                        if (is_10G_port(pi) || is_40G_port(pi)) {
                                vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
                                vi->nrxq = j == 0 ? iaq.nrxq10g : 1;
                                vi->ntxq = j == 0 ? iaq.ntxq10g : 1;
                        } else {
                                vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
                                vi->nrxq = j == 0 ? iaq.nrxq1g : 1;
                                vi->ntxq = j == 0 ? iaq.ntxq1g : 1;
                        }

                        if (vi->ntxq > 1)
                                vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
                        else
                                vi->rsrv_noflowq = 0;

                        rqidx += vi->nrxq;
                        tqidx += vi->ntxq;

#ifdef TCP_OFFLOAD
                        if (!is_offload(sc))
                                continue;
                        vi->first_ofld_rxq = ofld_rqidx;
                        vi->first_ofld_txq = ofld_tqidx;
                        if (is_10G_port(pi) || is_40G_port(pi)) {
                                vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
                                vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1;
                                vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1;
                        } else {
                                vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
                                vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1;
                                vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1;
                        }
                        ofld_rqidx += vi->nofldrxq;
                        ofld_tqidx += vi->nofldtxq;
#endif
                }
        }

        rc = setup_intr_handlers(sc);
        if (rc != 0) {
                device_printf(dev,
                    "failed to setup interrupt handlers: %d\n", rc);
                goto done;
        }

        rc = bus_generic_attach(dev);
        if (rc != 0) {
                device_printf(dev,
                    "failed to attach all child ports: %d\n", rc);
                goto done;
        }

        device_printf(dev,
            "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
            sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
            sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
            (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
            sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

        t4_set_desc(sc);

done:
        if (rc != 0 && sc->cdev) {
                /* cdev was created and so cxgbetool works; recover that way. */
                device_printf(dev,
                    "error during attach, adapter is now in recovery mode.\n");
                rc = 0;
        }

        if (rc != 0)
                t4_detach(dev);
        else
                t4_sysctls(sc);

        return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        if (sc->flags & FULL_INIT_DONE)
                t4_intr_disable(sc);

        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        rc = bus_generic_detach(dev);
        if (rc) {
                device_printf(dev,
                    "failed to detach child devices: %d\n", rc);
                return (rc);
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                }
        }

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
        free(sc->sge.nm_rxq, M_CXGBE);
        free(sc->sge.nm_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        if (mtx_initialized(&sc->sc_lock)) {
                sx_xlock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                sx_xunlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        callout_drain(&sc->sfl_callout);
        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);
        if (mtx_initialized(&sc->ifp_lock))
                mtx_destroy(&sc->ifp_lock);
        if (mtx_initialized(&sc->reg_lock))
                mtx_destroy(&sc->reg_lock);

        for (i = 0; i < NUM_MEMWIN; i++) {
                struct memwin *mw = &sc->memwin[i];

                if (rw_initialized(&mw->mw_lock))
                        rw_destroy(&mw->mw_lock);
        }

        bzero(sc, sizeof(*sc));

        return (0);
}

static int
cxgbe_probe(device_t dev)
{
        char buf[128];
        struct port_info *pi = device_get_softc(dev);

        snprintf(buf, sizeof(buf), "port %d", pi->port_id);
        device_set_desc_copy(dev, buf);

        return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
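/*
 * The IFCAP_* bits in T4_CAP surface as ifconfig(8) options on each port's
 * ifnet; e.g. (illustrative) "ifconfig cxgbe0 -txcsum" clears IFCAP_TXCSUM,
 * which ends up in the SIOCSIFCAP case of cxgbe_ioctl() below.
 */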
static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
        struct ifnet *ifp;
        struct sbuf *sb;

        vi->xact_addr_filt = -1;
        callout_init(&vi->tick, 1);

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        vi->ifp = ifp;
        ifp->if_softc = vi;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;
        ifp->if_get_counter = cxgbe_get_counter;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        if (vi->nofldrxq != 0)
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
        ifp->if_hw_tsomaxsegsize = 65536;

        /* Initialize ifmedia for this VI */
        ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(vi->pi, &vi->media);

        vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
            EVENTHANDLER_PRI_ANY);

        ether_ifattach(ifp, vi->hw_addr);

        sb = sbuf_new_auto();
        sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
        if (ifp->if_capabilities & IFCAP_TOE)
                sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
                    vi->nofldtxq, vi->nofldrxq);
#endif
        sbuf_finish(sb);
        device_printf(dev, "%s\n", sbuf_data(sb));
        sbuf_delete(sb);

        vi_sysctls(vi);

        return (0);
}

static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct vi_info *vi;
        int i, rc;

        callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

        rc = cxgbe_vi_attach(dev, &pi->vi[0]);
        if (rc)
                return (rc);

        for_each_vi(pi, i, vi) {
                if (i == 0)
                        continue;
#ifdef DEV_NETMAP
                if (vi->flags & VI_NETMAP) {
                        /*
                         * media handled here to keep
                         * implementation private to this file
                         */
                        ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
                            cxgbe_media_status);
                        build_medialist(pi, &vi->media);
                        vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
                            "ncxgbe" : "ncxl", device_get_unit(dev));
                } else
#endif
                        vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
                            "vcxgbe" : "vcxl", -1);
                if (vi->dev == NULL) {
                        device_printf(dev, "failed to add VI %d\n", i);
                        continue;
                }
                device_set_softc(vi->dev, vi);
        }

        cxgbe_sysctls(pi);

        bus_generic_attach(dev);

        return (0);
}
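/*
 * Sketch of the resulting device hierarchy (illustrative, for a T5 with
 * hw.cxgbe.num_vis=2 and DEV_NETMAP): t5nex0 is the nexus, cxl0 the port's
 * main VI, ncxl0 the netmap VI added above, and vcxl0 the extra VI that
 * attaches through vcxgbe_attach() further down.
 */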
static void
cxgbe_vi_detach(struct vi_info *vi)
{
        struct ifnet *ifp = vi->ifp;

        ether_ifdetach(ifp);

        if (vi->vlan_c)
                EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(vi);
        callout_drain(&vi->tick);
        vi_full_uninit(vi);

        ifmedia_removeall(&vi->media);
        if_free(vi->ifp);
        vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        int rc;

        /* Detach the extra VIs first. */
        rc = bus_generic_detach(dev);
        if (rc)
                return (rc);
        device_delete_children(dev);

        doom_vi(sc, &pi->vi[0]);

        if (pi->flags & HAS_TRACEQ) {
                sc->traceq = -1;        /* cloner should not create ifnet */
                t4_tracer_port_detach(sc);
        }

        cxgbe_vi_detach(&pi->vi[0]);
        callout_drain(&pi->tick);

        end_synchronized_op(sc, 0);

        return (0);
}

static void
cxgbe_init(void *arg)
{
        struct vi_info *vi = arg;
        struct adapter *sc = vi->pi->adapter;

        if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
                return;
        cxgbe_init_synchronized(vi);
        end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
        int rc = 0, mtu, flags, can_sleep;
        struct vi_info *vi = ifp->if_softc;
        struct adapter *sc = vi->pi->adapter;
        struct ifreq *ifr = (struct ifreq *)data;
        uint32_t mask;

        switch (cmd) {
        case SIOCSIFMTU:
                mtu = ifr->ifr_mtu;
                if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
                        return (EINVAL);

                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
                if (rc)
                        return (rc);
                ifp->if_mtu = mtu;
                if (vi->flags & VI_INIT_DONE) {
                        t4_update_fl_bufsize(ifp);
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(ifp, XGMAC_MTU);
                }
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFFLAGS:
                can_sleep = 0;
redo_sifflags:
                rc = begin_synchronized_op(sc, vi,
                    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
                if (rc)
                        return (rc);

                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                flags = vi->if_flags;
                                if ((ifp->if_flags ^ flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        if (can_sleep == 1) {
                                                end_synchronized_op(sc, 0);
                                                can_sleep = 0;
                                                goto redo_sifflags;
                                        }
                                        rc = update_mac_settings(ifp,
                                            XGMAC_PROMISC | XGMAC_ALLMULTI);
                                }
                        } else {
                                if (can_sleep == 0) {
                                        end_synchronized_op(sc, LOCK_HELD);
                                        can_sleep = 1;
                                        goto redo_sifflags;
                                }
                                rc = cxgbe_init_synchronized(vi);
                        }
                        vi->if_flags = ifp->if_flags;
                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        if (can_sleep == 0) {
                                end_synchronized_op(sc, LOCK_HELD);
                                can_sleep = 1;
                                goto redo_sifflags;
                        }
                        rc = cxgbe_uninit_synchronized(vi);
                }
                end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
                break;
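        /*
         * Note on the redo_sifflags dance above: promisc/allmulti updates run
         * with the lock held (can_sleep == 0) while bringing the interface up
         * or down must be able to sleep, so the handler drops out of
         * begin_synchronized_op and re-enters it in the other mode whenever
         * it guessed wrong.
         */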
        case SIOCADDMULTI:
        case SIOCDELMULTI: /* these two are called with a mutex held :-( */
                rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
                if (rc)
                        return (rc);
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = update_mac_settings(ifp, XGMAC_MCADDRS);
                end_synchronized_op(sc, LOCK_HELD);
                break;

        case SIOCSIFCAP:
                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
                if (rc)
                        return (rc);

                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        if (IFCAP_TSO4 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                if_printf(ifp,
                                    "tso4 disabled due to -txcsum.\n");
                        }
                }
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
                        ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

                        if (IFCAP_TSO6 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO6;
                                if_printf(ifp,
                                    "tso6 disabled due to -txcsum6.\n");
                        }
                }
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

                /*
                 * Note that we leave CSUM_TSO alone (it is always set).  The
                 * kernel takes both IFCAP_TSOx and CSUM_TSO into account
                 * before sending a TSO request our way, so it's sufficient to
                 * toggle IFCAP_TSOx only.
                 */
                if (mask & IFCAP_TSO4) {
                        if (!(IFCAP_TSO4 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO4;
                }
                if (mask & IFCAP_TSO6) {
                        if (!(IFCAP_TSO6 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum6 first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO6;
                }
                if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
                        int i;
                        struct sge_rxq *rxq;

                        ifp->if_capenable ^= IFCAP_LRO;
                        for_each_rxq(vi, i, rxq) {
                                if (ifp->if_capenable & IFCAP_LRO)
                                        rxq->iq.flags |= IQ_LRO_ENABLED;
                                else
                                        rxq->iq.flags &= ~IQ_LRO_ENABLED;
                        }
#endif
                }
#ifdef TCP_OFFLOAD
                if (mask & IFCAP_TOE) {
                        int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

                        rc = toe_capability(vi, enable);
                        if (rc != 0)
                                goto fail;

                        ifp->if_capenable ^= mask;
                }
#endif
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(ifp, XGMAC_VLANEX);
                }
                if (mask & IFCAP_VLAN_MTU) {
                        ifp->if_capenable ^= IFCAP_VLAN_MTU;

                        /* Need to find out how to disable auto-mtu-inflation */
                }
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (mask & IFCAP_VLAN_HWCSUM)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
                VLAN_CAPABILITIES(ifp);
#endif
fail:
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
                break;
        case SIOCGI2C: {
                struct ifi2creq i2c;

                rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
                if (rc != 0)
                        break;
                if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
                        rc = EPERM;
                        break;
                }
                if (i2c.len > sizeof(i2c.data)) {
                        rc = EINVAL;
                        break;
                }
                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
                if (rc)
                        return (rc);
                rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
                    i2c.offset, i2c.len, &i2c.data[0]);
                end_synchronized_op(sc, 0);
                if (rc == 0)
                        rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
                break;
        }

        default:
                rc = ether_ioctl(ifp, cmd, data);
        }

        return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct vi_info *vi = ifp->if_softc;
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq;
        void *items[1];
        int rc;

        M_ASSERTPKTHDR(m);
        MPASS(m->m_nextpkt == NULL);    /* not quite ready for this yet */

        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        rc = parse_pkt(&m);
        if (__predict_false(rc != 0)) {
                MPASS(m == NULL);                       /* was freed already */
                atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
                return (rc);
        }

        /* Select a txq. */
        txq = &sc->sge.txq[vi->first_txq];
        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
                txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
                    vi->rsrv_noflowq);

        items[0] = m;
        rc = mp_ring_enqueue(txq->r, items, 1, 4096);
        if (__predict_false(rc != 0))
                m_freem(m);

        return (rc);
}
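/*
 * Example of the txq selection in cxgbe_transmit() (illustrative, assuming
 * vi->ntxq = 8 and vi->rsrv_noflowq = 1): packets with a flowid land on
 * txq[1..7] via flowid % 7 + 1, and txq[0] is reserved for traffic without a
 * flowid.
 */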
static void
cxgbe_qflush(struct ifnet *ifp)
{
        struct vi_info *vi = ifp->if_softc;
        struct sge_txq *txq;
        int i;

        /* queues do not exist if !VI_INIT_DONE. */
        if (vi->flags & VI_INIT_DONE) {
                for_each_txq(vi, i, txq) {
                        TXQ_LOCK(txq);
                        txq->eq.flags &= ~EQ_ENABLED;
                        TXQ_UNLOCK(txq);
                        while (!mp_ring_is_idle(txq->r)) {
                                mp_ring_check_drainage(txq->r, 0);
                                pause("qflush", 1);
                        }
                }
        }
        if_qflush(ifp);
}

static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
        struct vi_info *vi = ifp->if_softc;
        struct fw_vi_stats_vf *s = &vi->stats;

        vi_refresh_stats(vi->pi->adapter, vi);

        switch (c) {
        case IFCOUNTER_IPACKETS:
                return (s->rx_bcast_frames + s->rx_mcast_frames +
                    s->rx_ucast_frames);
        case IFCOUNTER_IERRORS:
                return (s->rx_err_frames);
        case IFCOUNTER_OPACKETS:
                return (s->tx_bcast_frames + s->tx_mcast_frames +
                    s->tx_ucast_frames + s->tx_offload_frames);
        case IFCOUNTER_OERRORS:
                return (s->tx_drop_frames);
        case IFCOUNTER_IBYTES:
                return (s->rx_bcast_bytes + s->rx_mcast_bytes +
                    s->rx_ucast_bytes);
        case IFCOUNTER_OBYTES:
                return (s->tx_bcast_bytes + s->tx_mcast_bytes +
                    s->tx_ucast_bytes + s->tx_offload_bytes);
        case IFCOUNTER_IMCASTS:
                return (s->rx_mcast_frames);
        case IFCOUNTER_OMCASTS:
                return (s->tx_mcast_frames);
        case IFCOUNTER_OQDROPS: {
                uint64_t drops;

                drops = 0;
                if ((vi->flags & (VI_INIT_DONE | VI_NETMAP)) == VI_INIT_DONE) {
                        int i;
                        struct sge_txq *txq;

                        for_each_txq(vi, i, txq)
                                drops += counter_u64_fetch(txq->r->drops);
                }

                return (drops);
        }
        default:
                return (if_get_counter_default(ifp, c));
        }
}

uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
        struct vi_info *vi = ifp->if_softc;
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct port_stats *s = &pi->stats;

        if (pi->nvi > 1)
                return (vi_get_counter(ifp, c));

        cxgbe_refresh_stats(sc, pi);

        switch (c) {
        case IFCOUNTER_IPACKETS:
                return (s->rx_frames);
        case IFCOUNTER_IERRORS:
                return (s->rx_jabber + s->rx_runt + s->rx_too_long +
                    s->rx_fcs_err + s->rx_len_err);
        case IFCOUNTER_OPACKETS:
                return (s->tx_frames);
        case IFCOUNTER_OERRORS:
                return (s->tx_error_frames);
        case IFCOUNTER_IBYTES:
                return (s->rx_octets);
        case IFCOUNTER_OBYTES:
                return (s->tx_octets);
        case IFCOUNTER_IMCASTS:
                return (s->rx_mcast_frames);
        case IFCOUNTER_OMCASTS:
                return (s->tx_mcast_frames);
        case IFCOUNTER_IQDROPS:
                return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
                    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
                    s->rx_trunc3 + pi->tnl_cong_drops);
        case IFCOUNTER_OQDROPS: {
                uint64_t drops;

                drops = s->tx_drop;
                if (vi->flags & VI_INIT_DONE) {
                        int i;
                        struct sge_txq *txq;

                        for_each_txq(vi, i, txq)
                                drops += counter_u64_fetch(txq->r->drops);
                }

                return (drops);
        }
        default:
                return (if_get_counter_default(ifp, c));
        }
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
        struct vi_info *vi = ifp->if_softc;

        device_printf(vi->dev, "%s unimplemented.\n", __func__);

        return (EOPNOTSUPP);
}
static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct vi_info *vi = ifp->if_softc;
        struct port_info *pi = vi->pi;
        struct ifmedia_entry *cur;
        int speed = pi->link_cfg.speed;

        cur = vi->media.ifm_cur;

        ifmr->ifm_status = IFM_AVALID;
        if (!pi->link_cfg.link_ok)
                return;

        ifmr->ifm_status |= IFM_ACTIVE;

        /* active and current will differ iff current media is autoselect. */
        if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
                return;

        ifmr->ifm_active = IFM_ETHER | IFM_FDX;
        if (speed == 10000)
                ifmr->ifm_active |= IFM_10G_T;
        else if (speed == 1000)
                ifmr->ifm_active |= IFM_1000_T;
        else if (speed == 100)
                ifmr->ifm_active |= IFM_100_TX;
        else if (speed == 10)
                ifmr->ifm_active |= IFM_10_T;
        else
                KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
                    speed));
}

static int
vcxgbe_probe(device_t dev)
{
        char buf[128];
        struct vi_info *vi = device_get_softc(dev);

        snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
            vi - vi->pi->vi);
        device_set_desc_copy(dev, buf);

        return (BUS_PROBE_DEFAULT);
}

static int
vcxgbe_attach(device_t dev)
{
        struct vi_info *vi;
        struct port_info *pi;
        struct adapter *sc;
        int func, index, rc;
        u32 param, val;

        vi = device_get_softc(dev);
        pi = vi->pi;
        sc = pi->adapter;

        index = vi - pi->vi;
        KASSERT(index < nitems(vi_mac_funcs),
            ("%s: VI %s doesn't have a MAC func", __func__,
            device_get_nameunit(dev)));
        func = vi_mac_funcs[index];
        rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
            vi->hw_addr, &vi->rss_size, func, 0);
        if (rc < 0) {
                device_printf(dev, "Failed to allocate virtual interface "
                    "for port %d: %d\n", pi->port_id, -rc);
                return (-rc);
        }
        vi->viid = rc;

        param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
            V_FW_PARAMS_PARAM_YZ(vi->viid);
        rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
        if (rc)
                vi->rss_base = 0xffff;
        else {
                /* MPASS((val >> 16) == rss_size); */
                vi->rss_base = val & 0xffff;
        }

        rc = cxgbe_vi_attach(dev, vi);
        if (rc) {
                t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
                return (rc);
        }
        return (0);
}

static int
vcxgbe_detach(device_t dev)
{
        struct vi_info *vi;
        struct adapter *sc;

        vi = device_get_softc(dev);
        sc = vi->pi->adapter;

        doom_vi(sc, vi);

        cxgbe_vi_detach(vi);
        t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);

        end_synchronized_op(sc, 0);

        return (0);
}

void
t4_fatal_err(struct adapter *sc)
{
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}
SYS_RES_MEMORY, 1933 &sc->msix_rid, RF_ACTIVE); 1934 if (sc->msix_res == NULL) { 1935 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1936 return (ENXIO); 1937 } 1938 1939 return (0); 1940 } 1941 1942 static int 1943 map_bar_2(struct adapter *sc) 1944 { 1945 1946 /* 1947 * T4: only iWARP driver uses the userspace doorbells. There is no need 1948 * to map it if RDMA is disabled. 1949 */ 1950 if (is_t4(sc) && sc->rdmacaps == 0) 1951 return (0); 1952 1953 sc->udbs_rid = PCIR_BAR(2); 1954 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1955 &sc->udbs_rid, RF_ACTIVE); 1956 if (sc->udbs_res == NULL) { 1957 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 1958 return (ENXIO); 1959 } 1960 sc->udbs_base = rman_get_virtual(sc->udbs_res); 1961 1962 if (is_t5(sc)) { 1963 setbit(&sc->doorbells, DOORBELL_UDB); 1964 #if defined(__i386__) || defined(__amd64__) 1965 if (t5_write_combine) { 1966 int rc; 1967 1968 /* 1969 * Enable write combining on BAR2. This is the 1970 * userspace doorbell BAR and is split into 128B 1971 * (UDBS_SEG_SIZE) doorbell regions, each associated 1972 * with an egress queue. The first 64B has the doorbell 1973 * and the second 64B can be used to submit a tx work 1974 * request with an implicit doorbell. 1975 */ 1976 1977 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 1978 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 1979 if (rc == 0) { 1980 clrbit(&sc->doorbells, DOORBELL_UDB); 1981 setbit(&sc->doorbells, DOORBELL_WCWR); 1982 setbit(&sc->doorbells, DOORBELL_UDBWC); 1983 } else { 1984 device_printf(sc->dev, 1985 "couldn't enable write combining: %d\n", 1986 rc); 1987 } 1988 1989 t4_write_reg(sc, A_SGE_STAT_CFG, 1990 V_STATSOURCE_T5(7) | V_STATMODE(0)); 1991 } 1992 #endif 1993 } 1994 1995 return (0); 1996 } 1997 1998 struct memwin_init { 1999 uint32_t base; 2000 uint32_t aperture; 2001 }; 2002 2003 static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2004 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2005 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2006 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2007 }; 2008 2009 static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2010 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2011 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2012 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2013 }; 2014 2015 static void 2016 setup_memwin(struct adapter *sc) 2017 { 2018 const struct memwin_init *mw_init; 2019 struct memwin *mw; 2020 int i; 2021 uint32_t bar0; 2022 2023 if (is_t4(sc)) { 2024 /* 2025 * Read low 32b of bar0 indirectly via the hardware backdoor 2026 * mechanism. Works from within PCI passthrough environments 2027 * too, where rman_get_start() can return a different value. We 2028 * need to program the T4 memory window decoders with the actual 2029 * addresses that will be coming across the PCIe link. 
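* (For example, under PCI passthrough the value rman_get_start() returns in the guest can differ from the address the device actually decodes; the backdoor config read returns the latter, so that is what gets programmed below.)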
2030 */ 2031 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2032 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2033 2034 mw_init = &t4_memwin[0]; 2035 } else { 2036 /* T5+ use the relative offset inside the PCIe BAR */ 2037 bar0 = 0; 2038 2039 mw_init = &t5_memwin[0]; 2040 } 2041 2042 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2043 rw_init(&mw->mw_lock, "memory window access"); 2044 mw->mw_base = mw_init->base; 2045 mw->mw_aperture = mw_init->aperture; 2046 mw->mw_curpos = 0; 2047 t4_write_reg(sc, 2048 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2049 (mw->mw_base + bar0) | V_BIR(0) | 2050 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2051 rw_wlock(&mw->mw_lock); 2052 position_memwin(sc, i, 0); 2053 rw_wunlock(&mw->mw_lock); 2054 } 2055 2056 /* flush */ 2057 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2058 } 2059 2060 /* 2061 * Positions the memory window at the given address in the card's address space. 2062 * There are some alignment requirements and the actual position may be at an 2063 * address prior to the requested address. mw->mw_curpos always has the actual 2064 * position of the window. 2065 */ 2066 static void 2067 position_memwin(struct adapter *sc, int idx, uint32_t addr) 2068 { 2069 struct memwin *mw; 2070 uint32_t pf; 2071 uint32_t reg; 2072 2073 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2074 mw = &sc->memwin[idx]; 2075 rw_assert(&mw->mw_lock, RA_WLOCKED); 2076 2077 if (is_t4(sc)) { 2078 pf = 0; 2079 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2080 } else { 2081 pf = V_PFNUM(sc->pf); 2082 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2083 } 2084 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2085 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2086 t4_read_reg(sc, reg); /* flush */ 2087 } 2088 2089 static int 2090 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2091 int len, int rw) 2092 { 2093 struct memwin *mw; 2094 uint32_t mw_end, v; 2095 2096 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2097 2098 /* Memory can only be accessed in naturally aligned 4 byte units */ 2099 if (addr & 3 || len & 3 || len <= 0) 2100 return (EINVAL); 2101 2102 mw = &sc->memwin[idx]; 2103 while (len > 0) { 2104 rw_rlock(&mw->mw_lock); 2105 mw_end = mw->mw_curpos + mw->mw_aperture; 2106 if (addr >= mw_end || addr < mw->mw_curpos) { 2107 /* Will need to reposition the window */ 2108 if (!rw_try_upgrade(&mw->mw_lock)) { 2109 rw_runlock(&mw->mw_lock); 2110 rw_wlock(&mw->mw_lock); 2111 } 2112 rw_assert(&mw->mw_lock, RA_WLOCKED); 2113 position_memwin(sc, idx, addr); 2114 rw_downgrade(&mw->mw_lock); 2115 mw_end = mw->mw_curpos + mw->mw_aperture; 2116 } 2117 rw_assert(&mw->mw_lock, RA_RLOCKED); 2118 while (addr < mw_end && len > 0) { 2119 if (rw == 0) { 2120 v = t4_read_reg(sc, mw->mw_base + addr - 2121 mw->mw_curpos); 2122 *val++ = le32toh(v); 2123 } else { 2124 v = *val++; 2125 t4_write_reg(sc, mw->mw_base + addr - 2126 mw->mw_curpos, htole32(v)); 2127 } 2128 addr += 4; 2129 len -= 4; 2130 } 2131 rw_runlock(&mw->mw_lock); 2132 } 2133 2134 return (0); 2135 } 2136 2137 static inline int 2138 read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2139 int len) 2140 { 2141 2142 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2143 } 2144 2145 static inline int 2146 write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2147 const uint32_t *val, int len) 2148 { 2149 2150 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2151 } 2152 2153 static int 2154
t4_range_cmp(const void *a, const void *b) 2155 { 2156 return ((const struct t4_range *)a)->start - 2157 ((const struct t4_range *)b)->start; 2158 } 2159 2160 /* 2161 * Verify that the memory range specified by the addr/len pair is valid within 2162 * the card's address space. 2163 */ 2164 static int 2165 validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2166 { 2167 struct t4_range mem_ranges[4], *r, *next; 2168 uint32_t em, addr_len; 2169 int i, n, remaining; 2170 2171 /* Memory can only be accessed in naturally aligned 4 byte units */ 2172 if (addr & 3 || len & 3 || len <= 0) 2173 return (EINVAL); 2174 2175 /* Enabled memories */ 2176 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2177 2178 r = &mem_ranges[0]; 2179 n = 0; 2180 bzero(r, sizeof(mem_ranges)); 2181 if (em & F_EDRAM0_ENABLE) { 2182 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2183 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2184 if (r->size > 0) { 2185 r->start = G_EDRAM0_BASE(addr_len) << 20; 2186 if (addr >= r->start && 2187 addr + len <= r->start + r->size) 2188 return (0); 2189 r++; 2190 n++; 2191 } 2192 } 2193 if (em & F_EDRAM1_ENABLE) { 2194 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2195 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2196 if (r->size > 0) { 2197 r->start = G_EDRAM1_BASE(addr_len) << 20; 2198 if (addr >= r->start && 2199 addr + len <= r->start + r->size) 2200 return (0); 2201 r++; 2202 n++; 2203 } 2204 } 2205 if (em & F_EXT_MEM_ENABLE) { 2206 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2207 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2208 if (r->size > 0) { 2209 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2210 if (addr >= r->start && 2211 addr + len <= r->start + r->size) 2212 return (0); 2213 r++; 2214 n++; 2215 } 2216 } 2217 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2218 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2219 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2220 if (r->size > 0) { 2221 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2222 if (addr >= r->start && 2223 addr + len <= r->start + r->size) 2224 return (0); 2225 r++; 2226 n++; 2227 } 2228 } 2229 MPASS(n <= nitems(mem_ranges)); 2230 2231 if (n > 1) { 2232 /* Sort and merge the ranges. */ 2233 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2234 2235 /* Start from index 0 and examine the next n - 1 entries. */ 2236 r = &mem_ranges[0]; 2237 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2238 2239 MPASS(r->size > 0); /* r is a valid entry. */ 2240 next = r + 1; 2241 MPASS(next->size > 0); /* and so is the next one. */ 2242 2243 while (r->start + r->size >= next->start) { 2244 /* Merge the next one into the current entry. */ 2245 r->size = max(r->start + r->size, 2246 next->start + next->size) - r->start; 2247 n--; /* One fewer entry in total. */ 2248 if (--remaining == 0) 2249 goto done; /* short circuit */ 2250 next++; 2251 } 2252 if (next != r + 1) { 2253 /* 2254 * Some entries were merged into r and next 2255 * points to the first valid entry that couldn't 2256 * be merged. 2257 */ 2258 MPASS(next->size > 0); /* must be valid */ 2259 memcpy(r + 1, next, remaining * sizeof(*r)); 2260 #ifdef INVARIANTS 2261 /* 2262 * This is so that the foo->size assertion in the 2263 * next iteration of the loop does the right 2264 * thing for entries that were pulled up and are 2265 * no longer valid. 2266 */ 2267 MPASS(n < nitems(mem_ranges)); 2268 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2269 sizeof(struct t4_range)); 2270 #endif 2271 } 2272 } 2273 done: 2274 /* Done merging the ranges.
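* What remains is a sorted list of non-overlapping ranges, so the single linear scan below is enough to decide whether [addr, addr + len) lies entirely within one of them.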
*/ 2275 MPASS(n > 0); 2276 r = &mem_ranges[0]; 2277 for (i = 0; i < n; i++, r++) { 2278 if (addr >= r->start && 2279 addr + len <= r->start + r->size) 2280 return (0); 2281 } 2282 } 2283 2284 return (EFAULT); 2285 } 2286 2287 static int 2288 fwmtype_to_hwmtype(int mtype) 2289 { 2290 2291 switch (mtype) { 2292 case FW_MEMTYPE_EDC0: 2293 return (MEM_EDC0); 2294 case FW_MEMTYPE_EDC1: 2295 return (MEM_EDC1); 2296 case FW_MEMTYPE_EXTMEM: 2297 return (MEM_MC0); 2298 case FW_MEMTYPE_EXTMEM1: 2299 return (MEM_MC1); 2300 default: 2301 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2302 } 2303 } 2304 2305 /* 2306 * Verify that the memory range specified by the memtype/offset/len pair is 2307 * valid and lies entirely within the memtype specified. The global address of 2308 * the start of the range is returned in addr. 2309 */ 2310 static int 2311 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2312 uint32_t *addr) 2313 { 2314 uint32_t em, addr_len, maddr; 2315 2316 /* Memory can only be accessed in naturally aligned 4 byte units */ 2317 if (off & 3 || len & 3 || len == 0) 2318 return (EINVAL); 2319 2320 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2321 switch (fwmtype_to_hwmtype(mtype)) { 2322 case MEM_EDC0: 2323 if (!(em & F_EDRAM0_ENABLE)) 2324 return (EINVAL); 2325 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2326 maddr = G_EDRAM0_BASE(addr_len) << 20; 2327 break; 2328 case MEM_EDC1: 2329 if (!(em & F_EDRAM1_ENABLE)) 2330 return (EINVAL); 2331 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2332 maddr = G_EDRAM1_BASE(addr_len) << 20; 2333 break; 2334 case MEM_MC: 2335 if (!(em & F_EXT_MEM_ENABLE)) 2336 return (EINVAL); 2337 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2338 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2339 break; 2340 case MEM_MC1: 2341 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2342 return (EINVAL); 2343 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2344 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2345 break; 2346 default: 2347 return (EINVAL); 2348 } 2349 2350 *addr = maddr + off; /* global address */ 2351 return (validate_mem_range(sc, *addr, len)); 2352 } 2353 2354 static int 2355 fixup_devlog_params(struct adapter *sc) 2356 { 2357 struct devlog_params *dparams = &sc->params.devlog; 2358 int rc; 2359 2360 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2361 dparams->size, &dparams->addr); 2362 2363 return (rc); 2364 } 2365 2366 static int 2367 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2368 struct intrs_and_queues *iaq) 2369 { 2370 int rc, itype, navail, nrxq10g, nrxq1g, n; 2371 int nofldrxq10g = 0, nofldrxq1g = 0; 2372 int nnmrxq10g = 0, nnmrxq1g = 0; 2373 2374 bzero(iaq, sizeof(*iaq)); 2375 2376 iaq->ntxq10g = t4_ntxq10g; 2377 iaq->ntxq1g = t4_ntxq1g; 2378 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2379 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2380 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2381 #ifdef TCP_OFFLOAD 2382 if (is_offload(sc)) { 2383 iaq->nofldtxq10g = t4_nofldtxq10g; 2384 iaq->nofldtxq1g = t4_nofldtxq1g; 2385 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2386 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2387 } 2388 #endif 2389 #ifdef DEV_NETMAP 2390 iaq->nnmtxq10g = t4_nnmtxq10g; 2391 iaq->nnmtxq1g = t4_nnmtxq1g; 2392 iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g; 2393 iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g; 2394 #endif 2395 2396 for (itype = INTR_MSIX; itype; itype >>= 1) { 2397 2398 if ((itype & t4_intr_types) == 0) 2399 continue; /* not allowed */ 2400 2401 if (itype == INTR_MSIX) 2402 navail 
= pci_msix_count(sc->dev); 2403 else if (itype == INTR_MSI) 2404 navail = pci_msi_count(sc->dev); 2405 else 2406 navail = 1; 2407 restart: 2408 if (navail == 0) 2409 continue; 2410 2411 iaq->intr_type = itype; 2412 iaq->intr_flags_10g = 0; 2413 iaq->intr_flags_1g = 0; 2414 2415 /* 2416 * Best option: an interrupt vector for errors, one for the 2417 * firmware event queue, and one for every rxq (NIC, TOE, and 2418 * netmap). 2419 */ 2420 iaq->nirq = T4_EXTRA_INTR; 2421 iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g); 2422 iaq->nirq += n10g * 2 * (num_vis - 1); 2423 iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g); 2424 iaq->nirq += n1g * 2 * (num_vis - 1); 2425 if (iaq->nirq <= navail && 2426 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2427 iaq->intr_flags_10g = INTR_ALL; 2428 iaq->intr_flags_1g = INTR_ALL; 2429 goto allocate; 2430 } 2431 2432 /* 2433 * Second best option: a vector for errors, one for the firmware 2434 * event queue, and vectors for either all the NIC rx queues or 2435 * all the TOE rx queues. The queues that don't get vectors 2436 * will forward their interrupts to those that do. 2437 * 2438 * Note: netmap rx queues cannot be created early and so they 2439 * can't be setup to receive forwarded interrupts for others. 2440 */ 2441 iaq->nirq = T4_EXTRA_INTR; 2442 if (nrxq10g >= nofldrxq10g) { 2443 iaq->intr_flags_10g = INTR_RXQ; 2444 iaq->nirq += n10g * nrxq10g; 2445 iaq->nirq += n10g * (num_vis - 1); 2446 #ifdef DEV_NETMAP 2447 iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g); 2448 #endif 2449 } else { 2450 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2451 iaq->nirq += n10g * nofldrxq10g; 2452 #ifdef DEV_NETMAP 2453 iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g); 2454 #endif 2455 } 2456 if (nrxq1g >= nofldrxq1g) { 2457 iaq->intr_flags_1g = INTR_RXQ; 2458 iaq->nirq += n1g * nrxq1g; 2459 iaq->nirq += n1g * (num_vis - 1); 2460 #ifdef DEV_NETMAP 2461 iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g); 2462 #endif 2463 } else { 2464 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2465 iaq->nirq += n1g * nofldrxq1g; 2466 #ifdef DEV_NETMAP 2467 iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g); 2468 #endif 2469 } 2470 if (iaq->nirq <= navail && 2471 (itype != INTR_MSI || powerof2(iaq->nirq))) 2472 goto allocate; 2473 2474 /* 2475 * Next best option: an interrupt vector for errors, one for the 2476 * firmware event queue, and at least one per VI. At this 2477 * point we know we'll have to downsize nrxq and/or nofldrxq 2478 * and/or nnmrxq to fit what's available to us. 2479 */ 2480 iaq->nirq = T4_EXTRA_INTR; 2481 iaq->nirq += (n10g + n1g) * num_vis; 2482 if (iaq->nirq <= navail) { 2483 int leftover = navail - iaq->nirq; 2484 2485 if (n10g > 0) { 2486 int target = max(nrxq10g, nofldrxq10g); 2487 2488 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2489 INTR_RXQ : INTR_OFLD_RXQ; 2490 2491 n = 1; 2492 while (n < target && leftover >= n10g) { 2493 leftover -= n10g; 2494 iaq->nirq += n10g; 2495 n++; 2496 } 2497 iaq->nrxq10g = min(n, nrxq10g); 2498 #ifdef TCP_OFFLOAD 2499 iaq->nofldrxq10g = min(n, nofldrxq10g); 2500 #endif 2501 #ifdef DEV_NETMAP 2502 iaq->nnmrxq10g = min(n, nnmrxq10g); 2503 #endif 2504 } 2505 2506 if (n1g > 0) { 2507 int target = max(nrxq1g, nofldrxq1g); 2508 2509 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2510 INTR_RXQ : INTR_OFLD_RXQ; 2511 2512 n = 1; 2513 while (n < target && leftover >= n1g) { 2514 leftover -= n1g; 2515 iaq->nirq += n1g; 2516 n++; 2517 } 2518 iaq->nrxq1g = min(n, nrxq1g); 2519 #ifdef TCP_OFFLOAD 2520 iaq->nofldrxq1g = min(n, nofldrxq1g); 2521 #endif 2522 #ifdef DEV_NETMAP 2523 iaq->nnmrxq1g = min(n, nnmrxq1g); 2524 #endif 2525 } 2526 2527 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2528 goto allocate; 2529 } 2530 2531 /* 2532 * Least desirable option: one interrupt vector for everything. 2533 */ 2534 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2535 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2536 #ifdef TCP_OFFLOAD 2537 if (is_offload(sc)) 2538 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2539 #endif 2540 #ifdef DEV_NETMAP 2541 iaq->nnmrxq10g = iaq->nnmrxq1g = 1; 2542 #endif 2543 2544 allocate: 2545 navail = iaq->nirq; 2546 rc = 0; 2547 if (itype == INTR_MSIX) 2548 rc = pci_alloc_msix(sc->dev, &navail); 2549 else if (itype == INTR_MSI) 2550 rc = pci_alloc_msi(sc->dev, &navail); 2551 2552 if (rc == 0) { 2553 if (navail == iaq->nirq) 2554 return (0); 2555 2556 /* 2557 * Didn't get the number requested. Use whatever number 2558 * the kernel is willing to allocate (it's in navail). 2559 */ 2560 device_printf(sc->dev, "fewer vectors than requested, " 2561 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2562 itype, iaq->nirq, navail); 2563 pci_release_msi(sc->dev); 2564 goto restart; 2565 } 2566 2567 device_printf(sc->dev, 2568 "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n", 2569 itype, rc, iaq->nirq, navail); 2570 } 2571 2572 device_printf(sc->dev, 2573 "failed to find a usable interrupt type. " 2574 "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types, 2575 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2576 2577 return (ENXIO); 2578 } 2579 2580 #define FW_VERSION(chip) ( \ 2581 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2582 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2583 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2584 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2585 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2586 2587 struct fw_info { 2588 uint8_t chip; 2589 char *kld_name; 2590 char *fw_mod_name; 2591 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2592 } fw_info[] = { 2593 { 2594 .chip = CHELSIO_T4, 2595 .kld_name = "t4fw_cfg", 2596 .fw_mod_name = "t4fw", 2597 .fw_hdr = { 2598 .chip = FW_HDR_CHIP_T4, 2599 .fw_ver = htobe32_const(FW_VERSION(T4)), 2600 .intfver_nic = FW_INTFVER(T4, NIC), 2601 .intfver_vnic = FW_INTFVER(T4, VNIC), 2602 .intfver_ofld = FW_INTFVER(T4, OFLD), 2603 .intfver_ri = FW_INTFVER(T4, RI), 2604 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2605 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2606 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2607 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2608 }, 2609 }, { 2610 .chip = CHELSIO_T5, 2611 .kld_name = "t5fw_cfg", 2612 .fw_mod_name = "t5fw", 2613 .fw_hdr = { 2614 .chip = FW_HDR_CHIP_T5, 2615 .fw_ver = htobe32_const(FW_VERSION(T5)), 2616 .intfver_nic = FW_INTFVER(T5, NIC), 2617 .intfver_vnic = FW_INTFVER(T5, VNIC), 2618 .intfver_ofld = FW_INTFVER(T5, OFLD), 2619 .intfver_ri = FW_INTFVER(T5, RI), 2620 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2621 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2622 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2623 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2624 }, 2625 } 2626 }; 2627 2628 static struct fw_info * 2629 find_fw_info(int chip) 2630 { 2631 int i; 2632 2633 for (i = 0; i < nitems(fw_info); i++) { 2634 if
(fw_info[i].chip == chip) 2635 return (&fw_info[i]); 2636 } 2637 return (NULL); 2638 } 2639 2640 /* 2641 * Is the given firmware API compatible with the one the driver was compiled 2642 * with? 2643 */ 2644 static int 2645 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2646 { 2647 2648 /* short circuit if it's the exact same firmware version */ 2649 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2650 return (1); 2651 2652 /* 2653 * XXX: Is this too conservative? Perhaps I should limit this to the 2654 * features that are supported in the driver. 2655 */ 2656 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2657 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2658 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2659 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2660 return (1); 2661 #undef SAME_INTF 2662 2663 return (0); 2664 } 2665 2666 /* 2667 * The firmware in the KLD is usable, but should it be installed? This routine 2668 * explains itself in detail if it indicates the KLD firmware should be 2669 * installed. 2670 */ 2671 static int 2672 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2673 { 2674 const char *reason; 2675 2676 if (!card_fw_usable) { 2677 reason = "incompatible or unusable"; 2678 goto install; 2679 } 2680 2681 if (k > c) { 2682 reason = "older than the version bundled with this driver"; 2683 goto install; 2684 } 2685 2686 if (t4_fw_install == 2 && k != c) { 2687 reason = "different than the version bundled with this driver"; 2688 goto install; 2689 } 2690 2691 return (0); 2692 2693 install: 2694 if (t4_fw_install == 0) { 2695 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2696 "but the driver is prohibited from installing a different " 2697 "firmware on the card.\n", 2698 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2699 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2700 2701 return (0); 2702 } 2703 2704 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2705 "installing firmware %u.%u.%u.%u on card.\n", 2706 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2707 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2708 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2709 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2710 2711 return (1); 2712 } 2713 /* 2714 * Establish contact with the firmware and determine if we are the master driver 2715 * or not, and whether we are responsible for chip initialization. 2716 */ 2717 static int 2718 prep_firmware(struct adapter *sc) 2719 { 2720 const struct firmware *fw = NULL, *default_cfg; 2721 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2722 enum dev_state state; 2723 struct fw_info *fw_info; 2724 struct fw_hdr *card_fw; /* fw on the card */ 2725 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2726 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2727 against */ 2728 2729 /* Contact firmware. */ 2730 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2731 if (rc < 0 || state == DEV_STATE_ERR) { 2732 rc = -rc; 2733 device_printf(sc->dev, 2734 "failed to connect to the firmware: %d, %d.\n", rc, state); 2735 return (rc); 2736 } 2737 pf = rc; 2738 if (pf == sc->mbox) 2739 sc->flags |= MASTER_PF; 2740 else if (state == DEV_STATE_UNINIT) { 2741 /* 2742 * We didn't get to be the master so we definitely won't be 2743 * configuring the chip. It's a bug if someone else hasn't 2744 * configured it already. 
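* (Hence the EDOOFUS below: this combination indicates a programming error somewhere, not a runtime failure.)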
2745 */ 2746 device_printf(sc->dev, "couldn't be master(%d), " 2747 "device not already initialized either(%d).\n", rc, state); 2748 return (EDOOFUS); 2749 } 2750 2751 /* This is the firmware whose headers the driver was compiled against */ 2752 fw_info = find_fw_info(chip_id(sc)); 2753 if (fw_info == NULL) { 2754 device_printf(sc->dev, 2755 "unable to look up firmware information for chip %d.\n", 2756 chip_id(sc)); 2757 return (EINVAL); 2758 } 2759 drv_fw = &fw_info->fw_hdr; 2760 2761 /* 2762 * The firmware KLD contains many modules. The KLD name is also the 2763 * name of the module that contains the default config file. 2764 */ 2765 default_cfg = firmware_get(fw_info->kld_name); 2766 2767 /* Read the header of the firmware on the card */ 2768 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2769 rc = -t4_read_flash(sc, FLASH_FW_START, 2770 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2771 if (rc == 0) 2772 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2773 else { 2774 device_printf(sc->dev, 2775 "Unable to read card's firmware header: %d\n", rc); 2776 card_fw_usable = 0; 2777 } 2778 2779 /* This is the firmware in the KLD */ 2780 fw = firmware_get(fw_info->fw_mod_name); 2781 if (fw != NULL) { 2782 kld_fw = (const void *)fw->data; 2783 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2784 } else { 2785 kld_fw = NULL; 2786 kld_fw_usable = 0; 2787 } 2788 2789 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2790 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2791 /* 2792 * Common case: the firmware on the card is an exact match and 2793 * the KLD is an exact match too, or the KLD is 2794 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2795 * here -- use cxgbetool loadfw if you want to reinstall the 2796 * same firmware as the one on the card. 2797 */ 2798 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2799 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2800 be32toh(card_fw->fw_ver))) { 2801 2802 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2803 if (rc != 0) { 2804 device_printf(sc->dev, 2805 "failed to install firmware: %d\n", rc); 2806 goto done; 2807 } 2808 2809 /* Installed successfully, update the cached header too. */ 2810 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2811 card_fw_usable = 1; 2812 need_fw_reset = 0; /* already reset as part of load_fw */ 2813 } 2814 2815 if (!card_fw_usable) { 2816 uint32_t d, c, k; 2817 2818 d = ntohl(drv_fw->fw_ver); 2819 c = ntohl(card_fw->fw_ver); 2820 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 2821 2822 device_printf(sc->dev, "Cannot find a usable firmware: " 2823 "fw_install %d, chip state %d, " 2824 "driver compiled with %d.%d.%d.%d, " 2825 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2826 t4_fw_install, state, 2827 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2828 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2829 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2830 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2831 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2832 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2833 rc = EINVAL; 2834 goto done; 2835 } 2836 2837 /* We're using whatever's on the card and it's known to be good. 
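* Record the firmware, TP microcode, and expansion ROM versions in their human-readable forms; these strings are reported elsewhere (e.g. via sysctl).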
*/ 2838 sc->params.fw_vers = ntohl(card_fw->fw_ver); 2839 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 2840 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2841 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2842 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2843 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2844 2845 t4_get_tp_version(sc, &sc->params.tp_vers); 2846 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 2847 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 2848 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 2849 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 2850 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 2851 2852 if (t4_get_exprom_version(sc, &sc->params.exprom_vers) != 0) 2853 sc->params.exprom_vers = 0; 2854 else { 2855 snprintf(sc->exprom_version, sizeof(sc->exprom_version), 2856 "%u.%u.%u.%u", 2857 G_FW_HDR_FW_VER_MAJOR(sc->params.exprom_vers), 2858 G_FW_HDR_FW_VER_MINOR(sc->params.exprom_vers), 2859 G_FW_HDR_FW_VER_MICRO(sc->params.exprom_vers), 2860 G_FW_HDR_FW_VER_BUILD(sc->params.exprom_vers)); 2861 } 2862 2863 /* Reset device */ 2864 if (need_fw_reset && 2865 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2866 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2867 if (rc != ETIMEDOUT && rc != EIO) 2868 t4_fw_bye(sc, sc->mbox); 2869 goto done; 2870 } 2871 sc->flags |= FW_OK; 2872 2873 rc = get_params__pre_init(sc); 2874 if (rc != 0) 2875 goto done; /* error message displayed already */ 2876 2877 /* Partition adapter resources as specified in the config file. */ 2878 if (state == DEV_STATE_UNINIT) { 2879 2880 KASSERT(sc->flags & MASTER_PF, 2881 ("%s: trying to change chip settings when not master.", 2882 __func__)); 2883 2884 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2885 if (rc != 0) 2886 goto done; /* error message displayed already */ 2887 2888 t4_tweak_chip_settings(sc); 2889 2890 /* get basic stuff going */ 2891 rc = -t4_fw_initialize(sc, sc->mbox); 2892 if (rc != 0) { 2893 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2894 goto done; 2895 } 2896 } else { 2897 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2898 sc->cfcsum = 0; 2899 } 2900 2901 done: 2902 free(card_fw, M_CXGBE); 2903 if (fw != NULL) 2904 firmware_put(fw, FIRMWARE_UNLOAD); 2905 if (default_cfg != NULL) 2906 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2907 2908 return (rc); 2909 } 2910 2911 #define FW_PARAM_DEV(param) \ 2912 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2913 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2914 #define FW_PARAM_PFVF(param) \ 2915 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2916 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2917 2918 /* 2919 * Partition chip resources for use between various PFs, VFs, etc. 2920 */ 2921 static int 2922 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2923 const char *name_prefix) 2924 { 2925 const struct firmware *cfg = NULL; 2926 int rc = 0; 2927 struct fw_caps_config_cmd caps; 2928 uint32_t mtype, moff, finicsum, cfcsum; 2929 2930 /* 2931 * Figure out what configuration file to use. Pick the default config 2932 * file for the card if the user hasn't specified one explicitly. 2933 */ 2934 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2935 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2936 /* Card specific overrides go here. 
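* (For example, device id 0x440a below gets the UWIRE_CF profile, and FPGA configurations get FPGA_CF.)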
*/ 2937 if (pci_get_device(sc->dev) == 0x440a) 2938 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2939 if (is_fpga(sc)) 2940 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2941 } 2942 2943 /* 2944 * We need to load another module if the profile is anything except 2945 * "default" or "flash". 2946 */ 2947 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 2948 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2949 char s[32]; 2950 2951 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 2952 cfg = firmware_get(s); 2953 if (cfg == NULL) { 2954 if (default_cfg != NULL) { 2955 device_printf(sc->dev, 2956 "unable to load module \"%s\" for " 2957 "configuration profile \"%s\", will use " 2958 "the default config file instead.\n", 2959 s, sc->cfg_file); 2960 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2961 "%s", DEFAULT_CF); 2962 } else { 2963 device_printf(sc->dev, 2964 "unable to load module \"%s\" for " 2965 "configuration profile \"%s\", will use " 2966 "the config file on the card's flash " 2967 "instead.\n", s, sc->cfg_file); 2968 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2969 "%s", FLASH_CF); 2970 } 2971 } 2972 } 2973 2974 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 2975 default_cfg == NULL) { 2976 device_printf(sc->dev, 2977 "default config file not available, will use the config " 2978 "file on the card's flash instead.\n"); 2979 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 2980 } 2981 2982 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2983 u_int cflen; 2984 const uint32_t *cfdata; 2985 uint32_t param, val, addr; 2986 2987 KASSERT(cfg != NULL || default_cfg != NULL, 2988 ("%s: no config to upload", __func__)); 2989 2990 /* 2991 * Ask the firmware where it wants us to upload the config file. 2992 */ 2993 param = FW_PARAM_DEV(CF); 2994 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2995 if (rc != 0) { 2996 /* No support for config file? Shouldn't happen. */ 2997 device_printf(sc->dev, 2998 "failed to query config file location: %d.\n", rc); 2999 goto done; 3000 } 3001 mtype = G_FW_PARAMS_PARAM_Y(val); 3002 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3003 3004 /* 3005 * XXX: sheer laziness. We deliberately added 4 bytes of 3006 * useless stuffing/comments at the end of the config file so 3007 * it's ok to simply throw away the last remaining bytes when 3008 * the config file is not an exact multiple of 4. This also 3009 * helps with the validate_mt_off_len check. 3010 */ 3011 if (cfg != NULL) { 3012 cflen = cfg->datasize & ~3; 3013 cfdata = cfg->data; 3014 } else { 3015 cflen = default_cfg->datasize & ~3; 3016 cfdata = default_cfg->data; 3017 } 3018 3019 if (cflen > FLASH_CFG_MAX_SIZE) { 3020 device_printf(sc->dev, 3021 "config file too long (%d, max allowed is %d). " 3022 "Will try to use the config on the card, if any.\n", 3023 cflen, FLASH_CFG_MAX_SIZE); 3024 goto use_config_on_flash; 3025 } 3026 3027 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3028 if (rc != 0) { 3029 device_printf(sc->dev, 3030 "%s: addr (%d/0x%x) or len %d is not valid: %d.
" 3031 "Will try to use the config on the card, if any.\n", 3032 __func__, mtype, moff, cflen, rc); 3033 goto use_config_on_flash; 3034 } 3035 write_via_memwin(sc, 2, addr, cfdata, cflen); 3036 } else { 3037 use_config_on_flash: 3038 mtype = FW_MEMTYPE_FLASH; 3039 moff = t4_flash_cfg_addr(sc); 3040 } 3041 3042 bzero(&caps, sizeof(caps)); 3043 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3044 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3045 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3046 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3047 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3048 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3049 if (rc != 0) { 3050 device_printf(sc->dev, 3051 "failed to pre-process config file: %d " 3052 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3053 goto done; 3054 } 3055 3056 finicsum = be32toh(caps.finicsum); 3057 cfcsum = be32toh(caps.cfcsum); 3058 if (finicsum != cfcsum) { 3059 device_printf(sc->dev, 3060 "WARNING: config file checksum mismatch: %08x %08x\n", 3061 finicsum, cfcsum); 3062 } 3063 sc->cfcsum = cfcsum; 3064 3065 #define LIMIT_CAPS(x) do { \ 3066 caps.x &= htobe16(t4_##x##_allowed); \ 3067 } while (0) 3068 3069 /* 3070 * Let the firmware know what features will (not) be used so it can tune 3071 * things accordingly. 3072 */ 3073 LIMIT_CAPS(nbmcaps); 3074 LIMIT_CAPS(linkcaps); 3075 LIMIT_CAPS(switchcaps); 3076 LIMIT_CAPS(niccaps); 3077 LIMIT_CAPS(toecaps); 3078 LIMIT_CAPS(rdmacaps); 3079 LIMIT_CAPS(tlscaps); 3080 LIMIT_CAPS(iscsicaps); 3081 LIMIT_CAPS(fcoecaps); 3082 #undef LIMIT_CAPS 3083 3084 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3085 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3086 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3087 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3088 if (rc != 0) { 3089 device_printf(sc->dev, 3090 "failed to process config file: %d.\n", rc); 3091 } 3092 done: 3093 if (cfg != NULL) 3094 firmware_put(cfg, FIRMWARE_UNLOAD); 3095 return (rc); 3096 } 3097 3098 /* 3099 * Retrieve parameters that are needed (or nice to have) very early. 3100 */ 3101 static int 3102 get_params__pre_init(struct adapter *sc) 3103 { 3104 int rc; 3105 uint32_t param[2], val[2]; 3106 3107 param[0] = FW_PARAM_DEV(PORTVEC); 3108 param[1] = FW_PARAM_DEV(CCLK); 3109 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3110 if (rc != 0) { 3111 device_printf(sc->dev, 3112 "failed to query parameters (pre_init): %d.\n", rc); 3113 return (rc); 3114 } 3115 3116 sc->params.portvec = val[0]; 3117 sc->params.nports = bitcount32(val[0]); 3118 sc->params.vpd.cclk = val[1]; 3119 3120 /* Read device log parameters. */ 3121 rc = -t4_init_devlog_params(sc, 1); 3122 if (rc == 0) 3123 fixup_devlog_params(sc); 3124 else { 3125 device_printf(sc->dev, 3126 "failed to get devlog parameters: %d.\n", rc); 3127 rc = 0; /* devlog isn't critical for device operation */ 3128 } 3129 3130 return (rc); 3131 } 3132 3133 /* 3134 * Retrieve various parameters that are of interest to the driver. The device 3135 * has been initialized by the firmware at this point. 
3136 */ 3137 static int 3138 get_params__post_init(struct adapter *sc) 3139 { 3140 int rc; 3141 uint32_t param[7], val[7]; 3142 struct fw_caps_config_cmd caps; 3143 3144 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3145 param[1] = FW_PARAM_PFVF(EQ_START); 3146 param[2] = FW_PARAM_PFVF(FILTER_START); 3147 param[3] = FW_PARAM_PFVF(FILTER_END); 3148 param[4] = FW_PARAM_PFVF(L2T_START); 3149 param[5] = FW_PARAM_PFVF(L2T_END); 3150 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3151 if (rc != 0) { 3152 device_printf(sc->dev, 3153 "failed to query parameters (post_init): %d.\n", rc); 3154 return (rc); 3155 } 3156 3157 sc->sge.iq_start = val[0]; 3158 sc->sge.eq_start = val[1]; 3159 sc->tids.ftid_base = val[2]; 3160 sc->tids.nftids = val[3] - val[2] + 1; 3161 sc->params.ftid_min = val[2]; 3162 sc->params.ftid_max = val[3]; 3163 sc->vres.l2t.start = val[4]; 3164 sc->vres.l2t.size = val[5] - val[4] + 1; 3165 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3166 ("%s: L2 table size (%u) larger than expected (%u)", 3167 __func__, sc->vres.l2t.size, L2T_SIZE)); 3168 3169 /* get capabilites */ 3170 bzero(&caps, sizeof(caps)); 3171 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3172 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3173 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3174 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3175 if (rc != 0) { 3176 device_printf(sc->dev, 3177 "failed to get card capabilities: %d.\n", rc); 3178 return (rc); 3179 } 3180 3181 #define READ_CAPS(x) do { \ 3182 sc->x = htobe16(caps.x); \ 3183 } while (0) 3184 READ_CAPS(nbmcaps); 3185 READ_CAPS(linkcaps); 3186 READ_CAPS(switchcaps); 3187 READ_CAPS(niccaps); 3188 READ_CAPS(toecaps); 3189 READ_CAPS(rdmacaps); 3190 READ_CAPS(tlscaps); 3191 READ_CAPS(iscsicaps); 3192 READ_CAPS(fcoecaps); 3193 3194 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3195 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3196 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3197 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3198 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3199 if (rc != 0) { 3200 device_printf(sc->dev, 3201 "failed to query NIC parameters: %d.\n", rc); 3202 return (rc); 3203 } 3204 sc->tids.etid_base = val[0]; 3205 sc->params.etid_min = val[0]; 3206 sc->tids.netids = val[1] - val[0] + 1; 3207 sc->params.netids = sc->tids.netids; 3208 sc->params.eo_wr_cred = val[2]; 3209 sc->params.ethoffload = 1; 3210 } 3211 3212 if (sc->toecaps) { 3213 /* query offload-related parameters */ 3214 param[0] = FW_PARAM_DEV(NTID); 3215 param[1] = FW_PARAM_PFVF(SERVER_START); 3216 param[2] = FW_PARAM_PFVF(SERVER_END); 3217 param[3] = FW_PARAM_PFVF(TDDP_START); 3218 param[4] = FW_PARAM_PFVF(TDDP_END); 3219 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3220 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3221 if (rc != 0) { 3222 device_printf(sc->dev, 3223 "failed to query TOE parameters: %d.\n", rc); 3224 return (rc); 3225 } 3226 sc->tids.ntids = val[0]; 3227 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3228 sc->tids.stid_base = val[1]; 3229 sc->tids.nstids = val[2] - val[1] + 1; 3230 sc->vres.ddp.start = val[3]; 3231 sc->vres.ddp.size = val[4] - val[3] + 1; 3232 sc->params.ofldq_wr_cred = val[5]; 3233 sc->params.offload = 1; 3234 } 3235 if (sc->rdmacaps) { 3236 param[0] = FW_PARAM_PFVF(STAG_START); 3237 param[1] = FW_PARAM_PFVF(STAG_END); 3238 param[2] = FW_PARAM_PFVF(RQ_START); 3239 param[3] = FW_PARAM_PFVF(RQ_END); 3240 param[4] = FW_PARAM_PFVF(PBL_START); 3241 param[5] = FW_PARAM_PFVF(PBL_END); 3242 rc = 
-t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3243 if (rc != 0) { 3244 device_printf(sc->dev, 3245 "failed to query RDMA parameters(1): %d.\n", rc); 3246 return (rc); 3247 } 3248 sc->vres.stag.start = val[0]; 3249 sc->vres.stag.size = val[1] - val[0] + 1; 3250 sc->vres.rq.start = val[2]; 3251 sc->vres.rq.size = val[3] - val[2] + 1; 3252 sc->vres.pbl.start = val[4]; 3253 sc->vres.pbl.size = val[5] - val[4] + 1; 3254 3255 param[0] = FW_PARAM_PFVF(SQRQ_START); 3256 param[1] = FW_PARAM_PFVF(SQRQ_END); 3257 param[2] = FW_PARAM_PFVF(CQ_START); 3258 param[3] = FW_PARAM_PFVF(CQ_END); 3259 param[4] = FW_PARAM_PFVF(OCQ_START); 3260 param[5] = FW_PARAM_PFVF(OCQ_END); 3261 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3262 if (rc != 0) { 3263 device_printf(sc->dev, 3264 "failed to query RDMA parameters(2): %d.\n", rc); 3265 return (rc); 3266 } 3267 sc->vres.qp.start = val[0]; 3268 sc->vres.qp.size = val[1] - val[0] + 1; 3269 sc->vres.cq.start = val[2]; 3270 sc->vres.cq.size = val[3] - val[2] + 1; 3271 sc->vres.ocq.start = val[4]; 3272 sc->vres.ocq.size = val[5] - val[4] + 1; 3273 } 3274 if (sc->iscsicaps) { 3275 param[0] = FW_PARAM_PFVF(ISCSI_START); 3276 param[1] = FW_PARAM_PFVF(ISCSI_END); 3277 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3278 if (rc != 0) { 3279 device_printf(sc->dev, 3280 "failed to query iSCSI parameters: %d.\n", rc); 3281 return (rc); 3282 } 3283 sc->vres.iscsi.start = val[0]; 3284 sc->vres.iscsi.size = val[1] - val[0] + 1; 3285 } 3286 3287 /* 3288 * We've got the params we wanted to query via the firmware. Now grab 3289 * some others directly from the chip. 3290 */ 3291 rc = t4_read_chip_settings(sc); 3292 3293 return (rc); 3294 } 3295 3296 static int 3297 set_params__post_init(struct adapter *sc) 3298 { 3299 uint32_t param, val; 3300 3301 /* ask for encapsulated CPLs */ 3302 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3303 val = 1; 3304 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3305 3306 return (0); 3307 } 3308 3309 #undef FW_PARAM_PFVF 3310 #undef FW_PARAM_DEV 3311 3312 static void 3313 t4_set_desc(struct adapter *sc) 3314 { 3315 char buf[128]; 3316 struct adapter_params *p = &sc->params; 3317 3318 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, " 3319 "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ?
"R" : "", 3320 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec); 3321 3322 device_set_desc_copy(sc->dev, buf); 3323 } 3324 3325 static void 3326 build_medialist(struct port_info *pi, struct ifmedia *media) 3327 { 3328 int m; 3329 3330 PORT_LOCK(pi); 3331 3332 ifmedia_removeall(media); 3333 3334 m = IFM_ETHER | IFM_FDX; 3335 3336 switch(pi->port_type) { 3337 case FW_PORT_TYPE_BT_XFI: 3338 case FW_PORT_TYPE_BT_XAUI: 3339 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3340 /* fall through */ 3341 3342 case FW_PORT_TYPE_BT_SGMII: 3343 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3344 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3345 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3346 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3347 break; 3348 3349 case FW_PORT_TYPE_CX4: 3350 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3351 ifmedia_set(media, m | IFM_10G_CX4); 3352 break; 3353 3354 case FW_PORT_TYPE_QSFP_10G: 3355 case FW_PORT_TYPE_SFP: 3356 case FW_PORT_TYPE_FIBER_XFI: 3357 case FW_PORT_TYPE_FIBER_XAUI: 3358 switch (pi->mod_type) { 3359 3360 case FW_PORT_MOD_TYPE_LR: 3361 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3362 ifmedia_set(media, m | IFM_10G_LR); 3363 break; 3364 3365 case FW_PORT_MOD_TYPE_SR: 3366 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3367 ifmedia_set(media, m | IFM_10G_SR); 3368 break; 3369 3370 case FW_PORT_MOD_TYPE_LRM: 3371 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3372 ifmedia_set(media, m | IFM_10G_LRM); 3373 break; 3374 3375 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3376 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3377 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3378 ifmedia_set(media, m | IFM_10G_TWINAX); 3379 break; 3380 3381 case FW_PORT_MOD_TYPE_NONE: 3382 m &= ~IFM_FDX; 3383 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3384 ifmedia_set(media, m | IFM_NONE); 3385 break; 3386 3387 case FW_PORT_MOD_TYPE_NA: 3388 case FW_PORT_MOD_TYPE_ER: 3389 default: 3390 device_printf(pi->dev, 3391 "unknown port_type (%d), mod_type (%d)\n", 3392 pi->port_type, pi->mod_type); 3393 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3394 ifmedia_set(media, m | IFM_UNKNOWN); 3395 break; 3396 } 3397 break; 3398 3399 case FW_PORT_TYPE_QSFP: 3400 switch (pi->mod_type) { 3401 3402 case FW_PORT_MOD_TYPE_LR: 3403 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3404 ifmedia_set(media, m | IFM_40G_LR4); 3405 break; 3406 3407 case FW_PORT_MOD_TYPE_SR: 3408 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3409 ifmedia_set(media, m | IFM_40G_SR4); 3410 break; 3411 3412 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3413 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3414 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3415 ifmedia_set(media, m | IFM_40G_CR4); 3416 break; 3417 3418 case FW_PORT_MOD_TYPE_NONE: 3419 m &= ~IFM_FDX; 3420 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3421 ifmedia_set(media, m | IFM_NONE); 3422 break; 3423 3424 default: 3425 device_printf(pi->dev, 3426 "unknown port_type (%d), mod_type (%d)\n", 3427 pi->port_type, pi->mod_type); 3428 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3429 ifmedia_set(media, m | IFM_UNKNOWN); 3430 break; 3431 } 3432 break; 3433 3434 default: 3435 device_printf(pi->dev, 3436 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3437 pi->mod_type); 3438 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3439 ifmedia_set(media, m | IFM_UNKNOWN); 3440 break; 3441 } 3442 3443 PORT_UNLOCK(pi); 3444 } 3445 3446 #define FW_MAC_EXACT_CHUNK 7 3447 3448 /* 3449 * Program the port's XGMAC based on parameters in ifnet. 
The caller also 3450 * indicates which parameters should be programmed (the rest are left alone). 3451 */ 3452 int 3453 update_mac_settings(struct ifnet *ifp, int flags) 3454 { 3455 int rc = 0; 3456 struct vi_info *vi = ifp->if_softc; 3457 struct port_info *pi = vi->pi; 3458 struct adapter *sc = pi->adapter; 3459 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3460 3461 ASSERT_SYNCHRONIZED_OP(sc); 3462 KASSERT(flags, ("%s: not told what to update.", __func__)); 3463 3464 if (flags & XGMAC_MTU) 3465 mtu = ifp->if_mtu; 3466 3467 if (flags & XGMAC_PROMISC) 3468 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3469 3470 if (flags & XGMAC_ALLMULTI) 3471 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3472 3473 if (flags & XGMAC_VLANEX) 3474 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 3475 3476 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3477 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3478 allmulti, 1, vlanex, false); 3479 if (rc) { 3480 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3481 rc); 3482 return (rc); 3483 } 3484 } 3485 3486 if (flags & XGMAC_UCADDR) { 3487 uint8_t ucaddr[ETHER_ADDR_LEN]; 3488 3489 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3490 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3491 ucaddr, true, true); 3492 if (rc < 0) { 3493 rc = -rc; 3494 if_printf(ifp, "change_mac failed: %d\n", rc); 3495 return (rc); 3496 } else { 3497 vi->xact_addr_filt = rc; 3498 rc = 0; 3499 } 3500 } 3501 3502 if (flags & XGMAC_MCADDRS) { 3503 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3504 int del = 1; 3505 uint64_t hash = 0; 3506 struct ifmultiaddr *ifma; 3507 int i = 0, j; 3508 3509 if_maddr_rlock(ifp); 3510 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3511 if (ifma->ifma_addr->sa_family != AF_LINK) 3512 continue; 3513 mcaddr[i] = 3514 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3515 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3516 i++; 3517 3518 if (i == FW_MAC_EXACT_CHUNK) { 3519 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3520 del, i, mcaddr, NULL, &hash, 0); 3521 if (rc < 0) { 3522 rc = -rc; 3523 for (j = 0; j < i; j++) { 3524 if_printf(ifp, 3525 "failed to add mc address" 3526 " %02x:%02x:%02x:" 3527 "%02x:%02x:%02x rc=%d\n", 3528 mcaddr[j][0], mcaddr[j][1], 3529 mcaddr[j][2], mcaddr[j][3], 3530 mcaddr[j][4], mcaddr[j][5], 3531 rc); 3532 } 3533 goto mcfail; 3534 } 3535 del = 0; 3536 i = 0; 3537 } 3538 } 3539 if (i > 0) { 3540 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3541 mcaddr, NULL, &hash, 0); 3542 if (rc < 0) { 3543 rc = -rc; 3544 for (j = 0; j < i; j++) { 3545 if_printf(ifp, 3546 "failed to add mc address" 3547 " %02x:%02x:%02x:" 3548 "%02x:%02x:%02x rc=%d\n", 3549 mcaddr[j][0], mcaddr[j][1], 3550 mcaddr[j][2], mcaddr[j][3], 3551 mcaddr[j][4], mcaddr[j][5], 3552 rc); 3553 } 3554 goto mcfail; 3555 } 3556 } 3557 3558 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3559 if (rc != 0) 3560 if_printf(ifp, "failed to set mc address hash: %d\n", rc); 3561 mcfail: 3562 if_maddr_runlock(ifp); 3563 } 3564 3565 return (rc); 3566 } 3567 3568 /* 3569 * {begin|end}_synchronized_op must be called from the same thread. 3570 */ 3571 int 3572 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3573 char *wmesg) 3574 { 3575 int rc, pri; 3576 3577 #ifdef WITNESS 3578 /* the caller thinks it's ok to sleep, but is it really?
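* WITNESS can tell, based on the locks held at this point.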
*/ 3579 if (flags & SLEEP_OK) 3580 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3581 "begin_synchronized_op"); 3582 #endif 3583 3584 if (flags & INTR_OK) 3585 pri = PCATCH; 3586 else 3587 pri = 0; 3588 3589 ADAPTER_LOCK(sc); 3590 for (;;) { 3591 3592 if (vi && IS_DOOMED(vi)) { 3593 rc = ENXIO; 3594 goto done; 3595 } 3596 3597 if (!IS_BUSY(sc)) { 3598 rc = 0; 3599 break; 3600 } 3601 3602 if (!(flags & SLEEP_OK)) { 3603 rc = EBUSY; 3604 goto done; 3605 } 3606 3607 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3608 rc = EINTR; 3609 goto done; 3610 } 3611 } 3612 3613 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3614 SET_BUSY(sc); 3615 #ifdef INVARIANTS 3616 sc->last_op = wmesg; 3617 sc->last_op_thr = curthread; 3618 sc->last_op_flags = flags; 3619 #endif 3620 3621 done: 3622 if (!(flags & HOLD_LOCK) || rc) 3623 ADAPTER_UNLOCK(sc); 3624 3625 return (rc); 3626 } 3627 3628 /* 3629 * Tell if_ioctl and if_init that the VI is going away. This is a 3630 * special variant of begin_synchronized_op and must be paired with a 3631 * call to end_synchronized_op. 3632 */ 3633 void 3634 doom_vi(struct adapter *sc, struct vi_info *vi) 3635 { 3636 3637 ADAPTER_LOCK(sc); 3638 SET_DOOMED(vi); 3639 wakeup(&sc->flags); 3640 while (IS_BUSY(sc)) 3641 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3642 SET_BUSY(sc); 3643 #ifdef INVARIANTS 3644 sc->last_op = "t4detach"; 3645 sc->last_op_thr = curthread; 3646 sc->last_op_flags = 0; 3647 #endif 3648 ADAPTER_UNLOCK(sc); 3649 } 3650 3651 /* 3652 * {begin|end}_synchronized_op must be called from the same thread. 3653 */ 3654 void 3655 end_synchronized_op(struct adapter *sc, int flags) 3656 { 3657 3658 if (flags & LOCK_HELD) 3659 ADAPTER_LOCK_ASSERT_OWNED(sc); 3660 else 3661 ADAPTER_LOCK(sc); 3662 3663 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3664 CLR_BUSY(sc); 3665 wakeup(&sc->flags); 3666 ADAPTER_UNLOCK(sc); 3667 } 3668 3669 static int 3670 cxgbe_init_synchronized(struct vi_info *vi) 3671 { 3672 struct port_info *pi = vi->pi; 3673 struct adapter *sc = pi->adapter; 3674 struct ifnet *ifp = vi->ifp; 3675 int rc = 0, i; 3676 struct sge_txq *txq; 3677 3678 ASSERT_SYNCHRONIZED_OP(sc); 3679 3680 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3681 return (0); /* already running */ 3682 3683 if (!(sc->flags & FULL_INIT_DONE) && 3684 ((rc = adapter_full_init(sc)) != 0)) 3685 return (rc); /* error message displayed already */ 3686 3687 if (!(vi->flags & VI_INIT_DONE) && 3688 ((rc = vi_full_init(vi)) != 0)) 3689 return (rc); /* error message displayed already */ 3690 3691 rc = update_mac_settings(ifp, XGMAC_ALL); 3692 if (rc) 3693 goto done; /* error message displayed already */ 3694 3695 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 3696 if (rc != 0) { 3697 if_printf(ifp, "enable_vi failed: %d\n", rc); 3698 goto done; 3699 } 3700 3701 /* 3702 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3703 * if this changes. 3704 */ 3705 3706 for_each_txq(vi, i, txq) { 3707 TXQ_LOCK(txq); 3708 txq->eq.flags |= EQ_ENABLED; 3709 TXQ_UNLOCK(txq); 3710 } 3711 3712 /* 3713 * The first iq of the first port to come up is used for tracing. 3714 */ 3715 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 3716 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 3717 t4_write_reg(sc, is_t4(sc) ?
A_MPS_TRC_RSS_CONTROL : 3718 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3719 V_QUEUENUMBER(sc->traceq)); 3720 pi->flags |= HAS_TRACEQ; 3721 } 3722 3723 /* all ok */ 3724 PORT_LOCK(pi); 3725 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3726 pi->up_vis++; 3727 3728 if (pi->nvi > 1) 3729 callout_reset(&vi->tick, hz, vi_tick, vi); 3730 else 3731 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3732 PORT_UNLOCK(pi); 3733 done: 3734 if (rc != 0) 3735 cxgbe_uninit_synchronized(vi); 3736 3737 return (rc); 3738 } 3739 3740 /* 3741 * Idempotent. 3742 */ 3743 static int 3744 cxgbe_uninit_synchronized(struct vi_info *vi) 3745 { 3746 struct port_info *pi = vi->pi; 3747 struct adapter *sc = pi->adapter; 3748 struct ifnet *ifp = vi->ifp; 3749 int rc, i; 3750 struct sge_txq *txq; 3751 3752 ASSERT_SYNCHRONIZED_OP(sc); 3753 3754 if (!(vi->flags & VI_INIT_DONE)) { 3755 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3756 ("uninited VI is running")); 3757 return (0); 3758 } 3759 3760 /* 3761 * Disable the VI so that all its data in either direction is discarded 3762 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 3763 * tick) intact as the TP can deliver negative advice or data that it's 3764 * holding in its RAM (for an offloaded connection) even after the VI is 3765 * disabled. 3766 */ 3767 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 3768 if (rc) { 3769 if_printf(ifp, "disable_vi failed: %d\n", rc); 3770 return (rc); 3771 } 3772 3773 for_each_txq(vi, i, txq) { 3774 TXQ_LOCK(txq); 3775 txq->eq.flags &= ~EQ_ENABLED; 3776 TXQ_UNLOCK(txq); 3777 } 3778 3779 PORT_LOCK(pi); 3780 if (pi->nvi == 1) 3781 callout_stop(&pi->tick); 3782 else 3783 callout_stop(&vi->tick); 3784 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3785 PORT_UNLOCK(pi); 3786 return (0); 3787 } 3788 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3789 pi->up_vis--; 3790 if (pi->up_vis > 0) { 3791 PORT_UNLOCK(pi); 3792 return (0); 3793 } 3794 PORT_UNLOCK(pi); 3795 3796 pi->link_cfg.link_ok = 0; 3797 pi->link_cfg.speed = 0; 3798 pi->linkdnrc = -1; 3799 t4_os_link_changed(sc, pi->port_id, 0, -1); 3800 3801 return (0); 3802 } 3803 3804 /* 3805 * It is ok for this function to fail midway and return right away. t4_detach 3806 * will walk the entire sc->irq list and clean up whatever is valid. 3807 */ 3808 static int 3809 setup_intr_handlers(struct adapter *sc) 3810 { 3811 int rc, rid, p, q, v; 3812 char s[8]; 3813 struct irq *irq; 3814 struct port_info *pi; 3815 struct vi_info *vi; 3816 struct sge_rxq *rxq; 3817 #ifdef TCP_OFFLOAD 3818 struct sge_ofld_rxq *ofld_rxq; 3819 #endif 3820 #ifdef DEV_NETMAP 3821 struct sge_nm_rxq *nm_rxq; 3822 #endif 3823 #ifdef RSS 3824 int nbuckets = rss_getnumbuckets(); 3825 #endif 3826 3827 /* 3828 * Setup interrupts. 3829 */ 3830 irq = &sc->irq[0]; 3831 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3832 if (sc->intr_count == 1) 3833 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3834 3835 /* Multiple interrupts. 
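* The layout here must match what cfg_itype_and_nqueues set up: the first vector for errors, the second for the firmware event queue, and the rest for the rx queues of each port's VIs, in order (e.g. two 4-queue ports under MSI-X need 2 + 8 vectors in total).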
*/ 3836 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3837 ("%s: too few intr.", __func__)); 3838 3839 /* The first one is always error intr */ 3840 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3841 if (rc != 0) 3842 return (rc); 3843 irq++; 3844 rid++; 3845 3846 /* The second one is always the firmware event queue */ 3847 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt"); 3848 if (rc != 0) 3849 return (rc); 3850 irq++; 3851 rid++; 3852 3853 for_each_port(sc, p) { 3854 pi = sc->port[p]; 3855 for_each_vi(pi, v, vi) { 3856 vi->first_intr = rid - 1; 3857 #ifdef DEV_NETMAP 3858 if (vi->flags & VI_NETMAP) { 3859 for_each_nm_rxq(vi, q, nm_rxq) { 3860 snprintf(s, sizeof(s), "%d-%d", p, q); 3861 rc = t4_alloc_irq(sc, irq, rid, 3862 t4_nm_intr, nm_rxq, s); 3863 if (rc != 0) 3864 return (rc); 3865 irq++; 3866 rid++; 3867 vi->nintr++; 3868 } 3869 continue; 3870 } 3871 #endif 3872 if (vi->flags & INTR_RXQ) { 3873 for_each_rxq(vi, q, rxq) { 3874 if (v == 0) 3875 snprintf(s, sizeof(s), "%d.%d", 3876 p, q); 3877 else 3878 snprintf(s, sizeof(s), 3879 "%d(%d).%d", p, v, q); 3880 rc = t4_alloc_irq(sc, irq, rid, 3881 t4_intr, rxq, s); 3882 if (rc != 0) 3883 return (rc); 3884 #ifdef RSS 3885 bus_bind_intr(sc->dev, irq->res, 3886 rss_getcpu(q % nbuckets)); 3887 #endif 3888 irq++; 3889 rid++; 3890 vi->nintr++; 3891 } 3892 } 3893 #ifdef TCP_OFFLOAD 3894 if (vi->flags & INTR_OFLD_RXQ) { 3895 for_each_ofld_rxq(vi, q, ofld_rxq) { 3896 snprintf(s, sizeof(s), "%d,%d", p, q); 3897 rc = t4_alloc_irq(sc, irq, rid, 3898 t4_intr, ofld_rxq, s); 3899 if (rc != 0) 3900 return (rc); 3901 irq++; 3902 rid++; 3903 vi->nintr++; 3904 } 3905 } 3906 #endif 3907 } 3908 } 3909 MPASS(irq == &sc->irq[sc->intr_count]); 3910 3911 return (0); 3912 } 3913 3914 int 3915 adapter_full_init(struct adapter *sc) 3916 { 3917 int rc, i; 3918 3919 ASSERT_SYNCHRONIZED_OP(sc); 3920 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3921 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 3922 ("%s: FULL_INIT_DONE already", __func__)); 3923 3924 /* 3925 * queues that belong to the adapter (not any particular port). 3926 */ 3927 rc = t4_setup_adapter_queues(sc); 3928 if (rc != 0) 3929 goto done; 3930 3931 for (i = 0; i < nitems(sc->tq); i++) { 3932 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 3933 taskqueue_thread_enqueue, &sc->tq[i]); 3934 if (sc->tq[i] == NULL) { 3935 device_printf(sc->dev, 3936 "failed to allocate task queue %d\n", i); 3937 rc = ENOMEM; 3938 goto done; 3939 } 3940 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 3941 device_get_nameunit(sc->dev), i); 3942 } 3943 3944 t4_intr_enable(sc); 3945 sc->flags |= FULL_INIT_DONE; 3946 done: 3947 if (rc != 0) 3948 adapter_full_uninit(sc); 3949 3950 return (rc); 3951 } 3952 3953 int 3954 adapter_full_uninit(struct adapter *sc) 3955 { 3956 int i; 3957 3958 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3959 3960 t4_teardown_adapter_queues(sc); 3961 3962 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 3963 taskqueue_free(sc->tq[i]); 3964 sc->tq[i] = NULL; 3965 } 3966 3967 sc->flags &= ~FULL_INIT_DONE; 3968 3969 return (0); 3970 } 3971 3972 #ifdef RSS 3973 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 3974 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 3975 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 3976 RSS_HASHTYPE_RSS_UDP_IPV6) 3977 3978 /* Translates kernel hash types to hardware. 
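* Note that the hardware does not hash UDP traffic on its own: UDPEN merely extends 4-tuple hashing to UDP, so it is always set together with a FOURTUPEN bit here.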
 */
static int
hashconfig_to_hashen(int hashconfig)
{
    int hashen = 0;

    if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
        hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
    if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
        hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
    if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
        hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
            F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
    }
    if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
        hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
            F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
    }
    if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
        hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
    if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
        hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;

    return (hashen);
}

/* Translates hardware hash types to kernel. */
static int
hashen_to_hashconfig(int hashen)
{
    int hashconfig = 0;

    if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
        /*
         * If UDP hashing was enabled it must have been enabled for
         * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
         * enabling any 4-tuple hash is a nonsense configuration.
         */
        MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
            F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));

        if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
            hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
        if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
            hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
    }
    if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
        hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
    if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
        hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
    if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
        hashconfig |= RSS_HASHTYPE_RSS_IPV4;
    if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
        hashconfig |= RSS_HASHTYPE_RSS_IPV6;

    return (hashconfig);
}
#endif
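/*
 * Illustrative sketch (not driver code): the two translators above are not
 * exact inverses, which is why vi_full_init() later computes an "extra"
 * mask.  Requesting UDP/IPv4 hashing forces the 4-tuple bit on in hardware,
 * so translating back reports TCP/IPv4 even if it was never requested.  The
 * bit values below are made-up stand-ins for the RSS_HASHTYPE_* and
 * F_FW_RSS_VI_CONFIG_CMD_* constants, chosen only for this example.
 */
#if 0
#include <stdio.h>

#define IPV4        0x01    /* stand-in for RSS_HASHTYPE_RSS_IPV4 */
#define TCP_IPV4    0x02    /* stand-in for RSS_HASHTYPE_RSS_TCP_IPV4 */
#define UDP_IPV4    0x04    /* stand-in for RSS_HASHTYPE_RSS_UDP_IPV4 */

#define HW_2TUP     0x10    /* stand-in for ..._IP4TWOTUPEN */
#define HW_4TUP     0x20    /* stand-in for ..._IP4FOURTUPEN */
#define HW_UDP      0x40    /* stand-in for ..._UDPEN */

static int
to_hw(int cfg)
{
    int hw = 0;

    if (cfg & IPV4)
        hw |= HW_2TUP;
    if (cfg & UDP_IPV4)
        hw |= HW_UDP | HW_4TUP;    /* UDP drags the 4-tuple bit in */
    if (cfg & TCP_IPV4)
        hw |= HW_4TUP;
    return (hw);
}

static int
from_hw(int hw)
{
    int cfg = 0;

    if ((hw & HW_UDP) && (hw & HW_4TUP))
        cfg |= UDP_IPV4;
    if (hw & HW_4TUP)
        cfg |= TCP_IPV4;
    if (hw & HW_2TUP)
        cfg |= IPV4;
    return (cfg);
}

int
main(void)
{
    int req = UDP_IPV4;    /* ask for UDP/IPv4 hashing only */
    int extra = from_hw(to_hw(req)) ^ req;

    /* prints extra = 0x2: TCP/IPv4 was forced on, as vi_full_init reports */
    printf("extra = %#x\n", extra);
    return (0);
}
#endif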
int
vi_full_init(struct vi_info *vi)
{
    struct adapter *sc = vi->pi->adapter;
    struct ifnet *ifp = vi->ifp;
    uint16_t *rss;
    struct sge_rxq *rxq;
    int rc, i, j, hashen;
#ifdef RSS
    int nbuckets = rss_getnumbuckets();
    int hashconfig = rss_gethashconfig();
    int extra;
    uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
    uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
#endif

    ASSERT_SYNCHRONIZED_OP(sc);
    KASSERT((vi->flags & VI_INIT_DONE) == 0,
        ("%s: VI_INIT_DONE already", __func__));

    sysctl_ctx_init(&vi->ctx);
    vi->flags |= VI_SYSCTL_CTX;

    /*
     * Allocate tx/rx/fl queues for this VI.
     */
    rc = t4_setup_vi_queues(vi);
    if (rc != 0)
        goto done;    /* error message displayed already */

#ifdef DEV_NETMAP
    /* Netmap VIs configure RSS when netmap is enabled. */
    if (vi->flags & VI_NETMAP) {
        vi->flags |= VI_INIT_DONE;
        return (0);
    }
#endif

    /*
     * Setup RSS for this VI.  Save a copy of the RSS table for later use.
     */
    if (vi->nrxq > vi->rss_size) {
        if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
            "some queues will never receive traffic.\n", vi->nrxq,
            vi->rss_size);
    } else if (vi->rss_size % vi->nrxq) {
        if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
            "expect uneven traffic distribution.\n", vi->nrxq,
            vi->rss_size);
    }
#ifdef RSS
    MPASS(RSS_KEYSIZE == 40);
    if (vi->nrxq != nbuckets) {
        if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); "
            "performance will be impacted.\n", vi->nrxq, nbuckets);
    }

    rss_getkey((void *)&raw_rss_key[0]);
    for (i = 0; i < nitems(rss_key); i++) {
        rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
    }
    t4_write_rss_key(sc, &rss_key[0], -1);
#endif
    rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
    for (i = 0; i < vi->rss_size;) {
#ifdef RSS
        j = rss_get_indirection_to_bucket(i);
        j %= vi->nrxq;
        rxq = &sc->sge.rxq[vi->first_rxq + j];
        rss[i++] = rxq->iq.abs_id;
#else
        for_each_rxq(vi, j, rxq) {
            rss[i++] = rxq->iq.abs_id;
            if (i == vi->rss_size)
                break;
        }
#endif
    }

    rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
        vi->rss_size);
    if (rc != 0) {
        if_printf(ifp, "rss_config failed: %d\n", rc);
        goto done;
    }

#ifdef RSS
    hashen = hashconfig_to_hashen(hashconfig);

    /*
     * We may have had to enable some hashes even though the global config
     * wants them disabled.  This is a potential problem that must be
     * reported to the user.
     */
    extra = hashen_to_hashconfig(hashen) ^ hashconfig;

    /*
     * If we consider only the supported hash types, then the enabled hashes
     * are a superset of the requested hashes.  In other words, there cannot
     * be any supported hash that was requested but not enabled, but there
     * can be hashes that were not requested but had to be enabled.
     */
    extra &= SUPPORTED_RSS_HASHTYPES;
    MPASS((extra & hashconfig) == 0);

    if (extra) {
        if_printf(ifp,
            "global RSS config (0x%x) cannot be accommodated.\n",
            hashconfig);
    }
    if (extra & RSS_HASHTYPE_RSS_IPV4)
        if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
    if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
        if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
    if (extra & RSS_HASHTYPE_RSS_IPV6)
        if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
    if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
        if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
    if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
        if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
    if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
        if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
#else
    hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
        F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
        F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
        F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
#endif
    rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0]);
    if (rc != 0) {
        if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
        goto done;
    }

    vi->rss = rss;
    vi->flags |= VI_INIT_DONE;
done:
    if (rc != 0)
        vi_full_uninit(vi);

    return (rc);
}
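/*
 * Illustrative sketch (not driver code): the non-RSS branch above fills the
 * hardware indirection table round-robin with the absolute ids of the VI's
 * rx queues.  A userland model of that fill, assuming a 128-entry table and
 * made-up queue ids; when nrxq does not divide the table size evenly, some
 * queues get one extra slot, which is the "uneven traffic distribution" the
 * driver warns about.
 */
#if 0
#include <stdio.h>

int
main(void)
{
    const int rss_size = 128;        /* hw RSS table size for this VI */
    const unsigned short abs_id[] = { 12, 13, 14 };    /* 3 rx queues */
    const int nrxq = sizeof(abs_id) / sizeof(abs_id[0]);
    unsigned short rss[128];
    int i, j;

    for (i = 0; i < rss_size;) {
        /* same shape as the for_each_rxq() loop in vi_full_init */
        for (j = 0; j < nrxq && i < rss_size; j++)
            rss[i++] = abs_id[j];
    }

    /* 128 % 3 != 0, so queue 12 gets one more slot than queue 14 */
    printf("first entries: %u %u %u %u ... last entry: %u\n",
        rss[0], rss[1], rss[2], rss[3], rss[rss_size - 1]);
    return (0);
}
#endif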
/*
 * Idempotent.
 */
int
vi_full_uninit(struct vi_info *vi)
{
    struct port_info *pi = vi->pi;
    struct adapter *sc = pi->adapter;
    int i;
    struct sge_rxq *rxq;
    struct sge_txq *txq;
#ifdef TCP_OFFLOAD
    struct sge_ofld_rxq *ofld_rxq;
    struct sge_wrq *ofld_txq;
#endif

    if (vi->flags & VI_INIT_DONE) {

        /* Need to quiesce queues. */
#ifdef DEV_NETMAP
        if (vi->flags & VI_NETMAP)
            goto skip;
#endif

        /* XXX: Only for the first VI? */
        if (IS_MAIN_VI(vi))
            quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);

        for_each_txq(vi, i, txq) {
            quiesce_txq(sc, txq);
        }

#ifdef TCP_OFFLOAD
        for_each_ofld_txq(vi, i, ofld_txq) {
            quiesce_wrq(sc, ofld_txq);
        }
#endif

        for_each_rxq(vi, i, rxq) {
            quiesce_iq(sc, &rxq->iq);
            quiesce_fl(sc, &rxq->fl);
        }

#ifdef TCP_OFFLOAD
        for_each_ofld_rxq(vi, i, ofld_rxq) {
            quiesce_iq(sc, &ofld_rxq->iq);
            quiesce_fl(sc, &ofld_rxq->fl);
        }
#endif
        free(vi->rss, M_CXGBE);
    }
#ifdef DEV_NETMAP
skip:
#endif

    t4_teardown_vi_queues(vi);
    vi->flags &= ~VI_INIT_DONE;

    return (0);
}

static void
quiesce_txq(struct adapter *sc, struct sge_txq *txq)
{
    struct sge_eq *eq = &txq->eq;
    struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];

    (void) sc;    /* unused */

#ifdef INVARIANTS
    TXQ_LOCK(txq);
    MPASS((eq->flags & EQ_ENABLED) == 0);
    TXQ_UNLOCK(txq);
#endif

    /* Wait for the mp_ring to empty. */
    while (!mp_ring_is_idle(txq->r)) {
        mp_ring_check_drainage(txq->r, 0);
        pause("rquiesce", 1);
    }

    /* Then wait for the hardware to finish. */
    while (spg->cidx != htobe16(eq->pidx))
        pause("equiesce", 1);

    /* Finally, wait for the driver to reclaim all descriptors.
*/ 4266 while (eq->cidx != eq->pidx) 4267 pause("dquiesce", 1); 4268 } 4269 4270 static void 4271 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4272 { 4273 4274 /* XXXTX */ 4275 } 4276 4277 static void 4278 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4279 { 4280 (void) sc; /* unused */ 4281 4282 /* Synchronize with the interrupt handler */ 4283 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4284 pause("iqfree", 1); 4285 } 4286 4287 static void 4288 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4289 { 4290 mtx_lock(&sc->sfl_lock); 4291 FL_LOCK(fl); 4292 fl->flags |= FL_DOOMED; 4293 FL_UNLOCK(fl); 4294 callout_stop(&sc->sfl_callout); 4295 mtx_unlock(&sc->sfl_lock); 4296 4297 KASSERT((fl->flags & FL_STARVING) == 0, 4298 ("%s: still starving", __func__)); 4299 } 4300 4301 static int 4302 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4303 driver_intr_t *handler, void *arg, char *name) 4304 { 4305 int rc; 4306 4307 irq->rid = rid; 4308 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4309 RF_SHAREABLE | RF_ACTIVE); 4310 if (irq->res == NULL) { 4311 device_printf(sc->dev, 4312 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4313 return (ENOMEM); 4314 } 4315 4316 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4317 NULL, handler, arg, &irq->tag); 4318 if (rc != 0) { 4319 device_printf(sc->dev, 4320 "failed to setup interrupt for rid %d, name %s: %d\n", 4321 rid, name, rc); 4322 } else if (name) 4323 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 4324 4325 return (rc); 4326 } 4327 4328 static int 4329 t4_free_irq(struct adapter *sc, struct irq *irq) 4330 { 4331 if (irq->tag) 4332 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4333 if (irq->res) 4334 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4335 4336 bzero(irq, sizeof(*irq)); 4337 4338 return (0); 4339 } 4340 4341 static void 4342 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4343 { 4344 4345 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4346 t4_get_regs(sc, buf, regs->len); 4347 } 4348 4349 #define A_PL_INDIR_CMD 0x1f8 4350 4351 #define S_PL_AUTOINC 31 4352 #define M_PL_AUTOINC 0x1U 4353 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4354 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4355 4356 #define S_PL_VFID 20 4357 #define M_PL_VFID 0xffU 4358 #define V_PL_VFID(x) ((x) << S_PL_VFID) 4359 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4360 4361 #define S_PL_ADDR 0 4362 #define M_PL_ADDR 0xfffffU 4363 #define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4364 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4365 4366 #define A_PL_INDIR_DATA 0x1fc 4367 4368 static uint64_t 4369 read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4370 { 4371 u32 stats[2]; 4372 4373 mtx_assert(&sc->reg_lock, MA_OWNED); 4374 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4375 V_PL_VFID(G_FW_VIID_VIN(viid)) | V_PL_ADDR(VF_MPS_REG(reg))); 4376 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4377 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4378 return (((uint64_t)stats[1]) << 32 | stats[0]); 4379 } 4380 4381 static void 4382 t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4383 struct fw_vi_stats_vf *stats) 4384 { 4385 4386 #define GET_STAT(name) \ 4387 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4388 4389 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4390 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4391 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4392 
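/*
 * Illustrative sketch (not driver code): each VF stat copied here is a
 * 64-bit counter exposed as a low/high pair of 32-bit words through the
 * auto-incrementing A_PL_INDIR_CMD/A_PL_INDIR_DATA window, which is why
 * read_vf_stat() above reads A_PL_INDIR_DATA twice back to back.  Combining
 * the two words, with made-up register values:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    uint32_t lo = 0xdeadbeef;    /* first A_PL_INDIR_DATA read (..._L) */
    uint32_t hi = 0x00000001;    /* second read, auto-incremented (..._H) */
    uint64_t stat = (uint64_t)hi << 32 | lo;

    printf("stat = %#jx\n", (uintmax_t)stat);    /* 0x1deadbeef */
    return (0);
}
#endif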
stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4393 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4394 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4395 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4396 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4397 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4398 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4399 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4400 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4401 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4402 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4403 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4404 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4405 4406 #undef GET_STAT 4407 } 4408 4409 static void 4410 t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4411 { 4412 int reg; 4413 4414 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4415 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4416 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4417 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4418 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4419 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4420 } 4421 4422 static void 4423 vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4424 { 4425 struct timeval tv; 4426 const struct timeval interval = {0, 250000}; /* 250ms */ 4427 4428 if (!(vi->flags & VI_INIT_DONE)) 4429 return; 4430 4431 getmicrotime(&tv); 4432 timevalsub(&tv, &interval); 4433 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4434 return; 4435 4436 mtx_lock(&sc->reg_lock); 4437 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4438 getmicrotime(&vi->last_refreshed); 4439 mtx_unlock(&sc->reg_lock); 4440 } 4441 4442 static void 4443 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4444 { 4445 int i; 4446 u_int v, tnl_cong_drops; 4447 struct timeval tv; 4448 const struct timeval interval = {0, 250000}; /* 250ms */ 4449 4450 getmicrotime(&tv); 4451 timevalsub(&tv, &interval); 4452 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4453 return; 4454 4455 tnl_cong_drops = 0; 4456 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4457 for (i = 0; i < sc->chip_params->nchan; i++) { 4458 if (pi->rx_chan_map & (1 << i)) { 4459 mtx_lock(&sc->reg_lock); 4460 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4461 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4462 mtx_unlock(&sc->reg_lock); 4463 tnl_cong_drops += v; 4464 } 4465 } 4466 pi->tnl_cong_drops = tnl_cong_drops; 4467 getmicrotime(&pi->last_refreshed); 4468 } 4469 4470 static void 4471 cxgbe_tick(void *arg) 4472 { 4473 struct port_info *pi = arg; 4474 struct adapter *sc = pi->adapter; 4475 4476 PORT_LOCK_ASSERT_OWNED(pi); 4477 cxgbe_refresh_stats(sc, pi); 4478 4479 callout_schedule(&pi->tick, hz); 4480 } 4481 4482 void 4483 vi_tick(void *arg) 4484 { 4485 struct vi_info *vi = arg; 4486 struct adapter *sc = vi->pi->adapter; 4487 4488 vi_refresh_stats(sc, vi); 4489 4490 callout_schedule(&vi->tick, hz); 4491 } 4492 4493 static void 4494 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4495 { 4496 struct ifnet *vlan; 4497 4498 if (arg != ifp || ifp->if_type != IFT_ETHER) 4499 return; 4500 4501 vlan = VLAN_DEVAT(ifp, vid); 4502 VLAN_SETCOOKIE(vlan, ifp); 4503 } 4504 4505 static int 4506 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4507 { 4508 4509 #ifdef INVARIANTS 4510 panic("%s: opcode 0x%02x on iq %p with payload %p", 4511 __func__, rss->opcode, iq, m); 4512 #else 4513 log(LOG_ERR, "%s: opcode 
0x%02x on iq %p with payload %p\n",
        __func__, rss->opcode, iq, m);
    m_freem(m);
#endif
    return (EDOOFUS);
}

int
t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
{
    uintptr_t *loc, new;

    if (opcode >= nitems(sc->cpl_handler))
        return (EINVAL);

    new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
    loc = (uintptr_t *) &sc->cpl_handler[opcode];
    atomic_store_rel_ptr(loc, new);

    return (0);
}

static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
    panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
    log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
        __func__, iq, ctrl);
#endif
    return (EDOOFUS);
}

int
t4_register_an_handler(struct adapter *sc, an_handler_t h)
{
    uintptr_t *loc, new;

    new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
    loc = (uintptr_t *) &sc->an_handler;
    atomic_store_rel_ptr(loc, new);

    return (0);
}

static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
    const struct cpl_fw6_msg *cpl =
        __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
    panic("%s: fw_msg type %d", __func__, cpl->type);
#else
    log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
    return (EDOOFUS);
}

int
t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
{
    uintptr_t *loc, new;

    if (type >= nitems(sc->fw_msg_handler))
        return (EINVAL);

    /*
     * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
     * handler dispatch table.  Reject any attempt to install a handler for
     * this subtype.
     */
    if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
        return (EINVAL);

    new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
    loc = (uintptr_t *) &sc->fw_msg_handler[type];
    atomic_store_rel_ptr(loc, new);

    return (0);
}

/*
 * Should match fw_caps_config_<foo> enums in t4fw_interface.h
 */
static char *caps_decoder[] = {
    "\20\001IPMI\002NCSI",                /* 0: NBM */
    "\20\001PPP\002QFC\003DCBX",            /* 1: link */
    "\20\001INGRESS\002EGRESS",            /* 2: switch */
    "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL"    /* 3: NIC */
        "\006HASHFILTER\007ETHOFLD",
    "\20\001TOE",                    /* 4: TOE */
    "\20\001RDDP\002RDMAC",                /* 5: RDMA */
    "\20\001INITIATOR_PDU\002TARGET_PDU"        /* 6: iSCSI */
        "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
        "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
        "\007T10DIF"
        "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
    "\20\001KEYS",                    /* 7: TLS */
    "\20\001INITIATOR\002TARGET\003CTRL_OFLD"    /* 8: FCoE */
        "\004PO_INITIATOR\005PO_TARGET",
};
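/*
 * Illustrative sketch (not driver code): the caps_decoder[] strings above
 * use the kernel printf(9)/sbuf "%b" encoding: a leading output base (\20 =
 * hex) followed by <bit number, name> pairs, where \001 means bit 0.  A
 * simplified userland decoder for the same encoding (the kernel's output
 * formatting differs slightly; this only demonstrates the string layout):
 */
#if 0
#include <stdio.h>

static void
decode_bits(unsigned val, const char *fmt)
{
    int bit;

    printf("%#x", val);        /* fmt[0] == 020 selects hex */
    for (fmt++; *fmt != '\0';) {
        bit = *fmt++;        /* \001..\040 encode bits 0..31 */
        if (val & (1u << (bit - 1))) {
            printf("<");
            while (*fmt > ' ')    /* name runs until next bit byte */
                putchar(*fmt++);
            printf(">");
        } else {
            while (*fmt > ' ')
                fmt++;
        }
    }
    putchar('\n');
}

int
main(void)
{
    /* same encoding as caps_decoder[0]; prints 0x3<IPMI><NCSI> */
    decode_bits(3, "\20\001IPMI\002NCSI");
    return (0);
}
#endif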
static void
t4_sysctls(struct adapter *sc)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid *oid;
    struct sysctl_oid_list *children, *c0;
    static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};

    ctx = device_get_sysctl_ctx(sc->dev);

    /*
     * dev.t4nex.X.
     */
    oid = device_get_sysctl_tree(sc->dev);
    c0 = children = SYSCTL_CHILDREN(oid);

    sc->sc_do_rxcopy = 1;
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
        &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");

    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
        sc->params.nports, "# of ports");

    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
        NULL, chip_rev(sc), "chip hardware revision");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
        CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");

    if (sc->params.exprom_vers != 0) {
        SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "exprom_version",
            CTLFLAG_RD, sc->exprom_version, 0, "expansion ROM version");
    }

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
        CTLFLAG_RD, sc->fw_version, 0, "firmware version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
        CTLFLAG_RD, sc->cfg_file, 0, "configuration file");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
        sc->cfcsum, "config file checksum");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
        CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
        sysctl_bitfield, "A", "available doorbells");

#define SYSCTL_CAP(name, n, text) \
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
        CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \
        sysctl_bitfield, "A", "available " text " capabilities")

    SYSCTL_CAP(nbmcaps, 0, "NBM");
    SYSCTL_CAP(linkcaps, 1, "link");
    SYSCTL_CAP(switchcaps, 2, "switch");
    SYSCTL_CAP(niccaps, 3, "NIC");
    SYSCTL_CAP(toecaps, 4, "TCP offload");
    SYSCTL_CAP(rdmacaps, 5, "RDMA");
    SYSCTL_CAP(iscsicaps, 6, "iSCSI");
    SYSCTL_CAP(tlscaps, 7, "TLS");
    SYSCTL_CAP(fcoecaps, 8, "FCoE");
#undef SYSCTL_CAP

    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
        sc->params.vpd.cclk, "core clock frequency (in kHz)");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
        CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
        sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
        "interrupt holdoff timer values (us)");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
        CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
        sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
        "interrupt holdoff packet counter values");

    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
        NULL, sc->tids.nftids, "number of filters");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
        CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
        "chip temperature (in Celsius)");

    t4_sge_sysctls(sc, ctx, children);

    sc->lro_timeout = 100;
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
        &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");

    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_flags", CTLFLAG_RW,
        &sc->debug_flags, 0, "flags to enable runtime debugging");

#ifdef SBUF_DRAIN
    /*
     * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4713 */ 4714 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4715 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4716 "logs and miscellaneous information"); 4717 children = SYSCTL_CHILDREN(oid); 4718 4719 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4720 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4721 sysctl_cctrl, "A", "congestion control"); 4722 4723 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4724 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4725 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4726 4727 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4728 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4729 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4730 4731 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4732 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4733 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4734 4735 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4736 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4737 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4738 4739 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4740 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4741 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4742 4743 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4744 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4745 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4746 4747 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4748 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4749 chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6, 4750 "A", "CIM logic analyzer"); 4751 4752 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4753 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4754 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4755 4756 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4757 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4758 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4759 4760 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4761 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4762 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4763 4764 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4765 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4766 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4767 4768 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4769 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4770 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4771 4772 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4773 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4774 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4775 4776 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4777 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4778 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 4779 4780 if (chip_id(sc) > CHELSIO_T4) { 4781 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4782 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4783 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4784 4785 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4786 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4787 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4788 } 4789 4790 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4791 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4792 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4793 4794 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4795 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4796 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4797 4798 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4799 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4800 sysctl_cpl_stats, "A", "CPL statistics"); 4801 
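/*
 * Illustrative sketch (not driver code): every one of these "misc" string
 * sysctls follows the same shape: wire the old buffer, build the text in an
 * sbuf that drains to the sysctl request, then finish and delete.  A
 * hypothetical minimal handler in that style (sysctl_example is not a real
 * function in this driver):
 */
#if 0
static int
sysctl_example(SYSCTL_HANDLER_ARGS)
{
    struct adapter *sc = arg1;
    struct sbuf *sb;
    int rc;

    rc = sysctl_wire_old_buffer(req, 0);    /* so the sbuf can drain safely */
    if (rc != 0)
        return (rc);

    sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
    if (sb == NULL)
        return (ENOMEM);

    sbuf_printf(sb, "adapter has %d port(s)", sc->params.nports);

    rc = sbuf_finish(sb);    /* pushes the text out via req */
    sbuf_delete(sb);

    return (rc);
}
#endif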
4802 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4803 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4804 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4805 4806 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4807 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4808 sysctl_devlog, "A", "firmware's device log"); 4809 4810 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4811 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4812 sysctl_fcoe_stats, "A", "FCoE statistics"); 4813 4814 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4815 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4816 sysctl_hw_sched, "A", "hardware scheduler "); 4817 4818 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4819 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4820 sysctl_l2t, "A", "hardware L2 table"); 4821 4822 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4823 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4824 sysctl_lb_stats, "A", "loopback statistics"); 4825 4826 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4827 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4828 sysctl_meminfo, "A", "memory regions"); 4829 4830 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4831 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4832 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6, 4833 "A", "MPS TCAM entries"); 4834 4835 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4836 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4837 sysctl_path_mtus, "A", "path MTUs"); 4838 4839 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4840 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4841 sysctl_pm_stats, "A", "PM statistics"); 4842 4843 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4844 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4845 sysctl_rdma_stats, "A", "RDMA statistics"); 4846 4847 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4848 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4849 sysctl_tcp_stats, "A", "TCP statistics"); 4850 4851 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4852 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4853 sysctl_tids, "A", "TID information"); 4854 4855 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4856 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4857 sysctl_tp_err_stats, "A", "TP error statistics"); 4858 4859 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 4860 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 4861 "TP logic analyzer event capture mask"); 4862 4863 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4864 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4865 sysctl_tp_la, "A", "TP logic analyzer"); 4866 4867 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4868 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4869 sysctl_tx_rate, "A", "Tx rate"); 4870 4871 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4872 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4873 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4874 4875 if (is_t5(sc)) { 4876 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4877 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4878 sysctl_wcwr_stats, "A", "write combined work requests"); 4879 } 4880 #endif 4881 4882 #ifdef TCP_OFFLOAD 4883 if (is_offload(sc)) { 4884 /* 4885 * dev.t4nex.X.toe. 
 */
        oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
            NULL, "TOE parameters");
        children = SYSCTL_CHILDREN(oid);

        sc->tt.sndbuf = 256 * 1024;
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
            &sc->tt.sndbuf, 0, "max hardware send buffer size");

        sc->tt.ddp = 0;
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
            &sc->tt.ddp, 0, "DDP allowed");

        sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
            &sc->tt.indsz, 0, "DDP max indicate size allowed");

        sc->tt.ddp_thres =
            G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
            &sc->tt.ddp_thres, 0, "DDP threshold");

        sc->tt.rx_coalesce = 1;
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
            CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");

        sc->tt.tx_align = 1;
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
            CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
            "TP timer tick (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
            "TCP timestamp tick (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
            CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
            "DACK tick (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
            CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
            "IU", "DACK timer (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
            CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
            sysctl_tp_timer, "LU", "Retransmit min (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
            CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
            sysctl_tp_timer, "LU", "Retransmit max (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
            CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
            sysctl_tp_timer, "LU", "Persist timer min (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
            CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
            sysctl_tp_timer, "LU", "Persist timer max (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
            CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
            sysctl_tp_timer, "LU", "Keepalive idle timer (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl",
            CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
            sysctl_tp_timer, "LU", "Keepalive interval (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
            CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
            sysctl_tp_timer, "LU", "Initial SRTT (us)");

        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
            CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
            sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
    }
#endif
}
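/*
 * Illustrative sketch (not driver code): the nodes registered above are
 * ordinary sysctls, so a userland tool can read them with sysctlbyname(3).
 * The node name below assumes unit 0 of a t4nex adapter with TOE support
 * compiled in.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
    int sndbuf;
    size_t len = sizeof(sndbuf);

    if (sysctlbyname("dev.t4nex.0.toe.sndbuf", &sndbuf, &len,
        NULL, 0) == -1) {
        perror("sysctlbyname");
        return (1);
    }
    printf("TOE sndbuf: %d bytes\n", sndbuf);
    return (0);
}
#endif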
void
vi_sysctls(struct vi_info *vi)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid *oid;
    struct sysctl_oid_list *children;

    ctx = device_get_sysctl_ctx(vi->dev);

    /*
     * dev.[nv](cxgbe|cxl).X.
     */
    oid = device_get_sysctl_tree(vi->dev);
    children = SYSCTL_CHILDREN(oid);

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
        vi->viid, "VI identifier");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
        &vi->nrxq, 0, "# of rx queues");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
        &vi->ntxq, 0, "# of tx queues");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
        &vi->first_rxq, 0, "index of first rx queue");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
        &vi->first_txq, 0, "index of first tx queue");

    if (vi->flags & VI_NETMAP)
        return;

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
        CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
        "Reserve queue 0 for non-flowid packets");

#ifdef TCP_OFFLOAD
    if (vi->nofldrxq != 0) {
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
            &vi->nofldrxq, 0,
            "# of rx queues for offloaded TCP connections");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
            &vi->nofldtxq, 0,
            "# of tx queues for offloaded TCP connections");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
            CTLFLAG_RD, &vi->first_ofld_rxq, 0,
            "index of first TOE rx queue");
        SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
            CTLFLAG_RD, &vi->first_ofld_txq, 0,
            "index of first TOE tx queue");
    }
#endif

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
        CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
        "holdoff timer index");
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
        CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
        "holdoff packet counter index");

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
        CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
        "rx queue size");
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
        CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
        "tx queue size");
}

static void
cxgbe_sysctls(struct port_info *pi)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid *oid;
    struct sysctl_oid_list *children;
    struct adapter *sc = pi->adapter;

    ctx = device_get_sysctl_ctx(pi->dev);

    /*
     * dev.cxgbe.X.
     */
    oid = device_get_sysctl_tree(pi->dev);
    children = SYSCTL_CHILDREN(oid);

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
        CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
    if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
            CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
            "PHY temperature (in Celsius)");
        SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
            CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
            "PHY firmware version");
    }

    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
        CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings,
        "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");

    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
        port_top_speed(pi), "max speed (in Gbps)");

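/*
 * Illustrative sketch (not driver code): pause_settings is a string sysctl
 * that accepts a single digit 0-3 (bit 0 = rx_pause, bit 1 = tx_pause).
 * Enabling both directions from userland, assuming port unit 0 of a cxgbe
 * interface:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *on = "3";    /* PAUSE_RX | PAUSE_TX */

    if (sysctlbyname("dev.cxgbe.0.pause_settings", NULL, NULL,
        on, strlen(on)) == -1) {
        perror("sysctlbyname");
        return (1);
    }
    return (0);
}
#endif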
    /*
     * dev.cxgbe.X.stats.
     */
    oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
        NULL, "port statistics");
    children = SYSCTL_CHILDREN(oid);
    SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
        &pi->tx_parse_error, 0,
        "# of tx packets with invalid length or number of segments");

#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
    SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
        CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
        sysctl_handle_t4_reg64, "QU", desc)

    SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
        "# of tx frames in this range",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
        "# of tx frames in this range",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
        "# of tx frames in this range",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
        "# of tx frames in this range",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
        "# of tx frames in this range",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
        "# of tx frames in this range",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
        "# of tx frames in this range",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
    SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames
transmitted", 5131 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5132 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5133 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5134 5135 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5136 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5137 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5138 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5139 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5140 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5141 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5142 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5143 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5144 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5145 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5146 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5147 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5148 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5149 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5150 "# of frames received with bad FCS", 5151 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5152 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5153 "# of frames received with length error", 5154 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5155 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5156 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5157 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5158 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5159 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5160 "# of rx frames in this range", 5161 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5162 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5163 "# of rx frames in this range", 5164 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5165 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5166 "# of rx frames in this range", 5167 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5168 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5169 "# of rx frames in this range", 5170 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5171 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5172 "# of rx frames in this range", 5173 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5174 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5175 "# of rx frames in this range", 5176 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5177 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5178 "# of rx frames in this range", 5179 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5180 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5181 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5182 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5183 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5184 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5185 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5186 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5187 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5188 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5189 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5190 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5191 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
    SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
    SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
    SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
        PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
    SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
        &pi->stats.name, desc)

    /* We get these from port_stats and they may be stale by up to 1s */
    SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
        "# drops due to buffer-group 0 overflows");
    SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
        "# drops due to buffer-group 1 overflows");
    SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
        "# drops due to buffer-group 2 overflows");
    SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
        "# drops due to buffer-group 3 overflows");
    SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
        "# of buffer-group 0 truncated packets");
    SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
        "# of buffer-group 1 truncated packets");
    SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
        "# of buffer-group 2 truncated packets");
    SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
        "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT
}

static int
sysctl_int_array(SYSCTL_HANDLER_ARGS)
{
    int rc, *i, space = 0;
    struct sbuf sb;

    sbuf_new_for_sysctl(&sb, NULL, 64, req);
    for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
        if (space)
            sbuf_printf(&sb, " ");
        sbuf_printf(&sb, "%d", *i);
        space = 1;
    }
    rc = sbuf_finish(&sb);
    sbuf_delete(&sb);
    return (rc);
}

static int
sysctl_bitfield(SYSCTL_HANDLER_ARGS)
{
    int rc;
    struct sbuf *sb;

    rc = sysctl_wire_old_buffer(req, 0);
    if (rc != 0)
        return (rc);

    sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
    if (sb == NULL)
        return (ENOMEM);

    sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
    rc = sbuf_finish(sb);
    sbuf_delete(sb);

    return (rc);
}

static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
    struct port_info *pi = arg1;
    int op = arg2;
    struct adapter *sc = pi->adapter;
    u_int v;
    int rc;

    rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
    if (rc)
        return (rc);
    /* XXX: magic numbers */
    rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ?
0x20 : 0xc820, 5279 &v); 5280 end_synchronized_op(sc, 0); 5281 if (rc) 5282 return (rc); 5283 if (op == 0) 5284 v /= 256; 5285 5286 rc = sysctl_handle_int(oidp, &v, 0, req); 5287 return (rc); 5288 } 5289 5290 static int 5291 sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5292 { 5293 struct vi_info *vi = arg1; 5294 int rc, val; 5295 5296 val = vi->rsrv_noflowq; 5297 rc = sysctl_handle_int(oidp, &val, 0, req); 5298 if (rc != 0 || req->newptr == NULL) 5299 return (rc); 5300 5301 if ((val >= 1) && (vi->ntxq > 1)) 5302 vi->rsrv_noflowq = 1; 5303 else 5304 vi->rsrv_noflowq = 0; 5305 5306 return (rc); 5307 } 5308 5309 static int 5310 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5311 { 5312 struct vi_info *vi = arg1; 5313 struct adapter *sc = vi->pi->adapter; 5314 int idx, rc, i; 5315 struct sge_rxq *rxq; 5316 #ifdef TCP_OFFLOAD 5317 struct sge_ofld_rxq *ofld_rxq; 5318 #endif 5319 uint8_t v; 5320 5321 idx = vi->tmr_idx; 5322 5323 rc = sysctl_handle_int(oidp, &idx, 0, req); 5324 if (rc != 0 || req->newptr == NULL) 5325 return (rc); 5326 5327 if (idx < 0 || idx >= SGE_NTIMERS) 5328 return (EINVAL); 5329 5330 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5331 "t4tmr"); 5332 if (rc) 5333 return (rc); 5334 5335 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5336 for_each_rxq(vi, i, rxq) { 5337 #ifdef atomic_store_rel_8 5338 atomic_store_rel_8(&rxq->iq.intr_params, v); 5339 #else 5340 rxq->iq.intr_params = v; 5341 #endif 5342 } 5343 #ifdef TCP_OFFLOAD 5344 for_each_ofld_rxq(vi, i, ofld_rxq) { 5345 #ifdef atomic_store_rel_8 5346 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5347 #else 5348 ofld_rxq->iq.intr_params = v; 5349 #endif 5350 } 5351 #endif 5352 vi->tmr_idx = idx; 5353 5354 end_synchronized_op(sc, LOCK_HELD); 5355 return (0); 5356 } 5357 5358 static int 5359 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5360 { 5361 struct vi_info *vi = arg1; 5362 struct adapter *sc = vi->pi->adapter; 5363 int idx, rc; 5364 5365 idx = vi->pktc_idx; 5366 5367 rc = sysctl_handle_int(oidp, &idx, 0, req); 5368 if (rc != 0 || req->newptr == NULL) 5369 return (rc); 5370 5371 if (idx < -1 || idx >= SGE_NCOUNTERS) 5372 return (EINVAL); 5373 5374 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5375 "t4pktc"); 5376 if (rc) 5377 return (rc); 5378 5379 if (vi->flags & VI_INIT_DONE) 5380 rc = EBUSY; /* cannot be changed once the queues are created */ 5381 else 5382 vi->pktc_idx = idx; 5383 5384 end_synchronized_op(sc, LOCK_HELD); 5385 return (rc); 5386 } 5387 5388 static int 5389 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5390 { 5391 struct vi_info *vi = arg1; 5392 struct adapter *sc = vi->pi->adapter; 5393 int qsize, rc; 5394 5395 qsize = vi->qsize_rxq; 5396 5397 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5398 if (rc != 0 || req->newptr == NULL) 5399 return (rc); 5400 5401 if (qsize < 128 || (qsize & 7)) 5402 return (EINVAL); 5403 5404 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5405 "t4rxqs"); 5406 if (rc) 5407 return (rc); 5408 5409 if (vi->flags & VI_INIT_DONE) 5410 rc = EBUSY; /* cannot be changed once the queues are created */ 5411 else 5412 vi->qsize_rxq = qsize; 5413 5414 end_synchronized_op(sc, LOCK_HELD); 5415 return (rc); 5416 } 5417 5418 static int 5419 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5420 { 5421 struct vi_info *vi = arg1; 5422 struct adapter *sc = vi->pi->adapter; 5423 int qsize, rc; 5424 5425 qsize = vi->qsize_txq; 5426 5427 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5428 if (rc != 0 || req->newptr == NULL) 5429 return (rc); 5430 5431 
if (qsize < 128 || qsize > 65536) 5432 return (EINVAL); 5433 5434 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5435 "t4txqs"); 5436 if (rc) 5437 return (rc); 5438 5439 if (vi->flags & VI_INIT_DONE) 5440 rc = EBUSY; /* cannot be changed once the queues are created */ 5441 else 5442 vi->qsize_txq = qsize; 5443 5444 end_synchronized_op(sc, LOCK_HELD); 5445 return (rc); 5446 } 5447 5448 static int 5449 sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5450 { 5451 struct port_info *pi = arg1; 5452 struct adapter *sc = pi->adapter; 5453 struct link_config *lc = &pi->link_cfg; 5454 int rc; 5455 5456 if (req->newptr == NULL) { 5457 struct sbuf *sb; 5458 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5459 5460 rc = sysctl_wire_old_buffer(req, 0); 5461 if (rc != 0) 5462 return(rc); 5463 5464 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5465 if (sb == NULL) 5466 return (ENOMEM); 5467 5468 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5469 rc = sbuf_finish(sb); 5470 sbuf_delete(sb); 5471 } else { 5472 char s[2]; 5473 int n; 5474 5475 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5476 s[1] = 0; 5477 5478 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5479 if (rc != 0) 5480 return(rc); 5481 5482 if (s[1] != 0) 5483 return (EINVAL); 5484 if (s[0] < '0' || s[0] > '9') 5485 return (EINVAL); /* not a number */ 5486 n = s[0] - '0'; 5487 if (n & ~(PAUSE_TX | PAUSE_RX)) 5488 return (EINVAL); /* some other bit is set too */ 5489 5490 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5491 "t4PAUSE"); 5492 if (rc) 5493 return (rc); 5494 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5495 int link_ok = lc->link_ok; 5496 5497 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5498 lc->requested_fc |= n; 5499 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5500 lc->link_ok = link_ok; /* restore */ 5501 } 5502 end_synchronized_op(sc, 0); 5503 } 5504 5505 return (rc); 5506 } 5507 5508 static int 5509 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5510 { 5511 struct adapter *sc = arg1; 5512 int reg = arg2; 5513 uint64_t val; 5514 5515 val = t4_read_reg64(sc, reg); 5516 5517 return (sysctl_handle_64(oidp, &val, 0, req)); 5518 } 5519 5520 static int 5521 sysctl_temperature(SYSCTL_HANDLER_ARGS) 5522 { 5523 struct adapter *sc = arg1; 5524 int rc, t; 5525 uint32_t param, val; 5526 5527 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5528 if (rc) 5529 return (rc); 5530 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5531 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5532 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5533 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5534 end_synchronized_op(sc, 0); 5535 if (rc) 5536 return (rc); 5537 5538 /* unknown is returned as 0 but we display -1 in that case */ 5539 t = val == 0 ? 
-1 : val; 5540 5541 rc = sysctl_handle_int(oidp, &t, 0, req); 5542 return (rc); 5543 } 5544 5545 #ifdef SBUF_DRAIN 5546 static int 5547 sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5548 { 5549 struct adapter *sc = arg1; 5550 struct sbuf *sb; 5551 int rc, i; 5552 uint16_t incr[NMTUS][NCCTRL_WIN]; 5553 static const char *dec_fac[] = { 5554 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5555 "0.9375" 5556 }; 5557 5558 rc = sysctl_wire_old_buffer(req, 0); 5559 if (rc != 0) 5560 return (rc); 5561 5562 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5563 if (sb == NULL) 5564 return (ENOMEM); 5565 5566 t4_read_cong_tbl(sc, incr); 5567 5568 for (i = 0; i < NCCTRL_WIN; ++i) { 5569 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5570 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5571 incr[5][i], incr[6][i], incr[7][i]); 5572 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5573 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5574 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5575 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5576 } 5577 5578 rc = sbuf_finish(sb); 5579 sbuf_delete(sb); 5580 5581 return (rc); 5582 } 5583 5584 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5585 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5586 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5587 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5588 }; 5589 5590 static int 5591 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5592 { 5593 struct adapter *sc = arg1; 5594 struct sbuf *sb; 5595 int rc, i, n, qid = arg2; 5596 uint32_t *buf, *p; 5597 char *qtype; 5598 u_int cim_num_obq = sc->chip_params->cim_num_obq; 5599 5600 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5601 ("%s: bad qid %d\n", __func__, qid)); 5602 5603 if (qid < CIM_NUM_IBQ) { 5604 /* inbound queue */ 5605 qtype = "IBQ"; 5606 n = 4 * CIM_IBQ_SIZE; 5607 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5608 rc = t4_read_cim_ibq(sc, qid, buf, n); 5609 } else { 5610 /* outbound queue */ 5611 qtype = "OBQ"; 5612 qid -= CIM_NUM_IBQ; 5613 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5614 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5615 rc = t4_read_cim_obq(sc, qid, buf, n); 5616 } 5617 5618 if (rc < 0) { 5619 rc = -rc; 5620 goto done; 5621 } 5622 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5623 5624 rc = sysctl_wire_old_buffer(req, 0); 5625 if (rc != 0) 5626 goto done; 5627 5628 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5629 if (sb == NULL) { 5630 rc = ENOMEM; 5631 goto done; 5632 } 5633 5634 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 5635 for (i = 0, p = buf; i < n; i += 16, p += 4) 5636 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5637 p[2], p[3]); 5638 5639 rc = sbuf_finish(sb); 5640 sbuf_delete(sb); 5641 done: 5642 free(buf, M_CXGBE); 5643 return (rc); 5644 } 5645 5646 static int 5647 sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5648 { 5649 struct adapter *sc = arg1; 5650 u_int cfg; 5651 struct sbuf *sb; 5652 uint32_t *buf, *p; 5653 int rc; 5654 5655 MPASS(chip_id(sc) <= CHELSIO_T5); 5656 5657 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5658 if (rc != 0) 5659 return (rc); 5660 5661 rc = sysctl_wire_old_buffer(req, 0); 5662 if (rc != 0) 5663 return (rc); 5664 5665 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5666 if (sb == NULL) 5667 return (ENOMEM); 5668 5669 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5670 M_ZERO | M_WAITOK); 5671 5672 rc = 
-t4_cim_read_la(sc, buf, NULL); 5673 if (rc != 0) 5674 goto done; 5675 5676 sbuf_printf(sb, "Status Data PC%s", 5677 cfg & F_UPDBGLACAPTPCONLY ? "" : 5678 " LS0Stat LS0Addr LS0Data"); 5679 5680 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 5681 if (cfg & F_UPDBGLACAPTPCONLY) { 5682 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5683 p[6], p[7]); 5684 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5685 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5686 p[4] & 0xff, p[5] >> 8); 5687 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5688 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5689 p[1] & 0xf, p[2] >> 4); 5690 } else { 5691 sbuf_printf(sb, 5692 "\n %02x %x%07x %x%07x %08x %08x " 5693 "%08x%08x%08x%08x", 5694 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5695 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5696 p[6], p[7]); 5697 } 5698 } 5699 5700 rc = sbuf_finish(sb); 5701 sbuf_delete(sb); 5702 done: 5703 free(buf, M_CXGBE); 5704 return (rc); 5705 } 5706 5707 static int 5708 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 5709 { 5710 struct adapter *sc = arg1; 5711 u_int cfg; 5712 struct sbuf *sb; 5713 uint32_t *buf, *p; 5714 int rc; 5715 5716 MPASS(chip_id(sc) > CHELSIO_T5); 5717 5718 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5719 if (rc != 0) 5720 return (rc); 5721 5722 rc = sysctl_wire_old_buffer(req, 0); 5723 if (rc != 0) 5724 return (rc); 5725 5726 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5727 if (sb == NULL) 5728 return (ENOMEM); 5729 5730 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5731 M_ZERO | M_WAITOK); 5732 5733 rc = -t4_cim_read_la(sc, buf, NULL); 5734 if (rc != 0) 5735 goto done; 5736 5737 sbuf_printf(sb, "Status Inst Data PC%s", 5738 cfg & F_UPDBGLACAPTPCONLY ? "" : 5739 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 5740 5741 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 5742 if (cfg & F_UPDBGLACAPTPCONLY) { 5743 sbuf_printf(sb, "\n %02x %08x %08x %08x", 5744 p[3] & 0xff, p[2], p[1], p[0]); 5745 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 5746 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 5747 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 5748 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 5749 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 5750 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 5751 p[6] >> 16); 5752 } else { 5753 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 5754 "%08x %08x %08x %08x %08x %08x", 5755 (p[9] >> 16) & 0xff, 5756 p[9] & 0xffff, p[8] >> 16, 5757 p[8] & 0xffff, p[7] >> 16, 5758 p[7] & 0xffff, p[6] >> 16, 5759 p[2], p[1], p[0], p[5], p[4], p[3]); 5760 } 5761 } 5762 5763 rc = sbuf_finish(sb); 5764 sbuf_delete(sb); 5765 done: 5766 free(buf, M_CXGBE); 5767 return (rc); 5768 } 5769 5770 static int 5771 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 5772 { 5773 struct adapter *sc = arg1; 5774 u_int i; 5775 struct sbuf *sb; 5776 uint32_t *buf, *p; 5777 int rc; 5778 5779 rc = sysctl_wire_old_buffer(req, 0); 5780 if (rc != 0) 5781 return (rc); 5782 5783 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5784 if (sb == NULL) 5785 return (ENOMEM); 5786 5787 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 5788 M_ZERO | M_WAITOK); 5789 5790 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 5791 p = buf; 5792 5793 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5794 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 5795 p[1], p[0]); 5796 } 5797 5798 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 5799 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5800 
sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 5801 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 5802 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 5803 (p[1] >> 2) | ((p[2] & 3) << 30), 5804 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 5805 p[0] & 1); 5806 } 5807 5808 rc = sbuf_finish(sb); 5809 sbuf_delete(sb); 5810 free(buf, M_CXGBE); 5811 return (rc); 5812 } 5813 5814 static int 5815 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 5816 { 5817 struct adapter *sc = arg1; 5818 u_int i; 5819 struct sbuf *sb; 5820 uint32_t *buf, *p; 5821 int rc; 5822 5823 rc = sysctl_wire_old_buffer(req, 0); 5824 if (rc != 0) 5825 return (rc); 5826 5827 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5828 if (sb == NULL) 5829 return (ENOMEM); 5830 5831 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 5832 M_ZERO | M_WAITOK); 5833 5834 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 5835 p = buf; 5836 5837 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 5838 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5839 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 5840 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 5841 p[4], p[3], p[2], p[1], p[0]); 5842 } 5843 5844 sbuf_printf(sb, "\n\nCntl ID Data"); 5845 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5846 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 5847 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 5848 } 5849 5850 rc = sbuf_finish(sb); 5851 sbuf_delete(sb); 5852 free(buf, M_CXGBE); 5853 return (rc); 5854 } 5855 5856 static int 5857 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 5858 { 5859 struct adapter *sc = arg1; 5860 struct sbuf *sb; 5861 int rc, i; 5862 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5863 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5864 uint16_t thres[CIM_NUM_IBQ]; 5865 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 5866 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 5867 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 5868 5869 cim_num_obq = sc->chip_params->cim_num_obq; 5870 if (is_t4(sc)) { 5871 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 5872 obq_rdaddr = A_UP_OBQ_0_REALADDR; 5873 } else { 5874 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 5875 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 5876 } 5877 nq = CIM_NUM_IBQ + cim_num_obq; 5878 5879 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 5880 if (rc == 0) 5881 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 5882 if (rc != 0) 5883 return (rc); 5884 5885 t4_read_cimq_cfg(sc, base, size, thres); 5886 5887 rc = sysctl_wire_old_buffer(req, 0); 5888 if (rc != 0) 5889 return (rc); 5890 5891 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5892 if (sb == NULL) 5893 return (ENOMEM); 5894 5895 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 5896 5897 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 5898 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 5899 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 5900 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5901 G_QUEREMFLITS(p[2]) * 16); 5902 for ( ; i < nq; i++, p += 4, wr += 2) 5903 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 5904 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 5905 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5906 G_QUEREMFLITS(p[2]) * 16); 5907 5908 rc = sbuf_finish(sb); 5909 sbuf_delete(sb); 5910 5911 return (rc); 5912 } 5913 5914 static int 5915 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 5916 { 5917 struct adapter *sc = arg1; 5918 struct sbuf *sb; 5919 int rc; 5920 struct tp_cpl_stats stats; 5921 5922 rc = 
sysctl_wire_old_buffer(req, 0); 5923 if (rc != 0) 5924 return (rc); 5925 5926 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5927 if (sb == NULL) 5928 return (ENOMEM); 5929 5930 mtx_lock(&sc->reg_lock); 5931 t4_tp_get_cpl_stats(sc, &stats); 5932 mtx_unlock(&sc->reg_lock); 5933 5934 if (sc->chip_params->nchan > 2) { 5935 sbuf_printf(sb, " channel 0 channel 1" 5936 " channel 2 channel 3"); 5937 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 5938 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 5939 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 5940 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 5941 } else { 5942 sbuf_printf(sb, " channel 0 channel 1"); 5943 sbuf_printf(sb, "\nCPL requests: %10u %10u", 5944 stats.req[0], stats.req[1]); 5945 sbuf_printf(sb, "\nCPL responses: %10u %10u", 5946 stats.rsp[0], stats.rsp[1]); 5947 } 5948 5949 rc = sbuf_finish(sb); 5950 sbuf_delete(sb); 5951 5952 return (rc); 5953 } 5954 5955 static int 5956 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 5957 { 5958 struct adapter *sc = arg1; 5959 struct sbuf *sb; 5960 int rc; 5961 struct tp_usm_stats stats; 5962 5963 rc = sysctl_wire_old_buffer(req, 0); 5964 if (rc != 0) 5965 return(rc); 5966 5967 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5968 if (sb == NULL) 5969 return (ENOMEM); 5970 5971 t4_get_usm_stats(sc, &stats); 5972 5973 sbuf_printf(sb, "Frames: %u\n", stats.frames); 5974 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 5975 sbuf_printf(sb, "Drops: %u", stats.drops); 5976 5977 rc = sbuf_finish(sb); 5978 sbuf_delete(sb); 5979 5980 return (rc); 5981 } 5982 5983 static const char * const devlog_level_strings[] = { 5984 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 5985 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 5986 [FW_DEVLOG_LEVEL_ERR] = "ERR", 5987 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 5988 [FW_DEVLOG_LEVEL_INFO] = "INFO", 5989 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 5990 }; 5991 5992 static const char * const devlog_facility_strings[] = { 5993 [FW_DEVLOG_FACILITY_CORE] = "CORE", 5994 [FW_DEVLOG_FACILITY_CF] = "CF", 5995 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 5996 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 5997 [FW_DEVLOG_FACILITY_RES] = "RES", 5998 [FW_DEVLOG_FACILITY_HW] = "HW", 5999 [FW_DEVLOG_FACILITY_FLR] = "FLR", 6000 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 6001 [FW_DEVLOG_FACILITY_PHY] = "PHY", 6002 [FW_DEVLOG_FACILITY_MAC] = "MAC", 6003 [FW_DEVLOG_FACILITY_PORT] = "PORT", 6004 [FW_DEVLOG_FACILITY_VI] = "VI", 6005 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 6006 [FW_DEVLOG_FACILITY_ACL] = "ACL", 6007 [FW_DEVLOG_FACILITY_TM] = "TM", 6008 [FW_DEVLOG_FACILITY_QFC] = "QFC", 6009 [FW_DEVLOG_FACILITY_DCB] = "DCB", 6010 [FW_DEVLOG_FACILITY_ETH] = "ETH", 6011 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 6012 [FW_DEVLOG_FACILITY_RI] = "RI", 6013 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 6014 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 6015 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 6016 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 6017 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 6018 }; 6019 6020 static int 6021 sysctl_devlog(SYSCTL_HANDLER_ARGS) 6022 { 6023 struct adapter *sc = arg1; 6024 struct devlog_params *dparams = &sc->params.devlog; 6025 struct fw_devlog_e *buf, *e; 6026 int i, j, rc, nentries, first = 0; 6027 struct sbuf *sb; 6028 uint64_t ftstamp = UINT64_MAX; 6029 6030 if (dparams->addr == 0) 6031 return (ENXIO); 6032 6033 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 6034 if (buf == NULL) 6035 return (ENOMEM); 6036 6037 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 6038 if (rc != 0) 6039 goto done; 6040 6041 
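/* The firmware maintains its devlog as a circular buffer of fixed-size entries. Convert each used entry to host byte order and track the smallest timestamp seen; that entry is the oldest record and the point to start printing from. */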
nentries = dparams->size / sizeof(struct fw_devlog_e); 6042 for (i = 0; i < nentries; i++) { 6043 e = &buf[i]; 6044 6045 if (e->timestamp == 0) 6046 break; /* end */ 6047 6048 e->timestamp = be64toh(e->timestamp); 6049 e->seqno = be32toh(e->seqno); 6050 for (j = 0; j < 8; j++) 6051 e->params[j] = be32toh(e->params[j]); 6052 6053 if (e->timestamp < ftstamp) { 6054 ftstamp = e->timestamp; 6055 first = i; 6056 } 6057 } 6058 6059 if (buf[first].timestamp == 0) 6060 goto done; /* nothing in the log */ 6061 6062 rc = sysctl_wire_old_buffer(req, 0); 6063 if (rc != 0) 6064 goto done; 6065 6066 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6067 if (sb == NULL) { 6068 rc = ENOMEM; 6069 goto done; 6070 } 6071 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 6072 "Seq#", "Tstamp", "Level", "Facility", "Message"); 6073 6074 i = first; 6075 do { 6076 e = &buf[i]; 6077 if (e->timestamp == 0) 6078 break; /* end */ 6079 6080 sbuf_printf(sb, "%10d %15ju %8s %8s ", 6081 e->seqno, e->timestamp, 6082 (e->level < nitems(devlog_level_strings) ? 6083 devlog_level_strings[e->level] : "UNKNOWN"), 6084 (e->facility < nitems(devlog_facility_strings) ? 6085 devlog_facility_strings[e->facility] : "UNKNOWN")); 6086 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 6087 e->params[2], e->params[3], e->params[4], 6088 e->params[5], e->params[6], e->params[7]); 6089 6090 if (++i == nentries) 6091 i = 0; 6092 } while (i != first); 6093 6094 rc = sbuf_finish(sb); 6095 sbuf_delete(sb); 6096 done: 6097 free(buf, M_CXGBE); 6098 return (rc); 6099 } 6100 6101 static int 6102 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6103 { 6104 struct adapter *sc = arg1; 6105 struct sbuf *sb; 6106 int rc; 6107 struct tp_fcoe_stats stats[MAX_NCHAN]; 6108 int i, nchan = sc->chip_params->nchan; 6109 6110 rc = sysctl_wire_old_buffer(req, 0); 6111 if (rc != 0) 6112 return (rc); 6113 6114 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6115 if (sb == NULL) 6116 return (ENOMEM); 6117 6118 for (i = 0; i < nchan; i++) 6119 t4_get_fcoe_stats(sc, i, &stats[i]); 6120 6121 if (nchan > 2) { 6122 sbuf_printf(sb, " channel 0 channel 1" 6123 " channel 2 channel 3"); 6124 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6125 stats[0].octets_ddp, stats[1].octets_ddp, 6126 stats[2].octets_ddp, stats[3].octets_ddp); 6127 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6128 stats[0].frames_ddp, stats[1].frames_ddp, 6129 stats[2].frames_ddp, stats[3].frames_ddp); 6130 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6131 stats[0].frames_drop, stats[1].frames_drop, 6132 stats[2].frames_drop, stats[3].frames_drop); 6133 } else { 6134 sbuf_printf(sb, " channel 0 channel 1"); 6135 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6136 stats[0].octets_ddp, stats[1].octets_ddp); 6137 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6138 stats[0].frames_ddp, stats[1].frames_ddp); 6139 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6140 stats[0].frames_drop, stats[1].frames_drop); 6141 } 6142 6143 rc = sbuf_finish(sb); 6144 sbuf_delete(sb); 6145 6146 return (rc); 6147 } 6148 6149 static int 6150 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6151 { 6152 struct adapter *sc = arg1; 6153 struct sbuf *sb; 6154 int rc, i; 6155 unsigned int map, kbps, ipg, mode; 6156 unsigned int pace_tab[NTX_SCHED]; 6157 6158 rc = sysctl_wire_old_buffer(req, 0); 6159 if (rc != 0) 6160 return (rc); 6161 6162 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6163 if (sb == NULL) 6164 return (ENOMEM); 6165 6166 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6167 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 
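/* 'map' packs a 2-bit TX channel binding per scheduler and 'mode' holds one bit per scheduler selecting flow vs. class pacing; the pace table supplies the per-scheduler flow IPG shown in the last column. */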
6168 t4_read_pace_tbl(sc, pace_tab); 6169 6170 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6171 "Class IPG (0.1 ns) Flow IPG (us)"); 6172 6173 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6174 t4_get_tx_sched(sc, i, &kbps, &ipg); 6175 sbuf_printf(sb, "\n %u %-5s %u ", i, 6176 (mode & (1 << i)) ? "flow" : "class", map & 3); 6177 if (kbps) 6178 sbuf_printf(sb, "%9u ", kbps); 6179 else 6180 sbuf_printf(sb, " disabled "); 6181 6182 if (ipg) 6183 sbuf_printf(sb, "%13u ", ipg); 6184 else 6185 sbuf_printf(sb, " disabled "); 6186 6187 if (pace_tab[i]) 6188 sbuf_printf(sb, "%10u", pace_tab[i]); 6189 else 6190 sbuf_printf(sb, " disabled"); 6191 } 6192 6193 rc = sbuf_finish(sb); 6194 sbuf_delete(sb); 6195 6196 return (rc); 6197 } 6198 6199 static int 6200 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6201 { 6202 struct adapter *sc = arg1; 6203 struct sbuf *sb; 6204 int rc, i, j; 6205 uint64_t *p0, *p1; 6206 struct lb_port_stats s[2]; 6207 static const char *stat_name[] = { 6208 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6209 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6210 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6211 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6212 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6213 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6214 "BG2FramesTrunc:", "BG3FramesTrunc:" 6215 }; 6216 6217 rc = sysctl_wire_old_buffer(req, 0); 6218 if (rc != 0) 6219 return (rc); 6220 6221 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6222 if (sb == NULL) 6223 return (ENOMEM); 6224 6225 memset(s, 0, sizeof(s)); 6226 6227 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6228 t4_get_lb_stats(sc, i, &s[0]); 6229 t4_get_lb_stats(sc, i + 1, &s[1]); 6230 6231 p0 = &s[0].octets; 6232 p1 = &s[1].octets; 6233 sbuf_printf(sb, "%s Loopback %u" 6234 " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); 6235 6236 for (j = 0; j < nitems(stat_name); j++) 6237 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6238 *p0++, *p1++); 6239 } 6240 6241 rc = sbuf_finish(sb); 6242 sbuf_delete(sb); 6243 6244 return (rc); 6245 } 6246 6247 static int 6248 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6249 { 6250 int rc = 0; 6251 struct port_info *pi = arg1; 6252 struct sbuf *sb; 6253 6254 rc = sysctl_wire_old_buffer(req, 0); 6255 if (rc != 0) 6256 return(rc); 6257 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6258 if (sb == NULL) 6259 return (ENOMEM); 6260 6261 if (pi->linkdnrc < 0) 6262 sbuf_printf(sb, "n/a"); 6263 else 6264 sbuf_printf(sb, "%s", t4_link_down_rc_str(pi->linkdnrc)); 6265 6266 rc = sbuf_finish(sb); 6267 sbuf_delete(sb); 6268 6269 return (rc); 6270 } 6271 6272 struct mem_desc { 6273 unsigned int base; 6274 unsigned int limit; 6275 unsigned int idx; 6276 }; 6277 6278 static int 6279 mem_desc_cmp(const void *a, const void *b) 6280 { 6281 return ((const struct mem_desc *)a)->base - 6282 ((const struct mem_desc *)b)->base; 6283 } 6284 6285 static void 6286 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6287 unsigned int to) 6288 { 6289 unsigned int size; 6290 6291 if (from == to) 6292 return; 6293 6294 size = to - from + 1; 6295 if (size == 0) 6296 return; 6297 6298 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6299 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6300 } 6301 6302 static int 6303 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6304 { 6305 struct adapter *sc = arg1; 6306 struct sbuf *sb; 6307 int rc, i, n; 6308 uint32_t lo, hi, used, alloc; 6309 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6310 static const char *region[] = { 6311 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6312 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6313 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6314 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6315 "RQUDP region:", "PBL region:", "TXPBL region:", 6316 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6317 "On-chip queues:" 6318 }; 6319 struct mem_desc avail[4]; 6320 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6321 struct mem_desc *md = mem; 6322 6323 rc = sysctl_wire_old_buffer(req, 0); 6324 if (rc != 0) 6325 return (rc); 6326 6327 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6328 if (sb == NULL) 6329 return (ENOMEM); 6330 6331 for (i = 0; i < nitems(mem); i++) { 6332 mem[i].limit = 0; 6333 mem[i].idx = i; 6334 } 6335 6336 /* Find and sort the populated memory ranges */ 6337 i = 0; 6338 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6339 if (lo & F_EDRAM0_ENABLE) { 6340 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6341 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6342 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6343 avail[i].idx = 0; 6344 i++; 6345 } 6346 if (lo & F_EDRAM1_ENABLE) { 6347 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6348 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6349 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6350 avail[i].idx = 1; 6351 i++; 6352 } 6353 if (lo & F_EXT_MEM_ENABLE) { 6354 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6355 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6356 avail[i].limit = avail[i].base + 6357 (G_EXT_MEM_SIZE(hi) << 20); 6358 avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ 6359 i++; 6360 } 6361 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 6362 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6363 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6364 avail[i].limit = avail[i].base + 6365 (G_EXT_MEM1_SIZE(hi) << 20); 6366 avail[i].idx = 4; 6367 i++; 6368 } 6369 if (!i) { /* no memory available */ 6370 rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } 6371 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6372 6373 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6374 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6375 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6376 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6377 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6378 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6379 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6380 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6381 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6382 6383 /* the next few have explicit upper bounds */ 6384 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6385 md->limit = md->base - 1 + 6386 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6387 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6388 md++; 6389 6390 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6391 md->limit = md->base - 1 + 6392 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6393 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6394 md++; 6395 6396 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6397 if (chip_id(sc) <= CHELSIO_T5) 6398 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6399 else 6400 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 6401 md->limit = 0; 6402 } else { 6403 md->base = 0; 6404 md->idx = nitems(region); /* hide it */ 6405 } 6406 md++; 6407 6408 #define ulp_region(reg) \ 6409 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 6410 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 6411 6412 ulp_region(RX_ISCSI); 6413 ulp_region(RX_TDDP); 6414 ulp_region(TX_TPT); 6415 ulp_region(RX_STAG); 6416 ulp_region(RX_RQ); 6417 ulp_region(RX_RQUDP); 6418 ulp_region(RX_PBL); 6419 ulp_region(TX_PBL); 6420 #undef ulp_region 6421 6422 md->base = 0; 6423 md->idx = nitems(region); 6424 if (!is_t4(sc)) { 6425 uint32_t size = 0; 6426 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 6427 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 6428 6429 if (is_t5(sc)) { 6430 if (sge_ctrl & F_VFIFO_ENABLE) 6431 size = G_DBVFIFO_SIZE(fifo_size); 6432 } else 6433 size = G_T6_DBVFIFO_SIZE(fifo_size); 6434 6435 if (size) { 6436 md->base = G_BASEADDR(t4_read_reg(sc, 6437 A_SGE_DBVFIFO_BADDR)); 6438 md->limit = md->base + (size << 2) - 1; 6439 } 6440 } 6441 md++; 6442 6443 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6444 md->limit = 0; 6445 md++; 6446 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6447 md->limit = 0; 6448 md++; 6449 6450 md->base = sc->vres.ocq.start; 6451 if (sc->vres.ocq.size) 6452 md->limit = md->base + sc->vres.ocq.size - 1; 6453 else 6454 md->idx = nitems(region); /* hide it */ 6455 md++; 6456 6457 /* add any address-space holes, there can be up to 3 */ 6458 for (n = 0; n < i - 1; n++) 6459 if (avail[n].limit < avail[n + 1].base) 6460 (md++)->base = avail[n].limit; 6461 if (avail[n].limit) 6462 (md++)->base = avail[n].limit; 6463 6464 n = md - mem; 6465 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6466 6467 for (lo = 0; lo < i; lo++) 6468 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6469 avail[lo].limit - 1); 6470 6471 sbuf_printf(sb, "\n"); 6472 for (i 
= 0; i < n; i++) { 6473 if (mem[i].idx >= nitems(region)) 6474 continue; /* skip holes */ 6475 if (!mem[i].limit) 6476 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6477 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6478 mem[i].limit); 6479 } 6480 6481 sbuf_printf(sb, "\n"); 6482 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6483 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6484 mem_region_show(sb, "uP RAM:", lo, hi); 6485 6486 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6487 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6488 mem_region_show(sb, "uP Extmem2:", lo, hi); 6489 6490 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6491 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6492 G_PMRXMAXPAGE(lo), 6493 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6494 (lo & F_PMRXNUMCHN) ? 2 : 1); 6495 6496 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6497 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6498 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6499 G_PMTXMAXPAGE(lo), 6500 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6501 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6502 sbuf_printf(sb, "%u p-structs\n", 6503 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6504 6505 for (i = 0; i < 4; i++) { 6506 if (chip_id(sc) > CHELSIO_T5) 6507 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 6508 else 6509 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6510 if (is_t5(sc)) { 6511 used = G_T5_USED(lo); 6512 alloc = G_T5_ALLOC(lo); 6513 } else { 6514 used = G_USED(lo); 6515 alloc = G_ALLOC(lo); 6516 } 6517 /* For T6 these are MAC buffer groups */ 6518 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6519 i, used, alloc); 6520 } 6521 for (i = 0; i < sc->chip_params->nchan; i++) { 6522 if (chip_id(sc) > CHELSIO_T5) 6523 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 6524 else 6525 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6526 if (is_t5(sc)) { 6527 used = G_T5_USED(lo); 6528 alloc = G_T5_ALLOC(lo); 6529 } else { 6530 used = G_USED(lo); 6531 alloc = G_ALLOC(lo); 6532 } 6533 /* For T6 these are MAC buffer groups */ 6534 sbuf_printf(sb, 6535 "\nLoopback %d using %u pages out of %u allocated", 6536 i, used, alloc); 6537 } 6538 6539 rc = sbuf_finish(sb); 6540 sbuf_delete(sb); 6541 6542 return (rc); 6543 } 6544 6545 static inline void 6546 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6547 { 6548 *mask = x | y; 6549 y = htobe64(y); 6550 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6551 } 6552 6553 static int 6554 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6555 { 6556 struct adapter *sc = arg1; 6557 struct sbuf *sb; 6558 int rc, i; 6559 6560 MPASS(chip_id(sc) <= CHELSIO_T5); 6561 6562 rc = sysctl_wire_old_buffer(req, 0); 6563 if (rc != 0) 6564 return (rc); 6565 6566 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6567 if (sb == NULL) 6568 return (ENOMEM); 6569 6570 sbuf_printf(sb, 6571 "Idx Ethernet address Mask Vld Ports PF" 6572 " VF Replication P0 P1 P2 P3 ML"); 6573 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6574 uint64_t tcamx, tcamy, mask; 6575 uint32_t cls_lo, cls_hi; 6576 uint8_t addr[ETHER_ADDR_LEN]; 6577 6578 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6579 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6580 if (tcamx & tcamy) 6581 continue; 6582 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6583 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6584 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6585 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6586 " %c %#x%4u%4d", 
i, addr[0], addr[1], addr[2], 6587 addr[3], addr[4], addr[5], (uintmax_t)mask, 6588 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6589 G_PORTMAP(cls_hi), G_PF(cls_lo), 6590 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); 6591 6592 if (cls_lo & F_REPLICATE) { 6593 struct fw_ldst_cmd ldst_cmd; 6594 6595 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6596 ldst_cmd.op_to_addrspace = 6597 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6598 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6599 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6600 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6601 ldst_cmd.u.mps.rplc.fid_idx = 6602 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6603 V_FW_LDST_CMD_IDX(i)); 6604 6605 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6606 "t4mps"); 6607 if (rc) 6608 break; 6609 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6610 sizeof(ldst_cmd), &ldst_cmd); 6611 end_synchronized_op(sc, 0); 6612 6613 if (rc != 0) { 6614 sbuf_printf(sb, "%36d", rc); 6615 rc = 0; 6616 } else { 6617 sbuf_printf(sb, " %08x %08x %08x %08x", 6618 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6619 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6620 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6621 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6622 } 6623 } else 6624 sbuf_printf(sb, "%36s", ""); 6625 6626 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6627 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6628 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6629 } 6630 6631 if (rc) 6632 (void) sbuf_finish(sb); 6633 else 6634 rc = sbuf_finish(sb); 6635 sbuf_delete(sb); 6636 6637 return (rc); 6638 } 6639 6640 static int 6641 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 6642 { 6643 struct adapter *sc = arg1; 6644 struct sbuf *sb; 6645 int rc, i; 6646 6647 MPASS(chip_id(sc) > CHELSIO_T5); 6648 6649 rc = sysctl_wire_old_buffer(req, 0); 6650 if (rc != 0) 6651 return (rc); 6652 6653 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6654 if (sb == NULL) 6655 return (ENOMEM); 6656 6657 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 6658 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 6659 " Replication" 6660 " P0 P1 P2 P3 ML\n"); 6661 6662 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6663 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 6664 uint16_t ivlan; 6665 uint64_t tcamx, tcamy, val, mask; 6666 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 6667 uint8_t addr[ETHER_ADDR_LEN]; 6668 6669 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 6670 if (i < 256) 6671 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 6672 else 6673 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 6674 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6675 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6676 tcamy = G_DMACH(val) << 32; 6677 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6678 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6679 lookup_type = G_DATALKPTYPE(data2); 6680 port_num = G_DATAPORTNUM(data2); 6681 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6682 /* Inner header VNI */ 6683 vniy = ((data2 & F_DATAVIDH2) << 23) | 6684 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6685 dip_hit = data2 & F_DATADIPHIT; 6686 vlan_vld = 0; 6687 } else { 6688 vniy = 0; 6689 dip_hit = 0; 6690 vlan_vld = data2 & F_DATAVIDH2; 6691 ivlan = G_VIDL(val); 6692 } 6693 6694 ctl |= V_CTLXYBITSEL(1); 6695 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6696 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6697 tcamx = G_DMACH(val) << 32; 6698 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6699 data2 = t4_read_reg(sc, 
A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6700 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6701 /* Inner header VNI mask */ 6702 vnix = ((data2 & F_DATAVIDH2) << 23) | 6703 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6704 } else 6705 vnix = 0; 6706 6707 if (tcamx & tcamy) 6708 continue; 6709 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6710 6711 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6712 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6713 6714 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6715 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6716 "%012jx %06x %06x - - %3c" 6717 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 6718 addr[1], addr[2], addr[3], addr[4], addr[5], 6719 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 6720 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6721 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6722 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6723 } else { 6724 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6725 "%012jx - - ", i, addr[0], addr[1], 6726 addr[2], addr[3], addr[4], addr[5], 6727 (uintmax_t)mask); 6728 6729 if (vlan_vld) 6730 sbuf_printf(sb, "%4u Y ", ivlan); 6731 else 6732 sbuf_printf(sb, " - N "); 6733 6734 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 6735 lookup_type ? 'I' : 'O', port_num, 6736 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6737 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6738 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6739 } 6740 6741 6742 if (cls_lo & F_T6_REPLICATE) { 6743 struct fw_ldst_cmd ldst_cmd; 6744 6745 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6746 ldst_cmd.op_to_addrspace = 6747 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6748 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6749 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6750 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6751 ldst_cmd.u.mps.rplc.fid_idx = 6752 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6753 V_FW_LDST_CMD_IDX(i)); 6754 6755 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6756 "t6mps"); 6757 if (rc) 6758 break; 6759 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6760 sizeof(ldst_cmd), &ldst_cmd); 6761 end_synchronized_op(sc, 0); 6762 6763 if (rc != 0) { 6764 sbuf_printf(sb, "%72d", rc); 6765 rc = 0; 6766 } else { 6767 sbuf_printf(sb, " %08x %08x %08x %08x" 6768 " %08x %08x %08x %08x", 6769 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 6770 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 6771 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 6772 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 6773 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6774 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6775 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6776 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6777 } 6778 } else 6779 sbuf_printf(sb, "%72s", ""); 6780 6781 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 6782 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 6783 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 6784 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 6785 } 6786 6787 if (rc) 6788 (void) sbuf_finish(sb); 6789 else 6790 rc = sbuf_finish(sb); 6791 sbuf_delete(sb); 6792 6793 return (rc); 6794 } 6795 6796 static int 6797 sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 6798 { 6799 struct adapter *sc = arg1; 6800 struct sbuf *sb; 6801 int rc; 6802 uint16_t mtus[NMTUS]; 6803 6804 rc = sysctl_wire_old_buffer(req, 0); 6805 if (rc != 0) 6806 return (rc); 6807 6808 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6809 if (sb == NULL) 6810 return (ENOMEM); 6811 6812 t4_read_mtu_tbl(sc, mtus, NULL); 6813 6814 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 6815 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 
6816 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 6817 mtus[14], mtus[15]); 6818 6819 rc = sbuf_finish(sb); 6820 sbuf_delete(sb); 6821 6822 return (rc); 6823 } 6824 6825 static int 6826 sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 6827 { 6828 struct adapter *sc = arg1; 6829 struct sbuf *sb; 6830 int rc, i; 6831 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 6832 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 6833 static const char *tx_stats[MAX_PM_NSTATS] = { 6834 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 6835 "Tx FIFO wait", NULL, "Tx latency" 6836 }; 6837 static const char *rx_stats[MAX_PM_NSTATS] = { 6838 "Read:", "Write bypass:", "Write mem:", "Flush:", 6839 " Rx FIFO wait", NULL, "Rx latency" 6840 }; 6841 6842 rc = sysctl_wire_old_buffer(req, 0); 6843 if (rc != 0) 6844 return (rc); 6845 6846 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6847 if (sb == NULL) 6848 return (ENOMEM); 6849 6850 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 6851 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 6852 6853 sbuf_printf(sb, " Tx pcmds Tx bytes"); 6854 for (i = 0; i < 4; i++) { 6855 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6856 tx_cyc[i]); 6857 } 6858 6859 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 6860 for (i = 0; i < 4; i++) { 6861 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6862 rx_cyc[i]); 6863 } 6864 6865 if (chip_id(sc) > CHELSIO_T5) { 6866 sbuf_printf(sb, 6867 "\n Total wait Total occupancy"); 6868 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6869 tx_cyc[i]); 6870 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6871 rx_cyc[i]); 6872 6873 i += 2; 6874 MPASS(i < nitems(tx_stats)); 6875 6876 sbuf_printf(sb, 6877 "\n Reads Total wait"); 6878 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6879 tx_cyc[i]); 6880 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6881 rx_cyc[i]); 6882 } 6883 6884 rc = sbuf_finish(sb); 6885 sbuf_delete(sb); 6886 6887 return (rc); 6888 } 6889 6890 static int 6891 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 6892 { 6893 struct adapter *sc = arg1; 6894 struct sbuf *sb; 6895 int rc; 6896 struct tp_rdma_stats stats; 6897 6898 rc = sysctl_wire_old_buffer(req, 0); 6899 if (rc != 0) 6900 return (rc); 6901 6902 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6903 if (sb == NULL) 6904 return (ENOMEM); 6905 6906 mtx_lock(&sc->reg_lock); 6907 t4_tp_get_rdma_stats(sc, &stats); 6908 mtx_unlock(&sc->reg_lock); 6909 6910 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod); 6911 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt); 6912 6913 rc = sbuf_finish(sb); 6914 sbuf_delete(sb); 6915 6916 return (rc); 6917 } 6918 6919 static int 6920 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 6921 { 6922 struct adapter *sc = arg1; 6923 struct sbuf *sb; 6924 int rc; 6925 struct tp_tcp_stats v4, v6; 6926 6927 rc = sysctl_wire_old_buffer(req, 0); 6928 if (rc != 0) 6929 return (rc); 6930 6931 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6932 if (sb == NULL) 6933 return (ENOMEM); 6934 6935 mtx_lock(&sc->reg_lock); 6936 t4_tp_get_tcp_stats(sc, &v4, &v6); 6937 mtx_unlock(&sc->reg_lock); 6938 6939 sbuf_printf(sb, 6940 " IP IPv6\n"); 6941 sbuf_printf(sb, "OutRsts: %20u %20u\n", 6942 v4.tcp_out_rsts, v6.tcp_out_rsts); 6943 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 6944 v4.tcp_in_segs, v6.tcp_in_segs); 6945 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 6946 v4.tcp_out_segs, v6.tcp_out_segs); 6947 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 6948 v4.tcp_retrans_segs, 
v6.tcp_retrans_segs); 6949 6950 rc = sbuf_finish(sb); 6951 sbuf_delete(sb); 6952 6953 return (rc); 6954 } 6955 6956 static int 6957 sysctl_tids(SYSCTL_HANDLER_ARGS) 6958 { 6959 struct adapter *sc = arg1; 6960 struct sbuf *sb; 6961 int rc; 6962 struct tid_info *t = &sc->tids; 6963 6964 rc = sysctl_wire_old_buffer(req, 0); 6965 if (rc != 0) 6966 return (rc); 6967 6968 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6969 if (sb == NULL) 6970 return (ENOMEM); 6971 6972 if (t->natids) { 6973 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 6974 t->atids_in_use); 6975 } 6976 6977 if (t->ntids) { 6978 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6979 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 6980 6981 if (b) { 6982 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1, 6983 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6984 t->ntids - 1); 6985 } else { 6986 sbuf_printf(sb, "TID range: %u-%u", 6987 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6988 t->ntids - 1); 6989 } 6990 } else 6991 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1); 6992 sbuf_printf(sb, ", in use: %u\n", 6993 atomic_load_acq_int(&t->tids_in_use)); 6994 } 6995 6996 if (t->nstids) { 6997 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 6998 t->stid_base + t->nstids - 1, t->stids_in_use); 6999 } 7000 7001 if (t->nftids) { 7002 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 7003 t->ftid_base + t->nftids - 1); 7004 } 7005 7006 if (t->netids) { 7007 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 7008 t->etid_base + t->netids - 1); 7009 } 7010 7011 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 7012 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 7013 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 7014 7015 rc = sbuf_finish(sb); 7016 sbuf_delete(sb); 7017 7018 return (rc); 7019 } 7020 7021 static int 7022 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 7023 { 7024 struct adapter *sc = arg1; 7025 struct sbuf *sb; 7026 int rc; 7027 struct tp_err_stats stats; 7028 7029 rc = sysctl_wire_old_buffer(req, 0); 7030 if (rc != 0) 7031 return (rc); 7032 7033 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7034 if (sb == NULL) 7035 return (ENOMEM); 7036 7037 mtx_lock(&sc->reg_lock); 7038 t4_tp_get_err_stats(sc, &stats); 7039 mtx_unlock(&sc->reg_lock); 7040 7041 if (sc->chip_params->nchan > 2) { 7042 sbuf_printf(sb, " channel 0 channel 1" 7043 " channel 2 channel 3\n"); 7044 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 7045 stats.mac_in_errs[0], stats.mac_in_errs[1], 7046 stats.mac_in_errs[2], stats.mac_in_errs[3]); 7047 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 7048 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 7049 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 7050 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 7051 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 7052 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 7053 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 7054 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 7055 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 7056 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 7057 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 7058 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 7059 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 7060 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 7061 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 7062 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 7063 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 7064 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 7065 sbuf_printf(sb, 
"ofldChanDrops: %10u %10u %10u %10u\n\n", 7066 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 7067 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 7068 } else { 7069 sbuf_printf(sb, " channel 0 channel 1\n"); 7070 sbuf_printf(sb, "macInErrs: %10u %10u\n", 7071 stats.mac_in_errs[0], stats.mac_in_errs[1]); 7072 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 7073 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 7074 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 7075 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 7076 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 7077 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 7078 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 7079 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 7080 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 7081 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 7082 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 7083 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 7084 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 7085 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 7086 } 7087 7088 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 7089 stats.ofld_no_neigh, stats.ofld_cong_defer); 7090 7091 rc = sbuf_finish(sb); 7092 sbuf_delete(sb); 7093 7094 return (rc); 7095 } 7096 7097 static int 7098 sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 7099 { 7100 struct adapter *sc = arg1; 7101 struct tp_params *tpp = &sc->params.tp; 7102 u_int mask; 7103 int rc; 7104 7105 mask = tpp->la_mask >> 16; 7106 rc = sysctl_handle_int(oidp, &mask, 0, req); 7107 if (rc != 0 || req->newptr == NULL) 7108 return (rc); 7109 if (mask > 0xffff) 7110 return (EINVAL); 7111 tpp->la_mask = mask << 16; 7112 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7113 7114 return (0); 7115 } 7116 7117 struct field_desc { 7118 const char *name; 7119 u_int start; 7120 u_int width; 7121 }; 7122 7123 static void 7124 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7125 { 7126 char buf[32]; 7127 int line_size = 0; 7128 7129 while (f->name) { 7130 uint64_t mask = (1ULL << f->width) - 1; 7131 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7132 ((uintmax_t)v >> f->start) & mask); 7133 7134 if (line_size + len >= 79) { 7135 line_size = 8; 7136 sbuf_printf(sb, "\n "); 7137 } 7138 sbuf_printf(sb, "%s ", buf); 7139 line_size += len + 1; 7140 f++; 7141 } 7142 sbuf_printf(sb, "\n"); 7143 } 7144 7145 static const struct field_desc tp_la0[] = { 7146 { "RcfOpCodeOut", 60, 4 }, 7147 { "State", 56, 4 }, 7148 { "WcfState", 52, 4 }, 7149 { "RcfOpcSrcOut", 50, 2 }, 7150 { "CRxError", 49, 1 }, 7151 { "ERxError", 48, 1 }, 7152 { "SanityFailed", 47, 1 }, 7153 { "SpuriousMsg", 46, 1 }, 7154 { "FlushInputMsg", 45, 1 }, 7155 { "FlushInputCpl", 44, 1 }, 7156 { "RssUpBit", 43, 1 }, 7157 { "RssFilterHit", 42, 1 }, 7158 { "Tid", 32, 10 }, 7159 { "InitTcb", 31, 1 }, 7160 { "LineNumber", 24, 7 }, 7161 { "Emsg", 23, 1 }, 7162 { "EdataOut", 22, 1 }, 7163 { "Cmsg", 21, 1 }, 7164 { "CdataOut", 20, 1 }, 7165 { "EreadPdu", 19, 1 }, 7166 { "CreadPdu", 18, 1 }, 7167 { "TunnelPkt", 17, 1 }, 7168 { "RcfPeerFin", 16, 1 }, 7169 { "RcfReasonOut", 12, 4 }, 7170 { "TxCchannel", 10, 2 }, 7171 { "RcfTxChannel", 8, 2 }, 7172 { "RxEchannel", 6, 2 }, 7173 { "RcfRxChannel", 5, 1 }, 7174 { "RcfDataOutSrdy", 4, 1 }, 7175 { "RxDvld", 3, 1 }, 7176 { "RxOoDvld", 2, 1 }, 7177 { "RxCongestion", 1, 1 }, 7178 { "TxCongestion", 0, 1 }, 7179 { NULL } 7180 }; 7181 7182 static const struct field_desc tp_la1[] = { 7183 { "CplCmdIn", 56, 8 }, 7184 { "CplCmdOut", 48, 8 }, 7185 { "ESynOut", 47, 1 }, 
7186 { "EAckOut", 46, 1 }, 7187 { "EFinOut", 45, 1 }, 7188 { "ERstOut", 44, 1 }, 7189 { "SynIn", 43, 1 }, 7190 { "AckIn", 42, 1 }, 7191 { "FinIn", 41, 1 }, 7192 { "RstIn", 40, 1 }, 7193 { "DataIn", 39, 1 }, 7194 { "DataInVld", 38, 1 }, 7195 { "PadIn", 37, 1 }, 7196 { "RxBufEmpty", 36, 1 }, 7197 { "RxDdp", 35, 1 }, 7198 { "RxFbCongestion", 34, 1 }, 7199 { "TxFbCongestion", 33, 1 }, 7200 { "TxPktSumSrdy", 32, 1 }, 7201 { "RcfUlpType", 28, 4 }, 7202 { "Eread", 27, 1 }, 7203 { "Ebypass", 26, 1 }, 7204 { "Esave", 25, 1 }, 7205 { "Static0", 24, 1 }, 7206 { "Cread", 23, 1 }, 7207 { "Cbypass", 22, 1 }, 7208 { "Csave", 21, 1 }, 7209 { "CPktOut", 20, 1 }, 7210 { "RxPagePoolFull", 18, 2 }, 7211 { "RxLpbkPkt", 17, 1 }, 7212 { "TxLpbkPkt", 16, 1 }, 7213 { "RxVfValid", 15, 1 }, 7214 { "SynLearned", 14, 1 }, 7215 { "SetDelEntry", 13, 1 }, 7216 { "SetInvEntry", 12, 1 }, 7217 { "CpcmdDvld", 11, 1 }, 7218 { "CpcmdSave", 10, 1 }, 7219 { "RxPstructsFull", 8, 2 }, 7220 { "EpcmdDvld", 7, 1 }, 7221 { "EpcmdFlush", 6, 1 }, 7222 { "EpcmdTrimPrefix", 5, 1 }, 7223 { "EpcmdTrimPostfix", 4, 1 }, 7224 { "ERssIp4Pkt", 3, 1 }, 7225 { "ERssIp6Pkt", 2, 1 }, 7226 { "ERssTcpUdpPkt", 1, 1 }, 7227 { "ERssFceFipPkt", 0, 1 }, 7228 { NULL } 7229 }; 7230 7231 static const struct field_desc tp_la2[] = { 7232 { "CplCmdIn", 56, 8 }, 7233 { "MpsVfVld", 55, 1 }, 7234 { "MpsPf", 52, 3 }, 7235 { "MpsVf", 44, 8 }, 7236 { "SynIn", 43, 1 }, 7237 { "AckIn", 42, 1 }, 7238 { "FinIn", 41, 1 }, 7239 { "RstIn", 40, 1 }, 7240 { "DataIn", 39, 1 }, 7241 { "DataInVld", 38, 1 }, 7242 { "PadIn", 37, 1 }, 7243 { "RxBufEmpty", 36, 1 }, 7244 { "RxDdp", 35, 1 }, 7245 { "RxFbCongestion", 34, 1 }, 7246 { "TxFbCongestion", 33, 1 }, 7247 { "TxPktSumSrdy", 32, 1 }, 7248 { "RcfUlpType", 28, 4 }, 7249 { "Eread", 27, 1 }, 7250 { "Ebypass", 26, 1 }, 7251 { "Esave", 25, 1 }, 7252 { "Static0", 24, 1 }, 7253 { "Cread", 23, 1 }, 7254 { "Cbypass", 22, 1 }, 7255 { "Csave", 21, 1 }, 7256 { "CPktOut", 20, 1 }, 7257 { "RxPagePoolFull", 18, 2 }, 7258 { "RxLpbkPkt", 17, 1 }, 7259 { "TxLpbkPkt", 16, 1 }, 7260 { "RxVfValid", 15, 1 }, 7261 { "SynLearned", 14, 1 }, 7262 { "SetDelEntry", 13, 1 }, 7263 { "SetInvEntry", 12, 1 }, 7264 { "CpcmdDvld", 11, 1 }, 7265 { "CpcmdSave", 10, 1 }, 7266 { "RxPstructsFull", 8, 2 }, 7267 { "EpcmdDvld", 7, 1 }, 7268 { "EpcmdFlush", 6, 1 }, 7269 { "EpcmdTrimPrefix", 5, 1 }, 7270 { "EpcmdTrimPostfix", 4, 1 }, 7271 { "ERssIp4Pkt", 3, 1 }, 7272 { "ERssIp6Pkt", 2, 1 }, 7273 { "ERssTcpUdpPkt", 1, 1 }, 7274 { "ERssFceFipPkt", 0, 1 }, 7275 { NULL } 7276 }; 7277 7278 static void 7279 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7280 { 7281 7282 field_desc_show(sb, *p, tp_la0); 7283 } 7284 7285 static void 7286 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7287 { 7288 7289 if (idx) 7290 sbuf_printf(sb, "\n"); 7291 field_desc_show(sb, p[0], tp_la0); 7292 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7293 field_desc_show(sb, p[1], tp_la0); 7294 } 7295 7296 static void 7297 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7298 { 7299 7300 if (idx) 7301 sbuf_printf(sb, "\n"); 7302 field_desc_show(sb, p[0], tp_la0); 7303 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7304 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7305 } 7306 7307 static int 7308 sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7309 { 7310 struct adapter *sc = arg1; 7311 struct sbuf *sb; 7312 uint64_t *buf, *p; 7313 int rc; 7314 u_int i, inc; 7315 void (*show_func)(struct sbuf *, uint64_t *, int); 7316 7317 rc = sysctl_wire_old_buffer(req, 0); 7318 if (rc != 0) 7319 return (rc); 7320 7321 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7322 if (sb == NULL) 7323 return (ENOMEM); 7324 7325 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7326 7327 t4_tp_read_la(sc, buf, NULL); 7328 p = buf; 7329 7330 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7331 case 2: 7332 inc = 2; 7333 show_func = tp_la_show2; 7334 break; 7335 case 3: 7336 inc = 2; 7337 show_func = tp_la_show3; 7338 break; 7339 default: 7340 inc = 1; 7341 show_func = tp_la_show; 7342 } 7343 7344 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7345 (*show_func)(sb, p, i); 7346 7347 rc = sbuf_finish(sb); 7348 sbuf_delete(sb); 7349 free(buf, M_CXGBE); 7350 return (rc); 7351 } 7352 7353 static int 7354 sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7355 { 7356 struct adapter *sc = arg1; 7357 struct sbuf *sb; 7358 int rc; 7359 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7360 7361 rc = sysctl_wire_old_buffer(req, 0); 7362 if (rc != 0) 7363 return (rc); 7364 7365 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7366 if (sb == NULL) 7367 return (ENOMEM); 7368 7369 t4_get_chan_txrate(sc, nrate, orate); 7370 7371 if (sc->chip_params->nchan > 2) { 7372 sbuf_printf(sb, " channel 0 channel 1" 7373 " channel 2 channel 3\n"); 7374 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7375 nrate[0], nrate[1], nrate[2], nrate[3]); 7376 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7377 orate[0], orate[1], orate[2], orate[3]); 7378 } else { 7379 sbuf_printf(sb, " channel 0 channel 1\n"); 7380 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7381 nrate[0], nrate[1]); 7382 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7383 orate[0], orate[1]); 7384 } 7385 7386 rc = sbuf_finish(sb); 7387 sbuf_delete(sb); 7388 7389 return (rc); 7390 } 7391 7392 static int 7393 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7394 { 7395 struct adapter *sc = arg1; 7396 struct sbuf *sb; 7397 uint32_t *buf, *p; 7398 int rc, i; 7399 7400 rc = sysctl_wire_old_buffer(req, 0); 7401 if (rc != 0) 7402 return (rc); 7403 7404 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7405 if (sb == NULL) 7406 return (ENOMEM); 7407 7408 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 7409 M_ZERO | M_WAITOK); 7410 7411 t4_ulprx_read_la(sc, buf); 7412 p = buf; 7413 7414 sbuf_printf(sb, " Pcmd Type Message" 7415 " Data"); 7416 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 7417 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 7418 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 7419 } 7420 7421 rc = sbuf_finish(sb); 7422 sbuf_delete(sb); 7423 free(buf, M_CXGBE); 7424 return (rc); 7425 } 7426 7427 static int 7428 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 7429 { 7430 struct adapter *sc = arg1; 7431 struct sbuf *sb; 7432 int rc, v; 7433 7434 rc = sysctl_wire_old_buffer(req, 0); 7435 if (rc != 0) 7436 return (rc); 7437 7438 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7439 if (sb == NULL) 7440 return (ENOMEM); 7441 7442 v = t4_read_reg(sc, A_SGE_STAT_CFG); 7443 if (G_STATSOURCE_T5(v) == 7) { 7444 if (G_STATMODE(v) == 0) { 7445 sbuf_printf(sb, "total %d, incomplete %d", 7446 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7447 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7448 } else if (G_STATMODE(v) == 1) { 7449 sbuf_printf(sb, 
"total %d, data overflow %d", 7450 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7451 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7452 } 7453 } 7454 rc = sbuf_finish(sb); 7455 sbuf_delete(sb); 7456 7457 return (rc); 7458 } 7459 #endif 7460 7461 #ifdef TCP_OFFLOAD 7462 static void 7463 unit_conv(char *buf, size_t len, u_int val, u_int factor) 7464 { 7465 u_int rem = val % factor; 7466 7467 if (rem == 0) 7468 snprintf(buf, len, "%u", val / factor); 7469 else { 7470 while (rem % 10 == 0) 7471 rem /= 10; 7472 snprintf(buf, len, "%u.%u", val / factor, rem); 7473 } 7474 } 7475 7476 static int 7477 sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 7478 { 7479 struct adapter *sc = arg1; 7480 char buf[16]; 7481 u_int res, re; 7482 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7483 7484 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7485 switch (arg2) { 7486 case 0: 7487 /* timer_tick */ 7488 re = G_TIMERRESOLUTION(res); 7489 break; 7490 case 1: 7491 /* TCP timestamp tick */ 7492 re = G_TIMESTAMPRESOLUTION(res); 7493 break; 7494 case 2: 7495 /* DACK tick */ 7496 re = G_DELAYEDACKRESOLUTION(res); 7497 break; 7498 default: 7499 return (EDOOFUS); 7500 } 7501 7502 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); 7503 7504 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 7505 } 7506 7507 static int 7508 sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 7509 { 7510 struct adapter *sc = arg1; 7511 u_int res, dack_re, v; 7512 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7513 7514 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7515 dack_re = G_DELAYEDACKRESOLUTION(res); 7516 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 7517 7518 return (sysctl_handle_int(oidp, &v, 0, req)); 7519 } 7520 7521 static int 7522 sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 7523 { 7524 struct adapter *sc = arg1; 7525 int reg = arg2; 7526 u_int tre; 7527 u_long tp_tick_us, v; 7528 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7529 7530 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 7531 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 7532 reg == A_TP_KEEP_IDLE || A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT || 7533 reg == A_TP_FINWAIT2_TIMER); 7534 7535 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 7536 tp_tick_us = (cclk_ps << tre) / 1000000; 7537 7538 if (reg == A_TP_INIT_SRTT) 7539 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 7540 else 7541 v = tp_tick_us * t4_read_reg(sc, reg); 7542 7543 return (sysctl_handle_long(oidp, &v, 0, req)); 7544 } 7545 #endif 7546 7547 static uint32_t 7548 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 7549 { 7550 uint32_t mode; 7551 7552 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 7553 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 7554 7555 if (fconf & F_FRAGMENTATION) 7556 mode |= T4_FILTER_IP_FRAGMENT; 7557 7558 if (fconf & F_MPSHITTYPE) 7559 mode |= T4_FILTER_MPS_HIT_TYPE; 7560 7561 if (fconf & F_MACMATCH) 7562 mode |= T4_FILTER_MAC_IDX; 7563 7564 if (fconf & F_ETHERTYPE) 7565 mode |= T4_FILTER_ETH_TYPE; 7566 7567 if (fconf & F_PROTOCOL) 7568 mode |= T4_FILTER_IP_PROTO; 7569 7570 if (fconf & F_TOS) 7571 mode |= T4_FILTER_IP_TOS; 7572 7573 if (fconf & F_VLAN) 7574 mode |= T4_FILTER_VLAN; 7575 7576 if (fconf & F_VNIC_ID) { 7577 mode |= T4_FILTER_VNIC; 7578 if (iconf & F_VNIC) 7579 mode |= T4_FILTER_IC_VNIC; 7580 } 7581 7582 if (fconf & F_PORT) 7583 mode |= T4_FILTER_PORT; 7584 7585 if (fconf & F_FCOE) 7586 mode |= T4_FILTER_FCoE; 7587 7588 return (mode); 7589 } 7590 7591 static uint32_t 7592 mode_to_fconf(uint32_t mode) 7593 { 
7594 uint32_t fconf = 0; 7595 7596 if (mode & T4_FILTER_IP_FRAGMENT) 7597 fconf |= F_FRAGMENTATION; 7598 7599 if (mode & T4_FILTER_MPS_HIT_TYPE) 7600 fconf |= F_MPSHITTYPE; 7601 7602 if (mode & T4_FILTER_MAC_IDX) 7603 fconf |= F_MACMATCH; 7604 7605 if (mode & T4_FILTER_ETH_TYPE) 7606 fconf |= F_ETHERTYPE; 7607 7608 if (mode & T4_FILTER_IP_PROTO) 7609 fconf |= F_PROTOCOL; 7610 7611 if (mode & T4_FILTER_IP_TOS) 7612 fconf |= F_TOS; 7613 7614 if (mode & T4_FILTER_VLAN) 7615 fconf |= F_VLAN; 7616 7617 if (mode & T4_FILTER_VNIC) 7618 fconf |= F_VNIC_ID; 7619 7620 if (mode & T4_FILTER_PORT) 7621 fconf |= F_PORT; 7622 7623 if (mode & T4_FILTER_FCoE) 7624 fconf |= F_FCOE; 7625 7626 return (fconf); 7627 } 7628 7629 static uint32_t 7630 mode_to_iconf(uint32_t mode) 7631 { 7632 7633 if (mode & T4_FILTER_IC_VNIC) 7634 return (F_VNIC); 7635 return (0); 7636 } 7637 7638 static int check_fspec_against_fconf_iconf(struct adapter *sc, 7639 struct t4_filter_specification *fs) 7640 { 7641 struct tp_params *tpp = &sc->params.tp; 7642 uint32_t fconf = 0; 7643 7644 if (fs->val.frag || fs->mask.frag) 7645 fconf |= F_FRAGMENTATION; 7646 7647 if (fs->val.matchtype || fs->mask.matchtype) 7648 fconf |= F_MPSHITTYPE; 7649 7650 if (fs->val.macidx || fs->mask.macidx) 7651 fconf |= F_MACMATCH; 7652 7653 if (fs->val.ethtype || fs->mask.ethtype) 7654 fconf |= F_ETHERTYPE; 7655 7656 if (fs->val.proto || fs->mask.proto) 7657 fconf |= F_PROTOCOL; 7658 7659 if (fs->val.tos || fs->mask.tos) 7660 fconf |= F_TOS; 7661 7662 if (fs->val.vlan_vld || fs->mask.vlan_vld) 7663 fconf |= F_VLAN; 7664 7665 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 7666 fconf |= F_VNIC_ID; 7667 if (tpp->ingress_config & F_VNIC) 7668 return (EINVAL); 7669 } 7670 7671 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 7672 fconf |= F_VNIC_ID; 7673 if ((tpp->ingress_config & F_VNIC) == 0) 7674 return (EINVAL); 7675 } 7676 7677 if (fs->val.iport || fs->mask.iport) 7678 fconf |= F_PORT; 7679 7680 if (fs->val.fcoe || fs->mask.fcoe) 7681 fconf |= F_FCOE; 7682 7683 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 7684 return (E2BIG); 7685 7686 return (0); 7687 } 7688 7689 static int 7690 get_filter_mode(struct adapter *sc, uint32_t *mode) 7691 { 7692 struct tp_params *tpp = &sc->params.tp; 7693 7694 /* 7695 * We trust the cached values of the relevant TP registers. This means 7696 * things work reliably only if writes to those registers are always via 7697 * t4_set_filter_mode. 7698 */ 7699 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 7700 7701 return (0); 7702 } 7703 7704 static int 7705 set_filter_mode(struct adapter *sc, uint32_t mode) 7706 { 7707 struct tp_params *tpp = &sc->params.tp; 7708 uint32_t fconf, iconf; 7709 int rc; 7710 7711 iconf = mode_to_iconf(mode); 7712 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 7713 /* 7714 * For now we just complain if A_TP_INGRESS_CONFIG is not 7715 * already set to the correct value for the requested filter 7716 * mode. It's not clear if it's safe to write to this register 7717 * on the fly. (And we trust the cached value of the register). 
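Flipping F_VNIC would also change how the hardware interprets the vnic_id slice of every filter already programmed (PF/VF match when set, outer VLAN when clear), so EBUSY is the conservative answer.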
7718 */ 7719 return (EBUSY); 7720 } 7721 7722 fconf = mode_to_fconf(mode); 7723 7724 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7725 "t4setfm"); 7726 if (rc) 7727 return (rc); 7728 7729 if (sc->tids.ftids_in_use > 0) { 7730 rc = EBUSY; 7731 goto done; 7732 } 7733 7734 #ifdef TCP_OFFLOAD 7735 if (uld_active(sc, ULD_TOM)) { 7736 rc = EBUSY; 7737 goto done; 7738 } 7739 #endif 7740 7741 rc = -t4_set_filter_mode(sc, fconf); 7742 done: 7743 end_synchronized_op(sc, LOCK_HELD); 7744 return (rc); 7745 } 7746 7747 static inline uint64_t 7748 get_filter_hits(struct adapter *sc, uint32_t fid) 7749 { 7750 uint32_t tcb_addr; 7751 7752 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 7753 (fid + sc->tids.ftid_base) * TCB_SIZE; 7754 7755 if (is_t4(sc)) { 7756 uint64_t hits; 7757 7758 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 7759 return (be64toh(hits)); 7760 } else { 7761 uint32_t hits; 7762 7763 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 7764 return (be32toh(hits)); 7765 } 7766 } 7767 7768 static int 7769 get_filter(struct adapter *sc, struct t4_filter *t) 7770 { 7771 int i, rc, nfilters = sc->tids.nftids; 7772 struct filter_entry *f; 7773 7774 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7775 "t4getf"); 7776 if (rc) 7777 return (rc); 7778 7779 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 7780 t->idx >= nfilters) { 7781 t->idx = 0xffffffff; 7782 goto done; 7783 } 7784 7785 f = &sc->tids.ftid_tab[t->idx]; 7786 for (i = t->idx; i < nfilters; i++, f++) { 7787 if (f->valid) { 7788 t->idx = i; 7789 t->l2tidx = f->l2t ? f->l2t->idx : 0; 7790 t->smtidx = f->smtidx; 7791 if (f->fs.hitcnts) 7792 t->hits = get_filter_hits(sc, t->idx); 7793 else 7794 t->hits = UINT64_MAX; 7795 t->fs = f->fs; 7796 7797 goto done; 7798 } 7799 } 7800 7801 t->idx = 0xffffffff; 7802 done: 7803 end_synchronized_op(sc, LOCK_HELD); 7804 return (0); 7805 } 7806 7807 static int 7808 set_filter(struct adapter *sc, struct t4_filter *t) 7809 { 7810 unsigned int nfilters, nports; 7811 struct filter_entry *f; 7812 int i, rc; 7813 7814 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 7815 if (rc) 7816 return (rc); 7817 7818 nfilters = sc->tids.nftids; 7819 nports = sc->params.nports; 7820 7821 if (nfilters == 0) { 7822 rc = ENOTSUP; 7823 goto done; 7824 } 7825 7826 if (!(sc->flags & FULL_INIT_DONE)) { 7827 rc = EAGAIN; 7828 goto done; 7829 } 7830 7831 if (t->idx >= nfilters) { 7832 rc = EINVAL; 7833 goto done; 7834 } 7835 7836 /* Validate against the global filter mode and ingress config */ 7837 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 7838 if (rc != 0) 7839 goto done; 7840 7841 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 7842 rc = EINVAL; 7843 goto done; 7844 } 7845 7846 if (t->fs.val.iport >= nports) { 7847 rc = EINVAL; 7848 goto done; 7849 } 7850 7851 /* Can't specify an iq if not steering to it */ 7852 if (!t->fs.dirsteer && t->fs.iq) { 7853 rc = EINVAL; 7854 goto done; 7855 } 7856 7857 /* IPv6 filter idx must be 4 aligned */ 7858 if (t->fs.type == 1 && 7859 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 7860 rc = EINVAL; 7861 goto done; 7862 } 7863 7864 if (sc->tids.ftid_tab == NULL) { 7865 KASSERT(sc->tids.ftids_in_use == 0, 7866 ("%s: no memory allocated but filters_in_use > 0", 7867 __func__)); 7868 7869 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 7870 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 7871 if (sc->tids.ftid_tab == NULL) { 7872 rc = ENOMEM; 7873 goto done; 7874 } 7875 
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode and ingress config */
	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
	if (rc != 0)
		goto done;

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}

static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}

static void
clear_filter(struct filter_entry *f)
{
	if (f->l2t)
		t4_l2t_release(f->l2t);

	bzero(f, sizeof (*f));
}
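/*
 * The fw_filter_wr built below is sent on the management queue; the firmware
 * replies on the firmware event queue, which is why the work request carries
 * sc->sge.fwq.abs_id in its rx_chan_rx_rpl_iq field.
 */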
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	unsigned int ftid, vnic_vld, vnic_vld_mask;
	struct wrq_cookie cookie;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Already validated against fconf, iconf */
	MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0);
	MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0);
	if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld)
		vnic_vld = 1;
	else
		vnic_vld = 0;
	if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld)
		vnic_vld_mask = 1;
	else
		vnic_vld_mask = 0;

	ftid = sc->tids.ftid_base + fidx;

	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL)
		return (ENOMEM);
	bzero(fwr, sizeof(*fwr));

	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
	    V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
	    V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
	    V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
	    V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
	    V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	f->pending = 1;
	sc->tids.ftids_in_use++;

	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	return (0);
}

static int
del_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct fw_filter_wr *fwr;
	unsigned int ftid;
	struct wrq_cookie cookie;

	ftid = sc->tids.ftid_base + fidx;

	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
	if (fwr == NULL)
		return (ENOMEM);
	bzero(fwr, sizeof (*fwr));

	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);

	f->pending = 1;
	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
	return (0);
}

int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_ftid(sc, idx)) {

		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
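/*
 * The helpers from here through read_i2c() service the corresponding
 * CHELSIO_T4_* ioctls dispatched from t4_ioctl() below.
 */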
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly
	 * via the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

#define MAX_READ_BUF_SIZE (128 * 1024)
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, remaining, n;
	uint32_t *buf;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		n = min(remaining, MAX_READ_BUF_SIZE);
		read_via_memwin(sc, 2, addr, buf, n);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
#undef MAX_READ_BUF_SIZE

static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
	int rc;

	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
		return (EINVAL);

	if (i2cd->len > sizeof(i2cd->data))
		return (EFBIG);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
	if (rc)
		return (rc);
	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, i2cd->len, &i2cd->data[0]);
	end_synchronized_op(sc, 0);

	return (rc);
}

static int
in_range(int val, int lo, int hi)
{

	return (val < 0 || (val <= hi && val >= lo));
}
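/*
 * Note that in_range() above deliberately accepts negative values: the ioctl
 * interface uses -1 for "parameter not supplied", and set_sched_class() either
 * substitutes a firmware default or fails the request when such a parameter
 * is mandatory for the chosen scheduling level.
 */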
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.
	 * (The sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ... */
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ... */
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Vet our parameters ... */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, sc->chip_params->nsched_cls) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * are required ...
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize, 1);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct vi_info *vi;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	/* XXX: Only supported for the main VI. */
	pi = sc->port[p->port];
	vi = &pi->vi[0];
	if (!in_range(p->queue, 0, vi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If op.queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[vi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the
	 * interface.
	 */
	for_each_txq(vi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
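/*
 * The t4_os_* functions below are the OS-facing shims called from the shared
 * common/ code; they wrap the corresponding FreeBSD PCI and ifnet facilities.
 */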
int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}

int
t4_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t4_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
	struct port_info *pi = sc->port[idx];
	struct vi_info *vi;
	struct ifnet *ifp;
	int v;
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	for_each_vi(pi, v, vi) {
		build_medialist(pi, &vi->media);
	}

	ifp = pi->vi[0].ifp;
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(ifp, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	} else {
		if_printf(ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}

void
t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
{
	struct port_info *pi = sc->port[idx];
	struct vi_info *vi;
	struct ifnet *ifp;
	int v;

	if (link_stat)
		pi->linkdnrc = -1;
	else {
		if (reason >= 0)
			pi->linkdnrc = reason;
	}
	for_each_vi(pi, v, vi) {
		ifp = vi->ifp;
		if (ifp == NULL)
			continue;

		if (link_stat) {
			ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}
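/*
 * Example (illustrative): t4_iterate() lets other parts of the driver visit
 * every adapter on t4_list.  A hypothetical caller:
 *
 *	static void
 *	count_adapters(struct adapter *sc, void *arg)	// hypothetical helper
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_adapters, &n);
 */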
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}

static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i, v;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;
		struct vi_info *vi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;
		mtx_lock(&sc->reg_lock);
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE)
				t4_clr_vi_stats(sc, vi->viid);
		}
		mtx_unlock(&sc->reg_lock);

		/*
		 * Since this command accepts a port, clear stats for
		 * all VIs on this port.
		 */
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE) {
				struct sge_rxq *rxq;
				struct sge_txq *txq;
				struct sge_wrq *wrq;

				if (vi->flags & VI_NETMAP)
					continue;

				for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
					rxq->lro.lro_queued = 0;
					rxq->lro.lro_flushed = 0;
#endif
					rxq->rxcsum = 0;
					rxq->vlan_extraction = 0;
				}

				for_each_txq(vi, i, txq) {
					txq->txcsum = 0;
					txq->tso_wrs = 0;
					txq->vlan_insertion = 0;
					txq->imm_wrs = 0;
					txq->sgl_wrs = 0;
					txq->txpkt_wrs = 0;
					txq->txpkts0_wrs = 0;
					txq->txpkts1_wrs = 0;
					txq->txpkts0_pkts = 0;
					txq->txpkts1_pkts = 0;
					mp_ring_reset_stats(txq->r);
				}

#ifdef TCP_OFFLOAD
				/* nothing to clear for each ofld_rxq */

				for_each_ofld_txq(vi, i, wrq) {
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
#endif

				if (IS_MAIN_VI(vi)) {
					wrq = &sc->sge.ctrlq[pi->port_id];
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
			}
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}

void
t4_db_full(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}

void
t4_db_dropped(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}

#ifdef TCP_OFFLOAD
void
t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order)
{

	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
	    V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
	    V_HPZ3(pgsz_order[3]));
}
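/*
 * TOE state is tracked per port: pi->uld_vis counts the VIs on the port with
 * IFCAP_TOE enabled and sc->offload_map keeps a bit per port.  Note that
 * disabling TOE clears the port's bit but does not deactivate the TOM ULD
 * itself.
 */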
static int
toe_capability(struct vi_info *vi, int enable)
{
	int rc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
			/* TOE is already enabled. */
			return (0);
		}

		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(vi->flags & VI_INIT_DONE)) {
			rc = cxgbe_init_synchronized(vi);
			if (rc)
				return (rc);
		}
		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
			rc = cxgbe_init_synchronized(&pi->vi[0]);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id)) {
			/* TOE is enabled on another VI of this port. */
			pi->uld_vis++;
			return (0);
		}

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		pi->uld_vis++;
		setbit(&sc->offload_map, pi->port_id);
	} else {
		pi->uld_vis--;

		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}

/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}
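/*
 * Example (illustrative; field names assumed per struct uld_info): an upper
 * layer driver such as t4_tom registers itself at module load time, roughly:
 *
 *	static struct uld_info tom_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = t4_tom_activate,
 *		.deactivate = t4_tom_deactivate,
 *	};
 *
 *	t4_register_uld(&tom_uld_info);
 *
 * t4_activate_uld() below then calls ui->activate() on first use.
 */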
int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
#endif
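/*
 * Example (illustrative): the tunables handled below are normally set from
 * loader.conf(5), e.g.
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *
 * Anything still at its compile-time default of -1, or out of range, is
 * corrected here using the number of CPUs as a guide.
 */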
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1) {
#ifdef RSS
		t4_ntxq10g = rss_getnumbuckets();
#else
		t4_ntxq10g = min(nc, NTXQ_10G);
#endif
	}

	if (t4_ntxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_ntxq1g = rss_getnumbuckets();
#else
		t4_ntxq1g = min(nc, NTXQ_1G);
#endif
	}

	if (t4_nrxq10g < 1) {
#ifdef RSS
		t4_nrxq10g = rss_getnumbuckets();
#else
		t4_nrxq10g = min(nc, NRXQ_10G);
#endif
	}

	if (t4_nrxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_nrxq1g = rss_getnumbuckets();
#else
		t4_nrxq1g = min(nc, NRXQ_1G);
#endif
	}

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;

	if (t4_rdmacaps_allowed == -1) {
		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
		    FW_CAPS_CONFIG_RDMA_RDMAC;
	}

	if (t4_iscsicaps_allowed == -1) {
		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
		    FW_CAPS_CONFIG_ISCSI_T10DIF;
	}
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;

	if (t4_rdmacaps_allowed == -1)
		t4_rdmacaps_allowed = 0;

	if (t4_iscsicaps_allowed == -1)
		t4_iscsicaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	if (t4_nnmtxq10g < 1)
		t4_nnmtxq10g = min(nc, NNMTXQ_10G);

	if (t4_nnmtxq1g < 1)
		t4_nnmtxq1g = min(nc, NNMTXQ_1G);

	if (t4_nnmrxq10g < 1)
		t4_nnmrxq10g = min(nc, NNMRXQ_10G);

	if (t4_nnmrxq1g < 1)
		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}

static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}
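/*
 * Module glue: t4nex/t5nex attach to the PCI bus, the cxgbe/cxl port drivers
 * attach to their respective nexus, and the vcxgbe/vcxl virtual interface
 * drivers attach to the ports.
 */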
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);