/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
        DEVMETHOD(device_probe, t4_probe),
        DEVMETHOD(device_attach, t4_attach),
        DEVMETHOD(device_detach, t4_detach),

        DEVMETHOD_END
};
static driver_t t4_driver = {
        "t4nex",
        t4_methods,
        sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
        DEVMETHOD(device_probe, cxgbe_probe),
        DEVMETHOD(device_attach, cxgbe_attach),
        DEVMETHOD(device_detach, cxgbe_detach),
        { 0, 0 }
};
static driver_t cxgbe_driver = {
        "cxgbe",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
        DEVMETHOD(device_probe, vcxgbe_probe),
        DEVMETHOD(device_attach, vcxgbe_attach),
        DEVMETHOD(device_detach, vcxgbe_detach),
        { 0, 0 }
};
static driver_t vcxgbe_driver = {
        "vcxgbe",
        vcxgbe_methods,
        sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
        .d_version = D_VERSION,
        .d_flags = 0,
        .d_open = t4_open,
        .d_close = t4_close,
        .d_ioctl = t4_ioctl,
        .d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
        DEVMETHOD(device_probe, t5_probe),
        DEVMETHOD(device_attach, t4_attach),
        DEVMETHOD(device_detach, t4_detach),

        DEVMETHOD_END
};
static driver_t t5_driver = {
        "t5nex",
        t5_methods,
        sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
        "cxl",
        cxgbe_methods,
        sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
        "vcxl",
        vcxgbe_methods,
        sizeof(struct vi_info)
};

static struct cdevsw t5_cdevsw = {
        .d_version = D_VERSION,
        .d_flags = 0,
        .d_open = t4_open,
        .d_close = t4_close,
        .d_ioctl = t4_ioctl,
        .d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif
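/*
 * Illustrative sketch (not part of the driver) of the lock order stated
 * above: t4_list_lock is taken before any per-adapter lock, and
 * t4_uld_list_lock comes last.  A walk over all adapters would look like:
 *
 *      sx_slock(&t4_list_lock);
 *      SLIST_FOREACH(sc, &t4_list, link) {
 *              ADAPTER_LOCK(sc);
 *              ... inspect *sc ...
 *              ADAPTER_UNLOCK(sc);
 *      }
 *      sx_sunlock(&t4_list_lock);
 */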
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it
 * should provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific
 * to T5 are under hw.cxl.
 */
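/*
 * Example (hypothetical values, not defaults) of overriding some of these
 * tunables from /boot/loader.conf; anything left unset keeps its
 * compiled-in or tweak_tunables() default:
 *
 *      hw.cxgbe.ntxq10g="8"
 *      hw.cxgbe.nrxq10g="8"
 *      hw.cxgbe.config_file="uwire"
 */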
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);

#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);

#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);

#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF "default"
#define FLASH_CF "flash"
#define UWIRE_CF "uwire"
#define FPGA_CF "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
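/*
 * Worked examples for the two bit-encoded tunables above (values are
 * illustrative): hw.cxgbe.interrupt_types=4 allows MSI-X only, while the
 * default of 7 allows all three types and the driver picks the best one
 * available.  hw.cxgbe.pause_settings=1 heeds incoming PAUSE frames but
 * never transmits any, 3 (the default) enables both rx_pause and tx_pause,
 * and 0 disables flow control entirely.
 */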
/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited,
 * allowed, encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that
 * the chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;     /* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
        FW_VI_FUNC_OFLD,
        FW_VI_FUNC_IWARP,
        FW_VI_FUNC_OPENISCSI,
        FW_VI_FUNC_OPENFCOE,
        FW_VI_FUNC_FOISCSI,
        FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
        uint16_t intr_type;     /* INTx, MSI, or MSI-X */
        uint16_t nirq;          /* Total # of vectors */
        uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
        uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */
        uint16_t ntxq10g;       /* # of NIC txq's for each 10G port */
        uint16_t nrxq10g;       /* # of NIC rxq's for each 10G port */
        uint16_t ntxq1g;        /* # of NIC txq's for each 1G port */
        uint16_t nrxq1g;        /* # of NIC rxq's for each 1G port */
        uint16_t rsrv_noflowq;  /* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
        uint16_t nofldtxq10g;   /* # of TOE txq's for each 10G port */
        uint16_t nofldrxq10g;   /* # of TOE rxq's for each 10G port */
        uint16_t nofldtxq1g;    /* # of TOE txq's for each 1G port */
        uint16_t nofldrxq1g;    /* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
        uint16_t nnmtxq10g;     /* # of netmap txq's for each 10G port */
        uint16_t nnmrxq10g;     /* # of netmap rxq's for each 10G port */
        uint16_t nnmtxq1g;      /* # of netmap txq's for each 1G port */
        uint16_t nnmrxq1g;      /* # of netmap rxq's for each 1G port */
#endif
};

struct filter_entry {
        uint32_t valid:1;       /* filter allocated and valid */
        uint32_t locked:1;      /* filter is administratively locked */
        uint32_t pending:1;     /* filter action is pending firmware reply */
        uint32_t smtidx:8;      /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;
};

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static void t4_sysctls(struct adapter *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
        uint16_t device;
        char *desc;
} t4_pciids[] = {
        {0xa000, "Chelsio Terminator 4 FPGA"},
        {0x4400, "Chelsio T440-dbg"},
        {0x4401, "Chelsio T420-CR"},
        {0x4402, "Chelsio T422-CR"},
        {0x4403, "Chelsio T440-CR"},
        {0x4404, "Chelsio T420-BCH"},
        {0x4405, "Chelsio T440-BCH"},
        {0x4406, "Chelsio T440-CH"},
        {0x4407, "Chelsio T420-SO"},
        {0x4408, "Chelsio T420-CX"},
        {0x4409, "Chelsio T420-BT"},
        {0x440a, "Chelsio T404-BT"},
        {0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
        {0xb000, "Chelsio Terminator 5 FPGA"},
        {0x5400, "Chelsio T580-dbg"},
        {0x5401, "Chelsio T520-CR"},            /* 2 x 10G */
        {0x5402, "Chelsio T522-CR"},            /* 2 x 10G, 2 X 1G */
        {0x5403, "Chelsio T540-CR"},            /* 4 x 10G */
        {0x5407, "Chelsio T520-SO"},            /* 2 x 10G, nomem */
        {0x5409, "Chelsio T520-BT"},            /* 2 x 10GBaseT */
        {0x540a, "Chelsio T504-BT"},            /* 4 x 1G */
        {0x540d, "Chelsio T580-CR"},            /* 2 x 40G */
        {0x540e, "Chelsio T540-LP-CR"},         /* 4 x 10G */
        {0x5410, "Chelsio T580-LP-CR"},         /* 2 x 40G */
        {0x5411, "Chelsio T520-LL-CR"},         /* 2 x 10G */
        {0x5412, "Chelsio T560-CR"},            /* 1 x 40G, 2 x 10G */
        {0x5414, "Chelsio T580-LP-SO-CR"},      /* 2 x 40G, nomem */
        {0x5415, "Chelsio T502-BT"},            /* 2 x 1G */
#ifdef notyet
        {0x5404, "Chelsio T520-BCH"},
        {0x5405, "Chelsio T540-BCH"},
        {0x5406, "Chelsio T540-CH"},
        {0x5408, "Chelsio T520-CX"},
        {0x540b, "Chelsio B520-SR"},
        {0x540c, "Chelsio B504-BT"},
        {0x540f, "Chelsio Amsterdam"},
        {0x5413, "Chelsio T580-CHR"},
#endif
};
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
        int i;
        uint16_t v = pci_get_vendor(dev);
        uint16_t d = pci_get_device(dev);
        uint8_t f = pci_get_function(dev);

        if (v != PCI_VENDOR_ID_CHELSIO)
                return (ENXIO);

        /* Attach only to PF0 of the FPGA */
        if (d == 0xa000 && f != 0)
                return (ENXIO);

        for (i = 0; i < nitems(t4_pciids); i++) {
                if (d == t4_pciids[i].device) {
                        device_set_desc(dev, t4_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}

static int
t5_probe(device_t dev)
{
        int i;
        uint16_t v = pci_get_vendor(dev);
        uint16_t d = pci_get_device(dev);
        uint8_t f = pci_get_function(dev);

        if (v != PCI_VENDOR_ID_CHELSIO)
                return (ENXIO);

        /* Attach only to PF0 of the FPGA */
        if (d == 0xb000 && f != 0)
                return (ENXIO);

        for (i = 0; i < nitems(t5_pciids); i++) {
                if (d == t5_pciids[i].device) {
                        device_set_desc(dev, t5_pciids[i].desc);
                        return (BUS_PROBE_DEFAULT);
                }
        }

        return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
        device_t root_port;
        uint32_t v;

        /*
         * The T5 chips do not properly echo the No Snoop and Relaxed
         * Ordering attributes when replying to a TLP from a Root Port.  As
         * a workaround, find the parent Root Port and disable No Snoop and
         * Relaxed Ordering.  Note that this affects all devices under this
         * root port.
         */
        root_port = pci_find_pcie_root_port(dev);
        if (root_port == NULL) {
                device_printf(dev, "Unable to find parent root port\n");
                return;
        }

        v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
            PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
        if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
            0)
                device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
                    device_get_nameunit(root_port));
}
static int
t4_attach(device_t dev)
{
        struct adapter *sc;
        int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
        struct intrs_and_queues iaq;
        struct sge *s;
        uint8_t *buf;
#ifdef TCP_OFFLOAD
        int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
        int nm_rqidx, nm_tqidx;
#endif
        int num_vis;

        sc = device_get_softc(dev);
        sc->dev = dev;
        TUNABLE_INT_FETCH("hw.cxgbe.debug_flags", &sc->debug_flags);

        if ((pci_get_device(dev) & 0xff00) == 0x5400)
                t5_attribute_workaround(dev);
        pci_enable_busmaster(dev);
        if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
                uint32_t v;

                pci_set_max_read_req(dev, 4096);
                v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
                v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
                pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

                sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
        }
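        /*
         * Worked example for the mps computation just above (illustrative,
         * not from the original source): PCIEM_CTL_MAX_PAYLOAD is bits 7:5
         * of the PCIe device control register, so an encoded field value of
         * 2 means a max payload size of 128 << 2 = 512 bytes.
         */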
        sc->traceq = -1;
        mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
        snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
            device_get_nameunit(dev));

        snprintf(sc->lockname, sizeof(sc->lockname), "%s",
            device_get_nameunit(dev));
        mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
        sx_xlock(&t4_list_lock);
        SLIST_INSERT_HEAD(&t4_list, sc, link);
        sx_xunlock(&t4_list_lock);

        mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
        TAILQ_INIT(&sc->sfl);
        callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

        mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

        rc = map_bars_0_and_4(sc);
        if (rc != 0)
                goto done;      /* error message displayed already */

        /*
         * This is the real PF# to which we're attaching.  Works from within
         * PCI passthrough environments too, where pci_get_function() could
         * return a different PF# depending on the passthrough configuration.
         * We need to use the real PF# in all our communication with the
         * firmware.
         */
        sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
        sc->mbox = sc->pf;

        memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
        sc->an_handler = an_not_handled;
        for (i = 0; i < nitems(sc->cpl_handler); i++)
                sc->cpl_handler[i] = cpl_not_handled;
        for (i = 0; i < nitems(sc->fw_msg_handler); i++)
                sc->fw_msg_handler[i] = fw_msg_not_handled;
        t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
        t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
        t4_register_cpl_handler(sc, CPL_T5_TRACE_PKT, t5_trace_pkt);
        t4_init_sge_cpl_handlers(sc);

        /* Prepare the adapter for operation. */
        buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
        rc = -t4_prep_adapter(sc, buf);
        free(buf, M_CXGBE);
        if (rc != 0) {
                device_printf(dev, "failed to prepare adapter: %d.\n", rc);
                goto done;
        }

        /*
         * Do this really early, with the memory windows set up even before
         * the character device.  The userland tool's register i/o and mem
         * read will work even in "recovery mode".
         */
        setup_memwin(sc);
        if (t4_init_devlog_params(sc, 0) == 0)
                fixup_devlog_params(sc);
        sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
            device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
            device_get_nameunit(dev));
        if (sc->cdev == NULL)
                device_printf(dev, "failed to create nexus char device.\n");
        else
                sc->cdev->si_drv1 = sc;

        /* Go no further if recovery mode has been requested. */
        if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
                device_printf(dev, "recovery mode.\n");
                goto done;
        }

#if defined(__i386__)
        if ((cpu_feature & CPUID_CX8) == 0) {
                device_printf(dev, "64 bit atomics not available.\n");
                rc = ENOTSUP;
                goto done;
        }
#endif

        /* Prepare the firmware for operation */
        rc = prep_firmware(sc);
        if (rc != 0)
                goto done;      /* error message displayed already */

        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done;      /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done;      /* error message displayed already */

        rc = map_bar_2(sc);
        if (rc != 0)
                goto done;      /* error message displayed already */

        rc = t4_create_dma_tag(sc);
        if (rc != 0)
                goto done;      /* error message displayed already */

        /*
         * Number of VIs to create per-port.  The first VI is the "main"
         * regular VI for the port.  The second VI is used for netmap if
         * present, and any remaining VIs are used for additional virtual
         * interfaces.
         *
         * Limit the number of VIs per port to the number of available MAC
         * addresses per port.
         */
        if (t4_num_vis >= 1)
                num_vis = t4_num_vis;
        else
                num_vis = 1;
#ifdef DEV_NETMAP
        num_vis++;
#endif
        if (num_vis > nitems(vi_mac_funcs)) {
                num_vis = nitems(vi_mac_funcs);
                device_printf(dev, "Number of VIs limited to %d\n", num_vis);
        }
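        /*
         * Example (hypothetical setting, not from the original source):
         * with hw.cxgbe.num_vis=2 and DEV_NETMAP compiled in, each T5 port
         * ends up with 3 VIs: the main interface (cxl), a netmap interface
         * (ncxl), and one extra virtual interface (vcxl).  The cap above
         * limits this to nitems(vi_mac_funcs) == 6 VIs per port.
         */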
        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.  We also figure
         * out whether a port is 10G or 1G and use that information when
         * calculating how many interrupts to attempt to allocate.
         */
        n10g = n1g = 0;
        for_each_port(sc, i) {
                struct port_info *pi;
                struct vi_info *vi;

                pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                pi->port_id = i;
                pi->nvi = num_vis;
                pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
                    M_ZERO | M_WAITOK);

                /*
                 * Allocate the "main" VI and initialize parameters
                 * like mac addr.
                 */
                rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
                if (rc != 0) {
                        device_printf(dev, "unable to initialize port %d: %d\n",
                            i, rc);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
                pi->link_cfg.requested_fc |= t4_pause_settings;
                pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
                pi->link_cfg.fc |= t4_pause_settings;

                rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
                if (rc != 0) {
                        device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                        sc->port[i] = NULL;
                        goto done;
                }

                snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
                    device_get_nameunit(dev), i);
                mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
                sc->chan_map[pi->tx_chan] = i;

                if (is_10G_port(pi) || is_40G_port(pi)) {
                        n10g++;
                        for_each_vi(pi, j, vi) {
                                vi->tmr_idx = t4_tmr_idx_10g;
                                vi->pktc_idx = t4_pktc_idx_10g;
                        }
                } else {
                        n1g++;
                        for_each_vi(pi, j, vi) {
                                vi->tmr_idx = t4_tmr_idx_1g;
                                vi->pktc_idx = t4_pktc_idx_1g;
                        }
                }

                pi->linkdnrc = -1;

                for_each_vi(pi, j, vi) {
                        vi->qsize_rxq = t4_qsize_rxq;
                        vi->qsize_txq = t4_qsize_txq;
                        vi->pi = pi;
                }

                pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
                if (pi->dev == NULL) {
                        device_printf(dev,
                            "failed to add device for port %d.\n", i);
                        rc = ENXIO;
                        goto done;
                }
                pi->vi[0].dev = pi->dev;
                device_set_softc(pi->dev, pi);
        }

        /*
         * Interrupt type, # of interrupts, # of rx/tx queues, etc.
         */
#ifdef DEV_NETMAP
        num_vis--;
#endif
        rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
        if (rc != 0)
                goto done;      /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;

        s = &sc->sge;
        s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
        s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
        if (num_vis > 1) {
                s->nrxq += (n10g + n1g) * (num_vis - 1);
                s->ntxq += (n10g + n1g) * (num_vis - 1);
        }
        s->neq = s->ntxq + s->nrxq;     /* the free list in an rxq is an eq */
        s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
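        /*
         * Worked example of the arithmetic above (hypothetical adapter): a
         * 2-port 10G NIC with num_vis=1, iaq.ntxq10g=16 and iaq.nrxq10g=8
         * gives s->ntxq = 32 and s->nrxq = 16, so s->neq = 32 + 16 + 2 + 1
         * = 51 (each rxq's free list is an eq, plus one ctrl queue per port
         * and one mgmt queue) and s->niq = 16 + 1 = 17, before the offload
         * and netmap queues below are added.
         */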
#ifdef TCP_OFFLOAD
        if (is_offload(sc)) {
                s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
                s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
                if (num_vis > 1) {
                        s->nofldrxq += (n10g + n1g) * (num_vis - 1);
                        s->nofldtxq += (n10g + n1g) * (num_vis - 1);
                }
                s->neq += s->nofldtxq + s->nofldrxq;
                s->niq += s->nofldrxq;

                s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
                    M_CXGBE, M_ZERO | M_WAITOK);
                s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
                    M_CXGBE, M_ZERO | M_WAITOK);
        }
#endif
#ifdef DEV_NETMAP
        s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
        s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
        s->neq += s->nnmtxq + s->nnmrxq;
        s->niq += s->nnmrxq;

        s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
            M_CXGBE, M_ZERO | M_WAITOK);
        s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
            M_CXGBE, M_ZERO | M_WAITOK);
#endif

        s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
            M_ZERO | M_WAITOK);
        s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
            M_ZERO | M_WAITOK);

        sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
            M_ZERO | M_WAITOK);

        t4_init_l2t(sc, M_WAITOK);

        /*
         * Second pass over the ports.  This time we know the number of rx
         * and tx queues that each port should get.
         */
        rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
        ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
        nm_rqidx = nm_tqidx = 0;
#endif
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];
                struct vi_info *vi;

                if (pi == NULL)
                        continue;

                for_each_vi(pi, j, vi) {
#ifdef DEV_NETMAP
                        if (j == 1) {
                                vi->flags |= VI_NETMAP | INTR_RXQ;
                                vi->first_rxq = nm_rqidx;
                                vi->first_txq = nm_tqidx;
                                if (is_10G_port(pi) || is_40G_port(pi)) {
                                        vi->nrxq = iaq.nnmrxq10g;
                                        vi->ntxq = iaq.nnmtxq10g;
                                } else {
                                        vi->nrxq = iaq.nnmrxq1g;
                                        vi->ntxq = iaq.nnmtxq1g;
                                }
                                nm_rqidx += vi->nrxq;
                                nm_tqidx += vi->ntxq;
                                continue;
                        }
#endif

                        vi->first_rxq = rqidx;
                        vi->first_txq = tqidx;
                        if (is_10G_port(pi) || is_40G_port(pi)) {
                                vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
                                vi->nrxq = j == 0 ? iaq.nrxq10g : 1;
                                vi->ntxq = j == 0 ? iaq.ntxq10g : 1;
                        } else {
                                vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
                                vi->nrxq = j == 0 ? iaq.nrxq1g : 1;
                                vi->ntxq = j == 0 ? iaq.ntxq1g : 1;
                        }

                        if (vi->ntxq > 1)
                                vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
                        else
                                vi->rsrv_noflowq = 0;

                        rqidx += vi->nrxq;
                        tqidx += vi->ntxq;

#ifdef TCP_OFFLOAD
                        if (!is_offload(sc))
                                continue;
                        vi->first_ofld_rxq = ofld_rqidx;
                        vi->first_ofld_txq = ofld_tqidx;
                        if (is_10G_port(pi) || is_40G_port(pi)) {
                                vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
                                vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1;
                                vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1;
                        } else {
                                vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
                                vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1;
                                vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1;
                        }
                        ofld_rqidx += vi->nofldrxq;
                        ofld_tqidx += vi->nofldtxq;
#endif
                }
        }

        rc = setup_intr_handlers(sc);
        if (rc != 0) {
                device_printf(dev,
                    "failed to setup interrupt handlers: %d\n", rc);
                goto done;
        }

        rc = bus_generic_attach(dev);
        if (rc != 0) {
                device_printf(dev,
                    "failed to attach all child ports: %d\n", rc);
                goto done;
        }

        device_printf(dev,
            "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
            sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
            sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
            (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
            sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

        t4_set_desc(sc);

done:
        if (rc != 0 && sc->cdev) {
                /* cdev was created and so cxgbetool works; recover that way. */
                device_printf(dev,
                    "error during attach, adapter is now in recovery mode.\n");
                rc = 0;
        }
        if (rc != 0)
                t4_detach(dev);
        else
                t4_sysctls(sc);

        return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
        struct adapter *sc;
        struct port_info *pi;
        int i, rc;

        sc = device_get_softc(dev);

        if (sc->flags & FULL_INIT_DONE)
                t4_intr_disable(sc);

        if (sc->cdev) {
                destroy_dev(sc->cdev);
                sc->cdev = NULL;
        }

        rc = bus_generic_detach(dev);
        if (rc) {
                device_printf(dev,
                    "failed to detach child devices: %d\n", rc);
                return (rc);
        }

        for (i = 0; i < sc->intr_count; i++)
                t4_free_irq(sc, &sc->irq[i]);

        for (i = 0; i < MAX_NPORTS; i++) {
                pi = sc->port[i];
                if (pi) {
                        t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
                        if (pi->dev)
                                device_delete_child(dev, pi->dev);

                        mtx_destroy(&pi->pi_lock);
                        free(pi->vi, M_CXGBE);
                        free(pi, M_CXGBE);
                }
        }

        if (sc->flags & FULL_INIT_DONE)
                adapter_full_uninit(sc);

        if (sc->flags & FW_OK)
                t4_fw_bye(sc, sc->mbox);

        if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
                pci_release_msi(dev);

        if (sc->regs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
                    sc->regs_res);

        if (sc->udbs_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
                    sc->udbs_res);

        if (sc->msix_res)
                bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
                    sc->msix_res);

        if (sc->l2t)
                t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
        free(sc->sge.ofld_rxq, M_CXGBE);
        free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
        free(sc->sge.nm_rxq, M_CXGBE);
        free(sc->sge.nm_txq, M_CXGBE);
#endif
        free(sc->irq, M_CXGBE);
        free(sc->sge.rxq, M_CXGBE);
        free(sc->sge.txq, M_CXGBE);
        free(sc->sge.ctrlq, M_CXGBE);
        free(sc->sge.iqmap, M_CXGBE);
        free(sc->sge.eqmap, M_CXGBE);
        free(sc->tids.ftid_tab, M_CXGBE);
        t4_destroy_dma_tag(sc);
        if (mtx_initialized(&sc->sc_lock)) {
                sx_xlock(&t4_list_lock);
                SLIST_REMOVE(&t4_list, sc, adapter, link);
                sx_xunlock(&t4_list_lock);
                mtx_destroy(&sc->sc_lock);
        }

        callout_drain(&sc->sfl_callout);
        if (mtx_initialized(&sc->tids.ftid_lock))
                mtx_destroy(&sc->tids.ftid_lock);
        if (mtx_initialized(&sc->sfl_lock))
                mtx_destroy(&sc->sfl_lock);
        if (mtx_initialized(&sc->ifp_lock))
                mtx_destroy(&sc->ifp_lock);
        if (mtx_initialized(&sc->reg_lock))
                mtx_destroy(&sc->reg_lock);

        for (i = 0; i < NUM_MEMWIN; i++) {
                struct memwin *mw = &sc->memwin[i];

                if (rw_initialized(&mw->mw_lock))
                        rw_destroy(&mw->mw_lock);
        }

        bzero(sc, sizeof(*sc));

        return (0);
}

static int
cxgbe_probe(device_t dev)
{
        char buf[128];
        struct port_info *pi = device_get_softc(dev);

        snprintf(buf, sizeof(buf), "port %d", pi->port_id);
        device_set_desc_copy(dev, buf);

        return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
        struct ifnet *ifp;
        struct sbuf *sb;

        vi->xact_addr_filt = -1;
        callout_init(&vi->tick, 1);

        /* Allocate an ifnet and set it up */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Cannot allocate ifnet\n");
                return (ENOMEM);
        }
        vi->ifp = ifp;
        ifp->if_softc = vi;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

        ifp->if_init = cxgbe_init;
        ifp->if_ioctl = cxgbe_ioctl;
        ifp->if_transmit = cxgbe_transmit;
        ifp->if_qflush = cxgbe_qflush;
        ifp->if_get_counter = cxgbe_get_counter;

        ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
        if (vi->nofldrxq != 0)
                ifp->if_capabilities |= IFCAP_TOE;
#endif
        ifp->if_capenable = T4_CAP_ENABLE;
        ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
            CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

        ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
        ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
        ifp->if_hw_tsomaxsegsize = 65536;

        /* Initialize ifmedia for this VI */
        ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
            cxgbe_media_status);
        build_medialist(vi->pi, &vi->media);

        vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
            EVENTHANDLER_PRI_ANY);

        ether_ifattach(ifp, vi->hw_addr);

        sb = sbuf_new_auto();
        sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
        if (ifp->if_capabilities & IFCAP_TOE)
                sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
                    vi->nofldtxq, vi->nofldrxq);
#endif
        sbuf_finish(sb);
        device_printf(dev, "%s\n", sbuf_data(sb));
        sbuf_delete(sb);

        vi_sysctls(vi);

        return (0);
}
static int
cxgbe_attach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct vi_info *vi;
        int i, rc;

        callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

        rc = cxgbe_vi_attach(dev, &pi->vi[0]);
        if (rc)
                return (rc);

        for_each_vi(pi, i, vi) {
                if (i == 0)
                        continue;
#ifdef DEV_NETMAP
                if (vi->flags & VI_NETMAP) {
                        /*
                         * media handled here to keep
                         * implementation private to this file
                         */
                        ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
                            cxgbe_media_status);
                        build_medialist(pi, &vi->media);
                        vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
                            "ncxgbe" : "ncxl", device_get_unit(dev));
                } else
#endif
                        vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
                            "vcxgbe" : "vcxl", -1);
                if (vi->dev == NULL) {
                        device_printf(dev, "failed to add VI %d\n", i);
                        continue;
                }
                device_set_softc(vi->dev, vi);
        }

        cxgbe_sysctls(pi);

        bus_generic_attach(dev);

        return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
        struct ifnet *ifp = vi->ifp;

        ether_ifdetach(ifp);

        if (vi->vlan_c)
                EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

        /* Let detach proceed even if these fail. */
        cxgbe_uninit_synchronized(vi);
        callout_drain(&vi->tick);
        vi_full_uninit(vi);

        ifmedia_removeall(&vi->media);
        if_free(vi->ifp);
        vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
        struct port_info *pi = device_get_softc(dev);
        struct adapter *sc = pi->adapter;
        int rc;

        /* Detach the extra VIs first. */
        rc = bus_generic_detach(dev);
        if (rc)
                return (rc);
        device_delete_children(dev);

        doom_vi(sc, &pi->vi[0]);

        if (pi->flags & HAS_TRACEQ) {
                sc->traceq = -1;        /* cloner should not create ifnet */
                t4_tracer_port_detach(sc);
        }

        cxgbe_vi_detach(&pi->vi[0]);
        callout_drain(&pi->tick);

        end_synchronized_op(sc, 0);

        return (0);
}

static void
cxgbe_init(void *arg)
{
        struct vi_info *vi = arg;
        struct adapter *sc = vi->pi->adapter;

        if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
                return;
        cxgbe_init_synchronized(vi);
        end_synchronized_op(sc, 0);
}
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
        int rc = 0, mtu, flags, can_sleep;
        struct vi_info *vi = ifp->if_softc;
        struct adapter *sc = vi->pi->adapter;
        struct ifreq *ifr = (struct ifreq *)data;
        uint32_t mask;

        switch (cmd) {
        case SIOCSIFMTU:
                mtu = ifr->ifr_mtu;
                if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
                        return (EINVAL);

                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
                if (rc)
                        return (rc);
                ifp->if_mtu = mtu;
                if (vi->flags & VI_INIT_DONE) {
                        t4_update_fl_bufsize(ifp);
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(ifp, XGMAC_MTU);
                }
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFFLAGS:
                can_sleep = 0;
redo_sifflags:
                rc = begin_synchronized_op(sc, vi,
                    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
                if (rc)
                        return (rc);

                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                                flags = vi->if_flags;
                                if ((ifp->if_flags ^ flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        if (can_sleep == 1) {
                                                end_synchronized_op(sc, 0);
                                                can_sleep = 0;
                                                goto redo_sifflags;
                                        }
                                        rc = update_mac_settings(ifp,
                                            XGMAC_PROMISC | XGMAC_ALLMULTI);
                                }
                        } else {
                                if (can_sleep == 0) {
                                        end_synchronized_op(sc, LOCK_HELD);
                                        can_sleep = 1;
                                        goto redo_sifflags;
                                }
                                rc = cxgbe_init_synchronized(vi);
                        }
                        vi->if_flags = ifp->if_flags;
                } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                        if (can_sleep == 0) {
                                end_synchronized_op(sc, LOCK_HELD);
                                can_sleep = 1;
                                goto redo_sifflags;
                        }
                        rc = cxgbe_uninit_synchronized(vi);
                }
                end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI: /* these two are called with a mutex held :-( */
                rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
                if (rc)
                        return (rc);
                if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                        rc = update_mac_settings(ifp, XGMAC_MCADDRS);
                end_synchronized_op(sc, LOCK_HELD);
                break;

        case SIOCSIFCAP:
                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
                if (rc)
                        return (rc);

                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        if (IFCAP_TSO4 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO4;
                                if_printf(ifp,
                                    "tso4 disabled due to -txcsum.\n");
                        }
                }
                if (mask & IFCAP_TXCSUM_IPV6) {
                        ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
                        ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

                        if (IFCAP_TSO6 & ifp->if_capenable &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO6;
                                if_printf(ifp,
                                    "tso6 disabled due to -txcsum6.\n");
                        }
                }
                if (mask & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                if (mask & IFCAP_RXCSUM_IPV6)
                        ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

                /*
                 * Note that we leave CSUM_TSO alone (it is always set).  The
                 * kernel takes both IFCAP_TSOx and CSUM_TSO into account
                 * before sending a TSO request our way, so it's sufficient
                 * to toggle IFCAP_TSOx only.
                 */
                if (mask & IFCAP_TSO4) {
                        if (!(IFCAP_TSO4 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO4;
                }
                if (mask & IFCAP_TSO6) {
                        if (!(IFCAP_TSO6 & ifp->if_capenable) &&
                            !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
                                if_printf(ifp, "enable txcsum6 first.\n");
                                rc = EAGAIN;
                                goto fail;
                        }
                        ifp->if_capenable ^= IFCAP_TSO6;
                }
                if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
                        int i;
                        struct sge_rxq *rxq;

                        ifp->if_capenable ^= IFCAP_LRO;
                        for_each_rxq(vi, i, rxq) {
                                if (ifp->if_capenable & IFCAP_LRO)
                                        rxq->iq.flags |= IQ_LRO_ENABLED;
                                else
                                        rxq->iq.flags &= ~IQ_LRO_ENABLED;
                        }
#endif
                }
#ifdef TCP_OFFLOAD
                if (mask & IFCAP_TOE) {
                        int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

                        rc = toe_capability(vi, enable);
                        if (rc != 0)
                                goto fail;

                        ifp->if_capenable ^= mask;
                }
#endif
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                                rc = update_mac_settings(ifp, XGMAC_VLANEX);
                }
                if (mask & IFCAP_VLAN_MTU) {
                        ifp->if_capenable ^= IFCAP_VLAN_MTU;

                        /* Need to find out how to disable auto-mtu-inflation */
                }
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if (mask & IFCAP_VLAN_HWCSUM)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
                VLAN_CAPABILITIES(ifp);
#endif
fail:
                end_synchronized_op(sc, 0);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
                break;

        case SIOCGI2C: {
                struct ifi2creq i2c;

                rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
                if (rc != 0)
                        break;
                if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
                        rc = EPERM;
                        break;
                }
                if (i2c.len > sizeof(i2c.data)) {
                        rc = EINVAL;
                        break;
                }
                rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
                if (rc)
                        return (rc);
                rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
                    i2c.offset, i2c.len, &i2c.data[0]);
                end_synchronized_op(sc, 0);
                if (rc == 0)
                        rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
                break;
        }

        default:
                rc = ether_ioctl(ifp, cmd, data);
        }

        return (rc);
}
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
        struct vi_info *vi = ifp->if_softc;
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct sge_txq *txq;
        void *items[1];
        int rc;

        M_ASSERTPKTHDR(m);
        MPASS(m->m_nextpkt == NULL);    /* not quite ready for this yet */

        if (__predict_false(pi->link_cfg.link_ok == 0)) {
                m_freem(m);
                return (ENETDOWN);
        }

        rc = parse_pkt(&m);
        if (__predict_false(rc != 0)) {
                MPASS(m == NULL);       /* was freed already */
                atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */
                return (rc);
        }

        /* Select a txq. */
        txq = &sc->sge.txq[vi->first_txq];
        if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
                txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
                    vi->rsrv_noflowq);

        items[0] = m;
        rc = mp_ring_enqueue(txq->r, items, 1, 4096);
        if (__predict_false(rc != 0))
                m_freem(m);

        return (rc);
}
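/*
 * Worked example of the txq selection above (hypothetical numbers): with
 * vi->ntxq = 8 and vi->rsrv_noflowq = 1, packets that carry a flow id are
 * spread over txqs 1-7 (flowid % 7, then +1), while packets without a flow
 * id use txq 0, which stays reserved for them.
 */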
static void
cxgbe_qflush(struct ifnet *ifp)
{
        struct vi_info *vi = ifp->if_softc;
        struct sge_txq *txq;
        int i;

        /* queues do not exist if !VI_INIT_DONE. */
        if (vi->flags & VI_INIT_DONE) {
                for_each_txq(vi, i, txq) {
                        TXQ_LOCK(txq);
                        txq->eq.flags &= ~EQ_ENABLED;
                        TXQ_UNLOCK(txq);
                        while (!mp_ring_is_idle(txq->r)) {
                                mp_ring_check_drainage(txq->r, 0);
                                pause("qflush", 1);
                        }
                }
        }
        if_qflush(ifp);
}

static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
        struct vi_info *vi = ifp->if_softc;
        struct fw_vi_stats_vf *s = &vi->stats;

        vi_refresh_stats(vi->pi->adapter, vi);

        switch (c) {
        case IFCOUNTER_IPACKETS:
                return (s->rx_bcast_frames + s->rx_mcast_frames +
                    s->rx_ucast_frames);
        case IFCOUNTER_IERRORS:
                return (s->rx_err_frames);
        case IFCOUNTER_OPACKETS:
                return (s->tx_bcast_frames + s->tx_mcast_frames +
                    s->tx_ucast_frames + s->tx_offload_frames);
        case IFCOUNTER_OERRORS:
                return (s->tx_drop_frames);
        case IFCOUNTER_IBYTES:
                return (s->rx_bcast_bytes + s->rx_mcast_bytes +
                    s->rx_ucast_bytes);
        case IFCOUNTER_OBYTES:
                return (s->tx_bcast_bytes + s->tx_mcast_bytes +
                    s->tx_ucast_bytes + s->tx_offload_bytes);
        case IFCOUNTER_IMCASTS:
                return (s->rx_mcast_frames);
        case IFCOUNTER_OMCASTS:
                return (s->tx_mcast_frames);
        case IFCOUNTER_OQDROPS: {
                uint64_t drops;

                drops = 0;
                if ((vi->flags & (VI_INIT_DONE | VI_NETMAP)) == VI_INIT_DONE) {
                        int i;
                        struct sge_txq *txq;

                        for_each_txq(vi, i, txq)
                                drops += counter_u64_fetch(txq->r->drops);
                }

                return (drops);
        }

        default:
                return (if_get_counter_default(ifp, c));
        }
}

uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
        struct vi_info *vi = ifp->if_softc;
        struct port_info *pi = vi->pi;
        struct adapter *sc = pi->adapter;
        struct port_stats *s = &pi->stats;

        if (pi->nvi > 1)
                return (vi_get_counter(ifp, c));

        cxgbe_refresh_stats(sc, pi);

        switch (c) {
        case IFCOUNTER_IPACKETS:
                return (s->rx_frames - s->rx_pause);

        case IFCOUNTER_IERRORS:
                return (s->rx_jabber + s->rx_runt + s->rx_too_long +
                    s->rx_fcs_err + s->rx_len_err);

        case IFCOUNTER_OPACKETS:
                return (s->tx_frames - s->tx_pause);

        case IFCOUNTER_OERRORS:
                return (s->tx_error_frames);

        case IFCOUNTER_IBYTES:
                return (s->rx_octets - s->rx_pause * 64);

        case IFCOUNTER_OBYTES:
                return (s->tx_octets - s->tx_pause * 64);

        case IFCOUNTER_IMCASTS:
                return (s->rx_mcast_frames - s->rx_pause);

        case IFCOUNTER_OMCASTS:
                return (s->tx_mcast_frames - s->tx_pause);

        case IFCOUNTER_IQDROPS:
                return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
                    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
                    s->rx_trunc3 + pi->tnl_cong_drops);

        case IFCOUNTER_OQDROPS: {
                uint64_t drops;

                drops = s->tx_drop;
                if (vi->flags & VI_INIT_DONE) {
                        int i;
                        struct sge_txq *txq;

                        for_each_txq(vi, i, txq)
                                drops += counter_u64_fetch(txq->r->drops);
                }

                return (drops);
        }

        default:
                return (if_get_counter_default(ifp, c));
        }
}
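/*
 * A note on the byte counters above (rationale inferred, not stated in the
 * original source): PAUSE frames are minimum-size 64-byte Ethernet frames,
 * which appears to be why the byte counters subtract 64 octets per PAUSE
 * frame, so that only bytes carried by actual data traffic are reported.
 */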
static int
cxgbe_media_change(struct ifnet *ifp)
{
        struct vi_info *vi = ifp->if_softc;

        device_printf(vi->dev, "%s unimplemented.\n", __func__);

        return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct vi_info *vi = ifp->if_softc;
        struct port_info *pi = vi->pi;
        struct ifmedia_entry *cur;
        int speed = pi->link_cfg.speed;

        cur = vi->media.ifm_cur;

        ifmr->ifm_status = IFM_AVALID;
        if (!pi->link_cfg.link_ok)
                return;

        ifmr->ifm_status |= IFM_ACTIVE;

        /* active and current will differ iff current media is autoselect. */
        if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
                return;

        ifmr->ifm_active = IFM_ETHER | IFM_FDX;
        if (speed == 10000)
                ifmr->ifm_active |= IFM_10G_T;
        else if (speed == 1000)
                ifmr->ifm_active |= IFM_1000_T;
        else if (speed == 100)
                ifmr->ifm_active |= IFM_100_TX;
        else if (speed == 10)
                ifmr->ifm_active |= IFM_10_T;
        else
                KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
                    speed));
}

static int
vcxgbe_probe(device_t dev)
{
        char buf[128];
        struct vi_info *vi = device_get_softc(dev);

        snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
            vi - vi->pi->vi);
        device_set_desc_copy(dev, buf);

        return (BUS_PROBE_DEFAULT);
}

static int
vcxgbe_attach(device_t dev)
{
        struct vi_info *vi;
        struct port_info *pi;
        struct adapter *sc;
        int func, index, rc;
        u32 param, val;

        vi = device_get_softc(dev);
        pi = vi->pi;
        sc = pi->adapter;

        index = vi - pi->vi;
        KASSERT(index < nitems(vi_mac_funcs),
            ("%s: VI %s doesn't have a MAC func", __func__,
            device_get_nameunit(dev)));
        func = vi_mac_funcs[index];
        rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
            vi->hw_addr, &vi->rss_size, func, 0);
        if (rc < 0) {
                device_printf(dev, "Failed to allocate virtual interface "
                    "for port %d: %d\n", pi->port_id, -rc);
                return (-rc);
        }
        vi->viid = rc;

        param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
            V_FW_PARAMS_PARAM_YZ(vi->viid);
        rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
        if (rc)
                vi->rss_base = 0xffff;
        else {
                /* MPASS((val >> 16) == rss_size); */
                vi->rss_base = val & 0xffff;
        }

        rc = cxgbe_vi_attach(dev, vi);
        if (rc) {
                t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
                return (rc);
        }
        return (0);
}

static int
vcxgbe_detach(device_t dev)
{
        struct vi_info *vi;
        struct adapter *sc;

        vi = device_get_softc(dev);
        sc = vi->pi->adapter;

        doom_vi(sc, vi);

        cxgbe_vi_detach(vi);
        t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);

        end_synchronized_op(sc, 0);

        return (0);
}

void
t4_fatal_err(struct adapter *sc)
{
        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
            device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
        sc->regs_rid = PCIR_BAR(0);
        sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->regs_rid, RF_ACTIVE);
        if (sc->regs_res == NULL) {
                device_printf(sc->dev, "cannot map registers.\n");
                return (ENXIO);
        }
        sc->bt = rman_get_bustag(sc->regs_res);
        sc->bh = rman_get_bushandle(sc->regs_res);
        sc->mmio_len = rman_get_size(sc->regs_res);
        setbit(&sc->doorbells, DOORBELL_KDB);

        sc->msix_rid = PCIR_BAR(4);
        sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
            &sc->msix_rid, RF_ACTIVE);
        if (sc->msix_res == NULL) {
                device_printf(sc->dev, "cannot map MSI-X BAR.\n");
                return (ENXIO);
        }

        return (0);
}
DOORBELL_KDB); 1914 1915 sc->msix_rid = PCIR_BAR(4); 1916 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1917 &sc->msix_rid, RF_ACTIVE); 1918 if (sc->msix_res == NULL) { 1919 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1920 return (ENXIO); 1921 } 1922 1923 return (0); 1924 } 1925 1926 static int 1927 map_bar_2(struct adapter *sc) 1928 { 1929 1930 /* 1931 * T4: only iWARP driver uses the userspace doorbells. There is no need 1932 * to map it if RDMA is disabled. 1933 */ 1934 if (is_t4(sc) && sc->rdmacaps == 0) 1935 return (0); 1936 1937 sc->udbs_rid = PCIR_BAR(2); 1938 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1939 &sc->udbs_rid, RF_ACTIVE); 1940 if (sc->udbs_res == NULL) { 1941 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 1942 return (ENXIO); 1943 } 1944 sc->udbs_base = rman_get_virtual(sc->udbs_res); 1945 1946 if (is_t5(sc)) { 1947 setbit(&sc->doorbells, DOORBELL_UDB); 1948 #if defined(__i386__) || defined(__amd64__) 1949 if (t5_write_combine) { 1950 int rc; 1951 1952 /* 1953 * Enable write combining on BAR2. This is the 1954 * userspace doorbell BAR and is split into 128B 1955 * (UDBS_SEG_SIZE) doorbell regions, each associated 1956 * with an egress queue. The first 64B has the doorbell 1957 * and the second 64B can be used to submit a tx work 1958 * request with an implicit doorbell. 1959 */ 1960 1961 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 1962 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 1963 if (rc == 0) { 1964 clrbit(&sc->doorbells, DOORBELL_UDB); 1965 setbit(&sc->doorbells, DOORBELL_WCWR); 1966 setbit(&sc->doorbells, DOORBELL_UDBWC); 1967 } else { 1968 device_printf(sc->dev, 1969 "couldn't enable write combining: %d\n", 1970 rc); 1971 } 1972 1973 t4_write_reg(sc, A_SGE_STAT_CFG, 1974 V_STATSOURCE_T5(7) | V_STATMODE(0)); 1975 } 1976 #endif 1977 } 1978 1979 return (0); 1980 } 1981 1982 struct memwin_init { 1983 uint32_t base; 1984 uint32_t aperture; 1985 }; 1986 1987 static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 1988 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1989 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1990 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 1991 }; 1992 1993 static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 1994 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1995 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1996 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 1997 }; 1998 1999 static void 2000 setup_memwin(struct adapter *sc) 2001 { 2002 const struct memwin_init *mw_init; 2003 struct memwin *mw; 2004 int i; 2005 uint32_t bar0; 2006 2007 if (is_t4(sc)) { 2008 /* 2009 * Read low 32b of bar0 indirectly via the hardware backdoor 2010 * mechanism. Works from within PCI passthrough environments 2011 * too, where rman_get_start() can return a different value. We 2012 * need to program the T4 memory window decoders with the actual 2013 * addresses that will be coming across the PCIe link. 
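 *
 * For illustration only (names as in this file): with bar0 in hand, the
 * loop below programs decoder i as
 *
 *	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
 *	    (mw->mw_base + bar0) | V_BIR(0) |
 *	    V_WINDOW(ilog2(mw->mw_aperture) - 10));
 *
 * so a 64KB aperture encodes as ilog2(65536) - 10 = 6. This is a sketch
 * of the call made below, not extra setup; the aperture must be a power
 * of 2 for the encoding to work.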
2014 */ 2015 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2016 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2017 2018 mw_init = &t4_memwin[0]; 2019 } else { 2020 /* T5+ use the relative offset inside the PCIe BAR */ 2021 bar0 = 0; 2022 2023 mw_init = &t5_memwin[0]; 2024 } 2025 2026 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2027 rw_init(&mw->mw_lock, "memory window access"); 2028 mw->mw_base = mw_init->base; 2029 mw->mw_aperture = mw_init->aperture; 2030 mw->mw_curpos = 0; 2031 t4_write_reg(sc, 2032 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2033 (mw->mw_base + bar0) | V_BIR(0) | 2034 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2035 rw_wlock(&mw->mw_lock); 2036 position_memwin(sc, i, 0); 2037 rw_wunlock(&mw->mw_lock); 2038 } 2039 2040 /* flush */ 2041 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2042 } 2043 2044 /* 2045 * Positions the memory window at the given address in the card's address space. 2046 * There are some alignment requirements and the actual position may be at an 2047 * address prior to the requested address. mw->mw_curpos always has the actual 2048 * position of the window. 2049 */ 2050 static void 2051 position_memwin(struct adapter *sc, int idx, uint32_t addr) 2052 { 2053 struct memwin *mw; 2054 uint32_t pf; 2055 uint32_t reg; 2056 2057 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2058 mw = &sc->memwin[idx]; 2059 rw_assert(&mw->mw_lock, RA_WLOCKED); 2060 2061 if (is_t4(sc)) { 2062 pf = 0; 2063 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2064 } else { 2065 pf = V_PFNUM(sc->pf); 2066 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2067 } 2068 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2069 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2070 t4_read_reg(sc, reg); /* flush */ 2071 } 2072 2073 static int 2074 rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2075 int len, int rw) 2076 { 2077 struct memwin *mw; 2078 uint32_t mw_end, v; 2079 2080 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2081 2082 /* Memory can only be accessed in naturally aligned 4 byte units */ 2083 if (addr & 3 || len & 3 || len <= 0) 2084 return (EINVAL); 2085 2086 mw = &sc->memwin[idx]; 2087 while (len > 0) { 2088 rw_rlock(&mw->mw_lock); 2089 mw_end = mw->mw_curpos + mw->mw_aperture; 2090 if (addr >= mw_end || addr < mw->mw_curpos) { 2091 /* Will need to reposition the window */ 2092 if (!rw_try_upgrade(&mw->mw_lock)) { 2093 rw_runlock(&mw->mw_lock); 2094 rw_wlock(&mw->mw_lock); 2095 } 2096 rw_assert(&mw->mw_lock, RA_WLOCKED); 2097 position_memwin(sc, idx, addr); 2098 rw_downgrade(&mw->mw_lock); 2099 mw_end = mw->mw_curpos + mw->mw_aperture; 2100 } 2101 rw_assert(&mw->mw_lock, RA_RLOCKED); 2102 while (addr < mw_end && len > 0) { 2103 if (rw == 0) { 2104 v = t4_read_reg(sc, mw->mw_base + addr - 2105 mw->mw_curpos); 2106 *val++ = le32toh(v); 2107 } else { 2108 v = *val++; 2109 t4_write_reg(sc, mw->mw_base + addr - 2110 mw->mw_curpos, htole32(v)); 2111 } 2112 addr += 4; 2113 len -= 4; 2114 } 2115 rw_runlock(&mw->mw_lock); 2116 } 2117 2118 return (0); 2119 } 2120 2121 static inline int 2122 read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2123 int len) 2124 { 2125 2126 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2127 } 2128 2129 static inline int 2130 write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2131 const uint32_t *val, int len) 2132 { 2133 2134 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2135 } 2136 2137 static int 2138
t4_range_cmp(const void *a, const void *b) 2139 { 2140 return ((const struct t4_range *)a)->start - 2141 ((const struct t4_range *)b)->start; 2142 } 2143 2144 /* 2145 * Verify that the memory range specified by the addr/len pair is valid within 2146 * the card's address space. 2147 */ 2148 static int 2149 validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2150 { 2151 struct t4_range mem_ranges[4], *r, *next; 2152 uint32_t em, addr_len; 2153 int i, n, remaining; 2154 2155 /* Memory can only be accessed in naturally aligned 4 byte units */ 2156 if (addr & 3 || len & 3 || len <= 0) 2157 return (EINVAL); 2158 2159 /* Enabled memories */ 2160 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2161 2162 r = &mem_ranges[0]; 2163 n = 0; 2164 bzero(r, sizeof(mem_ranges)); 2165 if (em & F_EDRAM0_ENABLE) { 2166 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2167 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2168 if (r->size > 0) { 2169 r->start = G_EDRAM0_BASE(addr_len) << 20; 2170 if (addr >= r->start && 2171 addr + len <= r->start + r->size) 2172 return (0); 2173 r++; 2174 n++; 2175 } 2176 } 2177 if (em & F_EDRAM1_ENABLE) { 2178 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2179 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2180 if (r->size > 0) { 2181 r->start = G_EDRAM1_BASE(addr_len) << 20; 2182 if (addr >= r->start && 2183 addr + len <= r->start + r->size) 2184 return (0); 2185 r++; 2186 n++; 2187 } 2188 } 2189 if (em & F_EXT_MEM_ENABLE) { 2190 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2191 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2192 if (r->size > 0) { 2193 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2194 if (addr >= r->start && 2195 addr + len <= r->start + r->size) 2196 return (0); 2197 r++; 2198 n++; 2199 } 2200 } 2201 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2202 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2203 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2204 if (r->size > 0) { 2205 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2206 if (addr >= r->start && 2207 addr + len <= r->start + r->size) 2208 return (0); 2209 r++; 2210 n++; 2211 } 2212 } 2213 MPASS(n <= nitems(mem_ranges)); 2214 2215 if (n > 1) { 2216 /* Sort and merge the ranges. */ 2217 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2218 2219 /* Start from index 0 and examine the next n - 1 entries. */ 2220 r = &mem_ranges[0]; 2221 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2222 2223 MPASS(r->size > 0); /* r is a valid entry. */ 2224 next = r + 1; 2225 MPASS(next->size > 0); /* and so is the next one. */ 2226 2227 while (r->start + r->size >= next->start) { 2228 /* Merge the next one into the current entry. */ 2229 r->size = max(r->start + r->size, 2230 next->start + next->size) - r->start; 2231 n--; /* One fewer entry in total. */ 2232 if (--remaining == 0) 2233 goto done; /* short circuit */ 2234 next++; 2235 } 2236 if (next != r + 1) { 2237 /* 2238 * Some entries were merged into r and next 2239 * points to the first valid entry that couldn't 2240 * be merged. 2241 */ 2242 MPASS(next->size > 0); /* must be valid */ 2243 memcpy(r + 1, next, remaining * sizeof(*r)); 2244 #ifdef INVARIANTS 2245 /* 2246 * This is so that the foo->size assertion in the 2247 * next iteration of the loop does the right 2248 * thing for entries that were pulled up and are 2249 * no longer valid. 2250 */ 2251 MPASS(n < nitems(mem_ranges)); 2252 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2253 sizeof(struct t4_range)); 2254 #endif 2255 } 2256 } 2257 done: 2258 /* Done merging the ranges.
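 * As a sketch with invented values: [0MB, 64MB) and [32MB, 160MB)
 * overlap, so they merge into the single entry [0MB, 160MB) and n drops
 * to 1; the addr/len pair is then checked against each surviving entry
 * below.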
*/ 2259 MPASS(n > 0); 2260 r = &mem_ranges[0]; 2261 for (i = 0; i < n; i++, r++) { 2262 if (addr >= r->start && 2263 addr + len <= r->start + r->size) 2264 return (0); 2265 } 2266 } 2267 2268 return (EFAULT); 2269 } 2270 2271 static int 2272 fwmtype_to_hwmtype(int mtype) 2273 { 2274 2275 switch (mtype) { 2276 case FW_MEMTYPE_EDC0: 2277 return (MEM_EDC0); 2278 case FW_MEMTYPE_EDC1: 2279 return (MEM_EDC1); 2280 case FW_MEMTYPE_EXTMEM: 2281 return (MEM_MC0); 2282 case FW_MEMTYPE_EXTMEM1: 2283 return (MEM_MC1); 2284 default: 2285 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2286 } 2287 } 2288 2289 /* 2290 * Verify that the memory range specified by the memtype/offset/len pair is 2291 * valid and lies entirely within the memtype specified. The global address of 2292 * the start of the range is returned in addr. 2293 */ 2294 static int 2295 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2296 uint32_t *addr) 2297 { 2298 uint32_t em, addr_len, maddr; 2299 2300 /* Memory can only be accessed in naturally aligned 4 byte units */ 2301 if (off & 3 || len & 3 || len == 0) 2302 return (EINVAL); 2303 2304 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2305 switch (fwmtype_to_hwmtype(mtype)) { 2306 case MEM_EDC0: 2307 if (!(em & F_EDRAM0_ENABLE)) 2308 return (EINVAL); 2309 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2310 maddr = G_EDRAM0_BASE(addr_len) << 20; 2311 break; 2312 case MEM_EDC1: 2313 if (!(em & F_EDRAM1_ENABLE)) 2314 return (EINVAL); 2315 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2316 maddr = G_EDRAM1_BASE(addr_len) << 20; 2317 break; 2318 case MEM_MC: 2319 if (!(em & F_EXT_MEM_ENABLE)) 2320 return (EINVAL); 2321 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2322 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2323 break; 2324 case MEM_MC1: 2325 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2326 return (EINVAL); 2327 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2328 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2329 break; 2330 default: 2331 return (EINVAL); 2332 } 2333 2334 *addr = maddr + off; /* global address */ 2335 return (validate_mem_range(sc, *addr, len)); 2336 } 2337 2338 static int 2339 fixup_devlog_params(struct adapter *sc) 2340 { 2341 struct devlog_params *dparams = &sc->params.devlog; 2342 int rc; 2343 2344 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2345 dparams->size, &dparams->addr); 2346 2347 return (rc); 2348 } 2349 2350 static int 2351 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2352 struct intrs_and_queues *iaq) 2353 { 2354 int rc, itype, navail, nrxq10g, nrxq1g, n; 2355 int nofldrxq10g = 0, nofldrxq1g = 0; 2356 int nnmrxq10g = 0, nnmrxq1g = 0; 2357 2358 bzero(iaq, sizeof(*iaq)); 2359 2360 iaq->ntxq10g = t4_ntxq10g; 2361 iaq->ntxq1g = t4_ntxq1g; 2362 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2363 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2364 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2365 #ifdef TCP_OFFLOAD 2366 if (is_offload(sc)) { 2367 iaq->nofldtxq10g = t4_nofldtxq10g; 2368 iaq->nofldtxq1g = t4_nofldtxq1g; 2369 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2370 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2371 } 2372 #endif 2373 #ifdef DEV_NETMAP 2374 iaq->nnmtxq10g = t4_nnmtxq10g; 2375 iaq->nnmtxq1g = t4_nnmtxq1g; 2376 iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g; 2377 iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g; 2378 #endif 2379 2380 for (itype = INTR_MSIX; itype; itype >>= 1) { 2381 2382 if ((itype & t4_intr_types) == 0) 2383 continue; /* not allowed */ 2384 2385 if (itype == INTR_MSIX) 2386 navail 
= pci_msix_count(sc->dev); 2387 else if (itype == INTR_MSI) 2388 navail = pci_msi_count(sc->dev); 2389 else 2390 navail = 1; 2391 restart: 2392 if (navail == 0) 2393 continue; 2394 2395 iaq->intr_type = itype; 2396 iaq->intr_flags_10g = 0; 2397 iaq->intr_flags_1g = 0; 2398 2399 /* 2400 * Best option: an interrupt vector for errors, one for the 2401 * firmware event queue, and one for every rxq (NIC, TOE, and 2402 * netmap). 2403 */ 2404 iaq->nirq = T4_EXTRA_INTR; 2405 iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g); 2406 iaq->nirq += n10g * 2 * (num_vis - 1); 2407 iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g); 2408 iaq->nirq += n1g * 2 * (num_vis - 1); 2409 if (iaq->nirq <= navail && 2410 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2411 iaq->intr_flags_10g = INTR_ALL; 2412 iaq->intr_flags_1g = INTR_ALL; 2413 goto allocate; 2414 } 2415 2416 /* 2417 * Second best option: a vector for errors, one for the firmware 2418 * event queue, and vectors for either all the NIC rx queues or 2419 * all the TOE rx queues. The queues that don't get vectors 2420 * will forward their interrupts to those that do. 2421 * 2422 * Note: netmap rx queues cannot be created early and so they 2423 * can't be setup to receive forwarded interrupts for others. 2424 */ 2425 iaq->nirq = T4_EXTRA_INTR; 2426 if (nrxq10g >= nofldrxq10g) { 2427 iaq->intr_flags_10g = INTR_RXQ; 2428 iaq->nirq += n10g * nrxq10g; 2429 iaq->nirq += n10g * (num_vis - 1); 2430 #ifdef DEV_NETMAP 2431 iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g); 2432 #endif 2433 } else { 2434 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2435 iaq->nirq += n10g * nofldrxq10g; 2436 #ifdef DEV_NETMAP 2437 iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g); 2438 #endif 2439 } 2440 if (nrxq1g >= nofldrxq1g) { 2441 iaq->intr_flags_1g = INTR_RXQ; 2442 iaq->nirq += n1g * nrxq1g; 2443 iaq->nirq += n1g * (num_vis - 1); 2444 #ifdef DEV_NETMAP 2445 iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g); 2446 #endif 2447 } else { 2448 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2449 iaq->nirq += n1g * nofldrxq1g; 2450 #ifdef DEV_NETMAP 2451 iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g); 2452 #endif 2453 } 2454 if (iaq->nirq <= navail && 2455 (itype != INTR_MSI || powerof2(iaq->nirq))) 2456 goto allocate; 2457 2458 /* 2459 * Next best option: an interrupt vector for errors, one for the 2460 * firmware event queue, and at least one per VI. At this 2461 * point we know we'll have to downsize nrxq and/or nofldrxq 2462 * and/or nnmrxq to fit what's available to us. 2463 */ 2464 iaq->nirq = T4_EXTRA_INTR; 2465 iaq->nirq += (n10g + n1g) * num_vis; 2466 if (iaq->nirq <= navail) { 2467 int leftover = navail - iaq->nirq; 2468 2469 if (n10g > 0) { 2470 int target = max(nrxq10g, nofldrxq10g); 2471 2472 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2473 INTR_RXQ : INTR_OFLD_RXQ; 2474 2475 n = 1; 2476 while (n < target && leftover >= n10g) { 2477 leftover -= n10g; 2478 iaq->nirq += n10g; 2479 n++; 2480 } 2481 iaq->nrxq10g = min(n, nrxq10g); 2482 #ifdef TCP_OFFLOAD 2483 iaq->nofldrxq10g = min(n, nofldrxq10g); 2484 #endif 2485 #ifdef DEV_NETMAP 2486 iaq->nnmrxq10g = min(n, nnmrxq10g); 2487 #endif 2488 } 2489 2490 if (n1g > 0) { 2491 int target = max(nrxq1g, nofldrxq1g); 2492 2493 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2494 INTR_RXQ : INTR_OFLD_RXQ; 2495 2496 n = 1; 2497 while (n < target && leftover >= n1g) { 2498 leftover -= n1g; 2499 iaq->nirq += n1g; 2500 n++; 2501 } 2502 iaq->nrxq1g = min(n, nrxq1g); 2503 #ifdef TCP_OFFLOAD 2504 iaq->nofldrxq1g = min(n, nofldrxq1g); 2505 #endif 2506 #ifdef DEV_NETMAP 2507 iaq->nnmrxq1g = min(n, nnmrxq1g); 2508 #endif 2509 } 2510 2511 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2512 goto allocate; 2513 } 2514 2515 /* 2516 * Least desirable option: one interrupt vector for everything. 2517 */ 2518 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2519 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2520 #ifdef TCP_OFFLOAD 2521 if (is_offload(sc)) 2522 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2523 #endif 2524 #ifdef DEV_NETMAP 2525 iaq->nnmrxq10g = iaq->nnmrxq1g = 1; 2526 #endif 2527 2528 allocate: 2529 navail = iaq->nirq; 2530 rc = 0; 2531 if (itype == INTR_MSIX) 2532 rc = pci_alloc_msix(sc->dev, &navail); 2533 else if (itype == INTR_MSI) 2534 rc = pci_alloc_msi(sc->dev, &navail); 2535 2536 if (rc == 0) { 2537 if (navail == iaq->nirq) 2538 return (0); 2539 2540 /* 2541 * Didn't get the number requested. Use whatever number 2542 * the kernel is willing to allocate (it's in navail). 2543 */ 2544 device_printf(sc->dev, "fewer vectors than requested, " 2545 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2546 itype, iaq->nirq, navail); 2547 pci_release_msi(sc->dev); 2548 goto restart; 2549 } 2550 2551 device_printf(sc->dev, 2552 "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n", 2553 itype, rc, iaq->nirq, navail); 2554 } 2555 2556 device_printf(sc->dev, 2557 "failed to find a usable interrupt type. " 2558 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2559 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2560 2561 return (ENXIO); 2562 } 2563 2564 #define FW_VERSION(chip) ( \ 2565 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2566 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2567 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2568 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2569 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2570 2571 struct fw_info { 2572 uint8_t chip; 2573 char *kld_name; 2574 char *fw_mod_name; 2575 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2576 } fw_info[] = { 2577 { 2578 .chip = CHELSIO_T4, 2579 .kld_name = "t4fw_cfg", 2580 .fw_mod_name = "t4fw", 2581 .fw_hdr = { 2582 .chip = FW_HDR_CHIP_T4, 2583 .fw_ver = htobe32_const(FW_VERSION(T4)), 2584 .intfver_nic = FW_INTFVER(T4, NIC), 2585 .intfver_vnic = FW_INTFVER(T4, VNIC), 2586 .intfver_ofld = FW_INTFVER(T4, OFLD), 2587 .intfver_ri = FW_INTFVER(T4, RI), 2588 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2589 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2590 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2591 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2592 }, 2593 }, { 2594 .chip = CHELSIO_T5, 2595 .kld_name = "t5fw_cfg", 2596 .fw_mod_name = "t5fw", 2597 .fw_hdr = { 2598 .chip = FW_HDR_CHIP_T5, 2599 .fw_ver = htobe32_const(FW_VERSION(T5)), 2600 .intfver_nic = FW_INTFVER(T5, NIC), 2601 .intfver_vnic = FW_INTFVER(T5, VNIC), 2602 .intfver_ofld = FW_INTFVER(T5, OFLD), 2603 .intfver_ri = FW_INTFVER(T5, RI), 2604 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2605 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2606 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2607 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2608 }, 2609 } 2610 }; 2611 2612 static struct fw_info * 2613 find_fw_info(int chip) 2614 { 2615 int i; 2616 2617 for (i = 0; i < nitems(fw_info); i++) { 2618 if
(fw_info[i].chip == chip) 2619 return (&fw_info[i]); 2620 } 2621 return (NULL); 2622 } 2623 2624 /* 2625 * Is the given firmware API compatible with the one the driver was compiled 2626 * with? 2627 */ 2628 static int 2629 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2630 { 2631 2632 /* short circuit if it's the exact same firmware version */ 2633 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2634 return (1); 2635 2636 /* 2637 * XXX: Is this too conservative? Perhaps I should limit this to the 2638 * features that are supported in the driver. 2639 */ 2640 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2641 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2642 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2643 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2644 return (1); 2645 #undef SAME_INTF 2646 2647 return (0); 2648 } 2649 2650 /* 2651 * The firmware in the KLD is usable, but should it be installed? This routine 2652 * explains itself in detail if it indicates the KLD firmware should be 2653 * installed. 2654 */ 2655 static int 2656 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2657 { 2658 const char *reason; 2659 2660 if (!card_fw_usable) { 2661 reason = "incompatible or unusable"; 2662 goto install; 2663 } 2664 2665 if (k > c) { 2666 reason = "older than the version bundled with this driver"; 2667 goto install; 2668 } 2669 2670 if (t4_fw_install == 2 && k != c) { 2671 reason = "different than the version bundled with this driver"; 2672 goto install; 2673 } 2674 2675 return (0); 2676 2677 install: 2678 if (t4_fw_install == 0) { 2679 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2680 "but the driver is prohibited from installing a different " 2681 "firmware on the card.\n", 2682 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2683 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2684 2685 return (0); 2686 } 2687 2688 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2689 "installing firmware %u.%u.%u.%u on card.\n", 2690 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2691 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2692 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2693 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2694 2695 return (1); 2696 } 2697 /* 2698 * Establish contact with the firmware and determine if we are the master driver 2699 * or not, and whether we are responsible for chip initialization. 2700 */ 2701 static int 2702 prep_firmware(struct adapter *sc) 2703 { 2704 const struct firmware *fw = NULL, *default_cfg; 2705 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2706 enum dev_state state; 2707 struct fw_info *fw_info; 2708 struct fw_hdr *card_fw; /* fw on the card */ 2709 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2710 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2711 against */ 2712 2713 /* Contact firmware. */ 2714 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2715 if (rc < 0 || state == DEV_STATE_ERR) { 2716 rc = -rc; 2717 device_printf(sc->dev, 2718 "failed to connect to the firmware: %d, %d.\n", rc, state); 2719 return (rc); 2720 } 2721 pf = rc; 2722 if (pf == sc->mbox) 2723 sc->flags |= MASTER_PF; 2724 else if (state == DEV_STATE_UNINIT) { 2725 /* 2726 * We didn't get to be the master so we definitely won't be 2727 * configuring the chip. It's a bug if someone else hasn't 2728 * configured it already. 
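 *
 * As a sketch, the possible outcomes of the hello above are: pf ==
 * sc->mbox means this driver is the master PF; pf != sc->mbox with
 * state DEV_STATE_INIT means another PF is master and has already
 * initialized the chip, which is fine; pf != sc->mbox with
 * DEV_STATE_UNINIT is the broken case handled here.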
2729 */ 2730 device_printf(sc->dev, "couldn't be master(%d), " 2731 "device not already initialized either(%d).\n", rc, state); 2732 return (EDOOFUS); 2733 } 2734 2735 /* This is the firmware whose headers the driver was compiled against */ 2736 fw_info = find_fw_info(chip_id(sc)); 2737 if (fw_info == NULL) { 2738 device_printf(sc->dev, 2739 "unable to look up firmware information for chip %d.\n", 2740 chip_id(sc)); 2741 return (EINVAL); 2742 } 2743 drv_fw = &fw_info->fw_hdr; 2744 2745 /* 2746 * The firmware KLD contains many modules. The KLD name is also the 2747 * name of the module that contains the default config file. 2748 */ 2749 default_cfg = firmware_get(fw_info->kld_name); 2750 2751 /* Read the header of the firmware on the card */ 2752 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2753 rc = -t4_read_flash(sc, FLASH_FW_START, 2754 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2755 if (rc == 0) 2756 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2757 else { 2758 device_printf(sc->dev, 2759 "Unable to read card's firmware header: %d\n", rc); 2760 card_fw_usable = 0; 2761 } 2762 2763 /* This is the firmware in the KLD */ 2764 fw = firmware_get(fw_info->fw_mod_name); 2765 if (fw != NULL) { 2766 kld_fw = (const void *)fw->data; 2767 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2768 } else { 2769 kld_fw = NULL; 2770 kld_fw_usable = 0; 2771 } 2772 2773 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2774 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2775 /* 2776 * Common case: the firmware on the card is an exact match and 2777 * the KLD is an exact match too, or the KLD is 2778 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2779 * here -- use cxgbetool loadfw if you want to reinstall the 2780 * same firmware as the one on the card. 2781 */ 2782 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2783 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2784 be32toh(card_fw->fw_ver))) { 2785 2786 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2787 if (rc != 0) { 2788 device_printf(sc->dev, 2789 "failed to install firmware: %d\n", rc); 2790 goto done; 2791 } 2792 2793 /* Installed successfully, update the cached header too. */ 2794 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2795 card_fw_usable = 1; 2796 need_fw_reset = 0; /* already reset as part of load_fw */ 2797 } 2798 2799 if (!card_fw_usable) { 2800 uint32_t d, c, k; 2801 2802 d = ntohl(drv_fw->fw_ver); 2803 c = ntohl(card_fw->fw_ver); 2804 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 2805 2806 device_printf(sc->dev, "Cannot find a usable firmware: " 2807 "fw_install %d, chip state %d, " 2808 "driver compiled with %d.%d.%d.%d, " 2809 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2810 t4_fw_install, state, 2811 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2812 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2813 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2814 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2815 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2816 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2817 rc = EINVAL; 2818 goto done; 2819 } 2820 2821 /* We're using whatever's on the card and it's known to be good. 
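 * (The 32-bit fw_ver packs major.minor.micro.build one byte each; as an
 * invented example, 0x01100400 decodes to "1.16.4.0" through the
 * G_FW_HDR_FW_VER_* macros used below.)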
*/ 2822 sc->params.fw_vers = ntohl(card_fw->fw_ver); 2823 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 2824 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2825 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2826 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2827 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2828 t4_get_tp_version(sc, &sc->params.tp_vers); 2829 2830 /* Reset device */ 2831 if (need_fw_reset && 2832 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2833 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2834 if (rc != ETIMEDOUT && rc != EIO) 2835 t4_fw_bye(sc, sc->mbox); 2836 goto done; 2837 } 2838 sc->flags |= FW_OK; 2839 2840 rc = get_params__pre_init(sc); 2841 if (rc != 0) 2842 goto done; /* error message displayed already */ 2843 2844 /* Partition adapter resources as specified in the config file. */ 2845 if (state == DEV_STATE_UNINIT) { 2846 2847 KASSERT(sc->flags & MASTER_PF, 2848 ("%s: trying to change chip settings when not master.", 2849 __func__)); 2850 2851 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2852 if (rc != 0) 2853 goto done; /* error message displayed already */ 2854 2855 t4_tweak_chip_settings(sc); 2856 2857 /* get basic stuff going */ 2858 rc = -t4_fw_initialize(sc, sc->mbox); 2859 if (rc != 0) { 2860 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2861 goto done; 2862 } 2863 } else { 2864 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2865 sc->cfcsum = 0; 2866 } 2867 2868 done: 2869 free(card_fw, M_CXGBE); 2870 if (fw != NULL) 2871 firmware_put(fw, FIRMWARE_UNLOAD); 2872 if (default_cfg != NULL) 2873 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2874 2875 return (rc); 2876 } 2877 2878 #define FW_PARAM_DEV(param) \ 2879 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2880 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2881 #define FW_PARAM_PFVF(param) \ 2882 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2883 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2884 2885 /* 2886 * Partition chip resources for use between various PFs, VFs, etc. 2887 */ 2888 static int 2889 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2890 const char *name_prefix) 2891 { 2892 const struct firmware *cfg = NULL; 2893 int rc = 0; 2894 struct fw_caps_config_cmd caps; 2895 uint32_t mtype, moff, finicsum, cfcsum; 2896 2897 /* 2898 * Figure out what configuration file to use. Pick the default config 2899 * file for the card if the user hasn't specified one explicitly. 2900 */ 2901 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2902 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2903 /* Card specific overrides go here. */ 2904 if (pci_get_device(sc->dev) == 0x440a) 2905 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2906 if (is_fpga(sc)) 2907 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2908 } 2909 2910 /* 2911 * We need to load another module if the profile is anything except 2912 * "default" or "flash". 
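 * As an invented example, a configuration profile of "uwire" on a T4
 * card (name_prefix "t4fw_cfg") makes the code below firmware_get() the
 * module "t4fw_cfg_uwire".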
2913 */ 2914 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 2915 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2916 char s[32]; 2917 2918 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 2919 cfg = firmware_get(s); 2920 if (cfg == NULL) { 2921 if (default_cfg != NULL) { 2922 device_printf(sc->dev, 2923 "unable to load module \"%s\" for " 2924 "configuration profile \"%s\", will use " 2925 "the default config file instead.\n", 2926 s, sc->cfg_file); 2927 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2928 "%s", DEFAULT_CF); 2929 } else { 2930 device_printf(sc->dev, 2931 "unable to load module \"%s\" for " 2932 "configuration profile \"%s\", will use " 2933 "the config file on the card's flash " 2934 "instead.\n", s, sc->cfg_file); 2935 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2936 "%s", FLASH_CF); 2937 } 2938 } 2939 } 2940 2941 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 2942 default_cfg == NULL) { 2943 device_printf(sc->dev, 2944 "default config file not available, will use the config " 2945 "file on the card's flash instead.\n"); 2946 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 2947 } 2948 2949 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2950 u_int cflen; 2951 const uint32_t *cfdata; 2952 uint32_t param, val, addr; 2953 2954 KASSERT(cfg != NULL || default_cfg != NULL, 2955 ("%s: no config to upload", __func__)); 2956 2957 /* 2958 * Ask the firmware where it wants us to upload the config file. 2959 */ 2960 param = FW_PARAM_DEV(CF); 2961 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2962 if (rc != 0) { 2963 /* No support for config file? Shouldn't happen. */ 2964 device_printf(sc->dev, 2965 "failed to query config file location: %d.\n", rc); 2966 goto done; 2967 } 2968 mtype = G_FW_PARAMS_PARAM_Y(val); 2969 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 2970 2971 /* 2972 * XXX: sheer laziness. We deliberately added 4 bytes of 2973 * useless stuffing/comments at the end of the config file so 2974 * it's ok to simply throw away the last remaining bytes when 2975 * the config file is not an exact multiple of 4. This also 2976 * helps with the validate_mt_off_len check. 2977 */ 2978 if (cfg != NULL) { 2979 cflen = cfg->datasize & ~3; 2980 cfdata = cfg->data; 2981 } else { 2982 cflen = default_cfg->datasize & ~3; 2983 cfdata = default_cfg->data; 2984 } 2985 2986 if (cflen > FLASH_CFG_MAX_SIZE) { 2987 device_printf(sc->dev, 2988 "config file too long (%d, max allowed is %d). " 2989 "Will try to use the config on the card, if any.\n", 2990 cflen, FLASH_CFG_MAX_SIZE); 2991 goto use_config_on_flash; 2992 } 2993 2994 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 2995 if (rc != 0) { 2996 device_printf(sc->dev, 2997 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 2998 "Will try to use the config on the card, if any.\n", 2999 __func__, mtype, moff, cflen, rc); 3000 goto use_config_on_flash; 3001 } 3002 write_via_memwin(sc, 2, addr, cfdata, cflen); 3003 } else { 3004 use_config_on_flash: 3005 mtype = FW_MEMTYPE_FLASH; 3006 moff = t4_flash_cfg_addr(sc); 3007 } 3008 3009 bzero(&caps, sizeof(caps)); 3010 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3011 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3012 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3013 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3014 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3015 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3016 if (rc != 0) { 3017 device_printf(sc->dev, 3018 "failed to pre-process config file: %d " 3019 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3020 goto done; 3021 } 3022 3023 finicsum = be32toh(caps.finicsum); 3024 cfcsum = be32toh(caps.cfcsum); 3025 if (finicsum != cfcsum) { 3026 device_printf(sc->dev, 3027 "WARNING: config file checksum mismatch: %08x %08x\n", 3028 finicsum, cfcsum); 3029 } 3030 sc->cfcsum = cfcsum; 3031 3032 #define LIMIT_CAPS(x) do { \ 3033 caps.x &= htobe16(t4_##x##_allowed); \ 3034 } while (0) 3035 3036 /* 3037 * Let the firmware know what features will (not) be used so it can tune 3038 * things accordingly. 3039 */ 3040 LIMIT_CAPS(linkcaps); 3041 LIMIT_CAPS(niccaps); 3042 LIMIT_CAPS(toecaps); 3043 LIMIT_CAPS(rdmacaps); 3044 LIMIT_CAPS(iscsicaps); 3045 LIMIT_CAPS(fcoecaps); 3046 #undef LIMIT_CAPS 3047 3048 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3049 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3050 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3051 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3052 if (rc != 0) { 3053 device_printf(sc->dev, 3054 "failed to process config file: %d.\n", rc); 3055 } 3056 done: 3057 if (cfg != NULL) 3058 firmware_put(cfg, FIRMWARE_UNLOAD); 3059 return (rc); 3060 } 3061 3062 /* 3063 * Retrieve parameters that are needed (or nice to have) very early. 3064 */ 3065 static int 3066 get_params__pre_init(struct adapter *sc) 3067 { 3068 int rc; 3069 uint32_t param[2], val[2]; 3070 3071 param[0] = FW_PARAM_DEV(PORTVEC); 3072 param[1] = FW_PARAM_DEV(CCLK); 3073 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3074 if (rc != 0) { 3075 device_printf(sc->dev, 3076 "failed to query parameters (pre_init): %d.\n", rc); 3077 return (rc); 3078 } 3079 3080 sc->params.portvec = val[0]; 3081 sc->params.nports = bitcount32(val[0]); 3082 sc->params.vpd.cclk = val[1]; 3083 3084 /* Read device log parameters. */ 3085 rc = -t4_init_devlog_params(sc, 1); 3086 if (rc == 0) 3087 fixup_devlog_params(sc); 3088 else { 3089 device_printf(sc->dev, 3090 "failed to get devlog parameters: %d.\n", rc); 3091 rc = 0; /* devlog isn't critical for device operation */ 3092 } 3093 3094 return (rc); 3095 } 3096 3097 /* 3098 * Retrieve various parameters that are of interest to the driver. The device 3099 * has been initialized by the firmware at this point. 
3100 */ 3101 static int 3102 get_params__post_init(struct adapter *sc) 3103 { 3104 int rc; 3105 uint32_t param[7], val[7]; 3106 struct fw_caps_config_cmd caps; 3107 3108 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3109 param[1] = FW_PARAM_PFVF(EQ_START); 3110 param[2] = FW_PARAM_PFVF(FILTER_START); 3111 param[3] = FW_PARAM_PFVF(FILTER_END); 3112 param[4] = FW_PARAM_PFVF(L2T_START); 3113 param[5] = FW_PARAM_PFVF(L2T_END); 3114 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3115 if (rc != 0) { 3116 device_printf(sc->dev, 3117 "failed to query parameters (post_init): %d.\n", rc); 3118 return (rc); 3119 } 3120 3121 sc->sge.iq_start = val[0]; 3122 sc->sge.eq_start = val[1]; 3123 sc->tids.ftid_base = val[2]; 3124 sc->tids.nftids = val[3] - val[2] + 1; 3125 sc->params.ftid_min = val[2]; 3126 sc->params.ftid_max = val[3]; 3127 sc->vres.l2t.start = val[4]; 3128 sc->vres.l2t.size = val[5] - val[4] + 1; 3129 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3130 ("%s: L2 table size (%u) larger than expected (%u)", 3131 __func__, sc->vres.l2t.size, L2T_SIZE)); 3132 3133 /* get capabilities */ 3134 bzero(&caps, sizeof(caps)); 3135 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3136 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3137 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3138 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3139 if (rc != 0) { 3140 device_printf(sc->dev, 3141 "failed to get card capabilities: %d.\n", rc); 3142 return (rc); 3143 } 3144 3145 #define READ_CAPS(x) do { \ 3146 sc->x = htobe16(caps.x); \ 3147 } while (0) 3148 READ_CAPS(linkcaps); 3149 READ_CAPS(niccaps); 3150 READ_CAPS(toecaps); 3151 READ_CAPS(rdmacaps); 3152 READ_CAPS(iscsicaps); 3153 READ_CAPS(fcoecaps); 3154 3155 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3156 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3157 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3158 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3159 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3160 if (rc != 0) { 3161 device_printf(sc->dev, 3162 "failed to query NIC parameters: %d.\n", rc); 3163 return (rc); 3164 } 3165 sc->tids.etid_base = val[0]; 3166 sc->params.etid_min = val[0]; 3167 sc->tids.netids = val[1] - val[0] + 1; 3168 sc->params.netids = sc->tids.netids; 3169 sc->params.eo_wr_cred = val[2]; 3170 sc->params.ethoffload = 1; 3171 } 3172 3173 if (sc->toecaps) { 3174 /* query offload-related parameters */ 3175 param[0] = FW_PARAM_DEV(NTID); 3176 param[1] = FW_PARAM_PFVF(SERVER_START); 3177 param[2] = FW_PARAM_PFVF(SERVER_END); 3178 param[3] = FW_PARAM_PFVF(TDDP_START); 3179 param[4] = FW_PARAM_PFVF(TDDP_END); 3180 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3181 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3182 if (rc != 0) { 3183 device_printf(sc->dev, 3184 "failed to query TOE parameters: %d.\n", rc); 3185 return (rc); 3186 } 3187 sc->tids.ntids = val[0]; 3188 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3189 sc->tids.stid_base = val[1]; 3190 sc->tids.nstids = val[2] - val[1] + 1; 3191 sc->vres.ddp.start = val[3]; 3192 sc->vres.ddp.size = val[4] - val[3] + 1; 3193 sc->params.ofldq_wr_cred = val[5]; 3194 sc->params.offload = 1; 3195 } 3196 if (sc->rdmacaps) { 3197 param[0] = FW_PARAM_PFVF(STAG_START); 3198 param[1] = FW_PARAM_PFVF(STAG_END); 3199 param[2] = FW_PARAM_PFVF(RQ_START); 3200 param[3] = FW_PARAM_PFVF(RQ_END); 3201 param[4] = FW_PARAM_PFVF(PBL_START); 3202 param[5] = FW_PARAM_PFVF(PBL_END); 3203 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3204 if (rc != 0) { 3205
device_printf(sc->dev, 3206 "failed to query RDMA parameters(1): %d.\n", rc); 3207 return (rc); 3208 } 3209 sc->vres.stag.start = val[0]; 3210 sc->vres.stag.size = val[1] - val[0] + 1; 3211 sc->vres.rq.start = val[2]; 3212 sc->vres.rq.size = val[3] - val[2] + 1; 3213 sc->vres.pbl.start = val[4]; 3214 sc->vres.pbl.size = val[5] - val[4] + 1; 3215 3216 param[0] = FW_PARAM_PFVF(SQRQ_START); 3217 param[1] = FW_PARAM_PFVF(SQRQ_END); 3218 param[2] = FW_PARAM_PFVF(CQ_START); 3219 param[3] = FW_PARAM_PFVF(CQ_END); 3220 param[4] = FW_PARAM_PFVF(OCQ_START); 3221 param[5] = FW_PARAM_PFVF(OCQ_END); 3222 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3223 if (rc != 0) { 3224 device_printf(sc->dev, 3225 "failed to query RDMA parameters(2): %d.\n", rc); 3226 return (rc); 3227 } 3228 sc->vres.qp.start = val[0]; 3229 sc->vres.qp.size = val[1] - val[0] + 1; 3230 sc->vres.cq.start = val[2]; 3231 sc->vres.cq.size = val[3] - val[2] + 1; 3232 sc->vres.ocq.start = val[4]; 3233 sc->vres.ocq.size = val[5] - val[4] + 1; 3234 } 3235 if (sc->iscsicaps) { 3236 param[0] = FW_PARAM_PFVF(ISCSI_START); 3237 param[1] = FW_PARAM_PFVF(ISCSI_END); 3238 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3239 if (rc != 0) { 3240 device_printf(sc->dev, 3241 "failed to query iSCSI parameters: %d.\n", rc); 3242 return (rc); 3243 } 3244 sc->vres.iscsi.start = val[0]; 3245 sc->vres.iscsi.size = val[1] - val[0] + 1; 3246 } 3247 3248 /* 3249 * We've got the params we wanted to query via the firmware. Now grab 3250 * some others directly from the chip. 3251 */ 3252 rc = t4_read_chip_settings(sc); 3253 3254 return (rc); 3255 } 3256 3257 static int 3258 set_params__post_init(struct adapter *sc) 3259 { 3260 uint32_t param, val; 3261 3262 /* ask for encapsulated CPLs */ 3263 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3264 val = 1; 3265 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3266 3267 return (0); 3268 } 3269 3270 #undef FW_PARAM_PFVF 3271 #undef FW_PARAM_DEV 3272 3273 static void 3274 t4_set_desc(struct adapter *sc) 3275 { 3276 char buf[128]; 3277 struct adapter_params *p = &sc->params; 3278 3279 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, " 3280 "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? 
"R" : "", 3281 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec); 3282 3283 device_set_desc_copy(sc->dev, buf); 3284 } 3285 3286 static void 3287 build_medialist(struct port_info *pi, struct ifmedia *media) 3288 { 3289 int m; 3290 3291 PORT_LOCK(pi); 3292 3293 ifmedia_removeall(media); 3294 3295 m = IFM_ETHER | IFM_FDX; 3296 3297 switch(pi->port_type) { 3298 case FW_PORT_TYPE_BT_XFI: 3299 case FW_PORT_TYPE_BT_XAUI: 3300 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3301 /* fall through */ 3302 3303 case FW_PORT_TYPE_BT_SGMII: 3304 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3305 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3306 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3307 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3308 break; 3309 3310 case FW_PORT_TYPE_CX4: 3311 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3312 ifmedia_set(media, m | IFM_10G_CX4); 3313 break; 3314 3315 case FW_PORT_TYPE_QSFP_10G: 3316 case FW_PORT_TYPE_SFP: 3317 case FW_PORT_TYPE_FIBER_XFI: 3318 case FW_PORT_TYPE_FIBER_XAUI: 3319 switch (pi->mod_type) { 3320 3321 case FW_PORT_MOD_TYPE_LR: 3322 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3323 ifmedia_set(media, m | IFM_10G_LR); 3324 break; 3325 3326 case FW_PORT_MOD_TYPE_SR: 3327 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3328 ifmedia_set(media, m | IFM_10G_SR); 3329 break; 3330 3331 case FW_PORT_MOD_TYPE_LRM: 3332 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3333 ifmedia_set(media, m | IFM_10G_LRM); 3334 break; 3335 3336 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3337 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3338 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3339 ifmedia_set(media, m | IFM_10G_TWINAX); 3340 break; 3341 3342 case FW_PORT_MOD_TYPE_NONE: 3343 m &= ~IFM_FDX; 3344 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3345 ifmedia_set(media, m | IFM_NONE); 3346 break; 3347 3348 case FW_PORT_MOD_TYPE_NA: 3349 case FW_PORT_MOD_TYPE_ER: 3350 default: 3351 device_printf(pi->dev, 3352 "unknown port_type (%d), mod_type (%d)\n", 3353 pi->port_type, pi->mod_type); 3354 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3355 ifmedia_set(media, m | IFM_UNKNOWN); 3356 break; 3357 } 3358 break; 3359 3360 case FW_PORT_TYPE_QSFP: 3361 switch (pi->mod_type) { 3362 3363 case FW_PORT_MOD_TYPE_LR: 3364 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3365 ifmedia_set(media, m | IFM_40G_LR4); 3366 break; 3367 3368 case FW_PORT_MOD_TYPE_SR: 3369 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3370 ifmedia_set(media, m | IFM_40G_SR4); 3371 break; 3372 3373 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3374 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3375 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3376 ifmedia_set(media, m | IFM_40G_CR4); 3377 break; 3378 3379 case FW_PORT_MOD_TYPE_NONE: 3380 m &= ~IFM_FDX; 3381 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3382 ifmedia_set(media, m | IFM_NONE); 3383 break; 3384 3385 default: 3386 device_printf(pi->dev, 3387 "unknown port_type (%d), mod_type (%d)\n", 3388 pi->port_type, pi->mod_type); 3389 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3390 ifmedia_set(media, m | IFM_UNKNOWN); 3391 break; 3392 } 3393 break; 3394 3395 default: 3396 device_printf(pi->dev, 3397 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3398 pi->mod_type); 3399 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3400 ifmedia_set(media, m | IFM_UNKNOWN); 3401 break; 3402 } 3403 3404 PORT_UNLOCK(pi); 3405 } 3406 3407 #define FW_MAC_EXACT_CHUNK 7 3408 3409 /* 3410 * Program the port's XGMAC based on parameters in ifnet. 
The caller also 3411 * indicates which parameters should be programmed (the rest are left alone). 3412 */ 3413 int 3414 update_mac_settings(struct ifnet *ifp, int flags) 3415 { 3416 int rc = 0; 3417 struct vi_info *vi = ifp->if_softc; 3418 struct port_info *pi = vi->pi; 3419 struct adapter *sc = pi->adapter; 3420 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3421 3422 ASSERT_SYNCHRONIZED_OP(sc); 3423 KASSERT(flags, ("%s: not told what to update.", __func__)); 3424 3425 if (flags & XGMAC_MTU) 3426 mtu = ifp->if_mtu; 3427 3428 if (flags & XGMAC_PROMISC) 3429 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3430 3431 if (flags & XGMAC_ALLMULTI) 3432 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3433 3434 if (flags & XGMAC_VLANEX) 3435 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 3436 3437 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3438 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3439 allmulti, 1, vlanex, false); 3440 if (rc) { 3441 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3442 rc); 3443 return (rc); 3444 } 3445 } 3446 3447 if (flags & XGMAC_UCADDR) { 3448 uint8_t ucaddr[ETHER_ADDR_LEN]; 3449 3450 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3451 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3452 ucaddr, true, true); 3453 if (rc < 0) { 3454 rc = -rc; 3455 if_printf(ifp, "change_mac failed: %d\n", rc); 3456 return (rc); 3457 } else { 3458 vi->xact_addr_filt = rc; 3459 rc = 0; 3460 } 3461 } 3462 3463 if (flags & XGMAC_MCADDRS) { 3464 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3465 int del = 1; 3466 uint64_t hash = 0; 3467 struct ifmultiaddr *ifma; 3468 int i = 0, j; 3469 3470 if_maddr_rlock(ifp); 3471 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3472 if (ifma->ifma_addr->sa_family != AF_LINK) 3473 continue; 3474 mcaddr[i] = 3475 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3476 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3477 i++; 3478 3479 if (i == FW_MAC_EXACT_CHUNK) { 3480 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3481 del, i, mcaddr, NULL, &hash, 0); 3482 if (rc < 0) { 3483 rc = -rc; 3484 for (j = 0; j < i; j++) { 3485 if_printf(ifp, 3486 "failed to add mc address" 3487 " %02x:%02x:%02x:" 3488 "%02x:%02x:%02x rc=%d\n", 3489 mcaddr[j][0], mcaddr[j][1], 3490 mcaddr[j][2], mcaddr[j][3], 3491 mcaddr[j][4], mcaddr[j][5], 3492 rc); 3493 } 3494 goto mcfail; 3495 } 3496 del = 0; 3497 i = 0; 3498 } 3499 } 3500 if (i > 0) { 3501 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3502 mcaddr, NULL, &hash, 0); 3503 if (rc < 0) { 3504 rc = -rc; 3505 for (j = 0; j < i; j++) { 3506 if_printf(ifp, 3507 "failed to add mc address" 3508 " %02x:%02x:%02x:" 3509 "%02x:%02x:%02x rc=%d\n", 3510 mcaddr[j][0], mcaddr[j][1], 3511 mcaddr[j][2], mcaddr[j][3], 3512 mcaddr[j][4], mcaddr[j][5], 3513 rc); 3514 } 3515 goto mcfail; 3516 } 3517 } 3518 3519 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3520 if (rc != 0) 3521 if_printf(ifp, "failed to set mc address hash: %d", rc); 3522 mcfail: 3523 if_maddr_runlock(ifp); 3524 } 3525 3526 return (rc); 3527 } 3528 3529 /* 3530 * {begin|end}_synchronized_op must be called from the same thread. 3531 */ 3532 int 3533 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3534 char *wmesg) 3535 { 3536 int rc, pri; 3537 3538 #ifdef WITNESS 3539 /* the caller thinks it's ok to sleep, but is it really? 
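 * The typical call pattern, sketched here with an invented wmesg, is:
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc)
 *		return (rc);
 *	... touch the hardware ...
 *	end_synchronized_op(sc, 0);
 *
 * WITNESS_WARN below complains if this thread holds a non-sleepable
 * lock at this point.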
*/ 3540 if (flags & SLEEP_OK) 3541 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3542 "begin_synchronized_op"); 3543 #endif 3544 3545 if (flags & INTR_OK) 3546 pri = PCATCH; 3547 else 3548 pri = 0; 3549 3550 ADAPTER_LOCK(sc); 3551 for (;;) { 3552 3553 if (vi && IS_DOOMED(vi)) { 3554 rc = ENXIO; 3555 goto done; 3556 } 3557 3558 if (!IS_BUSY(sc)) { 3559 rc = 0; 3560 break; 3561 } 3562 3563 if (!(flags & SLEEP_OK)) { 3564 rc = EBUSY; 3565 goto done; 3566 } 3567 3568 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3569 rc = EINTR; 3570 goto done; 3571 } 3572 } 3573 3574 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3575 SET_BUSY(sc); 3576 #ifdef INVARIANTS 3577 sc->last_op = wmesg; 3578 sc->last_op_thr = curthread; 3579 sc->last_op_flags = flags; 3580 #endif 3581 3582 done: 3583 if (!(flags & HOLD_LOCK) || rc) 3584 ADAPTER_UNLOCK(sc); 3585 3586 return (rc); 3587 } 3588 3589 /* 3590 * Tell if_ioctl and if_init that the VI is going away. This is a 3591 * special variant of begin_synchronized_op and must be paired with a 3592 * call to end_synchronized_op. 3593 */ 3594 void 3595 doom_vi(struct adapter *sc, struct vi_info *vi) 3596 { 3597 3598 ADAPTER_LOCK(sc); 3599 SET_DOOMED(vi); 3600 wakeup(&sc->flags); 3601 while (IS_BUSY(sc)) 3602 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3603 SET_BUSY(sc); 3604 #ifdef INVARIANTS 3605 sc->last_op = "t4detach"; 3606 sc->last_op_thr = curthread; 3607 sc->last_op_flags = 0; 3608 #endif 3609 ADAPTER_UNLOCK(sc); 3610 } 3611 3612 /* 3613 * {begin|end}_synchronized_op must be called from the same thread. 3614 */ 3615 void 3616 end_synchronized_op(struct adapter *sc, int flags) 3617 { 3618 3619 if (flags & LOCK_HELD) 3620 ADAPTER_LOCK_ASSERT_OWNED(sc); 3621 else 3622 ADAPTER_LOCK(sc); 3623 3624 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3625 CLR_BUSY(sc); 3626 wakeup(&sc->flags); 3627 ADAPTER_UNLOCK(sc); 3628 } 3629 3630 static int 3631 cxgbe_init_synchronized(struct vi_info *vi) 3632 { 3633 struct port_info *pi = vi->pi; 3634 struct adapter *sc = pi->adapter; 3635 struct ifnet *ifp = vi->ifp; 3636 int rc = 0, i; 3637 struct sge_txq *txq; 3638 3639 ASSERT_SYNCHRONIZED_OP(sc); 3640 3641 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3642 return (0); /* already running */ 3643 3644 if (!(sc->flags & FULL_INIT_DONE) && 3645 ((rc = adapter_full_init(sc)) != 0)) 3646 return (rc); /* error message displayed already */ 3647 3648 if (!(vi->flags & VI_INIT_DONE) && 3649 ((rc = vi_full_init(vi)) != 0)) 3650 return (rc); /* error message displayed already */ 3651 3652 rc = update_mac_settings(ifp, XGMAC_ALL); 3653 if (rc) 3654 goto done; /* error message displayed already */ 3655 3656 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 3657 if (rc != 0) { 3658 if_printf(ifp, "enable_vi failed: %d\n", rc); 3659 goto done; 3660 } 3661 3662 /* 3663 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3664 * if this changes. 3665 */ 3666 3667 for_each_txq(vi, i, txq) { 3668 TXQ_LOCK(txq); 3669 txq->eq.flags |= EQ_ENABLED; 3670 TXQ_UNLOCK(txq); 3671 } 3672 3673 /* 3674 * The first iq of the first port to come up is used for tracing. 3675 */ 3676 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 3677 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 3678 t4_write_reg(sc, is_t4(sc) ? 
A_MPS_TRC_RSS_CONTROL : 3679 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3680 V_QUEUENUMBER(sc->traceq)); 3681 pi->flags |= HAS_TRACEQ; 3682 } 3683 3684 /* all ok */ 3685 PORT_LOCK(pi); 3686 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3687 pi->up_vis++; 3688 3689 if (pi->nvi > 1) 3690 callout_reset(&vi->tick, hz, vi_tick, vi); 3691 else 3692 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3693 PORT_UNLOCK(pi); 3694 done: 3695 if (rc != 0) 3696 cxgbe_uninit_synchronized(vi); 3697 3698 return (rc); 3699 } 3700 3701 /* 3702 * Idempotent. 3703 */ 3704 static int 3705 cxgbe_uninit_synchronized(struct vi_info *vi) 3706 { 3707 struct port_info *pi = vi->pi; 3708 struct adapter *sc = pi->adapter; 3709 struct ifnet *ifp = vi->ifp; 3710 int rc, i; 3711 struct sge_txq *txq; 3712 3713 ASSERT_SYNCHRONIZED_OP(sc); 3714 3715 if (!(vi->flags & VI_INIT_DONE)) { 3716 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3717 ("uninited VI is running")); 3718 return (0); 3719 } 3720 3721 /* 3722 * Disable the VI so that all its data in either direction is discarded 3723 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 3724 * tick) intact as the TP can deliver negative advice or data that it's 3725 * holding in its RAM (for an offloaded connection) even after the VI is 3726 * disabled. 3727 */ 3728 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 3729 if (rc) { 3730 if_printf(ifp, "disable_vi failed: %d\n", rc); 3731 return (rc); 3732 } 3733 3734 for_each_txq(vi, i, txq) { 3735 TXQ_LOCK(txq); 3736 txq->eq.flags &= ~EQ_ENABLED; 3737 TXQ_UNLOCK(txq); 3738 } 3739 3740 PORT_LOCK(pi); 3741 if (pi->nvi == 1) 3742 callout_stop(&pi->tick); 3743 else 3744 callout_stop(&vi->tick); 3745 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3746 PORT_UNLOCK(pi); 3747 return (0); 3748 } 3749 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3750 pi->up_vis--; 3751 if (pi->up_vis > 0) { 3752 PORT_UNLOCK(pi); 3753 return (0); 3754 } 3755 PORT_UNLOCK(pi); 3756 3757 pi->link_cfg.link_ok = 0; 3758 pi->link_cfg.speed = 0; 3759 pi->linkdnrc = -1; 3760 t4_os_link_changed(sc, pi->port_id, 0, -1); 3761 3762 return (0); 3763 } 3764 3765 /* 3766 * It is ok for this function to fail midway and return right away. t4_detach 3767 * will walk the entire sc->irq list and clean up whatever is valid. 3768 */ 3769 static int 3770 setup_intr_handlers(struct adapter *sc) 3771 { 3772 int rc, rid, p, q, v; 3773 char s[8]; 3774 struct irq *irq; 3775 struct port_info *pi; 3776 struct vi_info *vi; 3777 struct sge_rxq *rxq; 3778 #ifdef TCP_OFFLOAD 3779 struct sge_ofld_rxq *ofld_rxq; 3780 #endif 3781 #ifdef DEV_NETMAP 3782 struct sge_nm_rxq *nm_rxq; 3783 #endif 3784 #ifdef RSS 3785 int nbuckets = rss_getnumbuckets(); 3786 #endif 3787 3788 /* 3789 * Setup interrupts. 3790 */ 3791 irq = &sc->irq[0]; 3792 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3793 if (sc->intr_count == 1) 3794 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3795 3796 /* Multiple interrupts. 
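 * As a sketch of the vector layout for a 2-port adapter with 4 NIC rxq
 * per port, one VI each, and INTR_RXQ set: vector 0 is the error
 * interrupt, vector 1 the firmware event queue, vectors 2-5 port 0's
 * rxqs, and vectors 6-9 port 1's rxqs.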
*/ 3797 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3798 ("%s: too few intr.", __func__)); 3799 3800 /* The first one is always error intr */ 3801 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3802 if (rc != 0) 3803 return (rc); 3804 irq++; 3805 rid++; 3806 3807 /* The second one is always the firmware event queue */ 3808 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt"); 3809 if (rc != 0) 3810 return (rc); 3811 irq++; 3812 rid++; 3813 3814 for_each_port(sc, p) { 3815 pi = sc->port[p]; 3816 for_each_vi(pi, v, vi) { 3817 vi->first_intr = rid - 1; 3818 #ifdef DEV_NETMAP 3819 if (vi->flags & VI_NETMAP) { 3820 for_each_nm_rxq(vi, q, nm_rxq) { 3821 snprintf(s, sizeof(s), "%d-%d", p, q); 3822 rc = t4_alloc_irq(sc, irq, rid, 3823 t4_nm_intr, nm_rxq, s); 3824 if (rc != 0) 3825 return (rc); 3826 irq++; 3827 rid++; 3828 vi->nintr++; 3829 } 3830 continue; 3831 } 3832 #endif 3833 if (vi->flags & INTR_RXQ) { 3834 for_each_rxq(vi, q, rxq) { 3835 if (v == 0) 3836 snprintf(s, sizeof(s), "%d.%d", 3837 p, q); 3838 else 3839 snprintf(s, sizeof(s), 3840 "%d(%d).%d", p, v, q); 3841 rc = t4_alloc_irq(sc, irq, rid, 3842 t4_intr, rxq, s); 3843 if (rc != 0) 3844 return (rc); 3845 #ifdef RSS 3846 bus_bind_intr(sc->dev, irq->res, 3847 rss_getcpu(q % nbuckets)); 3848 #endif 3849 irq++; 3850 rid++; 3851 vi->nintr++; 3852 } 3853 } 3854 #ifdef TCP_OFFLOAD 3855 if (vi->flags & INTR_OFLD_RXQ) { 3856 for_each_ofld_rxq(vi, q, ofld_rxq) { 3857 snprintf(s, sizeof(s), "%d,%d", p, q); 3858 rc = t4_alloc_irq(sc, irq, rid, 3859 t4_intr, ofld_rxq, s); 3860 if (rc != 0) 3861 return (rc); 3862 irq++; 3863 rid++; 3864 vi->nintr++; 3865 } 3866 } 3867 #endif 3868 } 3869 } 3870 MPASS(irq == &sc->irq[sc->intr_count]); 3871 3872 return (0); 3873 } 3874 3875 int 3876 adapter_full_init(struct adapter *sc) 3877 { 3878 int rc, i; 3879 3880 ASSERT_SYNCHRONIZED_OP(sc); 3881 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3882 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 3883 ("%s: FULL_INIT_DONE already", __func__)); 3884 3885 /* 3886 * queues that belong to the adapter (not any particular port). 3887 */ 3888 rc = t4_setup_adapter_queues(sc); 3889 if (rc != 0) 3890 goto done; 3891 3892 for (i = 0; i < nitems(sc->tq); i++) { 3893 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 3894 taskqueue_thread_enqueue, &sc->tq[i]); 3895 if (sc->tq[i] == NULL) { 3896 device_printf(sc->dev, 3897 "failed to allocate task queue %d\n", i); 3898 rc = ENOMEM; 3899 goto done; 3900 } 3901 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 3902 device_get_nameunit(sc->dev), i); 3903 } 3904 3905 t4_intr_enable(sc); 3906 sc->flags |= FULL_INIT_DONE; 3907 done: 3908 if (rc != 0) 3909 adapter_full_uninit(sc); 3910 3911 return (rc); 3912 } 3913 3914 int 3915 adapter_full_uninit(struct adapter *sc) 3916 { 3917 int i; 3918 3919 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3920 3921 t4_teardown_adapter_queues(sc); 3922 3923 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 3924 taskqueue_free(sc->tq[i]); 3925 sc->tq[i] = NULL; 3926 } 3927 3928 sc->flags &= ~FULL_INIT_DONE; 3929 3930 return (0); 3931 } 3932 3933 #ifdef RSS 3934 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 3935 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 3936 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 3937 RSS_HASHTYPE_RSS_UDP_IPV6) 3938 3939 /* Translates kernel hash types to hardware. 
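 * E.g. RSS_HASHTYPE_RSS_UDP_IPV4 alone maps to UDPEN | IP4FOURTUPEN
 * because the hardware has no standalone UDP enable; vi_full_init()
 * uses the reverse translation below to warn about hashes that were
 * forced on this way.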
*/ 3940 static int 3941 hashconfig_to_hashen(int hashconfig) 3942 { 3943 int hashen = 0; 3944 3945 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 3946 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 3947 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 3948 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 3949 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 3950 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 3951 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 3952 } 3953 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 3954 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 3955 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 3956 } 3957 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 3958 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 3959 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 3960 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 3961 3962 return (hashen); 3963 } 3964 3965 /* Translates hardware hash types to kernel. */ 3966 static int 3967 hashen_to_hashconfig(int hashen) 3968 { 3969 int hashconfig = 0; 3970 3971 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 3972 /* 3973 * If UDP hashing was enabled it must have been enabled for 3974 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 3975 * enabling any 4-tuple hash is nonsense configuration. 3976 */ 3977 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 3978 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 3979 3980 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 3981 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 3982 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 3983 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 3984 } 3985 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 3986 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 3987 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 3988 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 3989 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 3990 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 3991 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 3992 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 3993 3994 return (hashconfig); 3995 } 3996 #endif 3997 3998 int 3999 vi_full_init(struct vi_info *vi) 4000 { 4001 struct adapter *sc = vi->pi->adapter; 4002 struct ifnet *ifp = vi->ifp; 4003 uint16_t *rss; 4004 struct sge_rxq *rxq; 4005 int rc, i, j, hashen; 4006 #ifdef RSS 4007 int nbuckets = rss_getnumbuckets(); 4008 int hashconfig = rss_gethashconfig(); 4009 int extra; 4010 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4011 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4012 #endif 4013 4014 ASSERT_SYNCHRONIZED_OP(sc); 4015 KASSERT((vi->flags & VI_INIT_DONE) == 0, 4016 ("%s: VI_INIT_DONE already", __func__)); 4017 4018 sysctl_ctx_init(&vi->ctx); 4019 vi->flags |= VI_SYSCTL_CTX; 4020 4021 /* 4022 * Allocate tx/rx/fl queues for this VI. 4023 */ 4024 rc = t4_setup_vi_queues(vi); 4025 if (rc != 0) 4026 goto done; /* error message displayed already */ 4027 4028 #ifdef DEV_NETMAP 4029 /* Netmap VIs configure RSS when netmap is enabled. */ 4030 if (vi->flags & VI_NETMAP) { 4031 vi->flags |= VI_INIT_DONE; 4032 return (0); 4033 } 4034 #endif 4035 4036 /* 4037 * Setup RSS for this VI. Save a copy of the RSS table for later use. 
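* The table is filled with the absolute IDs of this VI's rx queues: with kernel RSS each slot takes the queue of the bucket the kernel mapped it to, otherwise the queues are simply repeated round-robin until the table is full.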
4038 */ 4039 if (vi->nrxq > vi->rss_size) { 4040 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 4041 "some queues will never receive traffic.\n", vi->nrxq, 4042 vi->rss_size); 4043 } else if (vi->rss_size % vi->nrxq) { 4044 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 4045 "expect uneven traffic distribution.\n", vi->nrxq, 4046 vi->rss_size); 4047 } 4048 #ifdef RSS 4049 MPASS(RSS_KEYSIZE == 40); 4050 if (vi->nrxq != nbuckets) { 4051 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); " 4052 "performance will be impacted.\n", vi->nrxq, nbuckets); 4053 } 4054 4055 rss_getkey((void *)&raw_rss_key[0]); 4056 for (i = 0; i < nitems(rss_key); i++) { 4057 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 4058 } 4059 t4_write_rss_key(sc, &rss_key[0], -1); 4060 #endif 4061 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 4062 for (i = 0; i < vi->rss_size;) { 4063 #ifdef RSS 4064 j = rss_get_indirection_to_bucket(i); 4065 j %= vi->nrxq; 4066 rxq = &sc->sge.rxq[vi->first_rxq + j]; 4067 rss[i++] = rxq->iq.abs_id; 4068 #else 4069 for_each_rxq(vi, j, rxq) { 4070 rss[i++] = rxq->iq.abs_id; 4071 if (i == vi->rss_size) 4072 break; 4073 } 4074 #endif 4075 } 4076 4077 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 4078 vi->rss_size); 4079 if (rc != 0) { 4080 if_printf(ifp, "rss_config failed: %d\n", rc); 4081 goto done; 4082 } 4083 4084 #ifdef RSS 4085 hashen = hashconfig_to_hashen(hashconfig); 4086 4087 /* 4088 * We may have had to enable some hashes even though the global config 4089 * wants them disabled. This is a potential problem that must be 4090 * reported to the user. 4091 */ 4092 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4093 4094 /* 4095 * If we consider only the supported hash types, then the enabled hashes 4096 * are a superset of the requested hashes. In other words, there cannot 4097 * be any supported hash that was requested but not enabled, but there 4098 * can be hashes that were not requested but had to be enabled. 4099 */ 4100 extra &= SUPPORTED_RSS_HASHTYPES; 4101 MPASS((extra & hashconfig) == 0); 4102 4103 if (extra) { 4104 if_printf(ifp, 4105 "global RSS config (0x%x) cannot be accommodated.\n", 4106 hashconfig); 4107 } 4108 if (extra & RSS_HASHTYPE_RSS_IPV4) 4109 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4110 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4111 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4112 if (extra & RSS_HASHTYPE_RSS_IPV6) 4113 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4114 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4115 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4116 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4117 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4118 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4119 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4120 #else 4121 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4122 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4123 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4124 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4125 #endif 4126 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0]); 4127 if (rc != 0) { 4128 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4129 goto done; 4130 } 4131 4132 vi->rss = rss; 4133 vi->flags |= VI_INIT_DONE; 4134 done: 4135 if (rc != 0) 4136 vi_full_uninit(vi); 4137 4138 return (rc); 4139 } 4140 4141 /* 4142 * Idempotent.
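* Releases everything that vi_full_init() set up. Safe to call even if vi_full_init() failed partway through, which is why the error path there simply invokes it.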
4143 */ 4144 int 4145 vi_full_uninit(struct vi_info *vi) 4146 { 4147 struct port_info *pi = vi->pi; 4148 struct adapter *sc = pi->adapter; 4149 int i; 4150 struct sge_rxq *rxq; 4151 struct sge_txq *txq; 4152 #ifdef TCP_OFFLOAD 4153 struct sge_ofld_rxq *ofld_rxq; 4154 struct sge_wrq *ofld_txq; 4155 #endif 4156 4157 if (vi->flags & VI_INIT_DONE) { 4158 4159 /* Need to quiesce queues. */ 4160 #ifdef DEV_NETMAP 4161 if (vi->flags & VI_NETMAP) 4162 goto skip; 4163 #endif 4164 4165 /* XXX: Only for the first VI? */ 4166 if (IS_MAIN_VI(vi)) 4167 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4168 4169 for_each_txq(vi, i, txq) { 4170 quiesce_txq(sc, txq); 4171 } 4172 4173 #ifdef TCP_OFFLOAD 4174 for_each_ofld_txq(vi, i, ofld_txq) { 4175 quiesce_wrq(sc, ofld_txq); 4176 } 4177 #endif 4178 4179 for_each_rxq(vi, i, rxq) { 4180 quiesce_iq(sc, &rxq->iq); 4181 quiesce_fl(sc, &rxq->fl); 4182 } 4183 4184 #ifdef TCP_OFFLOAD 4185 for_each_ofld_rxq(vi, i, ofld_rxq) { 4186 quiesce_iq(sc, &ofld_rxq->iq); 4187 quiesce_fl(sc, &ofld_rxq->fl); 4188 } 4189 #endif 4190 free(vi->rss, M_CXGBE); 4191 } 4192 #ifdef DEV_NETMAP 4193 skip: 4194 #endif 4195 4196 t4_teardown_vi_queues(vi); 4197 vi->flags &= ~VI_INIT_DONE; 4198 4199 return (0); 4200 } 4201 4202 static void 4203 quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4204 { 4205 struct sge_eq *eq = &txq->eq; 4206 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4207 4208 (void) sc; /* unused */ 4209 4210 #ifdef INVARIANTS 4211 TXQ_LOCK(txq); 4212 MPASS((eq->flags & EQ_ENABLED) == 0); 4213 TXQ_UNLOCK(txq); 4214 #endif 4215 4216 /* Wait for the mp_ring to empty. */ 4217 while (!mp_ring_is_idle(txq->r)) { 4218 mp_ring_check_drainage(txq->r, 0); 4219 pause("rquiesce", 1); 4220 } 4221 4222 /* Then wait for the hardware to finish. */ 4223 while (spg->cidx != htobe16(eq->pidx)) 4224 pause("equiesce", 1); 4225 4226 /* Finally, wait for the driver to reclaim all descriptors. 
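The eq's cidx trails its pidx until reclamation has processed every completed descriptor, so equality below means the queue is fully drained.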
*/ 4227 while (eq->cidx != eq->pidx) 4228 pause("dquiesce", 1); 4229 } 4230 4231 static void 4232 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4233 { 4234 4235 /* XXXTX */ 4236 } 4237 4238 static void 4239 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4240 { 4241 (void) sc; /* unused */ 4242 4243 /* Synchronize with the interrupt handler */ 4244 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4245 pause("iqfree", 1); 4246 } 4247 4248 static void 4249 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4250 { 4251 mtx_lock(&sc->sfl_lock); 4252 FL_LOCK(fl); 4253 fl->flags |= FL_DOOMED; 4254 FL_UNLOCK(fl); 4255 callout_stop(&sc->sfl_callout); 4256 mtx_unlock(&sc->sfl_lock); 4257 4258 KASSERT((fl->flags & FL_STARVING) == 0, 4259 ("%s: still starving", __func__)); 4260 } 4261 4262 static int 4263 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4264 driver_intr_t *handler, void *arg, char *name) 4265 { 4266 int rc; 4267 4268 irq->rid = rid; 4269 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4270 RF_SHAREABLE | RF_ACTIVE); 4271 if (irq->res == NULL) { 4272 device_printf(sc->dev, 4273 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4274 return (ENOMEM); 4275 } 4276 4277 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4278 NULL, handler, arg, &irq->tag); 4279 if (rc != 0) { 4280 device_printf(sc->dev, 4281 "failed to setup interrupt for rid %d, name %s: %d\n", 4282 rid, name, rc); 4283 } else if (name) 4284 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 4285 4286 return (rc); 4287 } 4288 4289 static int 4290 t4_free_irq(struct adapter *sc, struct irq *irq) 4291 { 4292 if (irq->tag) 4293 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4294 if (irq->res) 4295 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4296 4297 bzero(irq, sizeof(*irq)); 4298 4299 return (0); 4300 } 4301 4302 static void 4303 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4304 { 4305 4306 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4307 t4_get_regs(sc, buf, regs->len); 4308 } 4309 4310 #define A_PL_INDIR_CMD 0x1f8 4311 4312 #define S_PL_AUTOINC 31 4313 #define M_PL_AUTOINC 0x1U 4314 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4315 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4316 4317 #define S_PL_VFID 20 4318 #define M_PL_VFID 0xffU 4319 #define V_PL_VFID(x) ((x) << S_PL_VFID) 4320 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4321 4322 #define S_PL_ADDR 0 4323 #define M_PL_ADDR 0xfffffU 4324 #define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4325 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4326 4327 #define A_PL_INDIR_DATA 0x1fc 4328 4329 static uint64_t 4330 read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4331 { 4332 u32 stats[2]; 4333 4334 mtx_assert(&sc->reg_lock, MA_OWNED); 4335 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4336 V_PL_VFID(G_FW_VIID_VIN(viid)) | V_PL_ADDR(VF_MPS_REG(reg))); 4337 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4338 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4339 return (((uint64_t)stats[1]) << 32 | stats[0]); 4340 } 4341 4342 static void 4343 t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4344 struct fw_vi_stats_vf *stats) 4345 { 4346 4347 #define GET_STAT(name) \ 4348 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4349 4350 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4351 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4352 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4353 
stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4354 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4355 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4356 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4357 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4358 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4359 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4360 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4361 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4362 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4363 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4364 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4365 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4366 4367 #undef GET_STAT 4368 } 4369 4370 static void 4371 t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4372 { 4373 int reg; 4374 4375 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4376 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4377 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4378 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4379 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4380 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4381 } 4382 4383 static void 4384 vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4385 { 4386 struct timeval tv; 4387 const struct timeval interval = {0, 250000}; /* 250ms */ 4388 4389 if (!(vi->flags & VI_INIT_DONE)) 4390 return; 4391 4392 getmicrotime(&tv); 4393 timevalsub(&tv, &interval); 4394 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4395 return; 4396 4397 mtx_lock(&sc->reg_lock); 4398 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4399 getmicrotime(&vi->last_refreshed); 4400 mtx_unlock(&sc->reg_lock); 4401 } 4402 4403 static void 4404 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4405 { 4406 int i; 4407 u_int v, tnl_cong_drops; 4408 struct timeval tv; 4409 const struct timeval interval = {0, 250000}; /* 250ms */ 4410 4411 getmicrotime(&tv); 4412 timevalsub(&tv, &interval); 4413 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4414 return; 4415 4416 tnl_cong_drops = 0; 4417 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4418 for (i = 0; i < sc->chip_params->nchan; i++) { 4419 if (pi->rx_chan_map & (1 << i)) { 4420 mtx_lock(&sc->reg_lock); 4421 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4422 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4423 mtx_unlock(&sc->reg_lock); 4424 tnl_cong_drops += v; 4425 } 4426 } 4427 pi->tnl_cong_drops = tnl_cong_drops; 4428 getmicrotime(&pi->last_refreshed); 4429 } 4430 4431 static void 4432 cxgbe_tick(void *arg) 4433 { 4434 struct port_info *pi = arg; 4435 struct adapter *sc = pi->adapter; 4436 4437 PORT_LOCK_ASSERT_OWNED(pi); 4438 cxgbe_refresh_stats(sc, pi); 4439 4440 callout_schedule(&pi->tick, hz); 4441 } 4442 4443 void 4444 vi_tick(void *arg) 4445 { 4446 struct vi_info *vi = arg; 4447 struct adapter *sc = vi->pi->adapter; 4448 4449 vi_refresh_stats(sc, vi); 4450 4451 callout_schedule(&vi->tick, hz); 4452 } 4453 4454 static void 4455 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4456 { 4457 struct ifnet *vlan; 4458 4459 if (arg != ifp || ifp->if_type != IFT_ETHER) 4460 return; 4461 4462 vlan = VLAN_DEVAT(ifp, vid); 4463 VLAN_SETCOOKIE(vlan, ifp); 4464 } 4465 4466 static int 4467 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4468 { 4469 4470 #ifdef INVARIANTS 4471 panic("%s: opcode 0x%02x on iq %p with payload %p", 4472 __func__, rss->opcode, iq, m); 4473 #else 4474 log(LOG_ERR, "%s: opcode 
0x%02x on iq %p with payload %p\n", 4475 __func__, rss->opcode, iq, m); 4476 m_freem(m); 4477 #endif 4478 return (EDOOFUS); 4479 } 4480 4481 int 4482 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 4483 { 4484 uintptr_t *loc, new; 4485 4486 if (opcode >= nitems(sc->cpl_handler)) 4487 return (EINVAL); 4488 4489 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; 4490 loc = (uintptr_t *) &sc->cpl_handler[opcode]; 4491 atomic_store_rel_ptr(loc, new); 4492 4493 return (0); 4494 } 4495 4496 static int 4497 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) 4498 { 4499 4500 #ifdef INVARIANTS 4501 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); 4502 #else 4503 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", 4504 __func__, iq, ctrl); 4505 #endif 4506 return (EDOOFUS); 4507 } 4508 4509 int 4510 t4_register_an_handler(struct adapter *sc, an_handler_t h) 4511 { 4512 uintptr_t *loc, new; 4513 4514 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled; 4515 loc = (uintptr_t *) &sc->an_handler; 4516 atomic_store_rel_ptr(loc, new); 4517 4518 return (0); 4519 } 4520 4521 static int 4522 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) 4523 { 4524 const struct cpl_fw6_msg *cpl = 4525 __containerof(rpl, struct cpl_fw6_msg, data[0]); 4526 4527 #ifdef INVARIANTS 4528 panic("%s: fw_msg type %d", __func__, cpl->type); 4529 #else 4530 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); 4531 #endif 4532 return (EDOOFUS); 4533 } 4534 4535 int 4536 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 4537 { 4538 uintptr_t *loc, new; 4539 4540 if (type >= nitems(sc->fw_msg_handler)) 4541 return (EINVAL); 4542 4543 /* 4544 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 4545 * handler dispatch table. Reject any attempt to install a handler for 4546 * these subtypes. 4547 */ 4548 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) 4549 return (EINVAL); 4550 4551 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled; 4552 loc = (uintptr_t *) &sc->fw_msg_handler[type]; 4553 atomic_store_rel_ptr(loc, new); 4554 4555 return (0); 4556 } 4557 4558 static void 4559 t4_sysctls(struct adapter *sc) 4560 { 4561 struct sysctl_ctx_list *ctx; 4562 struct sysctl_oid *oid; 4563 struct sysctl_oid_list *children, *c0; 4564 static char *caps[] = { 4565 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */ 4566 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL" /* caps[1] niccaps */ 4567 "\6HASHFILTER\7ETHOFLD", 4568 "\20\1TOE", /* caps[2] toecaps */ 4569 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */ 4570 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */ 4571 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD" 4572 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD", 4573 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */ 4574 "\4PO_INITIATOR\5PO_TARGET" 4575 }; 4576 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 4577 4578 ctx = device_get_sysctl_ctx(sc->dev); 4579 4580 /* 4581 * dev.t4nex.X.
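* Nexus-wide nodes: static properties of the adapter (nports, hw_revision, firmware_version, etc.) plus a few runtime knobs, readable as, e.g., sysctl dev.t4nex.0.temperature.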
4582 */ 4583 oid = device_get_sysctl_tree(sc->dev); 4584 c0 = children = SYSCTL_CHILDREN(oid); 4585 4586 sc->sc_do_rxcopy = 1; 4587 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4588 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4589 4590 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4591 sc->params.nports, "# of ports"); 4592 4593 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4594 NULL, chip_rev(sc), "chip hardware revision"); 4595 4596 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4597 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4598 4599 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4600 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4601 4602 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4603 sc->cfcsum, "config file checksum"); 4604 4605 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4606 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4607 sysctl_bitfield, "A", "available doorbells"); 4608 4609 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps", 4610 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps, 4611 sysctl_bitfield, "A", "available link capabilities"); 4612 4613 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps", 4614 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps, 4615 sysctl_bitfield, "A", "available NIC capabilities"); 4616 4617 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps", 4618 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps, 4619 sysctl_bitfield, "A", "available TCP offload capabilities"); 4620 4621 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps", 4622 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps, 4623 sysctl_bitfield, "A", "available RDMA capabilities"); 4624 4625 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps", 4626 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps, 4627 sysctl_bitfield, "A", "available iSCSI capabilities"); 4628 4629 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps", 4630 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps, 4631 sysctl_bitfield, "A", "available FCoE capabilities"); 4632 4633 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4634 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4635 4636 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4637 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 4638 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 4639 "interrupt holdoff timer values (us)"); 4640 4641 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4642 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 4643 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 4644 "interrupt holdoff packet counter values"); 4645 4646 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4647 NULL, sc->tids.nftids, "number of filters"); 4648 4649 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4650 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4651 "chip temperature (in Celsius)"); 4652 4653 t4_sge_sysctls(sc, ctx, children); 4654 4655 sc->lro_timeout = 100; 4656 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4657 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4658 4659 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_flags", CTLFLAG_RW, 4660 &sc->debug_flags, 0, "flags to enable runtime debugging"); 4661 4662 #ifdef SBUF_DRAIN 4663 /* 4664 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 
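* Every node here is backed by an sbuf-draining handler that snapshots the relevant hardware or firmware state at read time.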
4665 */ 4666 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4667 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4668 "logs and miscellaneous information"); 4669 children = SYSCTL_CHILDREN(oid); 4670 4671 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4672 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4673 sysctl_cctrl, "A", "congestion control"); 4674 4675 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4676 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4677 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4678 4679 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4680 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4681 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4682 4683 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4684 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4685 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4686 4687 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4688 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4689 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4690 4691 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4692 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4693 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4694 4695 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4696 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4697 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4698 4699 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4700 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4701 chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6, 4702 "A", "CIM logic analyzer"); 4703 4704 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4705 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4706 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4707 4708 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4709 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4710 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4711 4712 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4713 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4714 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4715 4716 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4717 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4718 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4719 4720 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4721 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4722 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4723 4724 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4725 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4726 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4727 4728 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4729 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4730 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 4731 4732 if (chip_id(sc) > CHELSIO_T4) { 4733 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4734 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4735 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4736 4737 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4738 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4739 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4740 } 4741 4742 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4743 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4744 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4745 4746 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4747 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4748 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4749 4750 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4751 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4752 sysctl_cpl_stats, "A", "CPL statistics"); 4753 
4754 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4755 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4756 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4757 4758 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4759 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4760 sysctl_devlog, "A", "firmware's device log"); 4761 4762 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4763 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4764 sysctl_fcoe_stats, "A", "FCoE statistics"); 4765 4766 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4767 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4768 sysctl_hw_sched, "A", "hardware scheduler "); 4769 4770 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4771 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4772 sysctl_l2t, "A", "hardware L2 table"); 4773 4774 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4775 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4776 sysctl_lb_stats, "A", "loopback statistics"); 4777 4778 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4779 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4780 sysctl_meminfo, "A", "memory regions"); 4781 4782 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4783 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4784 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6, 4785 "A", "MPS TCAM entries"); 4786 4787 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4788 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4789 sysctl_path_mtus, "A", "path MTUs"); 4790 4791 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4792 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4793 sysctl_pm_stats, "A", "PM statistics"); 4794 4795 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4796 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4797 sysctl_rdma_stats, "A", "RDMA statistics"); 4798 4799 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4800 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4801 sysctl_tcp_stats, "A", "TCP statistics"); 4802 4803 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4804 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4805 sysctl_tids, "A", "TID information"); 4806 4807 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4808 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4809 sysctl_tp_err_stats, "A", "TP error statistics"); 4810 4811 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4812 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4813 sysctl_tp_la, "A", "TP logic analyzer"); 4814 4815 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4816 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4817 sysctl_tx_rate, "A", "Tx rate"); 4818 4819 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4820 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4821 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4822 4823 if (is_t5(sc)) { 4824 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4825 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4826 sysctl_wcwr_stats, "A", "write combined work requests"); 4827 } 4828 #endif 4829 4830 #ifdef TCP_OFFLOAD 4831 if (is_offload(sc)) { 4832 /* 4833 * dev.t4nex.X.toe. 
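* Mostly RW tunables picked up by the TOE driver when it attaches; the node is created only on offload-capable adapters.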
4834 */ 4835 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4836 NULL, "TOE parameters"); 4837 children = SYSCTL_CHILDREN(oid); 4838 4839 sc->tt.sndbuf = 256 * 1024; 4840 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4841 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4842 4843 sc->tt.ddp = 0; 4844 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4845 &sc->tt.ddp, 0, "DDP allowed"); 4846 4847 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 4848 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 4849 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 4850 4851 sc->tt.ddp_thres = 4852 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 4853 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 4854 &sc->tt.ddp_thres, 0, "DDP threshold"); 4855 4856 sc->tt.rx_coalesce = 1; 4857 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 4858 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 4859 4860 sc->tt.tx_align = 1; 4861 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 4862 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 4863 } 4864 #endif 4865 } 4866 4867 void 4868 vi_sysctls(struct vi_info *vi) 4869 { 4870 struct sysctl_ctx_list *ctx; 4871 struct sysctl_oid *oid; 4872 struct sysctl_oid_list *children; 4873 4874 ctx = device_get_sysctl_ctx(vi->dev); 4875 4876 /* 4877 * dev.[nv](cxgbe|cxl).X. 4878 */ 4879 oid = device_get_sysctl_tree(vi->dev); 4880 children = SYSCTL_CHILDREN(oid); 4881 4882 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 4883 vi->viid, "VI identifier"); 4884 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 4885 &vi->nrxq, 0, "# of rx queues"); 4886 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 4887 &vi->ntxq, 0, "# of tx queues"); 4888 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 4889 &vi->first_rxq, 0, "index of first rx queue"); 4890 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 4891 &vi->first_txq, 0, "index of first tx queue"); 4892 4893 if (vi->flags & VI_NETMAP) 4894 return; 4895 4896 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT | 4897 CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 4898 "Reserve queue 0 for non-flowid packets"); 4899 4900 #ifdef TCP_OFFLOAD 4901 if (vi->nofldrxq != 0) { 4902 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 4903 &vi->nofldrxq, 0, 4904 "# of rx queues for offloaded TCP connections"); 4905 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 4906 &vi->nofldtxq, 0, 4907 "# of tx queues for offloaded TCP connections"); 4908 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 4909 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 4910 "index of first TOE rx queue"); 4911 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 4912 CTLFLAG_RD, &vi->first_ofld_txq, 0, 4913 "index of first TOE tx queue"); 4914 } 4915 #endif 4916 4917 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 4918 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 4919 "holdoff timer index"); 4920 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 4921 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 4922 "holdoff packet counter index"); 4923 4924 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 4925 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 4926 "rx queue size"); 4927 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 4928 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 4929
"tx queue size"); 4930 } 4931 4932 static void 4933 cxgbe_sysctls(struct port_info *pi) 4934 { 4935 struct sysctl_ctx_list *ctx; 4936 struct sysctl_oid *oid; 4937 struct sysctl_oid_list *children; 4938 struct adapter *sc = pi->adapter; 4939 4940 ctx = device_get_sysctl_ctx(pi->dev); 4941 4942 /* 4943 * dev.cxgbe.X. 4944 */ 4945 oid = device_get_sysctl_tree(pi->dev); 4946 children = SYSCTL_CHILDREN(oid); 4947 4948 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 4949 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 4950 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 4951 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 4952 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 4953 "PHY temperature (in Celsius)"); 4954 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 4955 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 4956 "PHY firmware version"); 4957 } 4958 4959 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 4960 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings, 4961 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 4962 4963 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 4964 port_top_speed(pi), "max speed (in Gbps)"); 4965 4966 /* 4967 * dev.cxgbe.X.stats. 4968 */ 4969 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 4970 NULL, "port statistics"); 4971 children = SYSCTL_CHILDREN(oid); 4972 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 4973 &pi->tx_parse_error, 0, 4974 "# of tx packets with invalid length or # of segments"); 4975 4976 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 4977 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 4978 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 4979 sysctl_handle_t4_reg64, "QU", desc) 4980 4981 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 4982 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 4983 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 4984 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 4985 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 4986 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 4987 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 4988 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 4989 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 4990 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 4991 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 4992 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 4993 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 4994 "# of tx frames in this range", 4995 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 4996 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 4997 "# of tx frames in this range", 4998 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 4999 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 5000 "# of tx frames in this range", 5001 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5002 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5003 "# of tx frames in this range", 5004 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5005 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5006 "# of tx frames in this range", 5007 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5008 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5009 "# of tx frames in this range", 5010 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5011 SYSCTL_ADD_T4_REG64(pi, 
"tx_frames_1519_max", 5012 "# of tx frames in this range", 5013 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5014 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5015 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5016 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5017 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5018 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5019 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5020 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5021 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5022 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5023 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5024 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5025 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5026 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5027 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5028 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5029 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5030 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 5031 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5032 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5033 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5034 5035 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5036 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5037 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5038 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5039 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5040 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5041 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5042 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5043 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5044 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5045 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5046 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5047 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5048 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5049 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5050 "# of frames received with bad FCS", 5051 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5052 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5053 "# of frames received with length error", 5054 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5055 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5056 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5057 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5058 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5059 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5060 "# of rx frames in this range", 5061 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5062 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5063 "# of rx frames in this range", 5064 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5065 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5066 "# of rx frames in this range", 5067 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5068 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5069 "# of rx frames in this range", 5070 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5071 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5072 "# of rx frames in this range", 5073 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5074 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5075 "# of rx frames in this range", 5076 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5077 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5078 "# of rx frames in this range", 5079 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5080 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5081 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5082 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5083 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5084 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5085 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5086 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5087 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5088 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5089 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5090 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5091 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5092 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5093 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5094 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5095 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5096 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5097 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5098 5099 #undef SYSCTL_ADD_T4_REG64 5100 5101 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5102 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5103 &pi->stats.name, desc) 5104 5105 /* We get these from port_stats and they may be stale by up to 1s */ 5106 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5107 "# drops due to buffer-group 0 overflows"); 5108 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5109 "# drops due to buffer-group 1 overflows"); 5110 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5111 "# drops due to buffer-group 2 overflows"); 5112 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5113 "# drops due to buffer-group 3 overflows"); 5114 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5115 "# of buffer-group 0 truncated packets"); 5116 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5117 "# of buffer-group 1 truncated packets"); 5118 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5119 "# of buffer-group 2 truncated packets"); 5120 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5121 "# of buffer-group 3 truncated packets"); 5122 5123 #undef SYSCTL_ADD_T4_PORTSTAT 5124 } 5125 5126 static int 5127 sysctl_int_array(SYSCTL_HANDLER_ARGS) 5128 { 5129 int rc, *i, space = 0; 5130 struct sbuf sb; 5131 5132 sbuf_new_for_sysctl(&sb, NULL, 64, req); 5133 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5134 if (space) 5135 sbuf_printf(&sb, " "); 5136 sbuf_printf(&sb, "%d", *i); 5137 space = 1; 5138 } 5139 rc = sbuf_finish(&sb); 5140 sbuf_delete(&sb); 5141 return (rc); 5142 } 5143 5144 static int 5145 sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5146 { 5147 int rc; 5148 struct sbuf *sb; 5149 5150 rc = sysctl_wire_old_buffer(req, 0); 5151 if (rc != 0) 5152 return (rc); 5153 5154 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5155 if (sb == NULL) 5156 return (ENOMEM); 5157 5158 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5159 rc = sbuf_finish(sb); 5160
sbuf_delete(sb); 5161 5162 return (rc); 5163 } 5164 5165 static int 5166 sysctl_btphy(SYSCTL_HANDLER_ARGS) 5167 { 5168 struct port_info *pi = arg1; 5169 int op = arg2; 5170 struct adapter *sc = pi->adapter; 5171 u_int v; 5172 int rc; 5173 5174 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5175 if (rc) 5176 return (rc); 5177 /* XXX: magic numbers */ 5178 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, 5179 &v); 5180 end_synchronized_op(sc, 0); 5181 if (rc) 5182 return (rc); 5183 if (op == 0) 5184 v /= 256; 5185 5186 rc = sysctl_handle_int(oidp, &v, 0, req); 5187 return (rc); 5188 } 5189 5190 static int 5191 sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5192 { 5193 struct vi_info *vi = arg1; 5194 int rc, val; 5195 5196 val = vi->rsrv_noflowq; 5197 rc = sysctl_handle_int(oidp, &val, 0, req); 5198 if (rc != 0 || req->newptr == NULL) 5199 return (rc); 5200 5201 if ((val >= 1) && (vi->ntxq > 1)) 5202 vi->rsrv_noflowq = 1; 5203 else 5204 vi->rsrv_noflowq = 0; 5205 5206 return (rc); 5207 } 5208 5209 static int 5210 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5211 { 5212 struct vi_info *vi = arg1; 5213 struct adapter *sc = vi->pi->adapter; 5214 int idx, rc, i; 5215 struct sge_rxq *rxq; 5216 #ifdef TCP_OFFLOAD 5217 struct sge_ofld_rxq *ofld_rxq; 5218 #endif 5219 uint8_t v; 5220 5221 idx = vi->tmr_idx; 5222 5223 rc = sysctl_handle_int(oidp, &idx, 0, req); 5224 if (rc != 0 || req->newptr == NULL) 5225 return (rc); 5226 5227 if (idx < 0 || idx >= SGE_NTIMERS) 5228 return (EINVAL); 5229 5230 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5231 "t4tmr"); 5232 if (rc) 5233 return (rc); 5234 5235 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5236 for_each_rxq(vi, i, rxq) { 5237 #ifdef atomic_store_rel_8 5238 atomic_store_rel_8(&rxq->iq.intr_params, v); 5239 #else 5240 rxq->iq.intr_params = v; 5241 #endif 5242 } 5243 #ifdef TCP_OFFLOAD 5244 for_each_ofld_rxq(vi, i, ofld_rxq) { 5245 #ifdef atomic_store_rel_8 5246 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5247 #else 5248 ofld_rxq->iq.intr_params = v; 5249 #endif 5250 } 5251 #endif 5252 vi->tmr_idx = idx; 5253 5254 end_synchronized_op(sc, LOCK_HELD); 5255 return (0); 5256 } 5257 5258 static int 5259 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5260 { 5261 struct vi_info *vi = arg1; 5262 struct adapter *sc = vi->pi->adapter; 5263 int idx, rc; 5264 5265 idx = vi->pktc_idx; 5266 5267 rc = sysctl_handle_int(oidp, &idx, 0, req); 5268 if (rc != 0 || req->newptr == NULL) 5269 return (rc); 5270 5271 if (idx < -1 || idx >= SGE_NCOUNTERS) 5272 return (EINVAL); 5273 5274 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5275 "t4pktc"); 5276 if (rc) 5277 return (rc); 5278 5279 if (vi->flags & VI_INIT_DONE) 5280 rc = EBUSY; /* cannot be changed once the queues are created */ 5281 else 5282 vi->pktc_idx = idx; 5283 5284 end_synchronized_op(sc, LOCK_HELD); 5285 return (rc); 5286 } 5287 5288 static int 5289 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5290 { 5291 struct vi_info *vi = arg1; 5292 struct adapter *sc = vi->pi->adapter; 5293 int qsize, rc; 5294 5295 qsize = vi->qsize_rxq; 5296 5297 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5298 if (rc != 0 || req->newptr == NULL) 5299 return (rc); 5300 5301 if (qsize < 128 || (qsize & 7)) 5302 return (EINVAL); 5303 5304 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5305 "t4rxqs"); 5306 if (rc) 5307 return (rc); 5308 5309 if (vi->flags & VI_INIT_DONE) 5310 rc = EBUSY; /* cannot be changed once the queues are 
created */ 5311 else 5312 vi->qsize_rxq = qsize; 5313 5314 end_synchronized_op(sc, LOCK_HELD); 5315 return (rc); 5316 } 5317 5318 static int 5319 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5320 { 5321 struct vi_info *vi = arg1; 5322 struct adapter *sc = vi->pi->adapter; 5323 int qsize, rc; 5324 5325 qsize = vi->qsize_txq; 5326 5327 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5328 if (rc != 0 || req->newptr == NULL) 5329 return (rc); 5330 5331 if (qsize < 128 || qsize > 65536) 5332 return (EINVAL); 5333 5334 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5335 "t4txqs"); 5336 if (rc) 5337 return (rc); 5338 5339 if (vi->flags & VI_INIT_DONE) 5340 rc = EBUSY; /* cannot be changed once the queues are created */ 5341 else 5342 vi->qsize_txq = qsize; 5343 5344 end_synchronized_op(sc, LOCK_HELD); 5345 return (rc); 5346 } 5347 5348 static int 5349 sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5350 { 5351 struct port_info *pi = arg1; 5352 struct adapter *sc = pi->adapter; 5353 struct link_config *lc = &pi->link_cfg; 5354 int rc; 5355 5356 if (req->newptr == NULL) { 5357 struct sbuf *sb; 5358 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5359 5360 rc = sysctl_wire_old_buffer(req, 0); 5361 if (rc != 0) 5362 return(rc); 5363 5364 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5365 if (sb == NULL) 5366 return (ENOMEM); 5367 5368 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5369 rc = sbuf_finish(sb); 5370 sbuf_delete(sb); 5371 } else { 5372 char s[2]; 5373 int n; 5374 5375 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5376 s[1] = 0; 5377 5378 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5379 if (rc != 0) 5380 return(rc); 5381 5382 if (s[1] != 0) 5383 return (EINVAL); 5384 if (s[0] < '0' || s[0] > '9') 5385 return (EINVAL); /* not a number */ 5386 n = s[0] - '0'; 5387 if (n & ~(PAUSE_TX | PAUSE_RX)) 5388 return (EINVAL); /* some other bit is set too */ 5389 5390 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5391 "t4PAUSE"); 5392 if (rc) 5393 return (rc); 5394 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5395 int link_ok = lc->link_ok; 5396 5397 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5398 lc->requested_fc |= n; 5399 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5400 lc->link_ok = link_ok; /* restore */ 5401 } 5402 end_synchronized_op(sc, 0); 5403 } 5404 5405 return (rc); 5406 } 5407 5408 static int 5409 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5410 { 5411 struct adapter *sc = arg1; 5412 int reg = arg2; 5413 uint64_t val; 5414 5415 val = t4_read_reg64(sc, reg); 5416 5417 return (sysctl_handle_64(oidp, &val, 0, req)); 5418 } 5419 5420 static int 5421 sysctl_temperature(SYSCTL_HANDLER_ARGS) 5422 { 5423 struct adapter *sc = arg1; 5424 int rc, t; 5425 uint32_t param, val; 5426 5427 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5428 if (rc) 5429 return (rc); 5430 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5431 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5432 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5433 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5434 end_synchronized_op(sc, 0); 5435 if (rc) 5436 return (rc); 5437 5438 /* unknown is returned as 0 but we display -1 in that case */ 5439 t = val == 0 ? 
-1 : val; 5440 5441 rc = sysctl_handle_int(oidp, &t, 0, req); 5442 return (rc); 5443 } 5444 5445 #ifdef SBUF_DRAIN 5446 static int 5447 sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5448 { 5449 struct adapter *sc = arg1; 5450 struct sbuf *sb; 5451 int rc, i; 5452 uint16_t incr[NMTUS][NCCTRL_WIN]; 5453 static const char *dec_fac[] = { 5454 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5455 "0.9375" 5456 }; 5457 5458 rc = sysctl_wire_old_buffer(req, 0); 5459 if (rc != 0) 5460 return (rc); 5461 5462 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5463 if (sb == NULL) 5464 return (ENOMEM); 5465 5466 t4_read_cong_tbl(sc, incr); 5467 5468 for (i = 0; i < NCCTRL_WIN; ++i) { 5469 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5470 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5471 incr[5][i], incr[6][i], incr[7][i]); 5472 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5473 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5474 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5475 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5476 } 5477 5478 rc = sbuf_finish(sb); 5479 sbuf_delete(sb); 5480 5481 return (rc); 5482 } 5483 5484 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5485 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5486 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5487 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5488 }; 5489 5490 static int 5491 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5492 { 5493 struct adapter *sc = arg1; 5494 struct sbuf *sb; 5495 int rc, i, n, qid = arg2; 5496 uint32_t *buf, *p; 5497 char *qtype; 5498 u_int cim_num_obq = sc->chip_params->cim_num_obq; 5499 5500 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5501 ("%s: bad qid %d\n", __func__, qid)); 5502 5503 if (qid < CIM_NUM_IBQ) { 5504 /* inbound queue */ 5505 qtype = "IBQ"; 5506 n = 4 * CIM_IBQ_SIZE; 5507 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5508 rc = t4_read_cim_ibq(sc, qid, buf, n); 5509 } else { 5510 /* outbound queue */ 5511 qtype = "OBQ"; 5512 qid -= CIM_NUM_IBQ; 5513 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5514 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5515 rc = t4_read_cim_obq(sc, qid, buf, n); 5516 } 5517 5518 if (rc < 0) { 5519 rc = -rc; 5520 goto done; 5521 } 5522 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5523 5524 rc = sysctl_wire_old_buffer(req, 0); 5525 if (rc != 0) 5526 goto done; 5527 5528 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5529 if (sb == NULL) { 5530 rc = ENOMEM; 5531 goto done; 5532 } 5533 5534 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 5535 for (i = 0, p = buf; i < n; i += 16, p += 4) 5536 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5537 p[2], p[3]); 5538 5539 rc = sbuf_finish(sb); 5540 sbuf_delete(sb); 5541 done: 5542 free(buf, M_CXGBE); 5543 return (rc); 5544 } 5545 5546 static int 5547 sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5548 { 5549 struct adapter *sc = arg1; 5550 u_int cfg; 5551 struct sbuf *sb; 5552 uint32_t *buf, *p; 5553 int rc; 5554 5555 MPASS(chip_id(sc) <= CHELSIO_T5); 5556 5557 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5558 if (rc != 0) 5559 return (rc); 5560 5561 rc = sysctl_wire_old_buffer(req, 0); 5562 if (rc != 0) 5563 return (rc); 5564 5565 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5566 if (sb == NULL) 5567 return (ENOMEM); 5568 5569 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5570 M_ZERO | M_WAITOK); 5571 5572 rc = 
-t4_cim_read_la(sc, buf, NULL); 5573 if (rc != 0) 5574 goto done; 5575 5576 sbuf_printf(sb, "Status Data PC%s", 5577 cfg & F_UPDBGLACAPTPCONLY ? "" : 5578 " LS0Stat LS0Addr LS0Data"); 5579 5580 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 5581 if (cfg & F_UPDBGLACAPTPCONLY) { 5582 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5583 p[6], p[7]); 5584 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5585 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5586 p[4] & 0xff, p[5] >> 8); 5587 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5588 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5589 p[1] & 0xf, p[2] >> 4); 5590 } else { 5591 sbuf_printf(sb, 5592 "\n %02x %x%07x %x%07x %08x %08x " 5593 "%08x%08x%08x%08x", 5594 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5595 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5596 p[6], p[7]); 5597 } 5598 } 5599 5600 rc = sbuf_finish(sb); 5601 sbuf_delete(sb); 5602 done: 5603 free(buf, M_CXGBE); 5604 return (rc); 5605 } 5606 5607 static int 5608 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 5609 { 5610 struct adapter *sc = arg1; 5611 u_int cfg; 5612 struct sbuf *sb; 5613 uint32_t *buf, *p; 5614 int rc; 5615 5616 MPASS(chip_id(sc) > CHELSIO_T5); 5617 5618 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5619 if (rc != 0) 5620 return (rc); 5621 5622 rc = sysctl_wire_old_buffer(req, 0); 5623 if (rc != 0) 5624 return (rc); 5625 5626 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5627 if (sb == NULL) 5628 return (ENOMEM); 5629 5630 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5631 M_ZERO | M_WAITOK); 5632 5633 rc = -t4_cim_read_la(sc, buf, NULL); 5634 if (rc != 0) 5635 goto done; 5636 5637 sbuf_printf(sb, "Status Inst Data PC%s", 5638 cfg & F_UPDBGLACAPTPCONLY ? "" : 5639 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 5640 5641 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 5642 if (cfg & F_UPDBGLACAPTPCONLY) { 5643 sbuf_printf(sb, "\n %02x %08x %08x %08x", 5644 p[3] & 0xff, p[2], p[1], p[0]); 5645 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 5646 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 5647 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 5648 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 5649 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 5650 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 5651 p[6] >> 16); 5652 } else { 5653 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 5654 "%08x %08x %08x %08x %08x %08x", 5655 (p[9] >> 16) & 0xff, 5656 p[9] & 0xffff, p[8] >> 16, 5657 p[8] & 0xffff, p[7] >> 16, 5658 p[7] & 0xffff, p[6] >> 16, 5659 p[2], p[1], p[0], p[5], p[4], p[3]); 5660 } 5661 } 5662 5663 rc = sbuf_finish(sb); 5664 sbuf_delete(sb); 5665 done: 5666 free(buf, M_CXGBE); 5667 return (rc); 5668 } 5669 5670 static int 5671 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 5672 { 5673 struct adapter *sc = arg1; 5674 u_int i; 5675 struct sbuf *sb; 5676 uint32_t *buf, *p; 5677 int rc; 5678 5679 rc = sysctl_wire_old_buffer(req, 0); 5680 if (rc != 0) 5681 return (rc); 5682 5683 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5684 if (sb == NULL) 5685 return (ENOMEM); 5686 5687 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 5688 M_ZERO | M_WAITOK); 5689 5690 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 5691 p = buf; 5692 5693 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5694 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 5695 p[1], p[0]); 5696 } 5697 5698 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 5699 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5700 
sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 5701 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 5702 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 5703 (p[1] >> 2) | ((p[2] & 3) << 30), 5704 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 5705 p[0] & 1); 5706 } 5707 5708 rc = sbuf_finish(sb); 5709 sbuf_delete(sb); 5710 free(buf, M_CXGBE); 5711 return (rc); 5712 } 5713 5714 static int 5715 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 5716 { 5717 struct adapter *sc = arg1; 5718 u_int i; 5719 struct sbuf *sb; 5720 uint32_t *buf, *p; 5721 int rc; 5722 5723 rc = sysctl_wire_old_buffer(req, 0); 5724 if (rc != 0) 5725 return (rc); 5726 5727 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5728 if (sb == NULL) 5729 return (ENOMEM); 5730 5731 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 5732 M_ZERO | M_WAITOK); 5733 5734 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 5735 p = buf; 5736 5737 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 5738 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5739 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 5740 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 5741 p[4], p[3], p[2], p[1], p[0]); 5742 } 5743 5744 sbuf_printf(sb, "\n\nCntl ID Data"); 5745 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5746 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 5747 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 5748 } 5749 5750 rc = sbuf_finish(sb); 5751 sbuf_delete(sb); 5752 free(buf, M_CXGBE); 5753 return (rc); 5754 } 5755 5756 static int 5757 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 5758 { 5759 struct adapter *sc = arg1; 5760 struct sbuf *sb; 5761 int rc, i; 5762 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5763 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5764 uint16_t thres[CIM_NUM_IBQ]; 5765 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 5766 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 5767 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 5768 5769 cim_num_obq = sc->chip_params->cim_num_obq; 5770 if (is_t4(sc)) { 5771 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 5772 obq_rdaddr = A_UP_OBQ_0_REALADDR; 5773 } else { 5774 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 5775 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 5776 } 5777 nq = CIM_NUM_IBQ + cim_num_obq; 5778 5779 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 5780 if (rc == 0) 5781 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 5782 if (rc != 0) 5783 return (rc); 5784 5785 t4_read_cimq_cfg(sc, base, size, thres); 5786 5787 rc = sysctl_wire_old_buffer(req, 0); 5788 if (rc != 0) 5789 return (rc); 5790 5791 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5792 if (sb == NULL) 5793 return (ENOMEM); 5794 5795 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 5796 5797 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 5798 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 5799 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 5800 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5801 G_QUEREMFLITS(p[2]) * 16); 5802 for ( ; i < nq; i++, p += 4, wr += 2) 5803 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 5804 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 5805 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5806 G_QUEREMFLITS(p[2]) * 16); 5807 5808 rc = sbuf_finish(sb); 5809 sbuf_delete(sb); 5810 5811 return (rc); 5812 } 5813 5814 static int 5815 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 5816 { 5817 struct adapter *sc = arg1; 5818 struct sbuf *sb; 5819 int rc; 5820 struct tp_cpl_stats stats; 5821 5822 rc = 
sysctl_wire_old_buffer(req, 0); 5823 if (rc != 0) 5824 return (rc); 5825 5826 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5827 if (sb == NULL) 5828 return (ENOMEM); 5829 5830 mtx_lock(&sc->reg_lock); 5831 t4_tp_get_cpl_stats(sc, &stats); 5832 mtx_unlock(&sc->reg_lock); 5833 5834 if (sc->chip_params->nchan > 2) { 5835 sbuf_printf(sb, " channel 0 channel 1" 5836 " channel 2 channel 3"); 5837 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 5838 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 5839 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 5840 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 5841 } else { 5842 sbuf_printf(sb, " channel 0 channel 1"); 5843 sbuf_printf(sb, "\nCPL requests: %10u %10u", 5844 stats.req[0], stats.req[1]); 5845 sbuf_printf(sb, "\nCPL responses: %10u %10u", 5846 stats.rsp[0], stats.rsp[1]); 5847 } 5848 5849 rc = sbuf_finish(sb); 5850 sbuf_delete(sb); 5851 5852 return (rc); 5853 } 5854 5855 static int 5856 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 5857 { 5858 struct adapter *sc = arg1; 5859 struct sbuf *sb; 5860 int rc; 5861 struct tp_usm_stats stats; 5862 5863 rc = sysctl_wire_old_buffer(req, 0); 5864 if (rc != 0) 5865 return (rc); 5866 5867 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5868 if (sb == NULL) 5869 return (ENOMEM); 5870 5871 t4_get_usm_stats(sc, &stats); 5872 5873 sbuf_printf(sb, "Frames: %u\n", stats.frames); 5874 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 5875 sbuf_printf(sb, "Drops: %u", stats.drops); 5876 5877 rc = sbuf_finish(sb); 5878 sbuf_delete(sb); 5879 5880 return (rc); 5881 } 5882 5883 static const char * const devlog_level_strings[] = { 5884 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 5885 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 5886 [FW_DEVLOG_LEVEL_ERR] = "ERR", 5887 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 5888 [FW_DEVLOG_LEVEL_INFO] = "INFO", 5889 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 5890 }; 5891 5892 static const char * const devlog_facility_strings[] = { 5893 [FW_DEVLOG_FACILITY_CORE] = "CORE", 5894 [FW_DEVLOG_FACILITY_CF] = "CF", 5895 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 5896 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 5897 [FW_DEVLOG_FACILITY_RES] = "RES", 5898 [FW_DEVLOG_FACILITY_HW] = "HW", 5899 [FW_DEVLOG_FACILITY_FLR] = "FLR", 5900 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 5901 [FW_DEVLOG_FACILITY_PHY] = "PHY", 5902 [FW_DEVLOG_FACILITY_MAC] = "MAC", 5903 [FW_DEVLOG_FACILITY_PORT] = "PORT", 5904 [FW_DEVLOG_FACILITY_VI] = "VI", 5905 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 5906 [FW_DEVLOG_FACILITY_ACL] = "ACL", 5907 [FW_DEVLOG_FACILITY_TM] = "TM", 5908 [FW_DEVLOG_FACILITY_QFC] = "QFC", 5909 [FW_DEVLOG_FACILITY_DCB] = "DCB", 5910 [FW_DEVLOG_FACILITY_ETH] = "ETH", 5911 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 5912 [FW_DEVLOG_FACILITY_RI] = "RI", 5913 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 5914 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 5915 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 5916 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 5917 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 5918 }; 5919 5920 static int 5921 sysctl_devlog(SYSCTL_HANDLER_ARGS) 5922 { 5923 struct adapter *sc = arg1; 5924 struct devlog_params *dparams = &sc->params.devlog; 5925 struct fw_devlog_e *buf, *e; 5926 int i, j, rc, nentries, first = 0; 5927 struct sbuf *sb; 5928 uint64_t ftstamp = UINT64_MAX; 5929 5930 if (dparams->addr == 0) 5931 return (ENXIO); 5932 5933 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 5934 if (buf == NULL) 5935 return (ENOMEM); 5936 5937 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 5938 if (rc != 0) 5939 goto done; 5940 5941
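/* The firmware devlog is a circular buffer; convert each entry to host byte order while scanning for the lowest timestamp, which marks the oldest entry and therefore the point where the dump should begin. */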
nentries = dparams->size / sizeof(struct fw_devlog_e); 5942 for (i = 0; i < nentries; i++) { 5943 e = &buf[i]; 5944 5945 if (e->timestamp == 0) 5946 break; /* end */ 5947 5948 e->timestamp = be64toh(e->timestamp); 5949 e->seqno = be32toh(e->seqno); 5950 for (j = 0; j < 8; j++) 5951 e->params[j] = be32toh(e->params[j]); 5952 5953 if (e->timestamp < ftstamp) { 5954 ftstamp = e->timestamp; 5955 first = i; 5956 } 5957 } 5958 5959 if (buf[first].timestamp == 0) 5960 goto done; /* nothing in the log */ 5961 5962 rc = sysctl_wire_old_buffer(req, 0); 5963 if (rc != 0) 5964 goto done; 5965 5966 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5967 if (sb == NULL) { 5968 rc = ENOMEM; 5969 goto done; 5970 } 5971 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 5972 "Seq#", "Tstamp", "Level", "Facility", "Message"); 5973 5974 i = first; 5975 do { 5976 e = &buf[i]; 5977 if (e->timestamp == 0) 5978 break; /* end */ 5979 5980 sbuf_printf(sb, "%10d %15ju %8s %8s ", 5981 e->seqno, e->timestamp, 5982 (e->level < nitems(devlog_level_strings) ? 5983 devlog_level_strings[e->level] : "UNKNOWN"), 5984 (e->facility < nitems(devlog_facility_strings) ? 5985 devlog_facility_strings[e->facility] : "UNKNOWN")); 5986 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 5987 e->params[2], e->params[3], e->params[4], 5988 e->params[5], e->params[6], e->params[7]); 5989 5990 if (++i == nentries) 5991 i = 0; 5992 } while (i != first); 5993 5994 rc = sbuf_finish(sb); 5995 sbuf_delete(sb); 5996 done: 5997 free(buf, M_CXGBE); 5998 return (rc); 5999 } 6000 6001 static int 6002 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6003 { 6004 struct adapter *sc = arg1; 6005 struct sbuf *sb; 6006 int rc; 6007 struct tp_fcoe_stats stats[MAX_NCHAN]; 6008 int i, nchan = sc->chip_params->nchan; 6009 6010 rc = sysctl_wire_old_buffer(req, 0); 6011 if (rc != 0) 6012 return (rc); 6013 6014 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6015 if (sb == NULL) 6016 return (ENOMEM); 6017 6018 for (i = 0; i < nchan; i++) 6019 t4_get_fcoe_stats(sc, i, &stats[i]); 6020 6021 if (nchan > 2) { 6022 sbuf_printf(sb, " channel 0 channel 1" 6023 " channel 2 channel 3"); 6024 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6025 stats[0].octets_ddp, stats[1].octets_ddp, 6026 stats[2].octets_ddp, stats[3].octets_ddp); 6027 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6028 stats[0].frames_ddp, stats[1].frames_ddp, 6029 stats[2].frames_ddp, stats[3].frames_ddp); 6030 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6031 stats[0].frames_drop, stats[1].frames_drop, 6032 stats[2].frames_drop, stats[3].frames_drop); 6033 } else { 6034 sbuf_printf(sb, " channel 0 channel 1"); 6035 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6036 stats[0].octets_ddp, stats[1].octets_ddp); 6037 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6038 stats[0].frames_ddp, stats[1].frames_ddp); 6039 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6040 stats[0].frames_drop, stats[1].frames_drop); 6041 } 6042 6043 rc = sbuf_finish(sb); 6044 sbuf_delete(sb); 6045 6046 return (rc); 6047 } 6048 6049 static int 6050 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6051 { 6052 struct adapter *sc = arg1; 6053 struct sbuf *sb; 6054 int rc, i; 6055 unsigned int map, kbps, ipg, mode; 6056 unsigned int pace_tab[NTX_SCHED]; 6057 6058 rc = sysctl_wire_old_buffer(req, 0); 6059 if (rc != 0) 6060 return (rc); 6061 6062 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6063 if (sb == NULL) 6064 return (ENOMEM); 6065 6066 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6067 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 
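/* The pace table read below supplies the per-scheduler "Flow IPG (us)" values shown in the last column of the output. */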
6068 t4_read_pace_tbl(sc, pace_tab); 6069 6070 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6071 "Class IPG (0.1 ns) Flow IPG (us)"); 6072 6073 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6074 t4_get_tx_sched(sc, i, &kbps, &ipg); 6075 sbuf_printf(sb, "\n %u %-5s %u ", i, 6076 (mode & (1 << i)) ? "flow" : "class", map & 3); 6077 if (kbps) 6078 sbuf_printf(sb, "%9u ", kbps); 6079 else 6080 sbuf_printf(sb, " disabled "); 6081 6082 if (ipg) 6083 sbuf_printf(sb, "%13u ", ipg); 6084 else 6085 sbuf_printf(sb, " disabled "); 6086 6087 if (pace_tab[i]) 6088 sbuf_printf(sb, "%10u", pace_tab[i]); 6089 else 6090 sbuf_printf(sb, " disabled"); 6091 } 6092 6093 rc = sbuf_finish(sb); 6094 sbuf_delete(sb); 6095 6096 return (rc); 6097 } 6098 6099 static int 6100 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6101 { 6102 struct adapter *sc = arg1; 6103 struct sbuf *sb; 6104 int rc, i, j; 6105 uint64_t *p0, *p1; 6106 struct lb_port_stats s[2]; 6107 static const char *stat_name[] = { 6108 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6109 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6110 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6111 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6112 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6113 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6114 "BG2FramesTrunc:", "BG3FramesTrunc:" 6115 }; 6116 6117 rc = sysctl_wire_old_buffer(req, 0); 6118 if (rc != 0) 6119 return (rc); 6120 6121 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6122 if (sb == NULL) 6123 return (ENOMEM); 6124 6125 memset(s, 0, sizeof(s)); 6126 6127 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6128 t4_get_lb_stats(sc, i, &s[0]); 6129 t4_get_lb_stats(sc, i + 1, &s[1]); 6130 6131 p0 = &s[0].octets; 6132 p1 = &s[1].octets; 6133 sbuf_printf(sb, "%s Loopback %u" 6134 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 6135 6136 for (j = 0; j < nitems(stat_name); j++) 6137 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6138 *p0++, *p1++); 6139 } 6140 6141 rc = sbuf_finish(sb); 6142 sbuf_delete(sb); 6143 6144 return (rc); 6145 } 6146 6147 static int 6148 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6149 { 6150 int rc = 0; 6151 struct port_info *pi = arg1; 6152 struct sbuf *sb; 6153 6154 rc = sysctl_wire_old_buffer(req, 0); 6155 if (rc != 0) 6156 return (rc); 6157 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6158 if (sb == NULL) 6159 return (ENOMEM); 6160 6161 if (pi->linkdnrc < 0) 6162 sbuf_printf(sb, "n/a"); 6163 else 6164 sbuf_printf(sb, "%s", t4_link_down_rc_str(pi->linkdnrc)); 6165 6166 rc = sbuf_finish(sb); 6167 sbuf_delete(sb); 6168 6169 return (rc); 6170 } 6171 6172 struct mem_desc { 6173 unsigned int base; 6174 unsigned int limit; 6175 unsigned int idx; 6176 }; 6177 6178 static int 6179 mem_desc_cmp(const void *a, const void *b) 6180 { 6181 const struct mem_desc *da = a, *db = b; /* base is unsigned; compare explicitly rather than subtract, which can overflow an int when the bases are far apart */ 6182 return (da->base < db->base ? -1 : da->base > db->base); 6183 } 6184 6185 static void 6186 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6187 unsigned int to) 6188 { 6189 unsigned int size; 6190 6191 size = to - from + 1; 6192 if (size == 0) 6193 return; 6194 6195 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6196 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6197 } 6198 6199 static int 6200 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6201 { 6202 struct adapter *sc = arg1; 6203 struct sbuf *sb; 6204 int rc, i, n; 6205 uint32_t lo, hi, used, alloc; 6206 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6207 static const char *region[] = { 6208 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6209 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6210 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6211 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6212 "RQUDP region:", "PBL region:", "TXPBL region:", 6213 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6214 "On-chip queues:" 6215 }; 6216 struct mem_desc avail[4]; 6217 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6218 struct mem_desc *md = mem; 6219 6220 rc = sysctl_wire_old_buffer(req, 0); 6221 if (rc != 0) 6222 return (rc); 6223 6224 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6225 if (sb == NULL) 6226 return (ENOMEM); 6227 6228 for (i = 0; i < nitems(mem); i++) { 6229 mem[i].limit = 0; 6230 mem[i].idx = i; 6231 } 6232 6233 /* Find and sort the populated memory ranges */ 6234 i = 0; 6235 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6236 if (lo & F_EDRAM0_ENABLE) { 6237 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6238 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6239 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6240 avail[i].idx = 0; 6241 i++; 6242 } 6243 if (lo & F_EDRAM1_ENABLE) { 6244 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6245 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6246 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6247 avail[i].idx = 1; 6248 i++; 6249 } 6250 if (lo & F_EXT_MEM_ENABLE) { 6251 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6252 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6253 avail[i].limit = avail[i].base + 6254 (G_EXT_MEM_SIZE(hi) << 20); 6255 avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ 6256 i++; 6257 } 6258 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 6259 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6260 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6261 avail[i].limit = avail[i].base + 6262 (G_EXT_MEM1_SIZE(hi) << 20); 6263 avail[i].idx = 4; 6264 i++; 6265 } 6266 if (i == 0) { /* no memory available; finish the (empty) sbuf instead of leaking it */ 6267 rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } 6268 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6269 6270 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6271 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6272 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6273 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6274 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6275 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6276 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6277 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6278 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6279 6280 /* the next few have explicit upper bounds */ 6281 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6282 md->limit = md->base - 1 + 6283 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6284 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6285 md++; 6286 6287 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6288 md->limit = md->base - 1 + 6289 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6290 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6291 md++; 6292 6293 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6294 if (chip_id(sc) <= CHELSIO_T5) { 6295 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 6296 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6297 } else { 6298 hi = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6299 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 6300 } 6301 md->limit = 0; 6302 } else { 6303 md->base = 0; 6304 md->idx = nitems(region); /* hide it */ 6305 } 6306 md++; 6307 6308 #define ulp_region(reg) \ 6309 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 6310 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 6311 6312 ulp_region(RX_ISCSI); 6313 ulp_region(RX_TDDP); 6314 ulp_region(TX_TPT); 6315 ulp_region(RX_STAG); 6316 ulp_region(RX_RQ); 6317 ulp_region(RX_RQUDP); 6318 ulp_region(RX_PBL); 6319 ulp_region(TX_PBL); 6320 #undef ulp_region 6321 6322 md->base = 0; 6323 md->idx = nitems(region); 6324 if (!is_t4(sc)) { 6325 uint32_t size = 0; 6326 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 6327 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 6328 6329 if (is_t5(sc)) { 6330 if (sge_ctrl & F_VFIFO_ENABLE) 6331 size = G_DBVFIFO_SIZE(fifo_size); 6332 } else 6333 size = G_T6_DBVFIFO_SIZE(fifo_size); 6334 6335 if (size) { 6336 md->base = G_BASEADDR(t4_read_reg(sc, 6337 A_SGE_DBVFIFO_BADDR)); 6338 md->limit = md->base + (size << 2) - 1; 6339 } 6340 } 6341 md++; 6342 6343 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6344 md->limit = 0; 6345 md++; 6346 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6347 md->limit = 0; 6348 md++; 6349 6350 md->base = sc->vres.ocq.start; 6351 if (sc->vres.ocq.size) 6352 md->limit = md->base + sc->vres.ocq.size - 1; 6353 else 6354 md->idx = nitems(region); /* hide it */ 6355 md++; 6356 6357 /* add any address-space holes, there can be up to 3 */ 6358 for (n = 0; n < i - 1; n++) 6359 if (avail[n].limit < avail[n + 1].base) 6360 (md++)->base = avail[n].limit; 6361 if (avail[n].limit) 6362 (md++)->base = avail[n].limit; 6363 6364 n = md - mem; 6365 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6366 6367 for (lo = 0; lo < i; lo++) 6368
mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6369 avail[lo].limit - 1); 6370 6371 sbuf_printf(sb, "\n"); 6372 for (i = 0; i < n; i++) { 6373 if (mem[i].idx >= nitems(region)) 6374 continue; /* skip holes */ 6375 if (!mem[i].limit) 6376 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6377 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6378 mem[i].limit); 6379 } 6380 6381 sbuf_printf(sb, "\n"); 6382 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6383 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6384 mem_region_show(sb, "uP RAM:", lo, hi); 6385 6386 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6387 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6388 mem_region_show(sb, "uP Extmem2:", lo, hi); 6389 6390 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6391 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6392 G_PMRXMAXPAGE(lo), 6393 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6394 (lo & F_PMRXNUMCHN) ? 2 : 1); 6395 6396 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6397 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6398 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6399 G_PMTXMAXPAGE(lo), 6400 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6401 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6402 sbuf_printf(sb, "%u p-structs\n", 6403 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6404 6405 for (i = 0; i < 4; i++) { 6406 if (chip_id(sc) > CHELSIO_T5) 6407 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 6408 else 6409 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6410 if (is_t5(sc)) { 6411 used = G_T5_USED(lo); 6412 alloc = G_T5_ALLOC(lo); 6413 } else { 6414 used = G_USED(lo); 6415 alloc = G_ALLOC(lo); 6416 } 6417 /* For T6 these are MAC buffer groups */ 6418 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6419 i, used, alloc); 6420 } 6421 for (i = 0; i < sc->chip_params->nchan; i++) { 6422 if (chip_id(sc) > CHELSIO_T5) 6423 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 6424 else 6425 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6426 if (is_t5(sc)) { 6427 used = G_T5_USED(lo); 6428 alloc = G_T5_ALLOC(lo); 6429 } else { 6430 used = G_USED(lo); 6431 alloc = G_ALLOC(lo); 6432 } 6433 /* For T6 these are MAC buffer groups */ 6434 sbuf_printf(sb, 6435 "\nLoopback %d using %u pages out of %u allocated", 6436 i, used, alloc); 6437 } 6438 6439 rc = sbuf_finish(sb); 6440 sbuf_delete(sb); 6441 6442 return (rc); 6443 } 6444 6445 static inline void 6446 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6447 { 6448 *mask = x | y; 6449 y = htobe64(y); 6450 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6451 } 6452 6453 static int 6454 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6455 { 6456 struct adapter *sc = arg1; 6457 struct sbuf *sb; 6458 int rc, i; 6459 6460 MPASS(chip_id(sc) <= CHELSIO_T5); 6461 6462 rc = sysctl_wire_old_buffer(req, 0); 6463 if (rc != 0) 6464 return (rc); 6465 6466 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6467 if (sb == NULL) 6468 return (ENOMEM); 6469 6470 sbuf_printf(sb, 6471 "Idx Ethernet address Mask Vld Ports PF" 6472 " VF Replication P0 P1 P2 P3 ML"); 6473 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6474 uint64_t tcamx, tcamy, mask; 6475 uint32_t cls_lo, cls_hi; 6476 uint8_t addr[ETHER_ADDR_LEN]; 6477 6478 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6479 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6480 if (tcamx & tcamy) 6481 continue; 6482 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6483 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6484 
cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6485 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6486 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], 6487 addr[3], addr[4], addr[5], (uintmax_t)mask, 6488 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6489 G_PORTMAP(cls_hi), G_PF(cls_lo), 6490 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); 6491 6492 if (cls_lo & F_REPLICATE) { 6493 struct fw_ldst_cmd ldst_cmd; 6494 6495 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6496 ldst_cmd.op_to_addrspace = 6497 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6498 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6499 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6500 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6501 ldst_cmd.u.mps.rplc.fid_idx = 6502 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6503 V_FW_LDST_CMD_IDX(i)); 6504 6505 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6506 "t4mps"); 6507 if (rc) 6508 break; 6509 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6510 sizeof(ldst_cmd), &ldst_cmd); 6511 end_synchronized_op(sc, 0); 6512 6513 if (rc != 0) { 6514 sbuf_printf(sb, "%36d", rc); 6515 rc = 0; 6516 } else { 6517 sbuf_printf(sb, " %08x %08x %08x %08x", 6518 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6519 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6520 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6521 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6522 } 6523 } else 6524 sbuf_printf(sb, "%36s", ""); 6525 6526 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6527 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6528 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6529 } 6530 6531 if (rc) 6532 (void) sbuf_finish(sb); 6533 else 6534 rc = sbuf_finish(sb); 6535 sbuf_delete(sb); 6536 6537 return (rc); 6538 } 6539 6540 static int 6541 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 6542 { 6543 struct adapter *sc = arg1; 6544 struct sbuf *sb; 6545 int rc, i; 6546 6547 MPASS(chip_id(sc) > CHELSIO_T5); 6548 6549 rc = sysctl_wire_old_buffer(req, 0); 6550 if (rc != 0) 6551 return (rc); 6552 6553 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6554 if (sb == NULL) 6555 return (ENOMEM); 6556 6557 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 6558 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 6559 " Replication" 6560 " P0 P1 P2 P3 ML\n"); 6561 6562 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6563 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 6564 uint16_t ivlan; 6565 uint64_t tcamx, tcamy, val, mask; 6566 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 6567 uint8_t addr[ETHER_ADDR_LEN]; 6568 6569 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 6570 if (i < 256) 6571 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 6572 else 6573 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 6574 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6575 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6576 tcamy = G_DMACH(val) << 32; 6577 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6578 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6579 lookup_type = G_DATALKPTYPE(data2); 6580 port_num = G_DATAPORTNUM(data2); 6581 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6582 /* Inner header VNI */ 6583 vniy = ((data2 & F_DATAVIDH2) << 23) | 6584 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6585 dip_hit = data2 & F_DATADIPHIT; 6586 vlan_vld = 0; 6587 } else { 6588 vniy = 0; 6589 dip_hit = 0; 6590 vlan_vld = data2 & F_DATAVIDH2; 6591 ivlan = G_VIDL(val); 6592 } 6593 6594 ctl |= V_CTLXYBITSEL(1); 6595 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6596 val = t4_read_reg(sc, 
A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6597 tcamx = G_DMACH(val) << 32; 6598 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6599 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6600 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6601 /* Inner header VNI mask */ 6602 vnix = ((data2 & F_DATAVIDH2) << 23) | 6603 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6604 } else 6605 vnix = 0; 6606 6607 if (tcamx & tcamy) 6608 continue; 6609 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6610 6611 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6612 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6613 6614 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6615 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6616 "%012jx %06x %06x - - %3c" 6617 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 6618 addr[1], addr[2], addr[3], addr[4], addr[5], 6619 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 6620 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6621 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6622 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6623 } else { 6624 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6625 "%012jx - - ", i, addr[0], addr[1], 6626 addr[2], addr[3], addr[4], addr[5], 6627 (uintmax_t)mask); 6628 6629 if (vlan_vld) 6630 sbuf_printf(sb, "%4u Y ", ivlan); 6631 else 6632 sbuf_printf(sb, " - N "); 6633 6634 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 6635 lookup_type ? 'I' : 'O', port_num, 6636 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6637 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6638 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6639 } 6640 6641 6642 if (cls_lo & F_T6_REPLICATE) { 6643 struct fw_ldst_cmd ldst_cmd; 6644 6645 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6646 ldst_cmd.op_to_addrspace = 6647 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6648 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6649 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6650 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6651 ldst_cmd.u.mps.rplc.fid_idx = 6652 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6653 V_FW_LDST_CMD_IDX(i)); 6654 6655 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6656 "t6mps"); 6657 if (rc) 6658 break; 6659 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6660 sizeof(ldst_cmd), &ldst_cmd); 6661 end_synchronized_op(sc, 0); 6662 6663 if (rc != 0) { 6664 sbuf_printf(sb, "%72d", rc); 6665 rc = 0; 6666 } else { 6667 sbuf_printf(sb, " %08x %08x %08x %08x" 6668 " %08x %08x %08x %08x", 6669 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 6670 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 6671 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 6672 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 6673 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6674 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6675 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6676 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6677 } 6678 } else 6679 sbuf_printf(sb, "%72s", ""); 6680 6681 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 6682 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 6683 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 6684 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 6685 } 6686 6687 if (rc) 6688 (void) sbuf_finish(sb); 6689 else 6690 rc = sbuf_finish(sb); 6691 sbuf_delete(sb); 6692 6693 return (rc); 6694 } 6695 6696 static int 6697 sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 6698 { 6699 struct adapter *sc = arg1; 6700 struct sbuf *sb; 6701 int rc; 6702 uint16_t mtus[NMTUS]; 6703 6704 rc = sysctl_wire_old_buffer(req, 0); 6705 if (rc != 0) 6706 return (rc); 6707 6708 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6709 if (sb == NULL) 6710 return (ENOMEM); 6711 6712 t4_read_mtu_tbl(sc, mtus, 
NULL); 6713 6714 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 6715 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 6716 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 6717 mtus[14], mtus[15]); 6718 6719 rc = sbuf_finish(sb); 6720 sbuf_delete(sb); 6721 6722 return (rc); 6723 } 6724 6725 static int 6726 sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 6727 { 6728 struct adapter *sc = arg1; 6729 struct sbuf *sb; 6730 int rc, i; 6731 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 6732 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 6733 static const char *tx_stats[MAX_PM_NSTATS] = { 6734 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 6735 "Tx FIFO wait", NULL, "Tx latency" 6736 }; 6737 static const char *rx_stats[MAX_PM_NSTATS] = { 6738 "Read:", "Write bypass:", "Write mem:", "Flush:", 6739 " Rx FIFO wait", NULL, "Rx latency" 6740 }; 6741 6742 rc = sysctl_wire_old_buffer(req, 0); 6743 if (rc != 0) 6744 return (rc); 6745 6746 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6747 if (sb == NULL) 6748 return (ENOMEM); 6749 6750 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 6751 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 6752 6753 sbuf_printf(sb, " Tx pcmds Tx bytes"); 6754 for (i = 0; i < 4; i++) { 6755 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6756 tx_cyc[i]); 6757 } 6758 6759 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 6760 for (i = 0; i < 4; i++) { 6761 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6762 rx_cyc[i]); 6763 } 6764 6765 if (chip_id(sc) > CHELSIO_T5) { 6766 sbuf_printf(sb, 6767 "\n Total wait Total occupancy"); 6768 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6769 tx_cyc[i]); 6770 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6771 rx_cyc[i]); 6772 6773 i += 2; 6774 MPASS(i < nitems(tx_stats)); 6775 6776 sbuf_printf(sb, 6777 "\n Reads Total wait"); 6778 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6779 tx_cyc[i]); 6780 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6781 rx_cyc[i]); 6782 } 6783 6784 rc = sbuf_finish(sb); 6785 sbuf_delete(sb); 6786 6787 return (rc); 6788 } 6789 6790 static int 6791 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 6792 { 6793 struct adapter *sc = arg1; 6794 struct sbuf *sb; 6795 int rc; 6796 struct tp_rdma_stats stats; 6797 6798 rc = sysctl_wire_old_buffer(req, 0); 6799 if (rc != 0) 6800 return (rc); 6801 6802 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6803 if (sb == NULL) 6804 return (ENOMEM); 6805 6806 mtx_lock(&sc->reg_lock); 6807 t4_tp_get_rdma_stats(sc, &stats); 6808 mtx_unlock(&sc->reg_lock); 6809 6810 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod); 6811 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt); 6812 6813 rc = sbuf_finish(sb); 6814 sbuf_delete(sb); 6815 6816 return (rc); 6817 } 6818 6819 static int 6820 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 6821 { 6822 struct adapter *sc = arg1; 6823 struct sbuf *sb; 6824 int rc; 6825 struct tp_tcp_stats v4, v6; 6826 6827 rc = sysctl_wire_old_buffer(req, 0); 6828 if (rc != 0) 6829 return (rc); 6830 6831 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6832 if (sb == NULL) 6833 return (ENOMEM); 6834 6835 mtx_lock(&sc->reg_lock); 6836 t4_tp_get_tcp_stats(sc, &v4, &v6); 6837 mtx_unlock(&sc->reg_lock); 6838 6839 sbuf_printf(sb, 6840 " IP IPv6\n"); 6841 sbuf_printf(sb, "OutRsts: %20u %20u\n", 6842 v4.tcp_out_rsts, v6.tcp_out_rsts); 6843 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 6844 v4.tcp_in_segs, v6.tcp_in_segs); 6845 sbuf_printf(sb, 
"OutSegs: %20ju %20ju\n", 6846 v4.tcp_out_segs, v6.tcp_out_segs); 6847 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 6848 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 6849 6850 rc = sbuf_finish(sb); 6851 sbuf_delete(sb); 6852 6853 return (rc); 6854 } 6855 6856 static int 6857 sysctl_tids(SYSCTL_HANDLER_ARGS) 6858 { 6859 struct adapter *sc = arg1; 6860 struct sbuf *sb; 6861 int rc; 6862 struct tid_info *t = &sc->tids; 6863 6864 rc = sysctl_wire_old_buffer(req, 0); 6865 if (rc != 0) 6866 return (rc); 6867 6868 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6869 if (sb == NULL) 6870 return (ENOMEM); 6871 6872 if (t->natids) { 6873 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 6874 t->atids_in_use); 6875 } 6876 6877 if (t->ntids) { 6878 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6879 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 6880 6881 if (b) { 6882 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1, 6883 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6884 t->ntids - 1); 6885 } else { 6886 sbuf_printf(sb, "TID range: %u-%u", 6887 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6888 t->ntids - 1); 6889 } 6890 } else 6891 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1); 6892 sbuf_printf(sb, ", in use: %u\n", 6893 atomic_load_acq_int(&t->tids_in_use)); 6894 } 6895 6896 if (t->nstids) { 6897 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 6898 t->stid_base + t->nstids - 1, t->stids_in_use); 6899 } 6900 6901 if (t->nftids) { 6902 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 6903 t->ftid_base + t->nftids - 1); 6904 } 6905 6906 if (t->netids) { 6907 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 6908 t->etid_base + t->netids - 1); 6909 } 6910 6911 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 6912 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 6913 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 6914 6915 rc = sbuf_finish(sb); 6916 sbuf_delete(sb); 6917 6918 return (rc); 6919 } 6920 6921 static int 6922 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 6923 { 6924 struct adapter *sc = arg1; 6925 struct sbuf *sb; 6926 int rc; 6927 struct tp_err_stats stats; 6928 6929 rc = sysctl_wire_old_buffer(req, 0); 6930 if (rc != 0) 6931 return (rc); 6932 6933 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6934 if (sb == NULL) 6935 return (ENOMEM); 6936 6937 mtx_lock(&sc->reg_lock); 6938 t4_tp_get_err_stats(sc, &stats); 6939 mtx_unlock(&sc->reg_lock); 6940 6941 if (sc->chip_params->nchan > 2) { 6942 sbuf_printf(sb, " channel 0 channel 1" 6943 " channel 2 channel 3\n"); 6944 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 6945 stats.mac_in_errs[0], stats.mac_in_errs[1], 6946 stats.mac_in_errs[2], stats.mac_in_errs[3]); 6947 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 6948 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 6949 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 6950 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 6951 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 6952 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 6953 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 6954 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 6955 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 6956 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 6957 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 6958 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 6959 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 6960 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 6961 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 6962 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u 
%10u\n", 6963 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 6964 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 6965 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 6966 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 6967 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 6968 } else { 6969 sbuf_printf(sb, " channel 0 channel 1\n"); 6970 sbuf_printf(sb, "macInErrs: %10u %10u\n", 6971 stats.mac_in_errs[0], stats.mac_in_errs[1]); 6972 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 6973 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 6974 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 6975 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 6976 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 6977 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 6978 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 6979 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 6980 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 6981 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 6982 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 6983 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 6984 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 6985 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 6986 } 6987 6988 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 6989 stats.ofld_no_neigh, stats.ofld_cong_defer); 6990 6991 rc = sbuf_finish(sb); 6992 sbuf_delete(sb); 6993 6994 return (rc); 6995 } 6996 6997 struct field_desc { 6998 const char *name; 6999 u_int start; 7000 u_int width; 7001 }; 7002 7003 static void 7004 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7005 { 7006 char buf[32]; 7007 int line_size = 0; 7008 7009 while (f->name) { 7010 uint64_t mask = (1ULL << f->width) - 1; 7011 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7012 ((uintmax_t)v >> f->start) & mask); 7013 7014 if (line_size + len >= 79) { 7015 line_size = 8; 7016 sbuf_printf(sb, "\n "); 7017 } 7018 sbuf_printf(sb, "%s ", buf); 7019 line_size += len + 1; 7020 f++; 7021 } 7022 sbuf_printf(sb, "\n"); 7023 } 7024 7025 static const struct field_desc tp_la0[] = { 7026 { "RcfOpCodeOut", 60, 4 }, 7027 { "State", 56, 4 }, 7028 { "WcfState", 52, 4 }, 7029 { "RcfOpcSrcOut", 50, 2 }, 7030 { "CRxError", 49, 1 }, 7031 { "ERxError", 48, 1 }, 7032 { "SanityFailed", 47, 1 }, 7033 { "SpuriousMsg", 46, 1 }, 7034 { "FlushInputMsg", 45, 1 }, 7035 { "FlushInputCpl", 44, 1 }, 7036 { "RssUpBit", 43, 1 }, 7037 { "RssFilterHit", 42, 1 }, 7038 { "Tid", 32, 10 }, 7039 { "InitTcb", 31, 1 }, 7040 { "LineNumber", 24, 7 }, 7041 { "Emsg", 23, 1 }, 7042 { "EdataOut", 22, 1 }, 7043 { "Cmsg", 21, 1 }, 7044 { "CdataOut", 20, 1 }, 7045 { "EreadPdu", 19, 1 }, 7046 { "CreadPdu", 18, 1 }, 7047 { "TunnelPkt", 17, 1 }, 7048 { "RcfPeerFin", 16, 1 }, 7049 { "RcfReasonOut", 12, 4 }, 7050 { "TxCchannel", 10, 2 }, 7051 { "RcfTxChannel", 8, 2 }, 7052 { "RxEchannel", 6, 2 }, 7053 { "RcfRxChannel", 5, 1 }, 7054 { "RcfDataOutSrdy", 4, 1 }, 7055 { "RxDvld", 3, 1 }, 7056 { "RxOoDvld", 2, 1 }, 7057 { "RxCongestion", 1, 1 }, 7058 { "TxCongestion", 0, 1 }, 7059 { NULL } 7060 }; 7061 7062 static const struct field_desc tp_la1[] = { 7063 { "CplCmdIn", 56, 8 }, 7064 { "CplCmdOut", 48, 8 }, 7065 { "ESynOut", 47, 1 }, 7066 { "EAckOut", 46, 1 }, 7067 { "EFinOut", 45, 1 }, 7068 { "ERstOut", 44, 1 }, 7069 { "SynIn", 43, 1 }, 7070 { "AckIn", 42, 1 }, 7071 { "FinIn", 41, 1 }, 7072 { "RstIn", 40, 1 }, 7073 { "DataIn", 39, 1 }, 7074 { "DataInVld", 38, 1 }, 7075 { "PadIn", 37, 1 }, 7076 { "RxBufEmpty", 36, 1 }, 7077 { "RxDdp", 35, 1 }, 7078 { "RxFbCongestion", 34, 1 }, 7079 { 
"TxFbCongestion", 33, 1 }, 7080 { "TxPktSumSrdy", 32, 1 }, 7081 { "RcfUlpType", 28, 4 }, 7082 { "Eread", 27, 1 }, 7083 { "Ebypass", 26, 1 }, 7084 { "Esave", 25, 1 }, 7085 { "Static0", 24, 1 }, 7086 { "Cread", 23, 1 }, 7087 { "Cbypass", 22, 1 }, 7088 { "Csave", 21, 1 }, 7089 { "CPktOut", 20, 1 }, 7090 { "RxPagePoolFull", 18, 2 }, 7091 { "RxLpbkPkt", 17, 1 }, 7092 { "TxLpbkPkt", 16, 1 }, 7093 { "RxVfValid", 15, 1 }, 7094 { "SynLearned", 14, 1 }, 7095 { "SetDelEntry", 13, 1 }, 7096 { "SetInvEntry", 12, 1 }, 7097 { "CpcmdDvld", 11, 1 }, 7098 { "CpcmdSave", 10, 1 }, 7099 { "RxPstructsFull", 8, 2 }, 7100 { "EpcmdDvld", 7, 1 }, 7101 { "EpcmdFlush", 6, 1 }, 7102 { "EpcmdTrimPrefix", 5, 1 }, 7103 { "EpcmdTrimPostfix", 4, 1 }, 7104 { "ERssIp4Pkt", 3, 1 }, 7105 { "ERssIp6Pkt", 2, 1 }, 7106 { "ERssTcpUdpPkt", 1, 1 }, 7107 { "ERssFceFipPkt", 0, 1 }, 7108 { NULL } 7109 }; 7110 7111 static const struct field_desc tp_la2[] = { 7112 { "CplCmdIn", 56, 8 }, 7113 { "MpsVfVld", 55, 1 }, 7114 { "MpsPf", 52, 3 }, 7115 { "MpsVf", 44, 8 }, 7116 { "SynIn", 43, 1 }, 7117 { "AckIn", 42, 1 }, 7118 { "FinIn", 41, 1 }, 7119 { "RstIn", 40, 1 }, 7120 { "DataIn", 39, 1 }, 7121 { "DataInVld", 38, 1 }, 7122 { "PadIn", 37, 1 }, 7123 { "RxBufEmpty", 36, 1 }, 7124 { "RxDdp", 35, 1 }, 7125 { "RxFbCongestion", 34, 1 }, 7126 { "TxFbCongestion", 33, 1 }, 7127 { "TxPktSumSrdy", 32, 1 }, 7128 { "RcfUlpType", 28, 4 }, 7129 { "Eread", 27, 1 }, 7130 { "Ebypass", 26, 1 }, 7131 { "Esave", 25, 1 }, 7132 { "Static0", 24, 1 }, 7133 { "Cread", 23, 1 }, 7134 { "Cbypass", 22, 1 }, 7135 { "Csave", 21, 1 }, 7136 { "CPktOut", 20, 1 }, 7137 { "RxPagePoolFull", 18, 2 }, 7138 { "RxLpbkPkt", 17, 1 }, 7139 { "TxLpbkPkt", 16, 1 }, 7140 { "RxVfValid", 15, 1 }, 7141 { "SynLearned", 14, 1 }, 7142 { "SetDelEntry", 13, 1 }, 7143 { "SetInvEntry", 12, 1 }, 7144 { "CpcmdDvld", 11, 1 }, 7145 { "CpcmdSave", 10, 1 }, 7146 { "RxPstructsFull", 8, 2 }, 7147 { "EpcmdDvld", 7, 1 }, 7148 { "EpcmdFlush", 6, 1 }, 7149 { "EpcmdTrimPrefix", 5, 1 }, 7150 { "EpcmdTrimPostfix", 4, 1 }, 7151 { "ERssIp4Pkt", 3, 1 }, 7152 { "ERssIp6Pkt", 2, 1 }, 7153 { "ERssTcpUdpPkt", 1, 1 }, 7154 { "ERssFceFipPkt", 0, 1 }, 7155 { NULL } 7156 }; 7157 7158 static void 7159 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7160 { 7161 7162 field_desc_show(sb, *p, tp_la0); 7163 } 7164 7165 static void 7166 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7167 { 7168 7169 if (idx) 7170 sbuf_printf(sb, "\n"); 7171 field_desc_show(sb, p[0], tp_la0); 7172 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7173 field_desc_show(sb, p[1], tp_la0); 7174 } 7175 7176 static void 7177 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7178 { 7179 7180 if (idx) 7181 sbuf_printf(sb, "\n"); 7182 field_desc_show(sb, p[0], tp_la0); 7183 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7184 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7185 } 7186 7187 static int 7188 sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7189 { 7190 struct adapter *sc = arg1; 7191 struct sbuf *sb; 7192 uint64_t *buf, *p; 7193 int rc; 7194 u_int i, inc; 7195 void (*show_func)(struct sbuf *, uint64_t *, int); 7196 7197 rc = sysctl_wire_old_buffer(req, 0); 7198 if (rc != 0) 7199 return (rc); 7200 7201 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7202 if (sb == NULL) 7203 return (ENOMEM); 7204 7205 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7206 7207 t4_tp_read_la(sc, buf, NULL); 7208 p = buf; 7209 7210 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7211 case 2: 7212 inc = 2; 7213 show_func = tp_la_show2; 7214 break; 7215 case 3: 7216 inc = 2; 7217 show_func = tp_la_show3; 7218 break; 7219 default: 7220 inc = 1; 7221 show_func = tp_la_show; 7222 } 7223 7224 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7225 (*show_func)(sb, p, i); 7226 7227 rc = sbuf_finish(sb); 7228 sbuf_delete(sb); 7229 free(buf, M_CXGBE); 7230 return (rc); 7231 } 7232 7233 static int 7234 sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7235 { 7236 struct adapter *sc = arg1; 7237 struct sbuf *sb; 7238 int rc; 7239 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7240 7241 rc = sysctl_wire_old_buffer(req, 0); 7242 if (rc != 0) 7243 return (rc); 7244 7245 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7246 if (sb == NULL) 7247 return (ENOMEM); 7248 7249 t4_get_chan_txrate(sc, nrate, orate); 7250 7251 if (sc->chip_params->nchan > 2) { 7252 sbuf_printf(sb, " channel 0 channel 1" 7253 " channel 2 channel 3\n"); 7254 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7255 nrate[0], nrate[1], nrate[2], nrate[3]); 7256 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7257 orate[0], orate[1], orate[2], orate[3]); 7258 } else { 7259 sbuf_printf(sb, " channel 0 channel 1\n"); 7260 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7261 nrate[0], nrate[1]); 7262 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7263 orate[0], orate[1]); 7264 } 7265 7266 rc = sbuf_finish(sb); 7267 sbuf_delete(sb); 7268 7269 return (rc); 7270 } 7271 7272 static int 7273 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7274 { 7275 struct adapter *sc = arg1; 7276 struct sbuf *sb; 7277 uint32_t *buf, *p; 7278 int rc, i; 7279 7280 rc = sysctl_wire_old_buffer(req, 0); 7281 if (rc != 0) 7282 return (rc); 7283 7284 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7285 if (sb == NULL) 7286 return (ENOMEM); 7287 7288 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 7289 M_ZERO | M_WAITOK); 7290 7291 t4_ulprx_read_la(sc, buf); 7292 p = buf; 7293 7294 sbuf_printf(sb, " Pcmd Type Message" 7295 " Data"); 7296 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 7297 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 7298 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 7299 } 7300 7301 rc = sbuf_finish(sb); 7302 sbuf_delete(sb); 7303 free(buf, M_CXGBE); 7304 return (rc); 7305 } 7306 7307 static int 7308 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 7309 { 7310 struct adapter *sc = arg1; 7311 struct sbuf *sb; 7312 int rc, v; 7313 7314 rc = sysctl_wire_old_buffer(req, 0); 7315 if (rc != 0) 7316 return (rc); 7317 7318 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7319 if (sb == NULL) 7320 return (ENOMEM); 7321 7322 v = t4_read_reg(sc, A_SGE_STAT_CFG); 7323 if (G_STATSOURCE_T5(v) == 7) { 7324 if (G_STATMODE(v) == 0) { 7325 sbuf_printf(sb, "total %d, incomplete %d", 7326 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7327 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7328 } else if (G_STATMODE(v) == 1) { 7329 sbuf_printf(sb, 
"total %d, data overflow %d", 7330 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7331 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7332 } 7333 } 7334 rc = sbuf_finish(sb); 7335 sbuf_delete(sb); 7336 7337 return (rc); 7338 } 7339 #endif 7340 7341 static uint32_t 7342 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 7343 { 7344 uint32_t mode; 7345 7346 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 7347 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 7348 7349 if (fconf & F_FRAGMENTATION) 7350 mode |= T4_FILTER_IP_FRAGMENT; 7351 7352 if (fconf & F_MPSHITTYPE) 7353 mode |= T4_FILTER_MPS_HIT_TYPE; 7354 7355 if (fconf & F_MACMATCH) 7356 mode |= T4_FILTER_MAC_IDX; 7357 7358 if (fconf & F_ETHERTYPE) 7359 mode |= T4_FILTER_ETH_TYPE; 7360 7361 if (fconf & F_PROTOCOL) 7362 mode |= T4_FILTER_IP_PROTO; 7363 7364 if (fconf & F_TOS) 7365 mode |= T4_FILTER_IP_TOS; 7366 7367 if (fconf & F_VLAN) 7368 mode |= T4_FILTER_VLAN; 7369 7370 if (fconf & F_VNIC_ID) { 7371 mode |= T4_FILTER_VNIC; 7372 if (iconf & F_VNIC) 7373 mode |= T4_FILTER_IC_VNIC; 7374 } 7375 7376 if (fconf & F_PORT) 7377 mode |= T4_FILTER_PORT; 7378 7379 if (fconf & F_FCOE) 7380 mode |= T4_FILTER_FCoE; 7381 7382 return (mode); 7383 } 7384 7385 static uint32_t 7386 mode_to_fconf(uint32_t mode) 7387 { 7388 uint32_t fconf = 0; 7389 7390 if (mode & T4_FILTER_IP_FRAGMENT) 7391 fconf |= F_FRAGMENTATION; 7392 7393 if (mode & T4_FILTER_MPS_HIT_TYPE) 7394 fconf |= F_MPSHITTYPE; 7395 7396 if (mode & T4_FILTER_MAC_IDX) 7397 fconf |= F_MACMATCH; 7398 7399 if (mode & T4_FILTER_ETH_TYPE) 7400 fconf |= F_ETHERTYPE; 7401 7402 if (mode & T4_FILTER_IP_PROTO) 7403 fconf |= F_PROTOCOL; 7404 7405 if (mode & T4_FILTER_IP_TOS) 7406 fconf |= F_TOS; 7407 7408 if (mode & T4_FILTER_VLAN) 7409 fconf |= F_VLAN; 7410 7411 if (mode & T4_FILTER_VNIC) 7412 fconf |= F_VNIC_ID; 7413 7414 if (mode & T4_FILTER_PORT) 7415 fconf |= F_PORT; 7416 7417 if (mode & T4_FILTER_FCoE) 7418 fconf |= F_FCOE; 7419 7420 return (fconf); 7421 } 7422 7423 static uint32_t 7424 mode_to_iconf(uint32_t mode) 7425 { 7426 7427 if (mode & T4_FILTER_IC_VNIC) 7428 return (F_VNIC); 7429 return (0); 7430 } 7431 7432 static int check_fspec_against_fconf_iconf(struct adapter *sc, 7433 struct t4_filter_specification *fs) 7434 { 7435 struct tp_params *tpp = &sc->params.tp; 7436 uint32_t fconf = 0; 7437 7438 if (fs->val.frag || fs->mask.frag) 7439 fconf |= F_FRAGMENTATION; 7440 7441 if (fs->val.matchtype || fs->mask.matchtype) 7442 fconf |= F_MPSHITTYPE; 7443 7444 if (fs->val.macidx || fs->mask.macidx) 7445 fconf |= F_MACMATCH; 7446 7447 if (fs->val.ethtype || fs->mask.ethtype) 7448 fconf |= F_ETHERTYPE; 7449 7450 if (fs->val.proto || fs->mask.proto) 7451 fconf |= F_PROTOCOL; 7452 7453 if (fs->val.tos || fs->mask.tos) 7454 fconf |= F_TOS; 7455 7456 if (fs->val.vlan_vld || fs->mask.vlan_vld) 7457 fconf |= F_VLAN; 7458 7459 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 7460 fconf |= F_VNIC_ID; 7461 if (tpp->ingress_config & F_VNIC) 7462 return (EINVAL); 7463 } 7464 7465 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 7466 fconf |= F_VNIC_ID; 7467 if ((tpp->ingress_config & F_VNIC) == 0) 7468 return (EINVAL); 7469 } 7470 7471 if (fs->val.iport || fs->mask.iport) 7472 fconf |= F_PORT; 7473 7474 if (fs->val.fcoe || fs->mask.fcoe) 7475 fconf |= F_FCOE; 7476 7477 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 7478 return (E2BIG); 7479 7480 return (0); 7481 } 7482 7483 static int 7484 get_filter_mode(struct adapter *sc, uint32_t *mode) 7485 { 7486 struct tp_params *tpp = &sc->params.tp; 7487 7488 /* 7489 
* We trust the cached values of the relevant TP registers. This means 7490 * things work reliably only if writes to those registers are always via 7491 * t4_set_filter_mode. 7492 */ 7493 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 7494 7495 return (0); 7496 } 7497 7498 static int 7499 set_filter_mode(struct adapter *sc, uint32_t mode) 7500 { 7501 struct tp_params *tpp = &sc->params.tp; 7502 uint32_t fconf, iconf; 7503 int rc; 7504 7505 iconf = mode_to_iconf(mode); 7506 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 7507 /* 7508 * For now we just complain if A_TP_INGRESS_CONFIG is not 7509 * already set to the correct value for the requested filter 7510 * mode. It's not clear if it's safe to write to this register 7511 * on the fly. (And we trust the cached value of the register). 7512 */ 7513 return (EBUSY); 7514 } 7515 7516 fconf = mode_to_fconf(mode); 7517 7518 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7519 "t4setfm"); 7520 if (rc) 7521 return (rc); 7522 7523 if (sc->tids.ftids_in_use > 0) { 7524 rc = EBUSY; 7525 goto done; 7526 } 7527 7528 #ifdef TCP_OFFLOAD 7529 if (uld_active(sc, ULD_TOM)) { 7530 rc = EBUSY; 7531 goto done; 7532 } 7533 #endif 7534 7535 rc = -t4_set_filter_mode(sc, fconf); 7536 done: 7537 end_synchronized_op(sc, LOCK_HELD); 7538 return (rc); 7539 } 7540 7541 static inline uint64_t 7542 get_filter_hits(struct adapter *sc, uint32_t fid) 7543 { 7544 uint32_t tcb_addr; 7545 7546 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 7547 (fid + sc->tids.ftid_base) * TCB_SIZE; 7548 7549 if (is_t4(sc)) { 7550 uint64_t hits; 7551 7552 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 7553 return (be64toh(hits)); 7554 } else { 7555 uint32_t hits; 7556 7557 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 7558 return (be32toh(hits)); 7559 } 7560 } 7561 7562 static int 7563 get_filter(struct adapter *sc, struct t4_filter *t) 7564 { 7565 int i, rc, nfilters = sc->tids.nftids; 7566 struct filter_entry *f; 7567 7568 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7569 "t4getf"); 7570 if (rc) 7571 return (rc); 7572 7573 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 7574 t->idx >= nfilters) { 7575 t->idx = 0xffffffff; 7576 goto done; 7577 } 7578 7579 f = &sc->tids.ftid_tab[t->idx]; 7580 for (i = t->idx; i < nfilters; i++, f++) { 7581 if (f->valid) { 7582 t->idx = i; 7583 t->l2tidx = f->l2t ? 
f->l2t->idx : 0; 7584 t->smtidx = f->smtidx; 7585 if (f->fs.hitcnts) 7586 t->hits = get_filter_hits(sc, t->idx); 7587 else 7588 t->hits = UINT64_MAX; 7589 t->fs = f->fs; 7590 7591 goto done; 7592 } 7593 } 7594 7595 t->idx = 0xffffffff; 7596 done: 7597 end_synchronized_op(sc, LOCK_HELD); 7598 return (0); 7599 } 7600 7601 static int 7602 set_filter(struct adapter *sc, struct t4_filter *t) 7603 { 7604 unsigned int nfilters, nports; 7605 struct filter_entry *f; 7606 int i, rc; 7607 7608 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 7609 if (rc) 7610 return (rc); 7611 7612 nfilters = sc->tids.nftids; 7613 nports = sc->params.nports; 7614 7615 if (nfilters == 0) { 7616 rc = ENOTSUP; 7617 goto done; 7618 } 7619 7620 if (!(sc->flags & FULL_INIT_DONE)) { 7621 rc = EAGAIN; 7622 goto done; 7623 } 7624 7625 if (t->idx >= nfilters) { 7626 rc = EINVAL; 7627 goto done; 7628 } 7629 7630 /* Validate against the global filter mode and ingress config */ 7631 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 7632 if (rc != 0) 7633 goto done; 7634 7635 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 7636 rc = EINVAL; 7637 goto done; 7638 } 7639 7640 if (t->fs.val.iport >= nports) { 7641 rc = EINVAL; 7642 goto done; 7643 } 7644 7645 /* Can't specify an iq if not steering to it */ 7646 if (!t->fs.dirsteer && t->fs.iq) { 7647 rc = EINVAL; 7648 goto done; 7649 } 7650 7651 /* An IPv6 filter occupies 4 consecutive slots (idx .. idx + 3), so idx must be 4-aligned and leave room for all four */ 7652 if (t->fs.type == 1 && 7653 ((t->idx & 0x3) || t->idx + 4 > nfilters)) { 7654 rc = EINVAL; 7655 goto done; 7656 } 7657 7658 if (sc->tids.ftid_tab == NULL) { 7659 KASSERT(sc->tids.ftids_in_use == 0, 7660 ("%s: no memory allocated but filters_in_use > 0", 7661 __func__)); 7662 7663 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 7664 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 7665 if (sc->tids.ftid_tab == NULL) { 7666 rc = ENOMEM; 7667 goto done; 7668 } 7669 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 7670 } 7671 7672 for (i = 0; i < 4; i++) { 7673 f = &sc->tids.ftid_tab[t->idx + i]; 7674 7675 if (f->pending || f->valid) { 7676 rc = EBUSY; 7677 goto done; 7678 } 7679 if (f->locked) { 7680 rc = EPERM; 7681 goto done; 7682 } 7683 7684 if (t->fs.type == 0) 7685 break; 7686 } 7687 7688 f = &sc->tids.ftid_tab[t->idx]; 7689 f->fs = t->fs; 7690 7691 rc = set_filter_wr(sc, t->idx); 7692 done: 7693 end_synchronized_op(sc, 0); 7694 7695 if (rc == 0) { 7696 mtx_lock(&sc->tids.ftid_lock); 7697 for (;;) { 7698 if (f->pending == 0) { 7699 rc = f->valid ? 
0 : EIO; 7700 break; 7701 } 7702 7703 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7704 PCATCH, "t4setfw", 0)) { 7705 rc = EINPROGRESS; 7706 break; 7707 } 7708 } 7709 mtx_unlock(&sc->tids.ftid_lock); 7710 } 7711 return (rc); 7712 } 7713 7714 static int 7715 del_filter(struct adapter *sc, struct t4_filter *t) 7716 { 7717 unsigned int nfilters; 7718 struct filter_entry *f; 7719 int rc; 7720 7721 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 7722 if (rc) 7723 return (rc); 7724 7725 nfilters = sc->tids.nftids; 7726 7727 if (nfilters == 0) { 7728 rc = ENOTSUP; 7729 goto done; 7730 } 7731 7732 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 7733 t->idx >= nfilters) { 7734 rc = EINVAL; 7735 goto done; 7736 } 7737 7738 if (!(sc->flags & FULL_INIT_DONE)) { 7739 rc = EAGAIN; 7740 goto done; 7741 } 7742 7743 f = &sc->tids.ftid_tab[t->idx]; 7744 7745 if (f->pending) { 7746 rc = EBUSY; 7747 goto done; 7748 } 7749 if (f->locked) { 7750 rc = EPERM; 7751 goto done; 7752 } 7753 7754 if (f->valid) { 7755 t->fs = f->fs; /* extra info for the caller */ 7756 rc = del_filter_wr(sc, t->idx); 7757 } 7758 7759 done: 7760 end_synchronized_op(sc, 0); 7761 7762 if (rc == 0) { 7763 mtx_lock(&sc->tids.ftid_lock); 7764 for (;;) { 7765 if (f->pending == 0) { 7766 rc = f->valid ? EIO : 0; 7767 break; 7768 } 7769 7770 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7771 PCATCH, "t4delfw", 0)) { 7772 rc = EINPROGRESS; 7773 break; 7774 } 7775 } 7776 mtx_unlock(&sc->tids.ftid_lock); 7777 } 7778 7779 return (rc); 7780 } 7781 7782 static void 7783 clear_filter(struct filter_entry *f) 7784 { 7785 if (f->l2t) 7786 t4_l2t_release(f->l2t); 7787 7788 bzero(f, sizeof (*f)); 7789 } 7790 7791 static int 7792 set_filter_wr(struct adapter *sc, int fidx) 7793 { 7794 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 7795 struct fw_filter_wr *fwr; 7796 unsigned int ftid, vnic_vld, vnic_vld_mask; 7797 struct wrq_cookie cookie; 7798 7799 ASSERT_SYNCHRONIZED_OP(sc); 7800 7801 if (f->fs.newdmac || f->fs.newvlan) { 7802 /* This filter needs an L2T entry; allocate one. 
*/ 7803 f->l2t = t4_l2t_alloc_switching(sc->l2t); 7804 if (f->l2t == NULL) 7805 return (EAGAIN); 7806 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 7807 f->fs.dmac)) { 7808 t4_l2t_release(f->l2t); 7809 f->l2t = NULL; 7810 return (ENOMEM); 7811 } 7812 } 7813 7814 /* Already validated against fconf, iconf */ 7815 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 7816 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 7817 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 7818 vnic_vld = 1; 7819 else 7820 vnic_vld = 0; 7821 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 7822 vnic_vld_mask = 1; 7823 else 7824 vnic_vld_mask = 0; 7825 7826 ftid = sc->tids.ftid_base + fidx; 7827 7828 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 7829 if (fwr == NULL) 7830 return (ENOMEM); 7831 bzero(fwr, sizeof(*fwr)); 7832 7833 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 7834 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 7835 fwr->tid_to_iq = 7836 htobe32(V_FW_FILTER_WR_TID(ftid) | 7837 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 7838 V_FW_FILTER_WR_NOREPLY(0) | 7839 V_FW_FILTER_WR_IQ(f->fs.iq)); 7840 fwr->del_filter_to_l2tix = 7841 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 7842 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 7843 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 7844 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 7845 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 7846 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 7847 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 7848 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 7849 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 7850 f->fs.newvlan == VLAN_REWRITE) | 7851 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 7852 f->fs.newvlan == VLAN_REWRITE) | 7853 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 7854 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 7855 V_FW_FILTER_WR_PRIO(f->fs.prio) | 7856 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 7857 fwr->ethtype = htobe16(f->fs.val.ethtype); 7858 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 7859 fwr->frag_to_ovlan_vldm = 7860 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 7861 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 7862 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 7863 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 7864 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 7865 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 7866 fwr->smac_sel = 0; 7867 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 7868 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 7869 fwr->maci_to_matchtypem = 7870 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 7871 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 7872 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 7873 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 7874 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 7875 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 7876 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 7877 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 7878 fwr->ptcl = f->fs.val.proto; 7879 fwr->ptclm = f->fs.mask.proto; 7880 fwr->ttyp = f->fs.val.tos; 7881 fwr->ttypm = f->fs.mask.tos; 7882 fwr->ivlan = htobe16(f->fs.val.vlan); 7883 fwr->ivlanm = htobe16(f->fs.mask.vlan); 7884 fwr->ovlan = htobe16(f->fs.val.vnic); 7885 fwr->ovlanm = htobe16(f->fs.mask.vnic); 7886 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 7887 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 7888 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 7889 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 7890 fwr->lp = htobe16(f->fs.val.dport); 7891 fwr->lpm = htobe16(f->fs.mask.dport); 7892 fwr->fp = htobe16(f->fs.val.sport); 7893 fwr->fpm = htobe16(f->fs.mask.sport); 7894 if (f->fs.newsmac) 7895 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 7896 7897 f->pending = 1; 7898 sc->tids.ftids_in_use++; 7899 7900 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 7901 return (0); 7902 } 7903 7904 static int 7905 del_filter_wr(struct adapter *sc, int fidx) 7906 { 7907 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 7908 struct fw_filter_wr *fwr; 7909 unsigned int ftid; 7910 struct wrq_cookie cookie; 7911 7912 ftid = sc->tids.ftid_base + fidx; 7913 7914 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 7915 if (fwr == NULL) 7916 return (ENOMEM); 7917 bzero(fwr, sizeof (*fwr)); 7918 7919 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 7920 7921 f->pending = 1; 7922 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 7923 return (0); 7924 } 7925 7926 int 7927 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 7928 { 7929 struct adapter *sc = iq->adapter; 7930 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 7931 unsigned int idx = GET_TID(rpl); 7932 unsigned int rc; 7933 struct filter_entry *f; 7934 7935 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 7936 rss->opcode)); 7937 7938 if (is_ftid(sc, idx)) { 7939 7940 idx -= sc->tids.ftid_base; 7941 f = &sc->tids.ftid_tab[idx]; 7942 rc = G_COOKIE(rpl->cookie); 7943 7944 mtx_lock(&sc->tids.ftid_lock); 7945 if (rc == FW_FILTER_WR_FLT_ADDED) { 7946 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 7947 __func__, idx)); 7948 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 7949 f->pending = 0; /* asynchronous setup completed */ 7950 f->valid = 1; 7951 } else { 7952 if (rc != FW_FILTER_WR_FLT_DELETED) { 7953 /* Add or delete failed, display an error */ 7954 log(LOG_ERR, 7955 "filter %u setup failed with error %u\n", 7956 idx, rc); 7957 } 7958 7959 clear_filter(f); 7960 
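/* Whether the firmware reported a failed add or a completed delete, the filter slot is no longer in use. */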
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);
	unsigned int rc;
	struct filter_entry *f;

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (is_ftid(sc, idx)) {

		idx -= sc->tids.ftid_base;
		f = &sc->tids.ftid_tab[idx];
		rc = G_COOKIE(rpl->cookie);

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;	/* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}

static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly
	 * via the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

#define MAX_READ_BUF_SIZE (128 * 1024)
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, remaining, n;
	uint32_t *buf;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		n = min(remaining, MAX_READ_BUF_SIZE);
		read_via_memwin(sc, 2, addr, buf, n);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
#undef MAX_READ_BUF_SIZE

static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
	int rc;

	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
		return (EINVAL);

	if (i2cd->len > sizeof(i2cd->data))
		return (EFBIG);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
	if (rc)
		return (rc);
	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, i2cd->len, &i2cd->data[0]);
	end_synchronized_op(sc, 0);

	return (rc);
}

static int
in_range(int val, int lo, int hi)
{

	return (val < 0 || (val <= hi && val >= lo));
}

static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}
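
	/*
	 * Illustrative only: this request normally originates from userland
	 * via cxgbetool's scheduler commands issued against the nexus
	 * device.  The exact tool syntax is not defined in this file and is
	 * mentioned purely as a pointer.
	 */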
	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.
	 * (The sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ... */
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ... */
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Vet our parameters ... */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, sc->chip_params->nsched_cls) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * are required ...
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}
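
		/*
		 * Note: a negative value in any of the fields above means
		 * "unset" (in_range() deliberately treats negative values as
		 * in range).  The trailing 1 in the firmware calls in this
		 * function is the sleep_ok flag for the shared mailbox code.
		 */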
		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize, 1);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct vi_info *vi;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	/* XXX: Only supported for the main VI. */
	pi = sc->port[p->port];
	vi = &pi->vi[0];
	if (!in_range(p->queue, 0, vi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If p->queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[vi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the
	 * interface.
	 */
	for_each_txq(vi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
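
/*
 * The t4_os_* functions below are the OS-facing shims that the shared
 * Chelsio common code calls into; on FreeBSD they map those requests onto
 * the native PCI and ifnet interfaces.
 */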
int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}

int
t4_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t4_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
	struct port_info *pi = sc->port[idx];
	struct vi_info *vi;
	struct ifnet *ifp;
	int v;
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	for_each_vi(pi, v, vi) {
		build_medialist(pi, &vi->media);
	}

	ifp = pi->vi[0].ifp;
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(ifp, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	} else {
		if_printf(ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}

void
t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
{
	struct port_info *pi = sc->port[idx];
	struct vi_info *vi;
	struct ifnet *ifp;
	int v;

	if (link_stat)
		pi->linkdnrc = -1;
	else {
		if (reason >= 0)
			pi->linkdnrc = reason;
	}
	for_each_vi(pi, v, vi) {
		ifp = vi->ifp;
		if (ifp == NULL)
			continue;

		if (link_stat) {
			ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}
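
/*
 * Illustrative use of t4_iterate() (sketch only; count_adapters is a
 * hypothetical callback, not part of this driver):
 *
 *	static void
 *	count_adapters(struct adapter *sc, void *arg)
 *	{
 *
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_adapters, &n);
 */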
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}

static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i, v;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;
		struct vi_info *vi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;
		mtx_lock(&sc->reg_lock);
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE)
				t4_clr_vi_stats(sc, vi->viid);
		}
		mtx_unlock(&sc->reg_lock);
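
		/*
		 * The register accesses above clear the hardware MAC and VI
		 * counters; the loop below resets the software counters that
		 * the driver keeps per queue.
		 */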
8546 */ 8547 for_each_vi(pi, v, vi) { 8548 if (vi->flags & VI_INIT_DONE) { 8549 struct sge_rxq *rxq; 8550 struct sge_txq *txq; 8551 struct sge_wrq *wrq; 8552 8553 if (vi->flags & VI_NETMAP) 8554 continue; 8555 8556 for_each_rxq(vi, i, rxq) { 8557 #if defined(INET) || defined(INET6) 8558 rxq->lro.lro_queued = 0; 8559 rxq->lro.lro_flushed = 0; 8560 #endif 8561 rxq->rxcsum = 0; 8562 rxq->vlan_extraction = 0; 8563 } 8564 8565 for_each_txq(vi, i, txq) { 8566 txq->txcsum = 0; 8567 txq->tso_wrs = 0; 8568 txq->vlan_insertion = 0; 8569 txq->imm_wrs = 0; 8570 txq->sgl_wrs = 0; 8571 txq->txpkt_wrs = 0; 8572 txq->txpkts0_wrs = 0; 8573 txq->txpkts1_wrs = 0; 8574 txq->txpkts0_pkts = 0; 8575 txq->txpkts1_pkts = 0; 8576 mp_ring_reset_stats(txq->r); 8577 } 8578 8579 #ifdef TCP_OFFLOAD 8580 /* nothing to clear for each ofld_rxq */ 8581 8582 for_each_ofld_txq(vi, i, wrq) { 8583 wrq->tx_wrs_direct = 0; 8584 wrq->tx_wrs_copied = 0; 8585 } 8586 #endif 8587 8588 if (IS_MAIN_VI(vi)) { 8589 wrq = &sc->sge.ctrlq[pi->port_id]; 8590 wrq->tx_wrs_direct = 0; 8591 wrq->tx_wrs_copied = 0; 8592 } 8593 } 8594 } 8595 break; 8596 } 8597 case CHELSIO_T4_SCHED_CLASS: 8598 rc = set_sched_class(sc, (struct t4_sched_params *)data); 8599 break; 8600 case CHELSIO_T4_SCHED_QUEUE: 8601 rc = set_sched_queue(sc, (struct t4_sched_queue *)data); 8602 break; 8603 case CHELSIO_T4_GET_TRACER: 8604 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 8605 break; 8606 case CHELSIO_T4_SET_TRACER: 8607 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 8608 break; 8609 default: 8610 rc = EINVAL; 8611 } 8612 8613 return (rc); 8614 } 8615 8616 void 8617 t4_db_full(struct adapter *sc) 8618 { 8619 8620 CXGBE_UNIMPLEMENTED(__func__); 8621 } 8622 8623 void 8624 t4_db_dropped(struct adapter *sc) 8625 { 8626 8627 CXGBE_UNIMPLEMENTED(__func__); 8628 } 8629 8630 #ifdef TCP_OFFLOAD 8631 void 8632 t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order) 8633 { 8634 8635 t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask); 8636 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) | 8637 V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) | 8638 V_HPZ3(pgsz_order[3])); 8639 } 8640 8641 static int 8642 toe_capability(struct vi_info *vi, int enable) 8643 { 8644 int rc; 8645 struct port_info *pi = vi->pi; 8646 struct adapter *sc = pi->adapter; 8647 8648 ASSERT_SYNCHRONIZED_OP(sc); 8649 8650 if (!is_offload(sc)) 8651 return (ENODEV); 8652 8653 if (enable) { 8654 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 8655 /* TOE is already enabled. */ 8656 return (0); 8657 } 8658 8659 /* 8660 * We need the port's queues around so that we're able to send 8661 * and receive CPLs to/from the TOE even if the ifnet for this 8662 * port has never been UP'd administratively. 8663 */ 8664 if (!(vi->flags & VI_INIT_DONE)) { 8665 rc = cxgbe_init_synchronized(vi); 8666 if (rc) 8667 return (rc); 8668 } 8669 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 8670 rc = cxgbe_init_synchronized(&pi->vi[0]); 8671 if (rc) 8672 return (rc); 8673 } 8674 8675 if (isset(&sc->offload_map, pi->port_id)) { 8676 /* TOE is enabled on another VI of this port. 
		if (isset(&sc->offload_map, pi->port_id)) {
			/* TOE is enabled on another VI of this port. */
			pi->uld_vis++;
			return (0);
		}

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		pi->uld_vis++;
		setbit(&sc->offload_map, pi->port_id);
	} else {
		pi->uld_vis--;

		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}

/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}
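
/*
 * Illustrative only: an upper layer driver (e.g. t4_tom) would register
 * itself roughly like this.  The callback names below are hypothetical and
 * not defined in this file:
 *
 *	static struct uld_info tom_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = tom_activate,
 *		.deactivate = tom_deactivate,
 *	};
 *
 *	error = t4_register_uld(&tom_uld_info);
 */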
int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
#endif
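
/*
 * Illustrative only: the tunables adjusted below can be pinned from
 * loader.conf(5), in which case the user's values are kept as-is, e.g.
 * (example values, not recommendations):
 *
 *	hw.cxgbe.ntxq10g=8
 *	hw.cxgbe.nrxq10g=8
 *	hw.cxgbe.qsize_txq=1024
 */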
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1) {
#ifdef RSS
		t4_ntxq10g = rss_getnumbuckets();
#else
		t4_ntxq10g = min(nc, NTXQ_10G);
#endif
	}

	if (t4_ntxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_ntxq1g = rss_getnumbuckets();
#else
		t4_ntxq1g = min(nc, NTXQ_1G);
#endif
	}

	if (t4_nrxq10g < 1) {
#ifdef RSS
		t4_nrxq10g = rss_getnumbuckets();
#else
		t4_nrxq10g = min(nc, NRXQ_10G);
#endif
	}

	if (t4_nrxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_nrxq1g = rss_getnumbuckets();
#else
		t4_nrxq1g = min(nc, NRXQ_1G);
#endif
	}

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	if (t4_nnmtxq10g < 1)
		t4_nnmtxq10g = min(nc, NNMTXQ_10G);

	if (t4_nnmtxq1g < 1)
		t4_nnmtxq1g = min(nc, NNMTXQ_1G);

	if (t4_nnmrxq10g < 1)
		t4_nnmrxq10g = min(nc, NNMRXQ_10G);

	if (t4_nnmrxq1g < 1)
		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}

static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass;
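
/*
 * Module glue: t4nex/t5nex attach to the PCI bus and own the adapter, the
 * cxgbe/cxl port drivers attach to their respective nexus, and vcxgbe/vcxl
 * attach to a port to provide additional virtual interfaces.
 */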
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);