/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe, cxgbe_probe),
	DEVMETHOD(device_attach, cxgbe_attach),
	DEVMETHOD(device_detach, cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};
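/*
 * Device hierarchy (descriptive note): the t4nex/t5nex nexus driver claims
 * the PCI device and attaches one cxgbe/cxl child per port; each port in
 * turn gets vcxgbe/vcxl children for any additional virtual interfaces.
 */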
/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe, vcxgbe_probe),
	DEVMETHOD(device_attach, vcxgbe_attach),
	DEVMETHOD(device_detach, vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe, t5_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
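/*
 * Example (hypothetical values): these are loader tunables and are typically
 * set in /boot/loader.conf, e.g.
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.config_file="uwire"
 */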
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_10G 2
static int t4_nnmtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq10g", &t4_nnmtxq10g);

#define NNMRXQ_10G 2
static int t4_nnmrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq10g", &t4_nnmrxq10g);

#define NNMTXQ_1G 1
static int t4_nnmtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq1g", &t4_nnmtxq1g);

#define NNMRXQ_1G 1
static int t4_nnmrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq1g", &t4_nnmrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
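/*
 * For example, hw.cxgbe.interrupt_types=6 (MSI | MSI-X) would keep the
 * driver from ever falling back to INTx.
 */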
/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
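/*
 * For example, hw.cxgbe.pause_settings=1 would heed incoming PAUSE frames
 * (rx_pause only) but never emit any; the default enables both directions.
 */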
/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited,
 * allowed, encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that
 * the chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
#endif
#ifdef DEV_NETMAP
	uint16_t nnmtxq10g;	/* # of netmap txq's for each 10G port */
	uint16_t nnmrxq10g;	/* # of netmap rxq's for each 10G port */
	uint16_t nnmtxq1g;	/* # of netmap txq's for each 1G port */
	uint16_t nnmrxq1g;	/* # of netmap rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static void t4_sysctls(struct adapter *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
};
#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root Port.  As a
	 * workaround, find the parent Root Port and disable No Snoop and
	 * Relaxed Ordering.  Note that this affects all devices under this
	 * root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint8_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.debug_flags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}
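	/*
	 * Note: the MPS computed above decodes the PCIe Max_Payload_Size
	 * field, where an encoding of n means 128 << n bytes (e.g. an
	 * encoding of 2 is a 512 byte MPS).
	 */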
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within
	 * PCI passthrough environments too, where pci_get_function() could
	 * return a different PF# depending on the passthrough configuration.
	 * We need to use the real PF# in all our communication with the
	 * firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_T5_TRACE_PKT, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before
	 * the character device.  The userland tool's register i/o and mem
	 * read will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * Number of VIs to create per-port.  The first VI is the "main"
	 * regular VI for the port.  The second VI is used for netmap if
	 * present, and any remaining VIs are used for additional virtual
	 * interfaces.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis >= 1)
		num_vis = t4_num_vis;
	else
		num_vis = 1;
#ifdef DEV_NETMAP
	num_vis++;
#endif
	if (num_vis > nitems(vi_mac_funcs)) {
		num_vis = nitems(vi_mac_funcs);
		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
	}
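	/*
	 * For example, hw.cxgbe.num_vis=2 on a kernel built with DEV_NETMAP
	 * would ask for three VIs per port here: the main VI, the netmap VI,
	 * and one extra VI (subject to the vi_mac_funcs limit above).
	 */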
	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;
		struct vi_info *vi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		pi->nvi = num_vis;
		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.requested_fc |= t4_pause_settings;
		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.fc |= t4_pause_settings;

		rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
		if (rc != 0) {
			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			for_each_vi(pi, j, vi) {
				vi->tmr_idx = t4_tmr_idx_10g;
				vi->pktc_idx = t4_pktc_idx_10g;
			}
		} else {
			n1g++;
			for_each_vi(pi, j, vi) {
				vi->tmr_idx = t4_tmr_idx_1g;
				vi->pktc_idx = t4_pktc_idx_1g;
			}
		}

		pi->linkdnrc = -1;

		for_each_vi(pi, j, vi) {
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;
			vi->pi = pi;
		}

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
#ifdef DEV_NETMAP
	num_vis--;
#endif
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	if (num_vis > 1) {
		s->nrxq += (n10g + n1g) * (num_vis - 1);
		s->ntxq += (n10g + n1g) * (num_vis - 1);
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
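	/*
	 * Worked example, assuming the compile-time defaults on a 2-port 10G
	 * card with a single VI per port: nrxq = 2 * 8 and ntxq = 2 * 16, so
	 * neq = 48 + 3 (one ctrl queue per port + 1 mgmt) and niq = 16 + 1
	 * for the firmware event queue.
	 */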
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		if (num_vis > 1) {
			s->nofldrxq += (n10g + n1g) * (num_vis - 1);
			s->nofldtxq += (n10g + n1g) * (num_vis - 1);
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	s->nnmrxq = n10g * iaq.nnmrxq10g + n1g * iaq.nnmrxq1g;
	s->nnmtxq = n10g * iaq.nnmtxq10g + n1g * iaq.nnmtxq1g;
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		for_each_vi(pi, j, vi) {
#ifdef DEV_NETMAP
			if (j == 1) {
				vi->flags |= VI_NETMAP | INTR_RXQ;
				vi->first_rxq = nm_rqidx;
				vi->first_txq = nm_tqidx;
				if (is_10G_port(pi) || is_40G_port(pi)) {
					vi->nrxq = iaq.nnmrxq10g;
					vi->ntxq = iaq.nnmtxq10g;
				} else {
					vi->nrxq = iaq.nnmrxq1g;
					vi->ntxq = iaq.nnmtxq1g;
				}
				nm_rqidx += vi->nrxq;
				nm_tqidx += vi->ntxq;
				continue;
			}
#endif

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq10g : 1;
				vi->ntxq = j == 0 ? iaq.ntxq10g : 1;
			} else {
				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq1g : 1;
				vi->ntxq = j == 0 ? iaq.ntxq1g : 1;
			}

			if (vi->ntxq > 1)
				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

#ifdef TCP_OFFLOAD
			if (!is_offload(sc))
				continue;
			vi->first_ofld_rxq = ofld_rqidx;
			vi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1;
			} else {
				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1;
			}
			ofld_rqidx += vi->nofldrxq;
			ofld_tqidx += vi->nofldtxq;
#endif
		}
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}
/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}
static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this VI */
	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(vi->pi, &vi->media);

	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, vi->hw_addr);

	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
		    vi->nofldtxq, vi->nofldrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
#ifdef DEV_NETMAP
		if (vi->flags & VI_NETMAP) {
			/*
			 * media handled here to keep
			 * implementation private to this file
			 */
			ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
			    cxgbe_media_status);
			build_medialist(pi, &vi->media);
			vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
			    "ncxgbe" : "ncxl", device_get_unit(dev));
		} else
#endif
			vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
			    "vcxgbe" : "vcxl", -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	if (vi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	ifmedia_removeall(&vi->media);
	if_free(vi->ifp);
	vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);

	end_synchronized_op(sc, 0);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, vi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account
		 * before sending a TSO request our way, so it's sufficient to
		 * toggle IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(&m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);
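	/*
	 * e.g. with ntxq = 4 and rsrv_noflowq = 1, hashed traffic spreads
	 * over txqs 1-3 while txq 0 is reserved for mbufs without a flowid.
	 */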
	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_ENABLED;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
		}
	}
	if_qflush(ifp);
}

static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct fw_vi_stats_vf *s = &vi->stats;

	vi_refresh_stats(vi->pi->adapter, vi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_bcast_frames + s->rx_mcast_frames +
		    s->rx_ucast_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_err_frames);
	case IFCOUNTER_OPACKETS:
		return (s->tx_bcast_frames + s->tx_mcast_frames +
		    s->tx_ucast_frames + s->tx_offload_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_drop_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
		    s->rx_ucast_bytes);
	case IFCOUNTER_OBYTES:
		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
		    s->tx_ucast_bytes + s->tx_offload_bytes);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = 0;
		if ((vi->flags & (VI_INIT_DONE | VI_NETMAP)) == VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}

uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	if (pi->nvi > 1)
		return (vi_get_counter(ifp, c));

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames - s->rx_pause);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames - s->tx_pause);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets - s->rx_pause * 64);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets - s->tx_pause * 64);
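	/*
	 * (PAUSE frames are minimum-sized, 64-byte frames, which is why the
	 * two byte counters above back them out as rx_pause/tx_pause * 64.)
	 */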
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames - s->rx_pause);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames - s->tx_pause);

	case IFCOUNTER_IQDROPS:
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;

	device_printf(vi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct ifmedia_entry *cur;
	int speed = pi->link_cfg.speed;

	cur = vi->media.ifm_cur;

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == 10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == 1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == 100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == 10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

static int
vcxgbe_probe(device_t dev)
{
	char buf[128];
	struct vi_info *vi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
	    vi - vi->pi->vi);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

static int
vcxgbe_attach(device_t dev)
{
	struct vi_info *vi;
	struct port_info *pi;
	struct adapter *sc;
	int func, index, rc;
	u32 param, val;

	vi = device_get_softc(dev);
	pi = vi->pi;
	sc = pi->adapter;

	index = vi - pi->vi;
	KASSERT(index < nitems(vi_mac_funcs),
	    ("%s: VI %s doesn't have a MAC func", __func__,
	    device_get_nameunit(dev)));
	func = vi_mac_funcs[index];
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
	    vi->hw_addr, &vi->rss_size, func, 0);
	if (rc < 0) {
		device_printf(dev, "Failed to allocate virtual interface "
		    "for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	vi->viid = rc;

	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc)
		vi->rss_base = 0xffff;
	else {
		/* MPASS((val >> 16) == rss_size); */
		vi->rss_base = val & 0xffff;
	}

	rc = cxgbe_vi_attach(dev, vi);
	if (rc) {
		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
		return (rc);
	}
	return (0);
}

static int
vcxgbe_detach(device_t dev)
{
	struct vi_info *vi;
	struct adapter *sc;

	vi = device_get_softc(dev);
	sc = vi->pi->adapter;

	doom_vi(sc, vi);

	cxgbe_vi_detach(vi);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);

	end_synchronized_op(sc, 0);

	return (0);
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
DOORBELL_KDB); 1900 1901 sc->msix_rid = PCIR_BAR(4); 1902 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1903 &sc->msix_rid, RF_ACTIVE); 1904 if (sc->msix_res == NULL) { 1905 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1906 return (ENXIO); 1907 } 1908 1909 return (0); 1910 } 1911 1912 static int 1913 map_bar_2(struct adapter *sc) 1914 { 1915 1916 /* 1917 * T4: only iWARP driver uses the userspace doorbells. There is no need 1918 * to map it if RDMA is disabled. 1919 */ 1920 if (is_t4(sc) && sc->rdmacaps == 0) 1921 return (0); 1922 1923 sc->udbs_rid = PCIR_BAR(2); 1924 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1925 &sc->udbs_rid, RF_ACTIVE); 1926 if (sc->udbs_res == NULL) { 1927 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 1928 return (ENXIO); 1929 } 1930 sc->udbs_base = rman_get_virtual(sc->udbs_res); 1931 1932 if (is_t5(sc)) { 1933 setbit(&sc->doorbells, DOORBELL_UDB); 1934 #if defined(__i386__) || defined(__amd64__) 1935 if (t5_write_combine) { 1936 int rc; 1937 1938 /* 1939 * Enable write combining on BAR2. This is the 1940 * userspace doorbell BAR and is split into 128B 1941 * (UDBS_SEG_SIZE) doorbell regions, each associated 1942 * with an egress queue. The first 64B has the doorbell 1943 * and the second 64B can be used to submit a tx work 1944 * request with an implicit doorbell. 1945 */ 1946 1947 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 1948 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 1949 if (rc == 0) { 1950 clrbit(&sc->doorbells, DOORBELL_UDB); 1951 setbit(&sc->doorbells, DOORBELL_WCWR); 1952 setbit(&sc->doorbells, DOORBELL_UDBWC); 1953 } else { 1954 device_printf(sc->dev, 1955 "couldn't enable write combining: %d\n", 1956 rc); 1957 } 1958 1959 t4_write_reg(sc, A_SGE_STAT_CFG, 1960 V_STATSOURCE_T5(7) | V_STATMODE(0)); 1961 } 1962 #endif 1963 } 1964 1965 return (0); 1966 } 1967 1968 static const struct memwin t4_memwin[] = { 1969 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1970 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1971 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 1972 }; 1973 1974 static const struct memwin t5_memwin[] = { 1975 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1976 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1977 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 1978 }; 1979 1980 static void 1981 setup_memwin(struct adapter *sc) 1982 { 1983 const struct memwin *mw; 1984 int i, n; 1985 uint32_t bar0; 1986 1987 if (is_t4(sc)) { 1988 /* 1989 * Read low 32b of bar0 indirectly via the hardware backdoor 1990 * mechanism. Works from within PCI passthrough environments 1991 * too, where rman_get_start() can return a different value. We 1992 * need to program the T4 memory window decoders with the actual 1993 * addresses that will be coming across the PCIe link. 1994 */ 1995 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 1996 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 1997 1998 mw = &t4_memwin[0]; 1999 n = nitems(t4_memwin); 2000 } else { 2001 /* T5 uses the relative offset inside the PCIe BAR */ 2002 bar0 = 0; 2003 2004 mw = &t5_memwin[0]; 2005 n = nitems(t5_memwin); 2006 } 2007 2008 for (i = 0; i < n; i++, mw++) { 2009 t4_write_reg(sc, 2010 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2011 (mw->base + bar0) | V_BIR(0) | 2012 V_WINDOW(ilog2(mw->aperture) - 10)); 2013 } 2014 2015 /* flush */ 2016 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2017 } 2018 2019 /* 2020 * Verify that the memory range specified by the addr/len pair is valid and lies 2021 * entirely within a single region (EDCx or MCx). 
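 *
 * A worked example with hypothetical BAR contents: if only EDC0 is enabled,
 * with G_EDRAM0_BASE() = 0 and G_EDRAM0_SIZE() = 256 (256MB at address 0),
 * then addr = 0x100, len = 64 passes; addr = 0x102 fails the 4B alignment
 * check; and addr = 0x0ffffff0, len = 64 fails because the range runs past
 * the end of the region.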
2022 */ 2023 static int 2024 validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2025 { 2026 uint32_t em, addr_len, maddr, mlen; 2027 2028 /* Memory can only be accessed in naturally aligned 4 byte units */ 2029 if (addr & 3 || len & 3 || len == 0) 2030 return (EINVAL); 2031 2032 /* Enabled memories */ 2033 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2034 if (em & F_EDRAM0_ENABLE) { 2035 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2036 maddr = G_EDRAM0_BASE(addr_len) << 20; 2037 mlen = G_EDRAM0_SIZE(addr_len) << 20; 2038 if (mlen > 0 && addr >= maddr && addr < maddr + mlen && 2039 addr + len <= maddr + mlen) 2040 return (0); 2041 } 2042 if (em & F_EDRAM1_ENABLE) { 2043 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2044 maddr = G_EDRAM1_BASE(addr_len) << 20; 2045 mlen = G_EDRAM1_SIZE(addr_len) << 20; 2046 if (mlen > 0 && addr >= maddr && addr < maddr + mlen && 2047 addr + len <= maddr + mlen) 2048 return (0); 2049 } 2050 if (em & F_EXT_MEM_ENABLE) { 2051 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2052 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2053 mlen = G_EXT_MEM_SIZE(addr_len) << 20; 2054 if (mlen > 0 && addr >= maddr && addr < maddr + mlen && 2055 addr + len <= maddr + mlen) 2056 return (0); 2057 } 2058 if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) { 2059 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2060 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2061 mlen = G_EXT_MEM1_SIZE(addr_len) << 20; 2062 if (mlen > 0 && addr >= maddr && addr < maddr + mlen && 2063 addr + len <= maddr + mlen) 2064 return (0); 2065 } 2066 2067 return (EFAULT); 2068 } 2069 2070 static int 2071 fwmtype_to_hwmtype(int mtype) 2072 { 2073 2074 switch (mtype) { 2075 case FW_MEMTYPE_EDC0: 2076 return (MEM_EDC0); 2077 case FW_MEMTYPE_EDC1: 2078 return (MEM_EDC1); 2079 case FW_MEMTYPE_EXTMEM: 2080 return (MEM_MC0); 2081 case FW_MEMTYPE_EXTMEM1: 2082 return (MEM_MC1); 2083 default: 2084 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2085 } 2086 } 2087 2088 /* 2089 * Verify that the memory range specified by the memtype/offset/len pair is 2090 * valid and lies entirely within the memtype specified. The global address of 2091 * the start of the range is returned in addr. 
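 *
 * Sketch of the translation (hypothetical values): for mtype =
 * FW_MEMTYPE_EDC1 with EDC1 based at 256MB, off = 0x1000 yields
 * *addr = 0x10001000, a global address suitable for the memory windows
 * and for validate_mem_range().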
2092 */ 2093 static int 2094 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2095 uint32_t *addr) 2096 { 2097 uint32_t em, addr_len, maddr, mlen; 2098 2099 /* Memory can only be accessed in naturally aligned 4 byte units */ 2100 if (off & 3 || len & 3 || len == 0) 2101 return (EINVAL); 2102 2103 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2104 switch (fwmtype_to_hwmtype(mtype)) { 2105 case MEM_EDC0: 2106 if (!(em & F_EDRAM0_ENABLE)) 2107 return (EINVAL); 2108 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2109 maddr = G_EDRAM0_BASE(addr_len) << 20; 2110 mlen = G_EDRAM0_SIZE(addr_len) << 20; 2111 break; 2112 case MEM_EDC1: 2113 if (!(em & F_EDRAM1_ENABLE)) 2114 return (EINVAL); 2115 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2116 maddr = G_EDRAM1_BASE(addr_len) << 20; 2117 mlen = G_EDRAM1_SIZE(addr_len) << 20; 2118 break; 2119 case MEM_MC: 2120 if (!(em & F_EXT_MEM_ENABLE)) 2121 return (EINVAL); 2122 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2123 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2124 mlen = G_EXT_MEM_SIZE(addr_len) << 20; 2125 break; 2126 case MEM_MC1: 2127 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE)) 2128 return (EINVAL); 2129 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2130 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2131 mlen = G_EXT_MEM1_SIZE(addr_len) << 20; 2132 break; 2133 default: 2134 return (EINVAL); 2135 } 2136 2137 if (mlen > 0 && off < mlen && off + len <= mlen) { 2138 *addr = maddr + off; /* global address */ 2139 return (0); 2140 } 2141 2142 return (EFAULT); 2143 } 2144 2145 static void 2146 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture) 2147 { 2148 const struct memwin *mw; 2149 2150 if (is_t4(sc)) { 2151 KASSERT(win >= 0 && win < nitems(t4_memwin), 2152 ("%s: incorrect memwin# (%d)", __func__, win)); 2153 mw = &t4_memwin[win]; 2154 } else { 2155 KASSERT(win >= 0 && win < nitems(t5_memwin), 2156 ("%s: incorrect memwin# (%d)", __func__, win)); 2157 mw = &t5_memwin[win]; 2158 } 2159 2160 if (base != NULL) 2161 *base = mw->base; 2162 if (aperture != NULL) 2163 *aperture = mw->aperture; 2164 } 2165 2166 /* 2167 * Positions the memory window such that it can be used to access the specified 2168 * address in the chip's address space. The return value is the offset of addr 2169 * from the start of the window. 
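 *
 * A minimal read through window 2 looks like this (sketch; the caller is
 * assumed to own the window for the duration):
 *
 *	uint32_t mw_base, off, v;
 *
 *	memwin_info(sc, 2, &mw_base, NULL);
 *	off = position_memwin(sc, 2, addr);
 *	v = t4_read_reg(sc, mw_base + off);
 *
 * partition_resources() uses the same pattern, with t4_write_reg(), to
 * copy the config file into card memory.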
2170 */ 2171 static uint32_t 2172 position_memwin(struct adapter *sc, int n, uint32_t addr) 2173 { 2174 uint32_t start, pf; 2175 uint32_t reg; 2176 2177 KASSERT(n >= 0 && n <= 3, 2178 ("%s: invalid window %d.", __func__, n)); 2179 KASSERT((addr & 3) == 0, 2180 ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr)); 2181 2182 if (is_t4(sc)) { 2183 pf = 0; 2184 start = addr & ~0xf; /* start must be 16B aligned */ 2185 } else { 2186 pf = V_PFNUM(sc->pf); 2187 start = addr & ~0x7f; /* start must be 128B aligned */ 2188 } 2189 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n); 2190 2191 t4_write_reg(sc, reg, start | pf); 2192 t4_read_reg(sc, reg); 2193 2194 return (addr - start); 2195 } 2196 2197 static int 2198 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2199 struct intrs_and_queues *iaq) 2200 { 2201 int rc, itype, navail, nrxq10g, nrxq1g, n; 2202 int nofldrxq10g = 0, nofldrxq1g = 0; 2203 int nnmrxq10g = 0, nnmrxq1g = 0; 2204 2205 bzero(iaq, sizeof(*iaq)); 2206 2207 iaq->ntxq10g = t4_ntxq10g; 2208 iaq->ntxq1g = t4_ntxq1g; 2209 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2210 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2211 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2212 #ifdef TCP_OFFLOAD 2213 if (is_offload(sc)) { 2214 iaq->nofldtxq10g = t4_nofldtxq10g; 2215 iaq->nofldtxq1g = t4_nofldtxq1g; 2216 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2217 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2218 } 2219 #endif 2220 #ifdef DEV_NETMAP 2221 iaq->nnmtxq10g = t4_nnmtxq10g; 2222 iaq->nnmtxq1g = t4_nnmtxq1g; 2223 iaq->nnmrxq10g = nnmrxq10g = t4_nnmrxq10g; 2224 iaq->nnmrxq1g = nnmrxq1g = t4_nnmrxq1g; 2225 #endif 2226 2227 for (itype = INTR_MSIX; itype; itype >>= 1) { 2228 2229 if ((itype & t4_intr_types) == 0) 2230 continue; /* not allowed */ 2231 2232 if (itype == INTR_MSIX) 2233 navail = pci_msix_count(sc->dev); 2234 else if (itype == INTR_MSI) 2235 navail = pci_msi_count(sc->dev); 2236 else 2237 navail = 1; 2238 restart: 2239 if (navail == 0) 2240 continue; 2241 2242 iaq->intr_type = itype; 2243 iaq->intr_flags_10g = 0; 2244 iaq->intr_flags_1g = 0; 2245 2246 /* 2247 * Best option: an interrupt vector for errors, one for the 2248 * firmware event queue, and one for every rxq (NIC, TOE, and 2249 * netmap). 2250 */ 2251 iaq->nirq = T4_EXTRA_INTR; 2252 iaq->nirq += n10g * (nrxq10g + nofldrxq10g + nnmrxq10g); 2253 iaq->nirq += n10g * 2 * (num_vis - 1); 2254 iaq->nirq += n1g * (nrxq1g + nofldrxq1g + nnmrxq1g); 2255 iaq->nirq += n1g * 2 * (num_vis - 1); 2256 if (iaq->nirq <= navail && 2257 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2258 iaq->intr_flags_10g = INTR_ALL; 2259 iaq->intr_flags_1g = INTR_ALL; 2260 goto allocate; 2261 } 2262 2263 /* 2264 * Second best option: a vector for errors, one for the firmware 2265 * event queue, and vectors for either all the NIC rx queues or 2266 * all the TOE rx queues. The queues that don't get vectors 2267 * will forward their interrupts to those that do. 2268 * 2269 * Note: netmap rx queues cannot be created early and so they 2270 * can't be setup to receive forwarded interrupts for others. 
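 *
 * Worked example (hypothetical): 2 x 10G ports, num_vis = 1,
 * nrxq10g = 8, nofldrxq10g = 2.  The best option above needs
 * T4_EXTRA_INTR + 2 * (8 + 2) vectors; this one needs only
 * T4_EXTRA_INTR + 2 * 8 because the 2 TOE rx queues per port will
 * forward their interrupts to NIC rx queues.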
2271 */ 2272 iaq->nirq = T4_EXTRA_INTR; 2273 if (nrxq10g >= nofldrxq10g) { 2274 iaq->intr_flags_10g = INTR_RXQ; 2275 iaq->nirq += n10g * nrxq10g; 2276 iaq->nirq += n10g * (num_vis - 1); 2277 #ifdef DEV_NETMAP 2278 iaq->nnmrxq10g = min(nnmrxq10g, nrxq10g); 2279 #endif 2280 } else { 2281 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2282 iaq->nirq += n10g * nofldrxq10g; 2283 #ifdef DEV_NETMAP 2284 iaq->nnmrxq10g = min(nnmrxq10g, nofldrxq10g); 2285 #endif 2286 } 2287 if (nrxq1g >= nofldrxq1g) { 2288 iaq->intr_flags_1g = INTR_RXQ; 2289 iaq->nirq += n1g * nrxq1g; 2290 iaq->nirq += n1g * (num_vis - 1); 2291 #ifdef DEV_NETMAP 2292 iaq->nnmrxq1g = min(nnmrxq1g, nrxq1g); 2293 #endif 2294 } else { 2295 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2296 iaq->nirq += n1g * nofldrxq1g; 2297 #ifdef DEV_NETMAP 2298 iaq->nnmrxq1g = min(nnmrxq1g, nofldrxq1g); 2299 #endif 2300 } 2301 if (iaq->nirq <= navail && 2302 (itype != INTR_MSI || powerof2(iaq->nirq))) 2303 goto allocate; 2304 2305 /* 2306 * Next best option: an interrupt vector for errors, one for the 2307 * firmware event queue, and at least one per VI. At this 2308 * point we know we'll have to downsize nrxq and/or nofldrxq 2309 * and/or nnmrxq to fit what's available to us. 2310 */ 2311 iaq->nirq = T4_EXTRA_INTR; 2312 iaq->nirq += (n10g + n1g) * num_vis; 2313 if (iaq->nirq <= navail) { 2314 int leftover = navail - iaq->nirq; 2315 2316 if (n10g > 0) { 2317 int target = max(nrxq10g, nofldrxq10g); 2318 2319 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2320 INTR_RXQ : INTR_OFLD_RXQ; 2321 2322 n = 1; 2323 while (n < target && leftover >= n10g) { 2324 leftover -= n10g; 2325 iaq->nirq += n10g; 2326 n++; 2327 } 2328 iaq->nrxq10g = min(n, nrxq10g); 2329 #ifdef TCP_OFFLOAD 2330 iaq->nofldrxq10g = min(n, nofldrxq10g); 2331 #endif 2332 #ifdef DEV_NETMAP 2333 iaq->nnmrxq10g = min(n, nnmrxq10g); 2334 #endif 2335 } 2336 2337 if (n1g > 0) { 2338 int target = max(nrxq1g, nofldrxq1g); 2339 2340 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 2341 INTR_RXQ : INTR_OFLD_RXQ; 2342 2343 n = 1; 2344 while (n < target && leftover >= n1g) { 2345 leftover -= n1g; 2346 iaq->nirq += n1g; 2347 n++; 2348 } 2349 iaq->nrxq1g = min(n, nrxq1g); 2350 #ifdef TCP_OFFLOAD 2351 iaq->nofldrxq1g = min(n, nofldrxq1g); 2352 #endif 2353 #ifdef DEV_NETMAP 2354 iaq->nnmrxq1g = min(n, nnmrxq1g); 2355 #endif 2356 } 2357 2358 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2359 goto allocate; 2360 } 2361 2362 /* 2363 * Least desirable option: one interrupt vector for everything. 2364 */ 2365 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2366 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2367 #ifdef TCP_OFFLOAD 2368 if (is_offload(sc)) 2369 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2370 #endif 2371 #ifdef DEV_NETMAP 2372 iaq->nnmrxq10g = iaq->nnmrxq1g = 1; 2373 #endif 2374 2375 allocate: 2376 navail = iaq->nirq; 2377 rc = 0; 2378 if (itype == INTR_MSIX) 2379 rc = pci_alloc_msix(sc->dev, &navail); 2380 else if (itype == INTR_MSI) 2381 rc = pci_alloc_msi(sc->dev, &navail); 2382 2383 if (rc == 0) { 2384 if (navail == iaq->nirq) 2385 return (0); 2386 2387 /* 2388 * Didn't get the number requested. Use whatever number 2389 * the kernel is willing to allocate (it's in navail). 
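 *
 * E.g. (hypothetical) 18 MSI-X vectors requested but only 16 granted:
 * the 16 are released and the loop restarts with navail = 16 so that a
 * configuration that fits can be computed.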
2390 */ 2391 device_printf(sc->dev, "fewer vectors than requested, " 2392 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2393 itype, iaq->nirq, navail); 2394 pci_release_msi(sc->dev); 2395 goto restart; 2396 } 2397 2398 device_printf(sc->dev, 2399 "failed to allocate vectors: type=%d, rc=%d, req=%d, rcvd=%d\n", 2400 itype, rc, iaq->nirq, navail); 2401 } 2402 2403 device_printf(sc->dev, 2404 "failed to find a usable interrupt type. " 2405 "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types, 2406 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2407 2408 return (ENXIO); 2409 } 2410 2411 #define FW_VERSION(chip) ( \ 2412 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2413 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2414 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2415 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2416 #define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2417 2418 struct fw_info { 2419 uint8_t chip; 2420 char *kld_name; 2421 char *fw_mod_name; 2422 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2423 } fw_info[] = { 2424 { 2425 .chip = CHELSIO_T4, 2426 .kld_name = "t4fw_cfg", 2427 .fw_mod_name = "t4fw", 2428 .fw_hdr = { 2429 .chip = FW_HDR_CHIP_T4, 2430 .fw_ver = htobe32_const(FW_VERSION(T4)), 2431 .intfver_nic = FW_INTFVER(T4, NIC), 2432 .intfver_vnic = FW_INTFVER(T4, VNIC), 2433 .intfver_ofld = FW_INTFVER(T4, OFLD), 2434 .intfver_ri = FW_INTFVER(T4, RI), 2435 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2436 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2437 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2438 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2439 }, 2440 }, { 2441 .chip = CHELSIO_T5, 2442 .kld_name = "t5fw_cfg", 2443 .fw_mod_name = "t5fw", 2444 .fw_hdr = { 2445 .chip = FW_HDR_CHIP_T5, 2446 .fw_ver = htobe32_const(FW_VERSION(T5)), 2447 .intfver_nic = FW_INTFVER(T5, NIC), 2448 .intfver_vnic = FW_INTFVER(T5, VNIC), 2449 .intfver_ofld = FW_INTFVER(T5, OFLD), 2450 .intfver_ri = FW_INTFVER(T5, RI), 2451 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2452 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2453 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2454 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2455 }, 2456 } 2457 }; 2458 2459 static struct fw_info * 2460 find_fw_info(int chip) 2461 { 2462 int i; 2463 2464 for (i = 0; i < nitems(fw_info); i++) { 2465 if (fw_info[i].chip == chip) 2466 return (&fw_info[i]); 2467 } 2468 return (NULL); 2469 } 2470 2471 /* 2472 * Is the given firmware API compatible with the one the driver was compiled 2473 * with? 2474 */ 2475 static int 2476 fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2477 { 2478 2479 /* short circuit if it's the exact same firmware version */ 2480 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2481 return (1); 2482 2483 /* 2484 * XXX: Is this too conservative? Perhaps I should limit this to the 2485 * features that are supported in the driver. 2486 */ 2487 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2488 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2489 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2490 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2491 return (1); 2492 #undef SAME_INTF 2493 2494 return (0); 2495 } 2496 2497 /* 2498 * The firmware in the KLD is usable, but should it be installed? This routine 2499 * explains itself in detail if it indicates the KLD firmware should be 2500 * installed.
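 *
 * Example (hypothetical versions): with t4_fw_install = 1, a card running
 * 1.8.4.0 and a KLD bundling 1.15.0.0 gives k > c, so the KLD firmware is
 * installed with the reason "older than the version bundled with this
 * driver".  With t4_fw_install = 0 the same mismatch is only logged.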
2501 */ 2502 static int 2503 should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2504 { 2505 const char *reason; 2506 2507 if (!card_fw_usable) { 2508 reason = "incompatible or unusable"; 2509 goto install; 2510 } 2511 2512 if (k > c) { 2513 reason = "older than the version bundled with this driver"; 2514 goto install; 2515 } 2516 2517 if (t4_fw_install == 2 && k != c) { 2518 reason = "different than the version bundled with this driver"; 2519 goto install; 2520 } 2521 2522 return (0); 2523 2524 install: 2525 if (t4_fw_install == 0) { 2526 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2527 "but the driver is prohibited from installing a different " 2528 "firmware on the card.\n", 2529 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2530 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2531 2532 return (0); 2533 } 2534 2535 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2536 "installing firmware %u.%u.%u.%u on card.\n", 2537 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2538 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2539 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2540 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2541 2542 return (1); 2543 } 2544 /* 2545 * Establish contact with the firmware and determine if we are the master driver 2546 * or not, and whether we are responsible for chip initialization. 2547 */ 2548 static int 2549 prep_firmware(struct adapter *sc) 2550 { 2551 const struct firmware *fw = NULL, *default_cfg; 2552 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2553 enum dev_state state; 2554 struct fw_info *fw_info; 2555 struct fw_hdr *card_fw; /* fw on the card */ 2556 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2557 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2558 against */ 2559 2560 /* Contact firmware. */ 2561 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2562 if (rc < 0 || state == DEV_STATE_ERR) { 2563 rc = -rc; 2564 device_printf(sc->dev, 2565 "failed to connect to the firmware: %d, %d.\n", rc, state); 2566 return (rc); 2567 } 2568 pf = rc; 2569 if (pf == sc->mbox) 2570 sc->flags |= MASTER_PF; 2571 else if (state == DEV_STATE_UNINIT) { 2572 /* 2573 * We didn't get to be the master so we definitely won't be 2574 * configuring the chip. It's a bug if someone else hasn't 2575 * configured it already. 2576 */ 2577 device_printf(sc->dev, "couldn't be master(%d), " 2578 "device not already initialized either(%d).\n", rc, state); 2579 return (EDOOFUS); 2580 } 2581 2582 /* This is the firmware whose headers the driver was compiled against */ 2583 fw_info = find_fw_info(chip_id(sc)); 2584 if (fw_info == NULL) { 2585 device_printf(sc->dev, 2586 "unable to look up firmware information for chip %d.\n", 2587 chip_id(sc)); 2588 return (EINVAL); 2589 } 2590 drv_fw = &fw_info->fw_hdr; 2591 2592 /* 2593 * The firmware KLD contains many modules. The KLD name is also the 2594 * name of the module that contains the default config file. 
2595 */ 2596 default_cfg = firmware_get(fw_info->kld_name); 2597 2598 /* Read the header of the firmware on the card */ 2599 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2600 rc = -t4_read_flash(sc, FLASH_FW_START, 2601 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2602 if (rc == 0) 2603 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2604 else { 2605 device_printf(sc->dev, 2606 "Unable to read card's firmware header: %d\n", rc); 2607 card_fw_usable = 0; 2608 } 2609 2610 /* This is the firmware in the KLD */ 2611 fw = firmware_get(fw_info->fw_mod_name); 2612 if (fw != NULL) { 2613 kld_fw = (const void *)fw->data; 2614 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2615 } else { 2616 kld_fw = NULL; 2617 kld_fw_usable = 0; 2618 } 2619 2620 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2621 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2622 /* 2623 * Common case: the firmware on the card is an exact match and 2624 * the KLD is an exact match too, or the KLD is 2625 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2626 * here -- use cxgbetool loadfw if you want to reinstall the 2627 * same firmware as the one on the card. 2628 */ 2629 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2630 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2631 be32toh(card_fw->fw_ver))) { 2632 2633 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2634 if (rc != 0) { 2635 device_printf(sc->dev, 2636 "failed to install firmware: %d\n", rc); 2637 goto done; 2638 } 2639 2640 /* Installed successfully, update the cached header too. */ 2641 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2642 card_fw_usable = 1; 2643 need_fw_reset = 0; /* already reset as part of load_fw */ 2644 } 2645 2646 if (!card_fw_usable) { 2647 uint32_t d, c, k; 2648 2649 d = ntohl(drv_fw->fw_ver); 2650 c = ntohl(card_fw->fw_ver); 2651 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 2652 2653 device_printf(sc->dev, "Cannot find a usable firmware: " 2654 "fw_install %d, chip state %d, " 2655 "driver compiled with %d.%d.%d.%d, " 2656 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2657 t4_fw_install, state, 2658 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2659 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2660 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2661 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2662 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2663 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2664 rc = EINVAL; 2665 goto done; 2666 } 2667 2668 /* We're using whatever's on the card and it's known to be good. */ 2669 sc->params.fw_vers = ntohl(card_fw->fw_ver); 2670 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 2671 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2672 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2673 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2674 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2675 t4_get_tp_version(sc, &sc->params.tp_vers); 2676 2677 /* Reset device */ 2678 if (need_fw_reset && 2679 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2680 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2681 if (rc != ETIMEDOUT && rc != EIO) 2682 t4_fw_bye(sc, sc->mbox); 2683 goto done; 2684 } 2685 sc->flags |= FW_OK; 2686 2687 rc = get_params__pre_init(sc); 2688 if (rc != 0) 2689 goto done; /* error message displayed already */ 2690 2691 /* Partition adapter resources as specified in the config file. 
*/ 2692 if (state == DEV_STATE_UNINIT) { 2693 2694 KASSERT(sc->flags & MASTER_PF, 2695 ("%s: trying to change chip settings when not master.", 2696 __func__)); 2697 2698 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2699 if (rc != 0) 2700 goto done; /* error message displayed already */ 2701 2702 t4_tweak_chip_settings(sc); 2703 2704 /* get basic stuff going */ 2705 rc = -t4_fw_initialize(sc, sc->mbox); 2706 if (rc != 0) { 2707 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2708 goto done; 2709 } 2710 } else { 2711 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2712 sc->cfcsum = 0; 2713 } 2714 2715 done: 2716 free(card_fw, M_CXGBE); 2717 if (fw != NULL) 2718 firmware_put(fw, FIRMWARE_UNLOAD); 2719 if (default_cfg != NULL) 2720 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2721 2722 return (rc); 2723 } 2724 2725 #define FW_PARAM_DEV(param) \ 2726 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2727 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2728 #define FW_PARAM_PFVF(param) \ 2729 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2730 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2731 2732 /* 2733 * Partition chip resources for use between various PFs, VFs, etc. 2734 */ 2735 static int 2736 partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2737 const char *name_prefix) 2738 { 2739 const struct firmware *cfg = NULL; 2740 int rc = 0; 2741 struct fw_caps_config_cmd caps; 2742 uint32_t mtype, moff, finicsum, cfcsum; 2743 2744 /* 2745 * Figure out what configuration file to use. Pick the default config 2746 * file for the card if the user hasn't specified one explicitly. 2747 */ 2748 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2749 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2750 /* Card specific overrides go here. */ 2751 if (pci_get_device(sc->dev) == 0x440a) 2752 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2753 if (is_fpga(sc)) 2754 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2755 } 2756 2757 /* 2758 * We need to load another module if the profile is anything except 2759 * "default" or "flash". 
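 *
 * E.g. a name_prefix of "t4fw_cfg" and a (hypothetical) profile named
 * "rdma" produce a request for a module called "t4fw_cfg_rdma" in the
 * firmware_get() call below.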
*/ 2761 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 2762 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2763 char s[32]; 2764 2765 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 2766 cfg = firmware_get(s); 2767 if (cfg == NULL) { 2768 if (default_cfg != NULL) { 2769 device_printf(sc->dev, 2770 "unable to load module \"%s\" for " 2771 "configuration profile \"%s\", will use " 2772 "the default config file instead.\n", 2773 s, sc->cfg_file); 2774 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2775 "%s", DEFAULT_CF); 2776 } else { 2777 device_printf(sc->dev, 2778 "unable to load module \"%s\" for " 2779 "configuration profile \"%s\", will use " 2780 "the config file on the card's flash " 2781 "instead.\n", s, sc->cfg_file); 2782 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2783 "%s", FLASH_CF); 2784 } 2785 } 2786 } 2787 2788 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 2789 default_cfg == NULL) { 2790 device_printf(sc->dev, 2791 "default config file not available, will use the config " 2792 "file on the card's flash instead.\n"); 2793 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 2794 } 2795 2796 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2797 u_int cflen, i, n; 2798 const uint32_t *cfdata; 2799 uint32_t param, val, addr, off, mw_base, mw_aperture; 2800 2801 KASSERT(cfg != NULL || default_cfg != NULL, 2802 ("%s: no config to upload", __func__)); 2803 2804 /* 2805 * Ask the firmware where it wants us to upload the config file. 2806 */ 2807 param = FW_PARAM_DEV(CF); 2808 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2809 if (rc != 0) { 2810 /* No support for config file? Shouldn't happen. */ 2811 device_printf(sc->dev, 2812 "failed to query config file location: %d.\n", rc); 2813 goto done; 2814 } 2815 mtype = G_FW_PARAMS_PARAM_Y(val); 2816 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 2817 2818 /* 2819 * XXX: sheer laziness. We deliberately added 4 bytes of 2820 * useless stuffing/comments at the end of the config file so 2821 * it's ok to simply throw away the last remaining bytes when 2822 * the config file is not an exact multiple of 4. This also 2823 * helps with the validate_mt_off_len check. 2824 */ 2825 if (cfg != NULL) { 2826 cflen = cfg->datasize & ~3; 2827 cfdata = cfg->data; 2828 } else { 2829 cflen = default_cfg->datasize & ~3; 2830 cfdata = default_cfg->data; 2831 } 2832 2833 if (cflen > FLASH_CFG_MAX_SIZE) { 2834 device_printf(sc->dev, 2835 "config file too long (%d, max allowed is %d). " 2836 "Will try to use the config on the card, if any.\n", 2837 cflen, FLASH_CFG_MAX_SIZE); 2838 goto use_config_on_flash; 2839 } 2840 2841 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 2842 if (rc != 0) { 2843 device_printf(sc->dev, 2844 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 2845 "Will try to use the config on the card, if any.\n", 2846 __func__, mtype, moff, cflen, rc); 2847 goto use_config_on_flash; 2848 } 2849 2850 memwin_info(sc, 2, &mw_base, &mw_aperture); 2851 while (cflen) { 2852 off = position_memwin(sc, 2, addr); 2853 n = min(cflen, mw_aperture - off); 2854 for (i = 0; i < n; i += 4) 2855 t4_write_reg(sc, mw_base + off + i, *cfdata++); 2856 cflen -= n; 2857 addr += n; 2858 } 2859 } else { 2860 use_config_on_flash: 2861 mtype = FW_MEMTYPE_FLASH; 2862 moff = t4_flash_cfg_addr(sc); 2863 } 2864 2865 bzero(&caps, sizeof(caps)); 2866 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2867 F_FW_CMD_REQUEST | F_FW_CMD_READ); 2868 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 2869 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 2870 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 2871 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 2872 if (rc != 0) { 2873 device_printf(sc->dev, 2874 "failed to pre-process config file: %d " 2875 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 2876 goto done; 2877 } 2878 2879 finicsum = be32toh(caps.finicsum); 2880 cfcsum = be32toh(caps.cfcsum); 2881 if (finicsum != cfcsum) { 2882 device_printf(sc->dev, 2883 "WARNING: config file checksum mismatch: %08x %08x\n", 2884 finicsum, cfcsum); 2885 } 2886 sc->cfcsum = cfcsum; 2887 2888 #define LIMIT_CAPS(x) do { \ 2889 caps.x &= htobe16(t4_##x##_allowed); \ 2890 } while (0) 2891 2892 /* 2893 * Let the firmware know what features will (not) be used so it can tune 2894 * things accordingly. 2895 */ 2896 LIMIT_CAPS(linkcaps); 2897 LIMIT_CAPS(niccaps); 2898 LIMIT_CAPS(toecaps); 2899 LIMIT_CAPS(rdmacaps); 2900 LIMIT_CAPS(iscsicaps); 2901 LIMIT_CAPS(fcoecaps); 2902 #undef LIMIT_CAPS 2903 2904 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2905 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 2906 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 2907 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 2908 if (rc != 0) { 2909 device_printf(sc->dev, 2910 "failed to process config file: %d.\n", rc); 2911 } 2912 done: 2913 if (cfg != NULL) 2914 firmware_put(cfg, FIRMWARE_UNLOAD); 2915 return (rc); 2916 } 2917 2918 /* 2919 * Retrieve parameters that are needed (or nice to have) very early. 2920 */ 2921 static int 2922 get_params__pre_init(struct adapter *sc) 2923 { 2924 int rc; 2925 uint32_t param[2], val[2]; 2926 struct fw_devlog_cmd cmd; 2927 struct devlog_params *dlog = &sc->params.devlog; 2928 2929 param[0] = FW_PARAM_DEV(PORTVEC); 2930 param[1] = FW_PARAM_DEV(CCLK); 2931 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 2932 if (rc != 0) { 2933 device_printf(sc->dev, 2934 "failed to query parameters (pre_init): %d.\n", rc); 2935 return (rc); 2936 } 2937 2938 sc->params.portvec = val[0]; 2939 sc->params.nports = bitcount32(val[0]); 2940 sc->params.vpd.cclk = val[1]; 2941 2942 /* Read device log parameters. 
*/ 2943 bzero(&cmd, sizeof(cmd)); 2944 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) | 2945 F_FW_CMD_REQUEST | F_FW_CMD_READ); 2946 cmd.retval_len16 = htobe32(FW_LEN16(cmd)); 2947 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd); 2948 if (rc != 0) { 2949 device_printf(sc->dev, 2950 "failed to get devlog parameters: %d.\n", rc); 2951 bzero(dlog, sizeof (*dlog)); 2952 rc = 0; /* devlog isn't critical for device operation */ 2953 } else { 2954 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog); 2955 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]); 2956 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4; 2957 dlog->size = be32toh(cmd.memsize_devlog); 2958 } 2959 2960 return (rc); 2961 } 2962 2963 /* 2964 * Retrieve various parameters that are of interest to the driver. The device 2965 * has been initialized by the firmware at this point. 2966 */ 2967 static int 2968 get_params__post_init(struct adapter *sc) 2969 { 2970 int rc; 2971 uint32_t param[7], val[7]; 2972 struct fw_caps_config_cmd caps; 2973 2974 param[0] = FW_PARAM_PFVF(IQFLINT_START); 2975 param[1] = FW_PARAM_PFVF(EQ_START); 2976 param[2] = FW_PARAM_PFVF(FILTER_START); 2977 param[3] = FW_PARAM_PFVF(FILTER_END); 2978 param[4] = FW_PARAM_PFVF(L2T_START); 2979 param[5] = FW_PARAM_PFVF(L2T_END); 2980 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 2981 if (rc != 0) { 2982 device_printf(sc->dev, 2983 "failed to query parameters (post_init): %d.\n", rc); 2984 return (rc); 2985 } 2986 2987 sc->sge.iq_start = val[0]; 2988 sc->sge.eq_start = val[1]; 2989 sc->tids.ftid_base = val[2]; 2990 sc->tids.nftids = val[3] - val[2] + 1; 2991 sc->params.ftid_min = val[2]; 2992 sc->params.ftid_max = val[3]; 2993 sc->vres.l2t.start = val[4]; 2994 sc->vres.l2t.size = val[5] - val[4] + 1; 2995 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 2996 ("%s: L2 table size (%u) larger than expected (%u)", 2997 __func__, sc->vres.l2t.size, L2T_SIZE)); 2998 2999 /* get capabilities */ 3000 bzero(&caps, sizeof(caps)); 3001 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3002 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3003 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3004 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3005 if (rc != 0) { 3006 device_printf(sc->dev, 3007 "failed to get card capabilities: %d.\n", rc); 3008 return (rc); 3009 } 3010 3011 #define READ_CAPS(x) do { \ 3012 sc->x = htobe16(caps.x); \ 3013 } while (0) 3014 READ_CAPS(linkcaps); 3015 READ_CAPS(niccaps); 3016 READ_CAPS(toecaps); 3017 READ_CAPS(rdmacaps); 3018 READ_CAPS(iscsicaps); 3019 READ_CAPS(fcoecaps); 3020 3021 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3022 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3023 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3024 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3025 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3026 if (rc != 0) { 3027 device_printf(sc->dev, 3028 "failed to query NIC parameters: %d.\n", rc); 3029 return (rc); 3030 } 3031 sc->tids.etid_base = val[0]; 3032 sc->params.etid_min = val[0]; 3033 sc->tids.netids = val[1] - val[0] + 1; 3034 sc->params.netids = sc->tids.netids; 3035 sc->params.eo_wr_cred = val[2]; 3036 sc->params.ethoffload = 1; 3037 } 3038 3039 if (sc->toecaps) { 3040 /* query offload-related parameters */ 3041 param[0] = FW_PARAM_DEV(NTID); 3042 param[1] = FW_PARAM_PFVF(SERVER_START); 3043 param[2] = FW_PARAM_PFVF(SERVER_END); 3044 param[3] = FW_PARAM_PFVF(TDDP_START); 3045 param[4] = FW_PARAM_PFVF(TDDP_END); 3046 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
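		/*
		 * Same query pattern as everywhere else in this function:
		 * load param[] with encoded mnemonics and let the firmware
		 * fill val[] in matching order.  Minimal sketch (hypothetical
		 * standalone use):
		 *
		 *	uint32_t p = FW_PARAM_DEV(NTID), v;
		 *
		 *	if (t4_query_params(sc, sc->mbox, sc->pf, 0, 1,
		 *	    &p, &v) == 0)
		 *		device_printf(sc->dev, "%u tids\n", v);
		 */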
3047 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3048 if (rc != 0) { 3049 device_printf(sc->dev, 3050 "failed to query TOE parameters: %d.\n", rc); 3051 return (rc); 3052 } 3053 sc->tids.ntids = val[0]; 3054 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3055 sc->tids.stid_base = val[1]; 3056 sc->tids.nstids = val[2] - val[1] + 1; 3057 sc->vres.ddp.start = val[3]; 3058 sc->vres.ddp.size = val[4] - val[3] + 1; 3059 sc->params.ofldq_wr_cred = val[5]; 3060 sc->params.offload = 1; 3061 } 3062 if (sc->rdmacaps) { 3063 param[0] = FW_PARAM_PFVF(STAG_START); 3064 param[1] = FW_PARAM_PFVF(STAG_END); 3065 param[2] = FW_PARAM_PFVF(RQ_START); 3066 param[3] = FW_PARAM_PFVF(RQ_END); 3067 param[4] = FW_PARAM_PFVF(PBL_START); 3068 param[5] = FW_PARAM_PFVF(PBL_END); 3069 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3070 if (rc != 0) { 3071 device_printf(sc->dev, 3072 "failed to query RDMA parameters(1): %d.\n", rc); 3073 return (rc); 3074 } 3075 sc->vres.stag.start = val[0]; 3076 sc->vres.stag.size = val[1] - val[0] + 1; 3077 sc->vres.rq.start = val[2]; 3078 sc->vres.rq.size = val[3] - val[2] + 1; 3079 sc->vres.pbl.start = val[4]; 3080 sc->vres.pbl.size = val[5] - val[4] + 1; 3081 3082 param[0] = FW_PARAM_PFVF(SQRQ_START); 3083 param[1] = FW_PARAM_PFVF(SQRQ_END); 3084 param[2] = FW_PARAM_PFVF(CQ_START); 3085 param[3] = FW_PARAM_PFVF(CQ_END); 3086 param[4] = FW_PARAM_PFVF(OCQ_START); 3087 param[5] = FW_PARAM_PFVF(OCQ_END); 3088 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3089 if (rc != 0) { 3090 device_printf(sc->dev, 3091 "failed to query RDMA parameters(2): %d.\n", rc); 3092 return (rc); 3093 } 3094 sc->vres.qp.start = val[0]; 3095 sc->vres.qp.size = val[1] - val[0] + 1; 3096 sc->vres.cq.start = val[2]; 3097 sc->vres.cq.size = val[3] - val[2] + 1; 3098 sc->vres.ocq.start = val[4]; 3099 sc->vres.ocq.size = val[5] - val[4] + 1; 3100 } 3101 if (sc->iscsicaps) { 3102 param[0] = FW_PARAM_PFVF(ISCSI_START); 3103 param[1] = FW_PARAM_PFVF(ISCSI_END); 3104 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3105 if (rc != 0) { 3106 device_printf(sc->dev, 3107 "failed to query iSCSI parameters: %d.\n", rc); 3108 return (rc); 3109 } 3110 sc->vres.iscsi.start = val[0]; 3111 sc->vres.iscsi.size = val[1] - val[0] + 1; 3112 } 3113 3114 /* 3115 * We've got the params we wanted to query via the firmware. Now grab 3116 * some others directly from the chip. 3117 */ 3118 rc = t4_read_chip_settings(sc); 3119 3120 return (rc); 3121 } 3122 3123 static int 3124 set_params__post_init(struct adapter *sc) 3125 { 3126 uint32_t param, val; 3127 3128 /* ask for encapsulated CPLs */ 3129 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3130 val = 1; 3131 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3132 3133 return (0); 3134 } 3135 3136 #undef FW_PARAM_PFVF 3137 #undef FW_PARAM_DEV 3138 3139 static void 3140 t4_set_desc(struct adapter *sc) 3141 { 3142 char buf[128]; 3143 struct adapter_params *p = &sc->params; 3144 3145 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, " 3146 "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ?
"R" : "", 3147 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec); 3148 3149 device_set_desc_copy(sc->dev, buf); 3150 } 3151 3152 static void 3153 build_medialist(struct port_info *pi, struct ifmedia *media) 3154 { 3155 int m; 3156 3157 PORT_LOCK(pi); 3158 3159 ifmedia_removeall(media); 3160 3161 m = IFM_ETHER | IFM_FDX; 3162 3163 switch(pi->port_type) { 3164 case FW_PORT_TYPE_BT_XFI: 3165 case FW_PORT_TYPE_BT_XAUI: 3166 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3167 /* fall through */ 3168 3169 case FW_PORT_TYPE_BT_SGMII: 3170 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3171 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3172 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3173 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3174 break; 3175 3176 case FW_PORT_TYPE_CX4: 3177 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3178 ifmedia_set(media, m | IFM_10G_CX4); 3179 break; 3180 3181 case FW_PORT_TYPE_QSFP_10G: 3182 case FW_PORT_TYPE_SFP: 3183 case FW_PORT_TYPE_FIBER_XFI: 3184 case FW_PORT_TYPE_FIBER_XAUI: 3185 switch (pi->mod_type) { 3186 3187 case FW_PORT_MOD_TYPE_LR: 3188 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3189 ifmedia_set(media, m | IFM_10G_LR); 3190 break; 3191 3192 case FW_PORT_MOD_TYPE_SR: 3193 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3194 ifmedia_set(media, m | IFM_10G_SR); 3195 break; 3196 3197 case FW_PORT_MOD_TYPE_LRM: 3198 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3199 ifmedia_set(media, m | IFM_10G_LRM); 3200 break; 3201 3202 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3203 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3204 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3205 ifmedia_set(media, m | IFM_10G_TWINAX); 3206 break; 3207 3208 case FW_PORT_MOD_TYPE_NONE: 3209 m &= ~IFM_FDX; 3210 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3211 ifmedia_set(media, m | IFM_NONE); 3212 break; 3213 3214 case FW_PORT_MOD_TYPE_NA: 3215 case FW_PORT_MOD_TYPE_ER: 3216 default: 3217 device_printf(pi->dev, 3218 "unknown port_type (%d), mod_type (%d)\n", 3219 pi->port_type, pi->mod_type); 3220 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3221 ifmedia_set(media, m | IFM_UNKNOWN); 3222 break; 3223 } 3224 break; 3225 3226 case FW_PORT_TYPE_QSFP: 3227 switch (pi->mod_type) { 3228 3229 case FW_PORT_MOD_TYPE_LR: 3230 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3231 ifmedia_set(media, m | IFM_40G_LR4); 3232 break; 3233 3234 case FW_PORT_MOD_TYPE_SR: 3235 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3236 ifmedia_set(media, m | IFM_40G_SR4); 3237 break; 3238 3239 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3240 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3241 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3242 ifmedia_set(media, m | IFM_40G_CR4); 3243 break; 3244 3245 case FW_PORT_MOD_TYPE_NONE: 3246 m &= ~IFM_FDX; 3247 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3248 ifmedia_set(media, m | IFM_NONE); 3249 break; 3250 3251 default: 3252 device_printf(pi->dev, 3253 "unknown port_type (%d), mod_type (%d)\n", 3254 pi->port_type, pi->mod_type); 3255 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3256 ifmedia_set(media, m | IFM_UNKNOWN); 3257 break; 3258 } 3259 break; 3260 3261 default: 3262 device_printf(pi->dev, 3263 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3264 pi->mod_type); 3265 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3266 ifmedia_set(media, m | IFM_UNKNOWN); 3267 break; 3268 } 3269 3270 PORT_UNLOCK(pi); 3271 } 3272 3273 #define FW_MAC_EXACT_CHUNK 7 3274 3275 /* 3276 * Program the port's XGMAC based on parameters in ifnet. 
The caller also 3277 * indicates which parameters should be programmed (the rest are left alone). 3278 */ 3279 int 3280 update_mac_settings(struct ifnet *ifp, int flags) 3281 { 3282 int rc = 0; 3283 struct vi_info *vi = ifp->if_softc; 3284 struct port_info *pi = vi->pi; 3285 struct adapter *sc = pi->adapter; 3286 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3287 3288 ASSERT_SYNCHRONIZED_OP(sc); 3289 KASSERT(flags, ("%s: not told what to update.", __func__)); 3290 3291 if (flags & XGMAC_MTU) 3292 mtu = ifp->if_mtu; 3293 3294 if (flags & XGMAC_PROMISC) 3295 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3296 3297 if (flags & XGMAC_ALLMULTI) 3298 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3299 3300 if (flags & XGMAC_VLANEX) 3301 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 3302 3303 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3304 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3305 allmulti, 1, vlanex, false); 3306 if (rc) { 3307 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3308 rc); 3309 return (rc); 3310 } 3311 } 3312 3313 if (flags & XGMAC_UCADDR) { 3314 uint8_t ucaddr[ETHER_ADDR_LEN]; 3315 3316 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3317 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3318 ucaddr, true, true); 3319 if (rc < 0) { 3320 rc = -rc; 3321 if_printf(ifp, "change_mac failed: %d\n", rc); 3322 return (rc); 3323 } else { 3324 vi->xact_addr_filt = rc; 3325 rc = 0; 3326 } 3327 } 3328 3329 if (flags & XGMAC_MCADDRS) { 3330 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3331 int del = 1; 3332 uint64_t hash = 0; 3333 struct ifmultiaddr *ifma; 3334 int i = 0, j; 3335 3336 if_maddr_rlock(ifp); 3337 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3338 if (ifma->ifma_addr->sa_family != AF_LINK) 3339 continue; 3340 mcaddr[i] = 3341 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3342 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3343 i++; 3344 3345 if (i == FW_MAC_EXACT_CHUNK) { 3346 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3347 del, i, mcaddr, NULL, &hash, 0); 3348 if (rc < 0) { 3349 rc = -rc; 3350 for (j = 0; j < i; j++) { 3351 if_printf(ifp, 3352 "failed to add mc address" 3353 " %02x:%02x:%02x:" 3354 "%02x:%02x:%02x rc=%d\n", 3355 mcaddr[j][0], mcaddr[j][1], 3356 mcaddr[j][2], mcaddr[j][3], 3357 mcaddr[j][4], mcaddr[j][5], 3358 rc); 3359 } 3360 goto mcfail; 3361 } 3362 del = 0; 3363 i = 0; 3364 } 3365 } 3366 if (i > 0) { 3367 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3368 mcaddr, NULL, &hash, 0); 3369 if (rc < 0) { 3370 rc = -rc; 3371 for (j = 0; j < i; j++) { 3372 if_printf(ifp, 3373 "failed to add mc address" 3374 " %02x:%02x:%02x:" 3375 "%02x:%02x:%02x rc=%d\n", 3376 mcaddr[j][0], mcaddr[j][1], 3377 mcaddr[j][2], mcaddr[j][3], 3378 mcaddr[j][4], mcaddr[j][5], 3379 rc); 3380 } 3381 goto mcfail; 3382 } 3383 } 3384 3385 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3386 if (rc != 0) 3387 if_printf(ifp, "failed to set mc address hash: %d", rc); 3388 mcfail: 3389 if_maddr_runlock(ifp); 3390 } 3391 3392 return (rc); 3393 } 3394 3395 /* 3396 * {begin|end}_synchronized_op must be called from the same thread. 3397 */ 3398 int 3399 begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3400 char *wmesg) 3401 { 3402 int rc, pri; 3403 3404 #ifdef WITNESS 3405 /* the caller thinks it's ok to sleep, but is it really? 
*/ 3406 if (flags & SLEEP_OK) 3407 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3408 "begin_synchronized_op"); 3409 #endif 3410 3411 if (flags & INTR_OK) 3412 pri = PCATCH; 3413 else 3414 pri = 0; 3415 3416 ADAPTER_LOCK(sc); 3417 for (;;) { 3418 3419 if (vi && IS_DOOMED(vi)) { 3420 rc = ENXIO; 3421 goto done; 3422 } 3423 3424 if (!IS_BUSY(sc)) { 3425 rc = 0; 3426 break; 3427 } 3428 3429 if (!(flags & SLEEP_OK)) { 3430 rc = EBUSY; 3431 goto done; 3432 } 3433 3434 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3435 rc = EINTR; 3436 goto done; 3437 } 3438 } 3439 3440 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3441 SET_BUSY(sc); 3442 #ifdef INVARIANTS 3443 sc->last_op = wmesg; 3444 sc->last_op_thr = curthread; 3445 sc->last_op_flags = flags; 3446 #endif 3447 3448 done: 3449 if (!(flags & HOLD_LOCK) || rc) 3450 ADAPTER_UNLOCK(sc); 3451 3452 return (rc); 3453 } 3454 3455 /* 3456 * Tell if_ioctl and if_init that the VI is going away. This is a 3457 * special variant of begin_synchronized_op and must be paired with a 3458 * call to end_synchronized_op. 3459 */ 3460 void 3461 doom_vi(struct adapter *sc, struct vi_info *vi) 3462 { 3463 3464 ADAPTER_LOCK(sc); 3465 SET_DOOMED(vi); 3466 wakeup(&sc->flags); 3467 while (IS_BUSY(sc)) 3468 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3469 SET_BUSY(sc); 3470 #ifdef INVARIANTS 3471 sc->last_op = "t4detach"; 3472 sc->last_op_thr = curthread; 3473 sc->last_op_flags = 0; 3474 #endif 3475 ADAPTER_UNLOCK(sc); 3476 } 3477 3478 /* 3479 * {begin|end}_synchronized_op must be called from the same thread. 3480 */ 3481 void 3482 end_synchronized_op(struct adapter *sc, int flags) 3483 { 3484 3485 if (flags & LOCK_HELD) 3486 ADAPTER_LOCK_ASSERT_OWNED(sc); 3487 else 3488 ADAPTER_LOCK(sc); 3489 3490 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3491 CLR_BUSY(sc); 3492 wakeup(&sc->flags); 3493 ADAPTER_UNLOCK(sc); 3494 } 3495 3496 static int 3497 cxgbe_init_synchronized(struct vi_info *vi) 3498 { 3499 struct port_info *pi = vi->pi; 3500 struct adapter *sc = pi->adapter; 3501 struct ifnet *ifp = vi->ifp; 3502 int rc = 0, i; 3503 struct sge_txq *txq; 3504 3505 ASSERT_SYNCHRONIZED_OP(sc); 3506 3507 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3508 return (0); /* already running */ 3509 3510 if (!(sc->flags & FULL_INIT_DONE) && 3511 ((rc = adapter_full_init(sc)) != 0)) 3512 return (rc); /* error message displayed already */ 3513 3514 if (!(vi->flags & VI_INIT_DONE) && 3515 ((rc = vi_full_init(vi)) != 0)) 3516 return (rc); /* error message displayed already */ 3517 3518 rc = update_mac_settings(ifp, XGMAC_ALL); 3519 if (rc) 3520 goto done; /* error message displayed already */ 3521 3522 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 3523 if (rc != 0) { 3524 if_printf(ifp, "enable_vi failed: %d\n", rc); 3525 goto done; 3526 } 3527 3528 /* 3529 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3530 * if this changes. 3531 */ 3532 3533 for_each_txq(vi, i, txq) { 3534 TXQ_LOCK(txq); 3535 txq->eq.flags |= EQ_ENABLED; 3536 TXQ_UNLOCK(txq); 3537 } 3538 3539 /* 3540 * The first iq of the first port to come up is used for tracing. 3541 */ 3542 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 3543 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 3544 t4_write_reg(sc, is_t4(sc) ?
A_MPS_TRC_RSS_CONTROL : 3545 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3546 V_QUEUENUMBER(sc->traceq)); 3547 pi->flags |= HAS_TRACEQ; 3548 } 3549 3550 /* all ok */ 3551 PORT_LOCK(pi); 3552 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3553 pi->up_vis++; 3554 3555 if (pi->nvi > 1) 3556 callout_reset(&vi->tick, hz, vi_tick, vi); 3557 else 3558 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3559 PORT_UNLOCK(pi); 3560 done: 3561 if (rc != 0) 3562 cxgbe_uninit_synchronized(vi); 3563 3564 return (rc); 3565 } 3566 3567 /* 3568 * Idempotent. 3569 */ 3570 static int 3571 cxgbe_uninit_synchronized(struct vi_info *vi) 3572 { 3573 struct port_info *pi = vi->pi; 3574 struct adapter *sc = pi->adapter; 3575 struct ifnet *ifp = vi->ifp; 3576 int rc, i; 3577 struct sge_txq *txq; 3578 3579 ASSERT_SYNCHRONIZED_OP(sc); 3580 3581 if (!(vi->flags & VI_INIT_DONE)) { 3582 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3583 ("uninited VI is running")); 3584 return (0); 3585 } 3586 3587 /* 3588 * Disable the VI so that all its data in either direction is discarded 3589 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 3590 * tick) intact as the TP can deliver negative advice or data that it's 3591 * holding in its RAM (for an offloaded connection) even after the VI is 3592 * disabled. 3593 */ 3594 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 3595 if (rc) { 3596 if_printf(ifp, "disable_vi failed: %d\n", rc); 3597 return (rc); 3598 } 3599 3600 for_each_txq(vi, i, txq) { 3601 TXQ_LOCK(txq); 3602 txq->eq.flags &= ~EQ_ENABLED; 3603 TXQ_UNLOCK(txq); 3604 } 3605 3606 PORT_LOCK(pi); 3607 if (pi->nvi == 1) 3608 callout_stop(&pi->tick); 3609 else 3610 callout_stop(&vi->tick); 3611 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3612 PORT_UNLOCK(pi); 3613 return (0); 3614 } 3615 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3616 pi->up_vis--; 3617 if (pi->up_vis > 0) { 3618 PORT_UNLOCK(pi); 3619 return (0); 3620 } 3621 PORT_UNLOCK(pi); 3622 3623 pi->link_cfg.link_ok = 0; 3624 pi->link_cfg.speed = 0; 3625 pi->linkdnrc = -1; 3626 t4_os_link_changed(sc, pi->port_id, 0, -1); 3627 3628 return (0); 3629 } 3630 3631 /* 3632 * It is ok for this function to fail midway and return right away. t4_detach 3633 * will walk the entire sc->irq list and clean up whatever is valid. 3634 */ 3635 static int 3636 setup_intr_handlers(struct adapter *sc) 3637 { 3638 int rc, rid, p, q, v; 3639 char s[8]; 3640 struct irq *irq; 3641 struct port_info *pi; 3642 struct vi_info *vi; 3643 struct sge_rxq *rxq; 3644 #ifdef TCP_OFFLOAD 3645 struct sge_ofld_rxq *ofld_rxq; 3646 #endif 3647 #ifdef DEV_NETMAP 3648 struct sge_nm_rxq *nm_rxq; 3649 #endif 3650 #ifdef RSS 3651 int nbuckets = rss_getnumbuckets(); 3652 #endif 3653 3654 /* 3655 * Setup interrupts. 3656 */ 3657 irq = &sc->irq[0]; 3658 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3659 if (sc->intr_count == 1) 3660 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3661 3662 /* Multiple interrupts. 
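 *
 * Vector layout from here on: vector 0 handles errors, vector 1 the
 * firmware event queue, and the rest are handed out one per rx queue,
 * port by port.  E.g. (hypothetical) a 2-port NIC-only setup with 4 rx
 * queues per port consumes 2 + 2 * 4 = 10 vectors.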
*/ 3663 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3664 ("%s: too few intr.", __func__)); 3665 3666 /* The first one is always error intr */ 3667 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3668 if (rc != 0) 3669 return (rc); 3670 irq++; 3671 rid++; 3672 3673 /* The second one is always the firmware event queue */ 3674 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq, "evt"); 3675 if (rc != 0) 3676 return (rc); 3677 irq++; 3678 rid++; 3679 3680 for_each_port(sc, p) { 3681 pi = sc->port[p]; 3682 for_each_vi(pi, v, vi) { 3683 vi->first_intr = rid - 1; 3684 #ifdef DEV_NETMAP 3685 if (vi->flags & VI_NETMAP) { 3686 for_each_nm_rxq(vi, q, nm_rxq) { 3687 snprintf(s, sizeof(s), "%d-%d", p, q); 3688 rc = t4_alloc_irq(sc, irq, rid, 3689 t4_nm_intr, nm_rxq, s); 3690 if (rc != 0) 3691 return (rc); 3692 irq++; 3693 rid++; 3694 vi->nintr++; 3695 } 3696 continue; 3697 } 3698 #endif 3699 if (vi->flags & INTR_RXQ) { 3700 for_each_rxq(vi, q, rxq) { 3701 if (v == 0) 3702 snprintf(s, sizeof(s), "%d.%d", 3703 p, q); 3704 else 3705 snprintf(s, sizeof(s), 3706 "%d(%d).%d", p, v, q); 3707 rc = t4_alloc_irq(sc, irq, rid, 3708 t4_intr, rxq, s); 3709 if (rc != 0) 3710 return (rc); 3711 #ifdef RSS 3712 bus_bind_intr(sc->dev, irq->res, 3713 rss_getcpu(q % nbuckets)); 3714 #endif 3715 irq++; 3716 rid++; 3717 vi->nintr++; 3718 } 3719 } 3720 #ifdef TCP_OFFLOAD 3721 if (vi->flags & INTR_OFLD_RXQ) { 3722 for_each_ofld_rxq(vi, q, ofld_rxq) { 3723 snprintf(s, sizeof(s), "%d,%d", p, q); 3724 rc = t4_alloc_irq(sc, irq, rid, 3725 t4_intr, ofld_rxq, s); 3726 if (rc != 0) 3727 return (rc); 3728 irq++; 3729 rid++; 3730 vi->nintr++; 3731 } 3732 } 3733 #endif 3734 } 3735 } 3736 MPASS(irq == &sc->irq[sc->intr_count]); 3737 3738 return (0); 3739 } 3740 3741 int 3742 adapter_full_init(struct adapter *sc) 3743 { 3744 int rc, i; 3745 3746 ASSERT_SYNCHRONIZED_OP(sc); 3747 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3748 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 3749 ("%s: FULL_INIT_DONE already", __func__)); 3750 3751 /* 3752 * queues that belong to the adapter (not any particular port). 3753 */ 3754 rc = t4_setup_adapter_queues(sc); 3755 if (rc != 0) 3756 goto done; 3757 3758 for (i = 0; i < nitems(sc->tq); i++) { 3759 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 3760 taskqueue_thread_enqueue, &sc->tq[i]); 3761 if (sc->tq[i] == NULL) { 3762 device_printf(sc->dev, 3763 "failed to allocate task queue %d\n", i); 3764 rc = ENOMEM; 3765 goto done; 3766 } 3767 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 3768 device_get_nameunit(sc->dev), i); 3769 } 3770 3771 t4_intr_enable(sc); 3772 sc->flags |= FULL_INIT_DONE; 3773 done: 3774 if (rc != 0) 3775 adapter_full_uninit(sc); 3776 3777 return (rc); 3778 } 3779 3780 int 3781 adapter_full_uninit(struct adapter *sc) 3782 { 3783 int i; 3784 3785 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3786 3787 t4_teardown_adapter_queues(sc); 3788 3789 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 3790 taskqueue_free(sc->tq[i]); 3791 sc->tq[i] = NULL; 3792 } 3793 3794 sc->flags &= ~FULL_INIT_DONE; 3795 3796 return (0); 3797 } 3798 3799 #ifdef RSS 3800 #define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 3801 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 3802 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 3803 RSS_HASHTYPE_RSS_UDP_IPV6) 3804 3805 /* Translates kernel hash types to hardware. 
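 *
 * E.g. RSS_HASHTYPE_RSS_TCP_IPV4 becomes F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN.
 * The UDP types have no dedicated hardware bit: they map to the same
 * 4-tuple enables plus F_FW_RSS_VI_CONFIG_CMD_UDPEN, which is why the UDP
 * cases below set two bits.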
*/ 3806 static int 3807 hashconfig_to_hashen(int hashconfig) 3808 { 3809 int hashen = 0; 3810 3811 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 3812 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 3813 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 3814 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 3815 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 3816 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 3817 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 3818 } 3819 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 3820 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 3821 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 3822 } 3823 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 3824 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 3825 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 3826 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 3827 3828 return (hashen); 3829 } 3830 3831 /* Translates hardware hash types to kernel. */ 3832 static int 3833 hashen_to_hashconfig(int hashen) 3834 { 3835 int hashconfig = 0; 3836 3837 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 3838 /* 3839 * If UDP hashing was enabled it must have been enabled for 3840 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 3841 * enabling any 4-tuple hash is nonsense configuration. 3842 */ 3843 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 3844 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 3845 3846 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 3847 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 3848 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 3849 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 3850 } 3851 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 3852 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 3853 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 3854 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 3855 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 3856 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 3857 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 3858 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 3859 3860 return (hashconfig); 3861 } 3862 #endif 3863 3864 int 3865 vi_full_init(struct vi_info *vi) 3866 { 3867 struct adapter *sc = vi->pi->adapter; 3868 struct ifnet *ifp = vi->ifp; 3869 uint16_t *rss; 3870 struct sge_rxq *rxq; 3871 int rc, i, j, hashen; 3872 #ifdef RSS 3873 int nbuckets = rss_getnumbuckets(); 3874 int hashconfig = rss_gethashconfig(); 3875 int extra; 3876 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 3877 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 3878 #endif 3879 3880 ASSERT_SYNCHRONIZED_OP(sc); 3881 KASSERT((vi->flags & VI_INIT_DONE) == 0, 3882 ("%s: VI_INIT_DONE already", __func__)); 3883 3884 sysctl_ctx_init(&vi->ctx); 3885 vi->flags |= VI_SYSCTL_CTX; 3886 3887 /* 3888 * Allocate tx/rx/fl queues for this VI. 3889 */ 3890 rc = t4_setup_vi_queues(vi); 3891 if (rc != 0) 3892 goto done; /* error message displayed already */ 3893 3894 #ifdef DEV_NETMAP 3895 /* Netmap VIs configure RSS when netmap is enabled. */ 3896 if (vi->flags & VI_NETMAP) { 3897 vi->flags |= VI_INIT_DONE; 3898 return (0); 3899 } 3900 #endif 3901 3902 /* 3903 * Setup RSS for this VI. Save a copy of the RSS table for later use. 
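 *
 * Without kernel RSS the table below is filled round-robin with the
 * absolute IDs of this VI's rx queues; e.g. (hypothetical) rss_size = 64
 * and nrxq = 4 with abs_ids 10..13 gives 10,11,12,13,10,11,... repeated
 * 16 times.  With kernel RSS the indirection follows the kernel's
 * bucket-to-queue mapping instead.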
3904 */ 3905 if (vi->nrxq > vi->rss_size) { 3906 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 3907 "some queues will never receive traffic.\n", vi->nrxq, 3908 vi->rss_size); 3909 } else if (vi->rss_size % vi->nrxq) { 3910 if_printf(ifp, "nrxq (%d) does not divide hw RSS table size (%d); " 3911 "expect uneven traffic distribution.\n", vi->nrxq, 3912 vi->rss_size); 3913 } 3914 #ifdef RSS 3915 MPASS(RSS_KEYSIZE == 40); 3916 if (vi->nrxq != nbuckets) { 3917 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d); " 3918 "performance will be impacted.\n", vi->nrxq, nbuckets); 3919 } 3920 3921 rss_getkey((void *)&raw_rss_key[0]); 3922 for (i = 0; i < nitems(rss_key); i++) { 3923 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 3924 } 3925 t4_write_rss_key(sc, &rss_key[0], -1); 3926 #endif 3927 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 3928 for (i = 0; i < vi->rss_size;) { 3929 #ifdef RSS 3930 j = rss_get_indirection_to_bucket(i); 3931 j %= vi->nrxq; 3932 rxq = &sc->sge.rxq[vi->first_rxq + j]; 3933 rss[i++] = rxq->iq.abs_id; 3934 #else 3935 for_each_rxq(vi, j, rxq) { 3936 rss[i++] = rxq->iq.abs_id; 3937 if (i == vi->rss_size) 3938 break; 3939 } 3940 #endif 3941 } 3942 3943 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 3944 vi->rss_size); 3945 if (rc != 0) { 3946 if_printf(ifp, "rss_config failed: %d\n", rc); 3947 goto done; 3948 } 3949 3950 #ifdef RSS 3951 hashen = hashconfig_to_hashen(hashconfig); 3952 3953 /* 3954 * We may have had to enable some hashes even though the global config 3955 * wants them disabled. This is a potential problem that must be 3956 * reported to the user. 3957 */ 3958 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 3959 3960 /* 3961 * If we consider only the supported hash types, then the enabled hashes 3962 * are a superset of the requested hashes. In other words, there cannot 3963 * be any supported hash that was requested but not enabled, but there 3964 * can be hashes that were not requested but had to be enabled. 3965 */ 3966 extra &= SUPPORTED_RSS_HASHTYPES; 3967 MPASS((extra & hashconfig) == 0); 3968 3969 if (extra) { 3970 if_printf(ifp, 3971 "global RSS config (0x%x) cannot be accommodated.\n", 3972 hashconfig); 3973 } 3974 if (extra & RSS_HASHTYPE_RSS_IPV4) 3975 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 3976 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 3977 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 3978 if (extra & RSS_HASHTYPE_RSS_IPV6) 3979 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 3980 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 3981 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 3982 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 3983 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 3984 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 3985 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 3986 #else 3987 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 3988 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 3989 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 3990 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 3991 #endif 3992 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0]); 3993 if (rc != 0) { 3994 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 3995 goto done; 3996 } 3997 3998 vi->rss = rss; 3999 vi->flags |= VI_INIT_DONE; 4000 done: 4001 if (rc != 0) 4002 vi_full_uninit(vi); 4003 4004 return (rc); 4005 } 4006 4007 /* 4008 * Idempotent.
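* Safe to call repeatedly, or on a VI whose vi_full_init() failed
* partway through: the quiesce and free steps are guarded by
* VI_INIT_DONE and the flag is cleared on the way out.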
4009 */ 4010 int 4011 vi_full_uninit(struct vi_info *vi) 4012 { 4013 struct port_info *pi = vi->pi; 4014 struct adapter *sc = pi->adapter; 4015 int i; 4016 struct sge_rxq *rxq; 4017 struct sge_txq *txq; 4018 #ifdef TCP_OFFLOAD 4019 struct sge_ofld_rxq *ofld_rxq; 4020 struct sge_wrq *ofld_txq; 4021 #endif 4022 4023 if (vi->flags & VI_INIT_DONE) { 4024 4025 /* Need to quiesce queues. */ 4026 #ifdef DEV_NETMAP 4027 if (vi->flags & VI_NETMAP) 4028 goto skip; 4029 #endif 4030 4031 /* XXX: Only for the first VI? */ 4032 if (IS_MAIN_VI(vi)) 4033 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4034 4035 for_each_txq(vi, i, txq) { 4036 quiesce_txq(sc, txq); 4037 } 4038 4039 #ifdef TCP_OFFLOAD 4040 for_each_ofld_txq(vi, i, ofld_txq) { 4041 quiesce_wrq(sc, ofld_txq); 4042 } 4043 #endif 4044 4045 for_each_rxq(vi, i, rxq) { 4046 quiesce_iq(sc, &rxq->iq); 4047 quiesce_fl(sc, &rxq->fl); 4048 } 4049 4050 #ifdef TCP_OFFLOAD 4051 for_each_ofld_rxq(vi, i, ofld_rxq) { 4052 quiesce_iq(sc, &ofld_rxq->iq); 4053 quiesce_fl(sc, &ofld_rxq->fl); 4054 } 4055 #endif 4056 free(vi->rss, M_CXGBE); 4057 } 4058 #ifdef DEV_NETMAP 4059 skip: 4060 #endif 4061 4062 t4_teardown_vi_queues(vi); 4063 vi->flags &= ~VI_INIT_DONE; 4064 4065 return (0); 4066 } 4067 4068 static void 4069 quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4070 { 4071 struct sge_eq *eq = &txq->eq; 4072 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4073 4074 (void) sc; /* unused */ 4075 4076 #ifdef INVARIANTS 4077 TXQ_LOCK(txq); 4078 MPASS((eq->flags & EQ_ENABLED) == 0); 4079 TXQ_UNLOCK(txq); 4080 #endif 4081 4082 /* Wait for the mp_ring to empty. */ 4083 while (!mp_ring_is_idle(txq->r)) { 4084 mp_ring_check_drainage(txq->r, 0); 4085 pause("rquiesce", 1); 4086 } 4087 4088 /* Then wait for the hardware to finish. */ 4089 while (spg->cidx != htobe16(eq->pidx)) 4090 pause("equiesce", 1); 4091 4092 /* Finally, wait for the driver to reclaim all descriptors. 
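* (eq->cidx catches up with eq->pidx only once tx reclaim has
* processed every completed descriptor.)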
*/ 4093 while (eq->cidx != eq->pidx) 4094 pause("dquiesce", 1); 4095 } 4096 4097 static void 4098 quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4099 { 4100 4101 /* XXXTX */ 4102 } 4103 4104 static void 4105 quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4106 { 4107 (void) sc; /* unused */ 4108 4109 /* Synchronize with the interrupt handler */ 4110 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4111 pause("iqfree", 1); 4112 } 4113 4114 static void 4115 quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4116 { 4117 mtx_lock(&sc->sfl_lock); 4118 FL_LOCK(fl); 4119 fl->flags |= FL_DOOMED; 4120 FL_UNLOCK(fl); 4121 callout_stop(&sc->sfl_callout); 4122 mtx_unlock(&sc->sfl_lock); 4123 4124 KASSERT((fl->flags & FL_STARVING) == 0, 4125 ("%s: still starving", __func__)); 4126 } 4127 4128 static int 4129 t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4130 driver_intr_t *handler, void *arg, char *name) 4131 { 4132 int rc; 4133 4134 irq->rid = rid; 4135 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4136 RF_SHAREABLE | RF_ACTIVE); 4137 if (irq->res == NULL) { 4138 device_printf(sc->dev, 4139 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4140 return (ENOMEM); 4141 } 4142 4143 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4144 NULL, handler, arg, &irq->tag); 4145 if (rc != 0) { 4146 device_printf(sc->dev, 4147 "failed to set up interrupt for rid %d, name %s: %d\n", 4148 rid, name, rc); 4149 } else if (name) 4150 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 4151 4152 return (rc); 4153 } 4154 4155 static int 4156 t4_free_irq(struct adapter *sc, struct irq *irq) 4157 { 4158 if (irq->tag) 4159 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4160 if (irq->res) 4161 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4162 4163 bzero(irq, sizeof(*irq)); 4164 4165 return (0); 4166 } 4167 4168 static void 4169 get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4170 { 4171 4172 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4173 t4_get_regs(sc, buf, regs->len); 4174 } 4175 4176 #define A_PL_INDIR_CMD 0x1f8 4177 4178 #define S_PL_AUTOINC 31 4179 #define M_PL_AUTOINC 0x1U 4180 #define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4181 #define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4182 4183 #define S_PL_VFID 20 4184 #define M_PL_VFID 0xffU 4185 #define V_PL_VFID(x) ((x) << S_PL_VFID) 4186 #define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4187 4188 #define S_PL_ADDR 0 4189 #define M_PL_ADDR 0xfffffU 4190 #define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4191 #define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4192 4193 #define A_PL_INDIR_DATA 0x1fc 4194 4195 /* Read one 64-bit MPS VF statistic through the PL indirect window: with AUTOINC set, back-to-back reads of A_PL_INDIR_DATA return the low and then the high 32 bits. */ static uint64_t 4196 read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4197 { 4198 u32 stats[2]; 4199 4200 mtx_assert(&sc->reg_lock, MA_OWNED); 4201 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4202 V_PL_VFID(G_FW_VIID_VIN(viid)) | V_PL_ADDR(VF_MPS_REG(reg))); 4203 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4204 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4205 return (((uint64_t)stats[1]) << 32 | stats[0]); 4206 } 4207 4208 static void 4209 t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4210 struct fw_vi_stats_vf *stats) 4211 { 4212 4213 #define GET_STAT(name) \ 4214 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4215 4216 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4217 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4218 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4219
stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4220 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4221 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4222 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4223 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4224 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4225 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4226 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4227 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4228 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4229 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4230 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4231 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4232 4233 #undef GET_STAT 4234 } 4235 4236 static void 4237 t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4238 { 4239 int reg; 4240 4241 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4242 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4243 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4244 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4245 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4246 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4247 } 4248 4249 static void 4250 vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4251 { 4252 struct timeval tv; 4253 const struct timeval interval = {0, 250000}; /* 250ms */ 4254 4255 if (!(vi->flags & VI_INIT_DONE)) 4256 return; 4257 4258 getmicrotime(&tv); 4259 timevalsub(&tv, &interval); 4260 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4261 return; 4262 4263 mtx_lock(&sc->reg_lock); 4264 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4265 getmicrotime(&vi->last_refreshed); 4266 mtx_unlock(&sc->reg_lock); 4267 } 4268 4269 static void 4270 cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4271 { 4272 int i; 4273 u_int v, tnl_cong_drops; 4274 struct timeval tv; 4275 const struct timeval interval = {0, 250000}; /* 250ms */ 4276 4277 getmicrotime(&tv); 4278 timevalsub(&tv, &interval); 4279 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4280 return; 4281 4282 tnl_cong_drops = 0; 4283 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4284 for (i = 0; i < sc->chip_params->nchan; i++) { 4285 if (pi->rx_chan_map & (1 << i)) { 4286 mtx_lock(&sc->reg_lock); 4287 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4288 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4289 mtx_unlock(&sc->reg_lock); 4290 tnl_cong_drops += v; 4291 } 4292 } 4293 pi->tnl_cong_drops = tnl_cong_drops; 4294 getmicrotime(&pi->last_refreshed); 4295 } 4296 4297 static void 4298 cxgbe_tick(void *arg) 4299 { 4300 struct port_info *pi = arg; 4301 struct adapter *sc = pi->adapter; 4302 4303 PORT_LOCK_ASSERT_OWNED(pi); 4304 cxgbe_refresh_stats(sc, pi); 4305 4306 callout_schedule(&pi->tick, hz); 4307 } 4308 4309 void 4310 vi_tick(void *arg) 4311 { 4312 struct vi_info *vi = arg; 4313 struct adapter *sc = vi->pi->adapter; 4314 4315 vi_refresh_stats(sc, vi); 4316 4317 callout_schedule(&vi->tick, hz); 4318 } 4319 4320 static void 4321 cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4322 { 4323 struct ifnet *vlan; 4324 4325 if (arg != ifp || ifp->if_type != IFT_ETHER) 4326 return; 4327 4328 vlan = VLAN_DEVAT(ifp, vid); 4329 VLAN_SETCOOKIE(vlan, ifp); 4330 } 4331 4332 static int 4333 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4334 { 4335 4336 #ifdef INVARIANTS 4337 panic("%s: opcode 0x%02x on iq %p with payload %p", 4338 __func__, rss->opcode, iq, m); 4339 #else 4340 log(LOG_ERR, "%s: opcode 
0x%02x on iq %p with payload %p\n", 4341 __func__, rss->opcode, iq, m); 4342 m_freem(m); 4343 #endif 4344 return (EDOOFUS); 4345 } 4346 4347 int 4348 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 4349 { 4350 uintptr_t *loc, new; 4351 4352 if (opcode >= nitems(sc->cpl_handler)) 4353 return (EINVAL); 4354 4355 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled; 4356 loc = (uintptr_t *) &sc->cpl_handler[opcode]; 4357 atomic_store_rel_ptr(loc, new); 4358 4359 return (0); 4360 } 4361 4362 static int 4363 an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl) 4364 { 4365 4366 #ifdef INVARIANTS 4367 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl); 4368 #else 4369 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n", 4370 __func__, iq, ctrl); 4371 #endif 4372 return (EDOOFUS); 4373 } 4374 4375 int 4376 t4_register_an_handler(struct adapter *sc, an_handler_t h) 4377 { 4378 uintptr_t *loc, new; 4379 4380 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled; 4381 loc = (uintptr_t *) &sc->an_handler; 4382 atomic_store_rel_ptr(loc, new); 4383 4384 return (0); 4385 } 4386 4387 static int 4388 fw_msg_not_handled(struct adapter *sc, const __be64 *rpl) 4389 { 4390 const struct cpl_fw6_msg *cpl = 4391 __containerof(rpl, struct cpl_fw6_msg, data[0]); 4392 4393 #ifdef INVARIANTS 4394 panic("%s: fw_msg type %d", __func__, cpl->type); 4395 #else 4396 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type); 4397 #endif 4398 return (EDOOFUS); 4399 } 4400 4401 int 4402 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 4403 { 4404 uintptr_t *loc, new; 4405 4406 if (type >= nitems(sc->fw_msg_handler)) 4407 return (EINVAL); 4408 4409 /* 4410 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 4411 * handler dispatch table. Reject any attempt to install a handler for 4412 * these subtypes. 4413 */ 4414 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) 4415 return (EINVAL); 4416 4417 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled; 4418 loc = (uintptr_t *) &sc->fw_msg_handler[type]; 4419 atomic_store_rel_ptr(loc, new); 4420 4421 return (0); 4422 } 4423 4424 static void 4425 t4_sysctls(struct adapter *sc) 4426 { 4427 struct sysctl_ctx_list *ctx; 4428 struct sysctl_oid *oid; 4429 struct sysctl_oid_list *children, *c0; 4430 static char *caps[] = { 4431 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */ 4432 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL" /* caps[1] niccaps */ 4433 "\6HASHFILTER\7ETHOFLD", 4434 "\20\1TOE", /* caps[2] toecaps */ 4435 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */ 4436 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */ 4437 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD" 4438 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD", 4439 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */ 4440 "\4PO_INITIATOR\5PO_TARGET" 4441 }; 4442 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 4443 4444 ctx = device_get_sysctl_ctx(sc->dev); 4445 4446 /* 4447 * dev.t4nex.X.
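*
* (The caps[] and doorbells strings above are printf(9)-style "%b" bit
* descriptions: the first character gives the output base ('\20' = hex)
* and each subsequent group is a bit number followed by that bit's name.
* sysctl_bitfield() passes them straight to sbuf_printf's "%b".)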
4448 */ 4449 oid = device_get_sysctl_tree(sc->dev); 4450 c0 = children = SYSCTL_CHILDREN(oid); 4451 4452 sc->sc_do_rxcopy = 1; 4453 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4454 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4455 4456 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4457 sc->params.nports, "# of ports"); 4458 4459 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4460 NULL, chip_rev(sc), "chip hardware revision"); 4461 4462 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4463 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4464 4465 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4466 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4467 4468 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4469 sc->cfcsum, "config file checksum"); 4470 4471 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4472 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4473 sysctl_bitfield, "A", "available doorbells"); 4474 4475 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps", 4476 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps, 4477 sysctl_bitfield, "A", "available link capabilities"); 4478 4479 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps", 4480 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps, 4481 sysctl_bitfield, "A", "available NIC capabilities"); 4482 4483 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps", 4484 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps, 4485 sysctl_bitfield, "A", "available TCP offload capabilities"); 4486 4487 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps", 4488 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps, 4489 sysctl_bitfield, "A", "available RDMA capabilities"); 4490 4491 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps", 4492 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps, 4493 sysctl_bitfield, "A", "available iSCSI capabilities"); 4494 4495 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps", 4496 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps, 4497 sysctl_bitfield, "A", "available FCoE capabilities"); 4498 4499 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4500 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4501 4502 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4503 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 4504 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 4505 "interrupt holdoff timer values (us)"); 4506 4507 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4508 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 4509 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 4510 "interrupt holdoff packet counter values"); 4511 4512 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4513 NULL, sc->tids.nftids, "number of filters"); 4514 4515 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4516 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4517 "chip temperature (in Celsius)"); 4518 4519 t4_sge_sysctls(sc, ctx, children); 4520 4521 sc->lro_timeout = 100; 4522 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4523 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4524 4525 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_flags", CTLFLAG_RW, 4526 &sc->debug_flags, 0, "flags to enable runtime debugging"); 4527 4528 #ifdef SBUF_DRAIN 4529 /* 4530 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 
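*
* Every node in here dumps a potentially large block of text through
* an sbuf, which is why this whole section is compiled only when
* SBUF_DRAIN is available.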
4531 */ 4532 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4533 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4534 "logs and miscellaneous information"); 4535 children = SYSCTL_CHILDREN(oid); 4536 4537 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4538 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4539 sysctl_cctrl, "A", "congestion control"); 4540 4541 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4542 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4543 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4544 4545 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4546 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4547 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4548 4549 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4550 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4551 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4552 4553 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4554 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4555 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4556 4557 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4558 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4559 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4560 4561 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4562 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4563 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4564 4565 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4566 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4567 chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6, 4568 "A", "CIM logic analyzer"); 4569 4570 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4571 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4572 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4573 4574 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4575 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4576 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4577 4578 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4579 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4580 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4581 4582 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4583 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4584 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4585 4586 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4587 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4588 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4589 4590 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4591 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4592 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4593 4594 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4595 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4596 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 4597 4598 if (chip_id(sc) > CHELSIO_T4) { 4599 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4600 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4601 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4602 4603 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4604 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4605 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4606 } 4607 4608 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4609 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4610 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4611 4612 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4613 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4614 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4615 4616 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4617 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4618 sysctl_cpl_stats, "A", "CPL statistics"); 4619 
4620 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4621 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4622 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4623 4624 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4625 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4626 sysctl_devlog, "A", "firmware's device log"); 4627 4628 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4629 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4630 sysctl_fcoe_stats, "A", "FCoE statistics"); 4631 4632 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4633 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4634 sysctl_hw_sched, "A", "hardware scheduler"); 4635 4636 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4637 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4638 sysctl_l2t, "A", "hardware L2 table"); 4639 4640 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4641 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4642 sysctl_lb_stats, "A", "loopback statistics"); 4643 4644 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4645 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4646 sysctl_meminfo, "A", "memory regions"); 4647 4648 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4649 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4650 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6, 4651 "A", "MPS TCAM entries"); 4652 4653 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4654 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4655 sysctl_path_mtus, "A", "path MTUs"); 4656 4657 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4658 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4659 sysctl_pm_stats, "A", "PM statistics"); 4660 4661 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4662 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4663 sysctl_rdma_stats, "A", "RDMA statistics"); 4664 4665 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4666 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4667 sysctl_tcp_stats, "A", "TCP statistics"); 4668 4669 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4670 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4671 sysctl_tids, "A", "TID information"); 4672 4673 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4674 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4675 sysctl_tp_err_stats, "A", "TP error statistics"); 4676 4677 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4678 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4679 sysctl_tp_la, "A", "TP logic analyzer"); 4680 4681 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4682 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4683 sysctl_tx_rate, "A", "Tx rate"); 4684 4685 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4686 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4687 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4688 4689 if (is_t5(sc)) { 4690 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4691 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4692 sysctl_wcwr_stats, "A", "write combined work requests"); 4693 } 4694 #endif 4695 4696 #ifdef TCP_OFFLOAD 4697 if (is_offload(sc)) { 4698 /* 4699 * dev.t4nex.X.toe.
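*
* Runtime TOE knobs. This node is created only when the adapter is
* offload capable (see the is_offload() check above).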
*/ 4700 4701 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4702 NULL, "TOE parameters"); 4703 children = SYSCTL_CHILDREN(oid); 4704 4705 sc->tt.sndbuf = 256 * 1024; 4706 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4707 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4708 4709 sc->tt.ddp = 0; 4710 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4711 &sc->tt.ddp, 0, "DDP allowed"); 4712 4713 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 4714 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 4715 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 4716 4717 sc->tt.ddp_thres = 4718 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 4719 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 4720 &sc->tt.ddp_thres, 0, "DDP threshold"); 4721 4722 sc->tt.rx_coalesce = 1; 4723 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 4724 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 4725 4726 sc->tt.tx_align = 1; 4727 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 4728 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 4729 } 4730 #endif 4731 } 4732 4733 void 4734 vi_sysctls(struct vi_info *vi) 4735 { 4736 struct sysctl_ctx_list *ctx; 4737 struct sysctl_oid *oid; 4738 struct sysctl_oid_list *children; 4739 4740 ctx = device_get_sysctl_ctx(vi->dev); 4741 4742 /* 4743 * dev.[nv](cxgbe|cxl).X. 4744 */ 4745 oid = device_get_sysctl_tree(vi->dev); 4746 children = SYSCTL_CHILDREN(oid); 4747 4748 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 4749 vi->viid, "VI identifier"); 4750 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 4751 &vi->nrxq, 0, "# of rx queues"); 4752 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 4753 &vi->ntxq, 0, "# of tx queues"); 4754 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 4755 &vi->first_rxq, 0, "index of first rx queue"); 4756 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 4757 &vi->first_txq, 0, "index of first tx queue"); 4758 4759 if (vi->flags & VI_NETMAP) 4760 return; 4761 4762 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT | 4763 CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 4764 "Reserve queue 0 for non-flowid packets"); 4765 4766 #ifdef TCP_OFFLOAD 4767 if (vi->nofldrxq != 0) { 4768 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 4769 &vi->nofldrxq, 0, 4770 "# of rx queues for offloaded TCP connections"); 4771 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 4772 &vi->nofldtxq, 0, 4773 "# of tx queues for offloaded TCP connections"); 4774 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 4775 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 4776 "index of first TOE rx queue"); 4777 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 4778 CTLFLAG_RD, &vi->first_ofld_txq, 0, 4779 "index of first TOE tx queue"); 4780 } 4781 #endif 4782 4783 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 4784 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 4785 "holdoff timer index"); 4786 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 4787 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 4788 "holdoff packet counter index"); 4789 4790 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 4791 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 4792 "rx queue size"); 4793 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 4794 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 4795
"tx queue size"); 4796 } 4797 4798 static void 4799 cxgbe_sysctls(struct port_info *pi) 4800 { 4801 struct sysctl_ctx_list *ctx; 4802 struct sysctl_oid *oid; 4803 struct sysctl_oid_list *children; 4804 struct adapter *sc = pi->adapter; 4805 4806 ctx = device_get_sysctl_ctx(pi->dev); 4807 4808 /* 4809 * dev.cxgbe.X. 4810 */ 4811 oid = device_get_sysctl_tree(pi->dev); 4812 children = SYSCTL_CHILDREN(oid); 4813 4814 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 4815 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 4816 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 4817 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 4818 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 4819 "PHY temperature (in Celsius)"); 4820 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 4821 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 4822 "PHY firmware version"); 4823 } 4824 4825 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 4826 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings, 4827 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 4828 4829 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 4830 port_top_speed(pi), "max speed (in Gbps)"); 4831 4832 /* 4833 * dev.cxgbe.X.stats. 4834 */ 4835 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 4836 NULL, "port statistics"); 4837 children = SYSCTL_CHILDREN(oid); 4838 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 4839 &pi->tx_parse_error, 0, 4840 "# of tx packets with invalid length or # of segments"); 4841 4842 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 4843 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 4844 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 4845 sysctl_handle_t4_reg64, "QU", desc) 4846 4847 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 4848 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 4849 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 4850 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 4851 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 4852 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 4853 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 4854 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 4855 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 4856 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 4857 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 4858 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 4859 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 4860 "# of tx frames in this range", 4861 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 4862 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 4863 "# of tx frames in this range", 4864 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 4865 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 4866 "# of tx frames in this range", 4867 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 4868 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 4869 "# of tx frames in this range", 4870 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 4871 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 4872 "# of tx frames in this range", 4873 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 4874 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 4875 "# of tx frames in this range", 4876 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 4877 SYSCTL_ADD_T4_REG64(pi, 
"tx_frames_1519_max", 4878 "# of tx frames in this range", 4879 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 4880 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 4881 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 4882 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 4883 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 4884 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 4885 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 4886 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 4887 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 4888 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 4889 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 4890 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 4891 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 4892 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 4893 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 4894 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 4895 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 4896 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted", 4897 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 4898 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 4899 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 4900 4901 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 4902 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 4903 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 4904 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 4905 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 4906 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 4907 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 4908 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 4909 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 4910 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 4911 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 4912 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 4913 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 4914 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 4915 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 4916 "# of frames received with bad FCS", 4917 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 4918 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 4919 "# of frames received with length error", 4920 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 4921 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 4922 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 4923 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 4924 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 4925 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 4926 "# of rx frames in this range", 4927 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 4928 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 4929 "# of rx frames in this range", 4930 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 4931 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 4932 "# of rx frames in this range", 4933 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 4934 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 4935 "# of rx frames in this range", 4936 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 4937 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 4938 "# of rx frames in this range", 4939 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 4940 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 4941 "# of rx frames in this range", 4942 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 4943 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 4944 "# of rx frames in this range", 4945 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 4946 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 4947 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 4948 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 4949 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 4950 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 4951 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 4952 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 4953 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 4954 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 4955 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 4956 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 4957 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 4958 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 4959 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 4960 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 4961 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 4962 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 4963 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 4964 4965 #undef SYSCTL_ADD_T4_REG64 4966 4967 #define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 4968 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 4969 &pi->stats.name, desc) 4970 4971 /* We get these from port_stats and they may be stale by up to 1s */ 4972 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 4973 "# drops due to buffer-group 0 overflows"); 4974 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 4975 "# drops due to buffer-group 1 overflows"); 4976 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 4977 "# drops due to buffer-group 2 overflows"); 4978 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 4979 "# drops due to buffer-group 3 overflows"); 4980 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 4981 "# of buffer-group 0 truncated packets"); 4982 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 4983 "# of buffer-group 1 truncated packets"); 4984 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 4985 "# of buffer-group 2 truncated packets"); 4986 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 4987 "# of buffer-group 3 truncated packets"); 4988 4989 #undef SYSCTL_ADD_T4_PORTSTAT 4990 } 4991 4992 static int 4993 sysctl_int_array(SYSCTL_HANDLER_ARGS) 4994 { 4995 int rc, *i, space = 0; 4996 struct sbuf sb; 4997 4998 sbuf_new_for_sysctl(&sb, NULL, 64, req); 4999 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5000 if (space) 5001 sbuf_printf(&sb, " "); 5002 sbuf_printf(&sb, "%d", *i); 5003 space = 1; 5004 } 5005 rc = sbuf_finish(&sb); 5006 sbuf_delete(&sb); 5007 return (rc); 5008 } 5009 5010 static int 5011 sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5012 { 5013 int rc; 5014 struct sbuf *sb; 5015 5016 rc = sysctl_wire_old_buffer(req, 0); 5017 if (rc != 0) 5018 return (rc); 5019 5020 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5021 if (sb == NULL) 5022 return (ENOMEM); 5023 5024 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5025 rc = sbuf_finish(sb); 5026
sbuf_delete(sb); 5027 5028 return (rc); 5029 } 5030 5031 static int 5032 sysctl_btphy(SYSCTL_HANDLER_ARGS) 5033 { 5034 struct port_info *pi = arg1; 5035 int op = arg2; 5036 struct adapter *sc = pi->adapter; 5037 u_int v; 5038 int rc; 5039 5040 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5041 if (rc) 5042 return (rc); 5043 /* XXX: magic numbers */ 5044 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, 5045 &v); 5046 end_synchronized_op(sc, 0); 5047 if (rc) 5048 return (rc); 5049 if (op == 0) 5050 v /= 256; 5051 5052 rc = sysctl_handle_int(oidp, &v, 0, req); 5053 return (rc); 5054 } 5055 5056 static int 5057 sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5058 { 5059 struct vi_info *vi = arg1; 5060 int rc, val; 5061 5062 val = vi->rsrv_noflowq; 5063 rc = sysctl_handle_int(oidp, &val, 0, req); 5064 if (rc != 0 || req->newptr == NULL) 5065 return (rc); 5066 5067 if ((val >= 1) && (vi->ntxq > 1)) 5068 vi->rsrv_noflowq = 1; 5069 else 5070 vi->rsrv_noflowq = 0; 5071 5072 return (rc); 5073 } 5074 5075 static int 5076 sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5077 { 5078 struct vi_info *vi = arg1; 5079 struct adapter *sc = vi->pi->adapter; 5080 int idx, rc, i; 5081 struct sge_rxq *rxq; 5082 #ifdef TCP_OFFLOAD 5083 struct sge_ofld_rxq *ofld_rxq; 5084 #endif 5085 uint8_t v; 5086 5087 idx = vi->tmr_idx; 5088 5089 rc = sysctl_handle_int(oidp, &idx, 0, req); 5090 if (rc != 0 || req->newptr == NULL) 5091 return (rc); 5092 5093 if (idx < 0 || idx >= SGE_NTIMERS) 5094 return (EINVAL); 5095 5096 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5097 "t4tmr"); 5098 if (rc) 5099 return (rc); 5100 5101 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5102 for_each_rxq(vi, i, rxq) { 5103 #ifdef atomic_store_rel_8 5104 atomic_store_rel_8(&rxq->iq.intr_params, v); 5105 #else 5106 rxq->iq.intr_params = v; 5107 #endif 5108 } 5109 #ifdef TCP_OFFLOAD 5110 for_each_ofld_rxq(vi, i, ofld_rxq) { 5111 #ifdef atomic_store_rel_8 5112 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5113 #else 5114 ofld_rxq->iq.intr_params = v; 5115 #endif 5116 } 5117 #endif 5118 vi->tmr_idx = idx; 5119 5120 end_synchronized_op(sc, LOCK_HELD); 5121 return (0); 5122 } 5123 5124 static int 5125 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5126 { 5127 struct vi_info *vi = arg1; 5128 struct adapter *sc = vi->pi->adapter; 5129 int idx, rc; 5130 5131 idx = vi->pktc_idx; 5132 5133 rc = sysctl_handle_int(oidp, &idx, 0, req); 5134 if (rc != 0 || req->newptr == NULL) 5135 return (rc); 5136 5137 if (idx < -1 || idx >= SGE_NCOUNTERS) 5138 return (EINVAL); 5139 5140 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5141 "t4pktc"); 5142 if (rc) 5143 return (rc); 5144 5145 if (vi->flags & VI_INIT_DONE) 5146 rc = EBUSY; /* cannot be changed once the queues are created */ 5147 else 5148 vi->pktc_idx = idx; 5149 5150 end_synchronized_op(sc, LOCK_HELD); 5151 return (rc); 5152 } 5153 5154 static int 5155 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5156 { 5157 struct vi_info *vi = arg1; 5158 struct adapter *sc = vi->pi->adapter; 5159 int qsize, rc; 5160 5161 qsize = vi->qsize_rxq; 5162 5163 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5164 if (rc != 0 || req->newptr == NULL) 5165 return (rc); 5166 5167 if (qsize < 128 || (qsize & 7)) 5168 return (EINVAL); 5169 5170 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5171 "t4rxqs"); 5172 if (rc) 5173 return (rc); 5174 5175 if (vi->flags & VI_INIT_DONE) 5176 rc = EBUSY; /* cannot be changed once the queues are 
created */ 5177 else 5178 vi->qsize_rxq = qsize; 5179 5180 end_synchronized_op(sc, LOCK_HELD); 5181 return (rc); 5182 } 5183 5184 static int 5185 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5186 { 5187 struct vi_info *vi = arg1; 5188 struct adapter *sc = vi->pi->adapter; 5189 int qsize, rc; 5190 5191 qsize = vi->qsize_txq; 5192 5193 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5194 if (rc != 0 || req->newptr == NULL) 5195 return (rc); 5196 5197 if (qsize < 128 || qsize > 65536) 5198 return (EINVAL); 5199 5200 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5201 "t4txqs"); 5202 if (rc) 5203 return (rc); 5204 5205 if (vi->flags & VI_INIT_DONE) 5206 rc = EBUSY; /* cannot be changed once the queues are created */ 5207 else 5208 vi->qsize_txq = qsize; 5209 5210 end_synchronized_op(sc, LOCK_HELD); 5211 return (rc); 5212 } 5213 5214 static int 5215 sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5216 { 5217 struct port_info *pi = arg1; 5218 struct adapter *sc = pi->adapter; 5219 struct link_config *lc = &pi->link_cfg; 5220 int rc; 5221 5222 if (req->newptr == NULL) { 5223 struct sbuf *sb; 5224 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5225 5226 rc = sysctl_wire_old_buffer(req, 0); 5227 if (rc != 0) 5228 return (rc); 5229 5230 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5231 if (sb == NULL) 5232 return (ENOMEM); 5233 5234 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5235 rc = sbuf_finish(sb); 5236 sbuf_delete(sb); 5237 } else { 5238 char s[2]; 5239 int n; 5240 5241 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5242 s[1] = 0; 5243 5244 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5245 if (rc != 0) 5246 return (rc); 5247 5248 if (s[1] != 0) 5249 return (EINVAL); 5250 if (s[0] < '0' || s[0] > '9') 5251 return (EINVAL); /* not a number */ 5252 n = s[0] - '0'; 5253 if (n & ~(PAUSE_TX | PAUSE_RX)) 5254 return (EINVAL); /* some other bit is set too */ 5255 5256 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5257 "t4PAUSE"); 5258 if (rc) 5259 return (rc); 5260 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5261 int link_ok = lc->link_ok; 5262 5263 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5264 lc->requested_fc |= n; 5265 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5266 lc->link_ok = link_ok; /* restore */ 5267 } 5268 end_synchronized_op(sc, 0); 5269 } 5270 5271 return (rc); 5272 } 5273 5274 static int 5275 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5276 { 5277 struct adapter *sc = arg1; 5278 int reg = arg2; 5279 uint64_t val; 5280 5281 val = t4_read_reg64(sc, reg); 5282 5283 return (sysctl_handle_64(oidp, &val, 0, req)); 5284 } 5285 5286 static int 5287 sysctl_temperature(SYSCTL_HANDLER_ARGS) 5288 { 5289 struct adapter *sc = arg1; 5290 int rc, t; 5291 uint32_t param, val; 5292 5293 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5294 if (rc) 5295 return (rc); 5296 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5297 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5298 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5299 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 5300 end_synchronized_op(sc, 0); 5301 if (rc) 5302 return (rc); 5303 5304 /* unknown is returned as 0 but we display -1 in that case */ 5305 t = val == 0 ?
-1 : val; 5306 5307 rc = sysctl_handle_int(oidp, &t, 0, req); 5308 return (rc); 5309 } 5310 5311 #ifdef SBUF_DRAIN 5312 static int 5313 sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5314 { 5315 struct adapter *sc = arg1; 5316 struct sbuf *sb; 5317 int rc, i; 5318 uint16_t incr[NMTUS][NCCTRL_WIN]; 5319 static const char *dec_fac[] = { 5320 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5321 "0.9375" 5322 }; 5323 5324 rc = sysctl_wire_old_buffer(req, 0); 5325 if (rc != 0) 5326 return (rc); 5327 5328 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5329 if (sb == NULL) 5330 return (ENOMEM); 5331 5332 t4_read_cong_tbl(sc, incr); 5333 5334 for (i = 0; i < NCCTRL_WIN; ++i) { 5335 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5336 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5337 incr[5][i], incr[6][i], incr[7][i]); 5338 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5339 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5340 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5341 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5342 } 5343 5344 rc = sbuf_finish(sb); 5345 sbuf_delete(sb); 5346 5347 return (rc); 5348 } 5349 5350 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5351 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5352 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5353 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5354 }; 5355 5356 static int 5357 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5358 { 5359 struct adapter *sc = arg1; 5360 struct sbuf *sb; 5361 int rc, i, n, qid = arg2; 5362 uint32_t *buf, *p; 5363 char *qtype; 5364 u_int cim_num_obq = sc->chip_params->cim_num_obq; 5365 5366 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5367 ("%s: bad qid %d\n", __func__, qid)); 5368 5369 if (qid < CIM_NUM_IBQ) { 5370 /* inbound queue */ 5371 qtype = "IBQ"; 5372 n = 4 * CIM_IBQ_SIZE; 5373 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5374 rc = t4_read_cim_ibq(sc, qid, buf, n); 5375 } else { 5376 /* outbound queue */ 5377 qtype = "OBQ"; 5378 qid -= CIM_NUM_IBQ; 5379 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5380 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5381 rc = t4_read_cim_obq(sc, qid, buf, n); 5382 } 5383 5384 if (rc < 0) { 5385 rc = -rc; 5386 goto done; 5387 } 5388 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5389 5390 rc = sysctl_wire_old_buffer(req, 0); 5391 if (rc != 0) 5392 goto done; 5393 5394 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5395 if (sb == NULL) { 5396 rc = ENOMEM; 5397 goto done; 5398 } 5399 5400 sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]); 5401 for (i = 0, p = buf; i < n; i += 16, p += 4) 5402 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5403 p[2], p[3]); 5404 5405 rc = sbuf_finish(sb); 5406 sbuf_delete(sb); 5407 done: 5408 free(buf, M_CXGBE); 5409 return (rc); 5410 } 5411 5412 static int 5413 sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5414 { 5415 struct adapter *sc = arg1; 5416 u_int cfg; 5417 struct sbuf *sb; 5418 uint32_t *buf, *p; 5419 int rc; 5420 5421 MPASS(chip_id(sc) <= CHELSIO_T5); 5422 5423 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5424 if (rc != 0) 5425 return (rc); 5426 5427 rc = sysctl_wire_old_buffer(req, 0); 5428 if (rc != 0) 5429 return (rc); 5430 5431 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5432 if (sb == NULL) 5433 return (ENOMEM); 5434 5435 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5436 M_ZERO | M_WAITOK); 5437 5438 rc =
-t4_cim_read_la(sc, buf, NULL); 5439 if (rc != 0) 5440 goto done; 5441 5442 sbuf_printf(sb, "Status Data PC%s", 5443 cfg & F_UPDBGLACAPTPCONLY ? "" : 5444 " LS0Stat LS0Addr LS0Data"); 5445 5446 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 5447 if (cfg & F_UPDBGLACAPTPCONLY) { 5448 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5449 p[6], p[7]); 5450 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5451 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5452 p[4] & 0xff, p[5] >> 8); 5453 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5454 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5455 p[1] & 0xf, p[2] >> 4); 5456 } else { 5457 sbuf_printf(sb, 5458 "\n %02x %x%07x %x%07x %08x %08x " 5459 "%08x%08x%08x%08x", 5460 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5461 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5462 p[6], p[7]); 5463 } 5464 } 5465 5466 rc = sbuf_finish(sb); 5467 sbuf_delete(sb); 5468 done: 5469 free(buf, M_CXGBE); 5470 return (rc); 5471 } 5472 5473 static int 5474 sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 5475 { 5476 struct adapter *sc = arg1; 5477 u_int cfg; 5478 struct sbuf *sb; 5479 uint32_t *buf, *p; 5480 int rc; 5481 5482 MPASS(chip_id(sc) > CHELSIO_T5); 5483 5484 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5485 if (rc != 0) 5486 return (rc); 5487 5488 rc = sysctl_wire_old_buffer(req, 0); 5489 if (rc != 0) 5490 return (rc); 5491 5492 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5493 if (sb == NULL) 5494 return (ENOMEM); 5495 5496 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5497 M_ZERO | M_WAITOK); 5498 5499 rc = -t4_cim_read_la(sc, buf, NULL); 5500 if (rc != 0) 5501 goto done; 5502 5503 sbuf_printf(sb, "Status Inst Data PC%s", 5504 cfg & F_UPDBGLACAPTPCONLY ? "" : 5505 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 5506 5507 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 5508 if (cfg & F_UPDBGLACAPTPCONLY) { 5509 sbuf_printf(sb, "\n %02x %08x %08x %08x", 5510 p[3] & 0xff, p[2], p[1], p[0]); 5511 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 5512 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 5513 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 5514 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 5515 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 5516 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 5517 p[6] >> 16); 5518 } else { 5519 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 5520 "%08x %08x %08x %08x %08x %08x", 5521 (p[9] >> 16) & 0xff, 5522 p[9] & 0xffff, p[8] >> 16, 5523 p[8] & 0xffff, p[7] >> 16, 5524 p[7] & 0xffff, p[6] >> 16, 5525 p[2], p[1], p[0], p[5], p[4], p[3]); 5526 } 5527 } 5528 5529 rc = sbuf_finish(sb); 5530 sbuf_delete(sb); 5531 done: 5532 free(buf, M_CXGBE); 5533 return (rc); 5534 } 5535 5536 static int 5537 sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 5538 { 5539 struct adapter *sc = arg1; 5540 u_int i; 5541 struct sbuf *sb; 5542 uint32_t *buf, *p; 5543 int rc; 5544 5545 rc = sysctl_wire_old_buffer(req, 0); 5546 if (rc != 0) 5547 return (rc); 5548 5549 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5550 if (sb == NULL) 5551 return (ENOMEM); 5552 5553 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 5554 M_ZERO | M_WAITOK); 5555 5556 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 5557 p = buf; 5558 5559 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5560 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 5561 p[1], p[0]); 5562 } 5563 5564 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 5565 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5566 
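/* Second half of the LA: 5 words per entry, decoded into the fields named in the header above. */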
sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 5567 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 5568 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 5569 (p[1] >> 2) | ((p[2] & 3) << 30), 5570 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 5571 p[0] & 1); 5572 } 5573 5574 rc = sbuf_finish(sb); 5575 sbuf_delete(sb); 5576 free(buf, M_CXGBE); 5577 return (rc); 5578 } 5579 5580 static int 5581 sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 5582 { 5583 struct adapter *sc = arg1; 5584 u_int i; 5585 struct sbuf *sb; 5586 uint32_t *buf, *p; 5587 int rc; 5588 5589 rc = sysctl_wire_old_buffer(req, 0); 5590 if (rc != 0) 5591 return (rc); 5592 5593 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5594 if (sb == NULL) 5595 return (ENOMEM); 5596 5597 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 5598 M_ZERO | M_WAITOK); 5599 5600 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 5601 p = buf; 5602 5603 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 5604 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5605 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 5606 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 5607 p[4], p[3], p[2], p[1], p[0]); 5608 } 5609 5610 sbuf_printf(sb, "\n\nCntl ID Data"); 5611 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5612 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 5613 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 5614 } 5615 5616 rc = sbuf_finish(sb); 5617 sbuf_delete(sb); 5618 free(buf, M_CXGBE); 5619 return (rc); 5620 } 5621 5622 static int 5623 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 5624 { 5625 struct adapter *sc = arg1; 5626 struct sbuf *sb; 5627 int rc, i; 5628 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5629 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5630 uint16_t thres[CIM_NUM_IBQ]; 5631 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 5632 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 5633 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 5634 5635 cim_num_obq = sc->chip_params->cim_num_obq; 5636 if (is_t4(sc)) { 5637 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 5638 obq_rdaddr = A_UP_OBQ_0_REALADDR; 5639 } else { 5640 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 5641 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 5642 } 5643 nq = CIM_NUM_IBQ + cim_num_obq; 5644 5645 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 5646 if (rc == 0) 5647 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 5648 if (rc != 0) 5649 return (rc); 5650 5651 t4_read_cimq_cfg(sc, base, size, thres); 5652 5653 rc = sysctl_wire_old_buffer(req, 0); 5654 if (rc != 0) 5655 return (rc); 5656 5657 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5658 if (sb == NULL) 5659 return (ENOMEM); 5660 5661 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 5662 5663 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 5664 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 5665 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 5666 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5667 G_QUEREMFLITS(p[2]) * 16); 5668 for ( ; i < nq; i++, p += 4, wr += 2) 5669 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 5670 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 5671 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5672 G_QUEREMFLITS(p[2]) * 16); 5673 5674 rc = sbuf_finish(sb); 5675 sbuf_delete(sb); 5676 5677 return (rc); 5678 } 5679 5680 static int 5681 sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 5682 { 5683 struct adapter *sc = arg1; 5684 struct sbuf *sb; 5685 int rc; 5686 struct tp_cpl_stats stats; 5687 5688 rc = 
sysctl_wire_old_buffer(req, 0); 5689 if (rc != 0) 5690 return (rc); 5691 5692 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5693 if (sb == NULL) 5694 return (ENOMEM); 5695 5696 mtx_lock(&sc->reg_lock); 5697 t4_tp_get_cpl_stats(sc, &stats); 5698 mtx_unlock(&sc->reg_lock); 5699 5700 if (sc->chip_params->nchan > 2) { 5701 sbuf_printf(sb, " channel 0 channel 1" 5702 " channel 2 channel 3"); 5703 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 5704 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 5705 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 5706 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 5707 } else { 5708 sbuf_printf(sb, " channel 0 channel 1"); 5709 sbuf_printf(sb, "\nCPL requests: %10u %10u", 5710 stats.req[0], stats.req[1]); 5711 sbuf_printf(sb, "\nCPL responses: %10u %10u", 5712 stats.rsp[0], stats.rsp[1]); 5713 } 5714 5715 rc = sbuf_finish(sb); 5716 sbuf_delete(sb); 5717 5718 return (rc); 5719 } 5720 5721 static int 5722 sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 5723 { 5724 struct adapter *sc = arg1; 5725 struct sbuf *sb; 5726 int rc; 5727 struct tp_usm_stats stats; 5728 5729 rc = sysctl_wire_old_buffer(req, 0); 5730 if (rc != 0) 5731 return(rc); 5732 5733 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5734 if (sb == NULL) 5735 return (ENOMEM); 5736 5737 t4_get_usm_stats(sc, &stats); 5738 5739 sbuf_printf(sb, "Frames: %u\n", stats.frames); 5740 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 5741 sbuf_printf(sb, "Drops: %u", stats.drops); 5742 5743 rc = sbuf_finish(sb); 5744 sbuf_delete(sb); 5745 5746 return (rc); 5747 } 5748 5749 const char *devlog_level_strings[] = { 5750 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 5751 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 5752 [FW_DEVLOG_LEVEL_ERR] = "ERR", 5753 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 5754 [FW_DEVLOG_LEVEL_INFO] = "INFO", 5755 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 5756 }; 5757 5758 const char *devlog_facility_strings[] = { 5759 [FW_DEVLOG_FACILITY_CORE] = "CORE", 5760 [FW_DEVLOG_FACILITY_CF] = "CF", 5761 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 5762 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 5763 [FW_DEVLOG_FACILITY_RES] = "RES", 5764 [FW_DEVLOG_FACILITY_HW] = "HW", 5765 [FW_DEVLOG_FACILITY_FLR] = "FLR", 5766 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 5767 [FW_DEVLOG_FACILITY_PHY] = "PHY", 5768 [FW_DEVLOG_FACILITY_MAC] = "MAC", 5769 [FW_DEVLOG_FACILITY_PORT] = "PORT", 5770 [FW_DEVLOG_FACILITY_VI] = "VI", 5771 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 5772 [FW_DEVLOG_FACILITY_ACL] = "ACL", 5773 [FW_DEVLOG_FACILITY_TM] = "TM", 5774 [FW_DEVLOG_FACILITY_QFC] = "QFC", 5775 [FW_DEVLOG_FACILITY_DCB] = "DCB", 5776 [FW_DEVLOG_FACILITY_ETH] = "ETH", 5777 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 5778 [FW_DEVLOG_FACILITY_RI] = "RI", 5779 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 5780 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 5781 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 5782 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE" 5783 }; 5784 5785 static int 5786 sysctl_devlog(SYSCTL_HANDLER_ARGS) 5787 { 5788 struct adapter *sc = arg1; 5789 struct devlog_params *dparams = &sc->params.devlog; 5790 struct fw_devlog_e *buf, *e; 5791 int i, j, rc, nentries, first = 0, m; 5792 struct sbuf *sb; 5793 uint64_t ftstamp = UINT64_MAX; 5794 5795 if (dparams->start == 0) { 5796 dparams->memtype = FW_MEMTYPE_EDC0; 5797 dparams->start = 0x84000; 5798 dparams->size = 32768; 5799 } 5800 5801 nentries = dparams->size / sizeof(struct fw_devlog_e); 5802 5803 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 5804 if (buf == NULL) 5805 return (ENOMEM); 5806 5807 m = 
fwmtype_to_hwmtype(dparams->memtype); 5808 rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf); 5809 if (rc != 0) 5810 goto done; 5811 5812 for (i = 0; i < nentries; i++) { 5813 e = &buf[i]; 5814 5815 if (e->timestamp == 0) 5816 break; /* end */ 5817 5818 e->timestamp = be64toh(e->timestamp); 5819 e->seqno = be32toh(e->seqno); 5820 for (j = 0; j < 8; j++) 5821 e->params[j] = be32toh(e->params[j]); 5822 5823 if (e->timestamp < ftstamp) { 5824 ftstamp = e->timestamp; 5825 first = i; 5826 } 5827 } 5828 5829 if (buf[first].timestamp == 0) 5830 goto done; /* nothing in the log */ 5831 5832 rc = sysctl_wire_old_buffer(req, 0); 5833 if (rc != 0) 5834 goto done; 5835 5836 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5837 if (sb == NULL) { 5838 rc = ENOMEM; 5839 goto done; 5840 } 5841 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 5842 "Seq#", "Tstamp", "Level", "Facility", "Message"); 5843 5844 i = first; 5845 do { 5846 e = &buf[i]; 5847 if (e->timestamp == 0) 5848 break; /* end */ 5849 5850 sbuf_printf(sb, "%10d %15ju %8s %8s ", 5851 e->seqno, e->timestamp, 5852 (e->level < nitems(devlog_level_strings) ? 5853 devlog_level_strings[e->level] : "UNKNOWN"), 5854 (e->facility < nitems(devlog_facility_strings) ? 5855 devlog_facility_strings[e->facility] : "UNKNOWN")); 5856 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 5857 e->params[2], e->params[3], e->params[4], 5858 e->params[5], e->params[6], e->params[7]); 5859 5860 if (++i == nentries) 5861 i = 0; 5862 } while (i != first); 5863 5864 rc = sbuf_finish(sb); 5865 sbuf_delete(sb); 5866 done: 5867 free(buf, M_CXGBE); 5868 return (rc); 5869 } 5870 5871 static int 5872 sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 5873 { 5874 struct adapter *sc = arg1; 5875 struct sbuf *sb; 5876 int rc; 5877 struct tp_fcoe_stats stats[MAX_NCHAN]; 5878 int i, nchan = sc->chip_params->nchan; 5879 5880 rc = sysctl_wire_old_buffer(req, 0); 5881 if (rc != 0) 5882 return (rc); 5883 5884 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5885 if (sb == NULL) 5886 return (ENOMEM); 5887 5888 for (i = 0; i < nchan; i++) 5889 t4_get_fcoe_stats(sc, i, &stats[i]); 5890 5891 if (nchan > 2) { 5892 sbuf_printf(sb, " channel 0 channel 1" 5893 " channel 2 channel 3"); 5894 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 5895 stats[0].octets_ddp, stats[1].octets_ddp, 5896 stats[2].octets_ddp, stats[3].octets_ddp); 5897 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 5898 stats[0].frames_ddp, stats[1].frames_ddp, 5899 stats[2].frames_ddp, stats[3].frames_ddp); 5900 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 5901 stats[0].frames_drop, stats[1].frames_drop, 5902 stats[2].frames_drop, stats[3].frames_drop); 5903 } else { 5904 sbuf_printf(sb, " channel 0 channel 1"); 5905 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 5906 stats[0].octets_ddp, stats[1].octets_ddp); 5907 sbuf_printf(sb, "\nframesDDP: %16u %16u", 5908 stats[0].frames_ddp, stats[1].frames_ddp); 5909 sbuf_printf(sb, "\nframesDrop: %16u %16u", 5910 stats[0].frames_drop, stats[1].frames_drop); 5911 } 5912 5913 rc = sbuf_finish(sb); 5914 sbuf_delete(sb); 5915 5916 return (rc); 5917 } 5918 5919 static int 5920 sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 5921 { 5922 struct adapter *sc = arg1; 5923 struct sbuf *sb; 5924 int rc, i; 5925 unsigned int map, kbps, ipg, mode; 5926 unsigned int pace_tab[NTX_SCHED]; 5927 5928 rc = sysctl_wire_old_buffer(req, 0); 5929 if (rc != 0) 5930 return (rc); 5931 5932 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5933 if (sb == NULL) 5934 return (ENOMEM); 5935 5936 map = 
t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 5937 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 5938 t4_read_pace_tbl(sc, pace_tab); 5939 5940 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 5941 "Class IPG (0.1 ns) Flow IPG (us)"); 5942 5943 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 5944 t4_get_tx_sched(sc, i, &kbps, &ipg); 5945 sbuf_printf(sb, "\n %u %-5s %u ", i, 5946 (mode & (1 << i)) ? "flow" : "class", map & 3); 5947 if (kbps) 5948 sbuf_printf(sb, "%9u ", kbps); 5949 else 5950 sbuf_printf(sb, " disabled "); 5951 5952 if (ipg) 5953 sbuf_printf(sb, "%13u ", ipg); 5954 else 5955 sbuf_printf(sb, " disabled "); 5956 5957 if (pace_tab[i]) 5958 sbuf_printf(sb, "%10u", pace_tab[i]); 5959 else 5960 sbuf_printf(sb, " disabled"); 5961 } 5962 5963 rc = sbuf_finish(sb); 5964 sbuf_delete(sb); 5965 5966 return (rc); 5967 } 5968 5969 static int 5970 sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 5971 { 5972 struct adapter *sc = arg1; 5973 struct sbuf *sb; 5974 int rc, i, j; 5975 uint64_t *p0, *p1; 5976 struct lb_port_stats s[2]; 5977 static const char *stat_name[] = { 5978 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 5979 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 5980 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 5981 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 5982 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 5983 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 5984 "BG2FramesTrunc:", "BG3FramesTrunc:" 5985 }; 5986 5987 rc = sysctl_wire_old_buffer(req, 0); 5988 if (rc != 0) 5989 return (rc); 5990 5991 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5992 if (sb == NULL) 5993 return (ENOMEM); 5994 5995 memset(s, 0, sizeof(s)); 5996 5997 for (i = 0; i < sc->chip_params->nchan; i += 2) { 5998 t4_get_lb_stats(sc, i, &s[0]); 5999 t4_get_lb_stats(sc, i + 1, &s[1]); 6000 6001 p0 = &s[0].octets; 6002 p1 = &s[1].octets; 6003 sbuf_printf(sb, "%s Loopback %u" 6004 " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); 6005 6006 for (j = 0; j < nitems(stat_name); j++) 6007 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6008 *p0++, *p1++); 6009 } 6010 6011 rc = sbuf_finish(sb); 6012 sbuf_delete(sb); 6013 6014 return (rc); 6015 } 6016 6017 static int 6018 sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6019 { 6020 int rc = 0; 6021 struct port_info *pi = arg1; 6022 struct sbuf *sb; 6023 6024 rc = sysctl_wire_old_buffer(req, 0); 6025 if (rc != 0) 6026 return(rc); 6027 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6028 if (sb == NULL) 6029 return (ENOMEM); 6030 6031 if (pi->linkdnrc < 0) 6032 sbuf_printf(sb, "n/a"); 6033 else 6034 sbuf_printf(sb, "%s", t4_link_down_rc_str(pi->linkdnrc)); 6035 6036 rc = sbuf_finish(sb); 6037 sbuf_delete(sb); 6038 6039 return (rc); 6040 } 6041 6042 struct mem_desc { 6043 unsigned int base; 6044 unsigned int limit; 6045 unsigned int idx; 6046 }; 6047 6048 static int 6049 mem_desc_cmp(const void *a, const void *b) 6050 { 6051 return ((const struct mem_desc *)a)->base - 6052 ((const struct mem_desc *)b)->base; 6053 } 6054 6055 static void 6056 mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6057 unsigned int to) 6058 { 6059 unsigned int size; 6060 6061 size = to - from + 1; 6062 if (size == 0) 6063 return; 6064 6065 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6066 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6067 } 6068 6069 static int 6070 sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6071 { 6072 struct adapter *sc = arg1; 6073 struct sbuf *sb; 6074 int rc, i, n; 6075 uint32_t lo, hi, used, alloc; 6076 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6077 static const char *region[] = { 6078 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6079 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6080 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6081 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6082 "RQUDP region:", "PBL region:", "TXPBL region:", 6083 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6084 "On-chip queues:" 6085 }; 6086 struct mem_desc avail[4]; 6087 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6088 struct mem_desc *md = mem; 6089 6090 rc = sysctl_wire_old_buffer(req, 0); 6091 if (rc != 0) 6092 return (rc); 6093 6094 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6095 if (sb == NULL) 6096 return (ENOMEM); 6097 6098 for (i = 0; i < nitems(mem); i++) { 6099 mem[i].limit = 0; 6100 mem[i].idx = i; 6101 } 6102 6103 /* Find and sort the populated memory ranges */ 6104 i = 0; 6105 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6106 if (lo & F_EDRAM0_ENABLE) { 6107 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6108 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6109 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6110 avail[i].idx = 0; 6111 i++; 6112 } 6113 if (lo & F_EDRAM1_ENABLE) { 6114 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6115 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6116 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6117 avail[i].idx = 1; 6118 i++; 6119 } 6120 if (lo & F_EXT_MEM_ENABLE) { 6121 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6122 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6123 avail[i].limit = avail[i].base + 6124 (G_EXT_MEM_SIZE(hi) << 20); 6125 avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */
6126 i++;
6127 }
6128 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
6129 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
6130 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
6131 avail[i].limit = avail[i].base +
6132 (G_EXT_MEM1_SIZE(hi) << 20);
6133 avail[i].idx = 4;
6134 i++;
6135 }
6136 if (!i) { /* no memory available */
6137 sbuf_delete(sb); return (0); }
6138 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
6139
6140 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
6141 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
6142 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
6143 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6144 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
6145 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
6146 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
6147 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
6148 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
6149
6150 /* the next few have explicit upper bounds */
6151 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
6152 md->limit = md->base - 1 +
6153 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
6154 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
6155 md++;
6156
6157 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
6158 md->limit = md->base - 1 +
6159 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
6160 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
6161 md++;
6162
6163 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6164 if (chip_id(sc) <= CHELSIO_T5) {
6165 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
6166 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6167 } else {
6168 hi = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
6169 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
6170 }
6171 md->limit = 0;
6172 } else {
6173 md->base = 0;
6174 md->idx = nitems(region); /* hide it */
6175 }
6176 md++;
6177
6178 #define ulp_region(reg) \
6179 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
6180 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
6181
6182 ulp_region(RX_ISCSI);
6183 ulp_region(RX_TDDP);
6184 ulp_region(TX_TPT);
6185 ulp_region(RX_STAG);
6186 ulp_region(RX_RQ);
6187 ulp_region(RX_RQUDP);
6188 ulp_region(RX_PBL);
6189 ulp_region(TX_PBL);
6190 #undef ulp_region
6191
6192 md->base = 0;
6193 md->idx = nitems(region);
6194 if (!is_t4(sc)) {
6195 uint32_t size = 0;
6196 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
6197 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
6198
6199 if (is_t5(sc)) {
6200 if (sge_ctrl & F_VFIFO_ENABLE)
6201 size = G_DBVFIFO_SIZE(fifo_size);
6202 } else
6203 size = G_T6_DBVFIFO_SIZE(fifo_size);
6204
6205 if (size) {
6206 md->base = G_BASEADDR(t4_read_reg(sc,
6207 A_SGE_DBVFIFO_BADDR));
6208 md->limit = md->base + (size << 2) - 1;
6209 }
6210 }
6211 md++;
6212
6213 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
6214 md->limit = 0;
6215 md++;
6216 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
6217 md->limit = 0;
6218 md++;
6219
6220 md->base = sc->vres.ocq.start;
6221 if (sc->vres.ocq.size)
6222 md->limit = md->base + sc->vres.ocq.size - 1;
6223 else
6224 md->idx = nitems(region); /* hide it */
6225 md++;
6226
6227 /* add any address-space holes, there can be up to 3 */
6228 for (n = 0; n < i - 1; n++)
6229 if (avail[n].limit < avail[n + 1].base)
6230 (md++)->base = avail[n].limit;
6231 if (avail[n].limit)
6232 (md++)->base = avail[n].limit;
6233
6234 n = md - mem;
6235 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
6236
6237 for (lo = 0; lo < i; lo++)
6238
mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6239 avail[lo].limit - 1); 6240 6241 sbuf_printf(sb, "\n"); 6242 for (i = 0; i < n; i++) { 6243 if (mem[i].idx >= nitems(region)) 6244 continue; /* skip holes */ 6245 if (!mem[i].limit) 6246 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6247 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6248 mem[i].limit); 6249 } 6250 6251 sbuf_printf(sb, "\n"); 6252 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6253 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6254 mem_region_show(sb, "uP RAM:", lo, hi); 6255 6256 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6257 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6258 mem_region_show(sb, "uP Extmem2:", lo, hi); 6259 6260 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6261 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6262 G_PMRXMAXPAGE(lo), 6263 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6264 (lo & F_PMRXNUMCHN) ? 2 : 1); 6265 6266 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6267 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6268 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6269 G_PMTXMAXPAGE(lo), 6270 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6271 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6272 sbuf_printf(sb, "%u p-structs\n", 6273 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6274 6275 for (i = 0; i < 4; i++) { 6276 if (chip_id(sc) > CHELSIO_T5) 6277 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 6278 else 6279 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6280 if (is_t5(sc)) { 6281 used = G_T5_USED(lo); 6282 alloc = G_T5_ALLOC(lo); 6283 } else { 6284 used = G_USED(lo); 6285 alloc = G_ALLOC(lo); 6286 } 6287 /* For T6 these are MAC buffer groups */ 6288 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6289 i, used, alloc); 6290 } 6291 for (i = 0; i < sc->chip_params->nchan; i++) { 6292 if (chip_id(sc) > CHELSIO_T5) 6293 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 6294 else 6295 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6296 if (is_t5(sc)) { 6297 used = G_T5_USED(lo); 6298 alloc = G_T5_ALLOC(lo); 6299 } else { 6300 used = G_USED(lo); 6301 alloc = G_ALLOC(lo); 6302 } 6303 /* For T6 these are MAC buffer groups */ 6304 sbuf_printf(sb, 6305 "\nLoopback %d using %u pages out of %u allocated", 6306 i, used, alloc); 6307 } 6308 6309 rc = sbuf_finish(sb); 6310 sbuf_delete(sb); 6311 6312 return (rc); 6313 } 6314 6315 static inline void 6316 tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6317 { 6318 *mask = x | y; 6319 y = htobe64(y); 6320 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6321 } 6322 6323 static int 6324 sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6325 { 6326 struct adapter *sc = arg1; 6327 struct sbuf *sb; 6328 int rc, i; 6329 6330 MPASS(chip_id(sc) <= CHELSIO_T5); 6331 6332 rc = sysctl_wire_old_buffer(req, 0); 6333 if (rc != 0) 6334 return (rc); 6335 6336 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6337 if (sb == NULL) 6338 return (ENOMEM); 6339 6340 sbuf_printf(sb, 6341 "Idx Ethernet address Mask Vld Ports PF" 6342 " VF Replication P0 P1 P2 P3 ML"); 6343 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6344 uint64_t tcamx, tcamy, mask; 6345 uint32_t cls_lo, cls_hi; 6346 uint8_t addr[ETHER_ADDR_LEN]; 6347 6348 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6349 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6350 if (tcamx & tcamy) 6351 continue; 6352 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6353 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6354 
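		/*
		 * cls_lo and cls_hi are this entry's words in the MPS
		 * classification SRAM; they carry the valid bit, port map,
		 * PF/VF and the priority/multi-listen fields shown in the
		 * remaining columns.
		 */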
cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6355 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6356 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], 6357 addr[3], addr[4], addr[5], (uintmax_t)mask, 6358 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6359 G_PORTMAP(cls_hi), G_PF(cls_lo), 6360 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); 6361 6362 if (cls_lo & F_REPLICATE) { 6363 struct fw_ldst_cmd ldst_cmd; 6364 6365 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6366 ldst_cmd.op_to_addrspace = 6367 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6368 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6369 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6370 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6371 ldst_cmd.u.mps.rplc.fid_idx = 6372 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6373 V_FW_LDST_CMD_IDX(i)); 6374 6375 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6376 "t4mps"); 6377 if (rc) 6378 break; 6379 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6380 sizeof(ldst_cmd), &ldst_cmd); 6381 end_synchronized_op(sc, 0); 6382 6383 if (rc != 0) { 6384 sbuf_printf(sb, "%36d", rc); 6385 rc = 0; 6386 } else { 6387 sbuf_printf(sb, " %08x %08x %08x %08x", 6388 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6389 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6390 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6391 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6392 } 6393 } else 6394 sbuf_printf(sb, "%36s", ""); 6395 6396 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6397 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6398 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6399 } 6400 6401 if (rc) 6402 (void) sbuf_finish(sb); 6403 else 6404 rc = sbuf_finish(sb); 6405 sbuf_delete(sb); 6406 6407 return (rc); 6408 } 6409 6410 static int 6411 sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 6412 { 6413 struct adapter *sc = arg1; 6414 struct sbuf *sb; 6415 int rc, i; 6416 6417 MPASS(chip_id(sc) > CHELSIO_T5); 6418 6419 rc = sysctl_wire_old_buffer(req, 0); 6420 if (rc != 0) 6421 return (rc); 6422 6423 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6424 if (sb == NULL) 6425 return (ENOMEM); 6426 6427 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 6428 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 6429 " Replication" 6430 " P0 P1 P2 P3 ML\n"); 6431 6432 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6433 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 6434 uint16_t ivlan; 6435 uint64_t tcamx, tcamy, val, mask; 6436 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 6437 uint8_t addr[ETHER_ADDR_LEN]; 6438 6439 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 6440 if (i < 256) 6441 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 6442 else 6443 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 6444 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6445 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6446 tcamy = G_DMACH(val) << 32; 6447 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6448 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6449 lookup_type = G_DATALKPTYPE(data2); 6450 port_num = G_DATAPORTNUM(data2); 6451 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6452 /* Inner header VNI */ 6453 vniy = ((data2 & F_DATAVIDH2) << 23) | 6454 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6455 dip_hit = data2 & F_DATADIPHIT; 6456 vlan_vld = 0; 6457 } else { 6458 vniy = 0; 6459 dip_hit = 0; 6460 vlan_vld = data2 & F_DATAVIDH2; 6461 ivlan = G_VIDL(val); 6462 } 6463 6464 ctl |= V_CTLXYBITSEL(1); 6465 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6466 val = t4_read_reg(sc, 
A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6467 tcamx = G_DMACH(val) << 32; 6468 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6469 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6470 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6471 /* Inner header VNI mask */ 6472 vnix = ((data2 & F_DATAVIDH2) << 23) | 6473 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6474 } else 6475 vnix = 0; 6476 6477 if (tcamx & tcamy) 6478 continue; 6479 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6480 6481 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6482 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6483 6484 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6485 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6486 "%012jx %06x %06x - - %3c" 6487 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 6488 addr[1], addr[2], addr[3], addr[4], addr[5], 6489 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 6490 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6491 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6492 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6493 } else { 6494 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6495 "%012jx - - ", i, addr[0], addr[1], 6496 addr[2], addr[3], addr[4], addr[5], 6497 (uintmax_t)mask); 6498 6499 if (vlan_vld) 6500 sbuf_printf(sb, "%4u Y ", ivlan); 6501 else 6502 sbuf_printf(sb, " - N "); 6503 6504 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 6505 lookup_type ? 'I' : 'O', port_num, 6506 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6507 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6508 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6509 } 6510 6511 6512 if (cls_lo & F_T6_REPLICATE) { 6513 struct fw_ldst_cmd ldst_cmd; 6514 6515 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6516 ldst_cmd.op_to_addrspace = 6517 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6518 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6519 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6520 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6521 ldst_cmd.u.mps.rplc.fid_idx = 6522 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6523 V_FW_LDST_CMD_IDX(i)); 6524 6525 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6526 "t6mps"); 6527 if (rc) 6528 break; 6529 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6530 sizeof(ldst_cmd), &ldst_cmd); 6531 end_synchronized_op(sc, 0); 6532 6533 if (rc != 0) { 6534 sbuf_printf(sb, "%72d", rc); 6535 rc = 0; 6536 } else { 6537 sbuf_printf(sb, " %08x %08x %08x %08x" 6538 " %08x %08x %08x %08x", 6539 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 6540 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 6541 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 6542 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 6543 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6544 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6545 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6546 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6547 } 6548 } else 6549 sbuf_printf(sb, "%72s", ""); 6550 6551 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 6552 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 6553 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 6554 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 6555 } 6556 6557 if (rc) 6558 (void) sbuf_finish(sb); 6559 else 6560 rc = sbuf_finish(sb); 6561 sbuf_delete(sb); 6562 6563 return (rc); 6564 } 6565 6566 static int 6567 sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 6568 { 6569 struct adapter *sc = arg1; 6570 struct sbuf *sb; 6571 int rc; 6572 uint16_t mtus[NMTUS]; 6573 6574 rc = sysctl_wire_old_buffer(req, 0); 6575 if (rc != 0) 6576 return (rc); 6577 6578 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6579 if (sb == NULL) 6580 return (ENOMEM); 6581 6582 t4_read_mtu_tbl(sc, mtus, 
NULL);
6583
6584 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
6585 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
6586 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
6587 mtus[14], mtus[15]);
6588
6589 rc = sbuf_finish(sb);
6590 sbuf_delete(sb);
6591
6592 return (rc);
6593 }
6594
6595 static int
6596 sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6597 {
6598 struct adapter *sc = arg1;
6599 struct sbuf *sb;
6600 int rc, i;
6601 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
6602 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
6603 static const char *tx_stats[MAX_PM_NSTATS] = {
6604 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
6605 "Tx FIFO wait", NULL, "Tx latency"
6606 };
6607 static const char *rx_stats[MAX_PM_NSTATS] = {
6608 "Read:", "Write bypass:", "Write mem:", "Flush:",
6609 " Rx FIFO wait", NULL, "Rx latency"
6610 };
6611
6612 rc = sysctl_wire_old_buffer(req, 0);
6613 if (rc != 0)
6614 return (rc);
6615
6616 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6617 if (sb == NULL)
6618 return (ENOMEM);
6619
6620 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
6621 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
6622
6623 sbuf_printf(sb, " Tx pcmds Tx bytes");
6624 for (i = 0; i < 4; i++) {
6625 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
6626 tx_cyc[i]);
6627 }
6628
6629 sbuf_printf(sb, "\n Rx pcmds Rx bytes");
6630 for (i = 0; i < 4; i++) {
6631 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
6632 rx_cyc[i]);
6633 }
6634
6635 if (chip_id(sc) > CHELSIO_T5) {
6636 sbuf_printf(sb,
6637 "\n Total wait Total occupancy");
6638 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
6639 tx_cyc[i]);
6640 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
6641 rx_cyc[i]);
6642
6643 i += 2;
6644 MPASS(i < nitems(tx_stats));
6645
6646 sbuf_printf(sb,
6647 "\n Reads Total wait");
6648 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
6649 tx_cyc[i]);
6650 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
6651 rx_cyc[i]);
6652 }
6653
6654 rc = sbuf_finish(sb);
6655 sbuf_delete(sb);
6656
6657 return (rc);
6658 }
6659
6660 static int
6661 sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6662 {
6663 struct adapter *sc = arg1;
6664 struct sbuf *sb;
6665 int rc;
6666 struct tp_rdma_stats stats;
6667
6668 rc = sysctl_wire_old_buffer(req, 0);
6669 if (rc != 0)
6670 return (rc);
6671
6672 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6673 if (sb == NULL)
6674 return (ENOMEM);
6675
6676 mtx_lock(&sc->reg_lock);
6677 t4_tp_get_rdma_stats(sc, &stats);
6678 mtx_unlock(&sc->reg_lock);
6679
6680 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
6681 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
6682
6683 rc = sbuf_finish(sb);
6684 sbuf_delete(sb);
6685
6686 return (rc);
6687 }
6688
6689 static int
6690 sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6691 {
6692 struct adapter *sc = arg1;
6693 struct sbuf *sb;
6694 int rc;
6695 struct tp_tcp_stats v4, v6;
6696
6697 rc = sysctl_wire_old_buffer(req, 0);
6698 if (rc != 0)
6699 return (rc);
6700
6701 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6702 if (sb == NULL)
6703 return (ENOMEM);
6704
6705 mtx_lock(&sc->reg_lock);
6706 t4_tp_get_tcp_stats(sc, &v4, &v6);
6707 mtx_unlock(&sc->reg_lock);
6708
6709 sbuf_printf(sb,
6710 " IP IPv6\n");
6711 sbuf_printf(sb, "OutRsts: %20u %20u\n",
6712 v4.tcp_out_rsts, v6.tcp_out_rsts);
6713 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
6714 v4.tcp_in_segs, v6.tcp_in_segs);
6715 sbuf_printf(sb,
"OutSegs: %20ju %20ju\n", 6716 v4.tcp_out_segs, v6.tcp_out_segs); 6717 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 6718 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 6719 6720 rc = sbuf_finish(sb); 6721 sbuf_delete(sb); 6722 6723 return (rc); 6724 } 6725 6726 static int 6727 sysctl_tids(SYSCTL_HANDLER_ARGS) 6728 { 6729 struct adapter *sc = arg1; 6730 struct sbuf *sb; 6731 int rc; 6732 struct tid_info *t = &sc->tids; 6733 6734 rc = sysctl_wire_old_buffer(req, 0); 6735 if (rc != 0) 6736 return (rc); 6737 6738 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6739 if (sb == NULL) 6740 return (ENOMEM); 6741 6742 if (t->natids) { 6743 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 6744 t->atids_in_use); 6745 } 6746 6747 if (t->ntids) { 6748 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6749 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 6750 6751 if (b) { 6752 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1, 6753 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6754 t->ntids - 1); 6755 } else { 6756 sbuf_printf(sb, "TID range: %u-%u", 6757 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6758 t->ntids - 1); 6759 } 6760 } else 6761 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1); 6762 sbuf_printf(sb, ", in use: %u\n", 6763 atomic_load_acq_int(&t->tids_in_use)); 6764 } 6765 6766 if (t->nstids) { 6767 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 6768 t->stid_base + t->nstids - 1, t->stids_in_use); 6769 } 6770 6771 if (t->nftids) { 6772 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 6773 t->ftid_base + t->nftids - 1); 6774 } 6775 6776 if (t->netids) { 6777 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 6778 t->etid_base + t->netids - 1); 6779 } 6780 6781 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 6782 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 6783 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 6784 6785 rc = sbuf_finish(sb); 6786 sbuf_delete(sb); 6787 6788 return (rc); 6789 } 6790 6791 static int 6792 sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 6793 { 6794 struct adapter *sc = arg1; 6795 struct sbuf *sb; 6796 int rc; 6797 struct tp_err_stats stats; 6798 6799 rc = sysctl_wire_old_buffer(req, 0); 6800 if (rc != 0) 6801 return (rc); 6802 6803 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6804 if (sb == NULL) 6805 return (ENOMEM); 6806 6807 mtx_lock(&sc->reg_lock); 6808 t4_tp_get_err_stats(sc, &stats); 6809 mtx_unlock(&sc->reg_lock); 6810 6811 if (sc->chip_params->nchan > 2) { 6812 sbuf_printf(sb, " channel 0 channel 1" 6813 " channel 2 channel 3\n"); 6814 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 6815 stats.mac_in_errs[0], stats.mac_in_errs[1], 6816 stats.mac_in_errs[2], stats.mac_in_errs[3]); 6817 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 6818 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 6819 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 6820 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 6821 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 6822 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 6823 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 6824 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 6825 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 6826 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 6827 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 6828 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 6829 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 6830 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 6831 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 6832 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u 
%10u\n", 6833 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 6834 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 6835 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 6836 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 6837 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 6838 } else { 6839 sbuf_printf(sb, " channel 0 channel 1\n"); 6840 sbuf_printf(sb, "macInErrs: %10u %10u\n", 6841 stats.mac_in_errs[0], stats.mac_in_errs[1]); 6842 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 6843 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 6844 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 6845 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 6846 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 6847 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 6848 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 6849 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 6850 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 6851 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 6852 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 6853 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 6854 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 6855 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 6856 } 6857 6858 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 6859 stats.ofld_no_neigh, stats.ofld_cong_defer); 6860 6861 rc = sbuf_finish(sb); 6862 sbuf_delete(sb); 6863 6864 return (rc); 6865 } 6866 6867 struct field_desc { 6868 const char *name; 6869 u_int start; 6870 u_int width; 6871 }; 6872 6873 static void 6874 field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 6875 { 6876 char buf[32]; 6877 int line_size = 0; 6878 6879 while (f->name) { 6880 uint64_t mask = (1ULL << f->width) - 1; 6881 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 6882 ((uintmax_t)v >> f->start) & mask); 6883 6884 if (line_size + len >= 79) { 6885 line_size = 8; 6886 sbuf_printf(sb, "\n "); 6887 } 6888 sbuf_printf(sb, "%s ", buf); 6889 line_size += len + 1; 6890 f++; 6891 } 6892 sbuf_printf(sb, "\n"); 6893 } 6894 6895 static const struct field_desc tp_la0[] = { 6896 { "RcfOpCodeOut", 60, 4 }, 6897 { "State", 56, 4 }, 6898 { "WcfState", 52, 4 }, 6899 { "RcfOpcSrcOut", 50, 2 }, 6900 { "CRxError", 49, 1 }, 6901 { "ERxError", 48, 1 }, 6902 { "SanityFailed", 47, 1 }, 6903 { "SpuriousMsg", 46, 1 }, 6904 { "FlushInputMsg", 45, 1 }, 6905 { "FlushInputCpl", 44, 1 }, 6906 { "RssUpBit", 43, 1 }, 6907 { "RssFilterHit", 42, 1 }, 6908 { "Tid", 32, 10 }, 6909 { "InitTcb", 31, 1 }, 6910 { "LineNumber", 24, 7 }, 6911 { "Emsg", 23, 1 }, 6912 { "EdataOut", 22, 1 }, 6913 { "Cmsg", 21, 1 }, 6914 { "CdataOut", 20, 1 }, 6915 { "EreadPdu", 19, 1 }, 6916 { "CreadPdu", 18, 1 }, 6917 { "TunnelPkt", 17, 1 }, 6918 { "RcfPeerFin", 16, 1 }, 6919 { "RcfReasonOut", 12, 4 }, 6920 { "TxCchannel", 10, 2 }, 6921 { "RcfTxChannel", 8, 2 }, 6922 { "RxEchannel", 6, 2 }, 6923 { "RcfRxChannel", 5, 1 }, 6924 { "RcfDataOutSrdy", 4, 1 }, 6925 { "RxDvld", 3, 1 }, 6926 { "RxOoDvld", 2, 1 }, 6927 { "RxCongestion", 1, 1 }, 6928 { "TxCongestion", 0, 1 }, 6929 { NULL } 6930 }; 6931 6932 static const struct field_desc tp_la1[] = { 6933 { "CplCmdIn", 56, 8 }, 6934 { "CplCmdOut", 48, 8 }, 6935 { "ESynOut", 47, 1 }, 6936 { "EAckOut", 46, 1 }, 6937 { "EFinOut", 45, 1 }, 6938 { "ERstOut", 44, 1 }, 6939 { "SynIn", 43, 1 }, 6940 { "AckIn", 42, 1 }, 6941 { "FinIn", 41, 1 }, 6942 { "RstIn", 40, 1 }, 6943 { "DataIn", 39, 1 }, 6944 { "DataInVld", 38, 1 }, 6945 { "PadIn", 37, 1 }, 6946 { "RxBufEmpty", 36, 1 }, 6947 { "RxDdp", 35, 1 }, 6948 { "RxFbCongestion", 34, 1 }, 6949 { 
"TxFbCongestion", 33, 1 }, 6950 { "TxPktSumSrdy", 32, 1 }, 6951 { "RcfUlpType", 28, 4 }, 6952 { "Eread", 27, 1 }, 6953 { "Ebypass", 26, 1 }, 6954 { "Esave", 25, 1 }, 6955 { "Static0", 24, 1 }, 6956 { "Cread", 23, 1 }, 6957 { "Cbypass", 22, 1 }, 6958 { "Csave", 21, 1 }, 6959 { "CPktOut", 20, 1 }, 6960 { "RxPagePoolFull", 18, 2 }, 6961 { "RxLpbkPkt", 17, 1 }, 6962 { "TxLpbkPkt", 16, 1 }, 6963 { "RxVfValid", 15, 1 }, 6964 { "SynLearned", 14, 1 }, 6965 { "SetDelEntry", 13, 1 }, 6966 { "SetInvEntry", 12, 1 }, 6967 { "CpcmdDvld", 11, 1 }, 6968 { "CpcmdSave", 10, 1 }, 6969 { "RxPstructsFull", 8, 2 }, 6970 { "EpcmdDvld", 7, 1 }, 6971 { "EpcmdFlush", 6, 1 }, 6972 { "EpcmdTrimPrefix", 5, 1 }, 6973 { "EpcmdTrimPostfix", 4, 1 }, 6974 { "ERssIp4Pkt", 3, 1 }, 6975 { "ERssIp6Pkt", 2, 1 }, 6976 { "ERssTcpUdpPkt", 1, 1 }, 6977 { "ERssFceFipPkt", 0, 1 }, 6978 { NULL } 6979 }; 6980 6981 static const struct field_desc tp_la2[] = { 6982 { "CplCmdIn", 56, 8 }, 6983 { "MpsVfVld", 55, 1 }, 6984 { "MpsPf", 52, 3 }, 6985 { "MpsVf", 44, 8 }, 6986 { "SynIn", 43, 1 }, 6987 { "AckIn", 42, 1 }, 6988 { "FinIn", 41, 1 }, 6989 { "RstIn", 40, 1 }, 6990 { "DataIn", 39, 1 }, 6991 { "DataInVld", 38, 1 }, 6992 { "PadIn", 37, 1 }, 6993 { "RxBufEmpty", 36, 1 }, 6994 { "RxDdp", 35, 1 }, 6995 { "RxFbCongestion", 34, 1 }, 6996 { "TxFbCongestion", 33, 1 }, 6997 { "TxPktSumSrdy", 32, 1 }, 6998 { "RcfUlpType", 28, 4 }, 6999 { "Eread", 27, 1 }, 7000 { "Ebypass", 26, 1 }, 7001 { "Esave", 25, 1 }, 7002 { "Static0", 24, 1 }, 7003 { "Cread", 23, 1 }, 7004 { "Cbypass", 22, 1 }, 7005 { "Csave", 21, 1 }, 7006 { "CPktOut", 20, 1 }, 7007 { "RxPagePoolFull", 18, 2 }, 7008 { "RxLpbkPkt", 17, 1 }, 7009 { "TxLpbkPkt", 16, 1 }, 7010 { "RxVfValid", 15, 1 }, 7011 { "SynLearned", 14, 1 }, 7012 { "SetDelEntry", 13, 1 }, 7013 { "SetInvEntry", 12, 1 }, 7014 { "CpcmdDvld", 11, 1 }, 7015 { "CpcmdSave", 10, 1 }, 7016 { "RxPstructsFull", 8, 2 }, 7017 { "EpcmdDvld", 7, 1 }, 7018 { "EpcmdFlush", 6, 1 }, 7019 { "EpcmdTrimPrefix", 5, 1 }, 7020 { "EpcmdTrimPostfix", 4, 1 }, 7021 { "ERssIp4Pkt", 3, 1 }, 7022 { "ERssIp6Pkt", 2, 1 }, 7023 { "ERssTcpUdpPkt", 1, 1 }, 7024 { "ERssFceFipPkt", 0, 1 }, 7025 { NULL } 7026 }; 7027 7028 static void 7029 tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7030 { 7031 7032 field_desc_show(sb, *p, tp_la0); 7033 } 7034 7035 static void 7036 tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7037 { 7038 7039 if (idx) 7040 sbuf_printf(sb, "\n"); 7041 field_desc_show(sb, p[0], tp_la0); 7042 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7043 field_desc_show(sb, p[1], tp_la0); 7044 } 7045 7046 static void 7047 tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7048 { 7049 7050 if (idx) 7051 sbuf_printf(sb, "\n"); 7052 field_desc_show(sb, p[0], tp_la0); 7053 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7054 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7055 } 7056 7057 static int 7058 sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7059 { 7060 struct adapter *sc = arg1; 7061 struct sbuf *sb; 7062 uint64_t *buf, *p; 7063 int rc; 7064 u_int i, inc; 7065 void (*show_func)(struct sbuf *, uint64_t *, int); 7066 7067 rc = sysctl_wire_old_buffer(req, 0); 7068 if (rc != 0) 7069 return (rc); 7070 7071 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7072 if (sb == NULL) 7073 return (ENOMEM); 7074 7075 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7076 7077 t4_tp_read_la(sc, buf, NULL); 7078 p = buf; 7079 7080 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7081 case 2: 7082 inc = 2; 7083 show_func = tp_la_show2; 7084 break; 7085 case 3: 7086 inc = 2; 7087 show_func = tp_la_show3; 7088 break; 7089 default: 7090 inc = 1; 7091 show_func = tp_la_show; 7092 } 7093 7094 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7095 (*show_func)(sb, p, i); 7096 7097 rc = sbuf_finish(sb); 7098 sbuf_delete(sb); 7099 free(buf, M_CXGBE); 7100 return (rc); 7101 } 7102 7103 static int 7104 sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7105 { 7106 struct adapter *sc = arg1; 7107 struct sbuf *sb; 7108 int rc; 7109 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7110 7111 rc = sysctl_wire_old_buffer(req, 0); 7112 if (rc != 0) 7113 return (rc); 7114 7115 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7116 if (sb == NULL) 7117 return (ENOMEM); 7118 7119 t4_get_chan_txrate(sc, nrate, orate); 7120 7121 if (sc->chip_params->nchan > 2) { 7122 sbuf_printf(sb, " channel 0 channel 1" 7123 " channel 2 channel 3\n"); 7124 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7125 nrate[0], nrate[1], nrate[2], nrate[3]); 7126 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7127 orate[0], orate[1], orate[2], orate[3]); 7128 } else { 7129 sbuf_printf(sb, " channel 0 channel 1\n"); 7130 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7131 nrate[0], nrate[1]); 7132 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7133 orate[0], orate[1]); 7134 } 7135 7136 rc = sbuf_finish(sb); 7137 sbuf_delete(sb); 7138 7139 return (rc); 7140 } 7141 7142 static int 7143 sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7144 { 7145 struct adapter *sc = arg1; 7146 struct sbuf *sb; 7147 uint32_t *buf, *p; 7148 int rc, i; 7149 7150 rc = sysctl_wire_old_buffer(req, 0); 7151 if (rc != 0) 7152 return (rc); 7153 7154 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7155 if (sb == NULL) 7156 return (ENOMEM); 7157 7158 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 7159 M_ZERO | M_WAITOK); 7160 7161 t4_ulprx_read_la(sc, buf); 7162 p = buf; 7163 7164 sbuf_printf(sb, " Pcmd Type Message" 7165 " Data"); 7166 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 7167 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 7168 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 7169 } 7170 7171 rc = sbuf_finish(sb); 7172 sbuf_delete(sb); 7173 free(buf, M_CXGBE); 7174 return (rc); 7175 } 7176 7177 static int 7178 sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 7179 { 7180 struct adapter *sc = arg1; 7181 struct sbuf *sb; 7182 int rc, v; 7183 7184 rc = sysctl_wire_old_buffer(req, 0); 7185 if (rc != 0) 7186 return (rc); 7187 7188 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7189 if (sb == NULL) 7190 return (ENOMEM); 7191 7192 v = t4_read_reg(sc, A_SGE_STAT_CFG); 7193 if (G_STATSOURCE_T5(v) == 7) { 7194 if (G_STATMODE(v) == 0) { 7195 sbuf_printf(sb, "total %d, incomplete %d", 7196 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7197 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7198 } else if (G_STATMODE(v) == 1) { 7199 sbuf_printf(sb, 
"total %d, data overflow %d", 7200 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7201 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7202 } 7203 } 7204 rc = sbuf_finish(sb); 7205 sbuf_delete(sb); 7206 7207 return (rc); 7208 } 7209 #endif 7210 7211 static uint32_t 7212 fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 7213 { 7214 uint32_t mode; 7215 7216 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 7217 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 7218 7219 if (fconf & F_FRAGMENTATION) 7220 mode |= T4_FILTER_IP_FRAGMENT; 7221 7222 if (fconf & F_MPSHITTYPE) 7223 mode |= T4_FILTER_MPS_HIT_TYPE; 7224 7225 if (fconf & F_MACMATCH) 7226 mode |= T4_FILTER_MAC_IDX; 7227 7228 if (fconf & F_ETHERTYPE) 7229 mode |= T4_FILTER_ETH_TYPE; 7230 7231 if (fconf & F_PROTOCOL) 7232 mode |= T4_FILTER_IP_PROTO; 7233 7234 if (fconf & F_TOS) 7235 mode |= T4_FILTER_IP_TOS; 7236 7237 if (fconf & F_VLAN) 7238 mode |= T4_FILTER_VLAN; 7239 7240 if (fconf & F_VNIC_ID) { 7241 mode |= T4_FILTER_VNIC; 7242 if (iconf & F_VNIC) 7243 mode |= T4_FILTER_IC_VNIC; 7244 } 7245 7246 if (fconf & F_PORT) 7247 mode |= T4_FILTER_PORT; 7248 7249 if (fconf & F_FCOE) 7250 mode |= T4_FILTER_FCoE; 7251 7252 return (mode); 7253 } 7254 7255 static uint32_t 7256 mode_to_fconf(uint32_t mode) 7257 { 7258 uint32_t fconf = 0; 7259 7260 if (mode & T4_FILTER_IP_FRAGMENT) 7261 fconf |= F_FRAGMENTATION; 7262 7263 if (mode & T4_FILTER_MPS_HIT_TYPE) 7264 fconf |= F_MPSHITTYPE; 7265 7266 if (mode & T4_FILTER_MAC_IDX) 7267 fconf |= F_MACMATCH; 7268 7269 if (mode & T4_FILTER_ETH_TYPE) 7270 fconf |= F_ETHERTYPE; 7271 7272 if (mode & T4_FILTER_IP_PROTO) 7273 fconf |= F_PROTOCOL; 7274 7275 if (mode & T4_FILTER_IP_TOS) 7276 fconf |= F_TOS; 7277 7278 if (mode & T4_FILTER_VLAN) 7279 fconf |= F_VLAN; 7280 7281 if (mode & T4_FILTER_VNIC) 7282 fconf |= F_VNIC_ID; 7283 7284 if (mode & T4_FILTER_PORT) 7285 fconf |= F_PORT; 7286 7287 if (mode & T4_FILTER_FCoE) 7288 fconf |= F_FCOE; 7289 7290 return (fconf); 7291 } 7292 7293 static uint32_t 7294 mode_to_iconf(uint32_t mode) 7295 { 7296 7297 if (mode & T4_FILTER_IC_VNIC) 7298 return (F_VNIC); 7299 return (0); 7300 } 7301 7302 static int check_fspec_against_fconf_iconf(struct adapter *sc, 7303 struct t4_filter_specification *fs) 7304 { 7305 struct tp_params *tpp = &sc->params.tp; 7306 uint32_t fconf = 0; 7307 7308 if (fs->val.frag || fs->mask.frag) 7309 fconf |= F_FRAGMENTATION; 7310 7311 if (fs->val.matchtype || fs->mask.matchtype) 7312 fconf |= F_MPSHITTYPE; 7313 7314 if (fs->val.macidx || fs->mask.macidx) 7315 fconf |= F_MACMATCH; 7316 7317 if (fs->val.ethtype || fs->mask.ethtype) 7318 fconf |= F_ETHERTYPE; 7319 7320 if (fs->val.proto || fs->mask.proto) 7321 fconf |= F_PROTOCOL; 7322 7323 if (fs->val.tos || fs->mask.tos) 7324 fconf |= F_TOS; 7325 7326 if (fs->val.vlan_vld || fs->mask.vlan_vld) 7327 fconf |= F_VLAN; 7328 7329 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 7330 fconf |= F_VNIC_ID; 7331 if (tpp->ingress_config & F_VNIC) 7332 return (EINVAL); 7333 } 7334 7335 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 7336 fconf |= F_VNIC_ID; 7337 if ((tpp->ingress_config & F_VNIC) == 0) 7338 return (EINVAL); 7339 } 7340 7341 if (fs->val.iport || fs->mask.iport) 7342 fconf |= F_PORT; 7343 7344 if (fs->val.fcoe || fs->mask.fcoe) 7345 fconf |= F_FCOE; 7346 7347 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 7348 return (E2BIG); 7349 7350 return (0); 7351 } 7352 7353 static int 7354 get_filter_mode(struct adapter *sc, uint32_t *mode) 7355 { 7356 struct tp_params *tpp = &sc->params.tp; 7357 7358 /* 7359 
* We trust the cached values of the relevant TP registers. This means 7360 * things work reliably only if writes to those registers are always via 7361 * t4_set_filter_mode. 7362 */ 7363 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 7364 7365 return (0); 7366 } 7367 7368 static int 7369 set_filter_mode(struct adapter *sc, uint32_t mode) 7370 { 7371 struct tp_params *tpp = &sc->params.tp; 7372 uint32_t fconf, iconf; 7373 int rc; 7374 7375 iconf = mode_to_iconf(mode); 7376 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 7377 /* 7378 * For now we just complain if A_TP_INGRESS_CONFIG is not 7379 * already set to the correct value for the requested filter 7380 * mode. It's not clear if it's safe to write to this register 7381 * on the fly. (And we trust the cached value of the register). 7382 */ 7383 return (EBUSY); 7384 } 7385 7386 fconf = mode_to_fconf(mode); 7387 7388 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7389 "t4setfm"); 7390 if (rc) 7391 return (rc); 7392 7393 if (sc->tids.ftids_in_use > 0) { 7394 rc = EBUSY; 7395 goto done; 7396 } 7397 7398 #ifdef TCP_OFFLOAD 7399 if (uld_active(sc, ULD_TOM)) { 7400 rc = EBUSY; 7401 goto done; 7402 } 7403 #endif 7404 7405 rc = -t4_set_filter_mode(sc, fconf); 7406 done: 7407 end_synchronized_op(sc, LOCK_HELD); 7408 return (rc); 7409 } 7410 7411 static inline uint64_t 7412 get_filter_hits(struct adapter *sc, uint32_t fid) 7413 { 7414 uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 7415 uint64_t hits; 7416 7417 memwin_info(sc, 0, &mw_base, NULL); 7418 7419 off = position_memwin(sc, 0, 7420 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE); 7421 if (is_t4(sc)) { 7422 hits = t4_read_reg64(sc, mw_base + off + 16); 7423 hits = be64toh(hits); 7424 } else { 7425 hits = t4_read_reg(sc, mw_base + off + 24); 7426 hits = be32toh(hits); 7427 } 7428 7429 return (hits); 7430 } 7431 7432 static int 7433 get_filter(struct adapter *sc, struct t4_filter *t) 7434 { 7435 int i, rc, nfilters = sc->tids.nftids; 7436 struct filter_entry *f; 7437 7438 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7439 "t4getf"); 7440 if (rc) 7441 return (rc); 7442 7443 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 7444 t->idx >= nfilters) { 7445 t->idx = 0xffffffff; 7446 goto done; 7447 } 7448 7449 f = &sc->tids.ftid_tab[t->idx]; 7450 for (i = t->idx; i < nfilters; i++, f++) { 7451 if (f->valid) { 7452 t->idx = i; 7453 t->l2tidx = f->l2t ? 
f->l2t->idx : 0; 7454 t->smtidx = f->smtidx; 7455 if (f->fs.hitcnts) 7456 t->hits = get_filter_hits(sc, t->idx); 7457 else 7458 t->hits = UINT64_MAX; 7459 t->fs = f->fs; 7460 7461 goto done; 7462 } 7463 } 7464 7465 t->idx = 0xffffffff; 7466 done: 7467 end_synchronized_op(sc, LOCK_HELD); 7468 return (0); 7469 } 7470 7471 static int 7472 set_filter(struct adapter *sc, struct t4_filter *t) 7473 { 7474 unsigned int nfilters, nports; 7475 struct filter_entry *f; 7476 int i, rc; 7477 7478 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 7479 if (rc) 7480 return (rc); 7481 7482 nfilters = sc->tids.nftids; 7483 nports = sc->params.nports; 7484 7485 if (nfilters == 0) { 7486 rc = ENOTSUP; 7487 goto done; 7488 } 7489 7490 if (!(sc->flags & FULL_INIT_DONE)) { 7491 rc = EAGAIN; 7492 goto done; 7493 } 7494 7495 if (t->idx >= nfilters) { 7496 rc = EINVAL; 7497 goto done; 7498 } 7499 7500 /* Validate against the global filter mode and ingress config */ 7501 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 7502 if (rc != 0) 7503 goto done; 7504 7505 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 7506 rc = EINVAL; 7507 goto done; 7508 } 7509 7510 if (t->fs.val.iport >= nports) { 7511 rc = EINVAL; 7512 goto done; 7513 } 7514 7515 /* Can't specify an iq if not steering to it */ 7516 if (!t->fs.dirsteer && t->fs.iq) { 7517 rc = EINVAL; 7518 goto done; 7519 } 7520 7521 /* IPv6 filter idx must be 4 aligned */ 7522 if (t->fs.type == 1 && 7523 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 7524 rc = EINVAL; 7525 goto done; 7526 } 7527 7528 if (sc->tids.ftid_tab == NULL) { 7529 KASSERT(sc->tids.ftids_in_use == 0, 7530 ("%s: no memory allocated but filters_in_use > 0", 7531 __func__)); 7532 7533 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 7534 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 7535 if (sc->tids.ftid_tab == NULL) { 7536 rc = ENOMEM; 7537 goto done; 7538 } 7539 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 7540 } 7541 7542 for (i = 0; i < 4; i++) { 7543 f = &sc->tids.ftid_tab[t->idx + i]; 7544 7545 if (f->pending || f->valid) { 7546 rc = EBUSY; 7547 goto done; 7548 } 7549 if (f->locked) { 7550 rc = EPERM; 7551 goto done; 7552 } 7553 7554 if (t->fs.type == 0) 7555 break; 7556 } 7557 7558 f = &sc->tids.ftid_tab[t->idx]; 7559 f->fs = t->fs; 7560 7561 rc = set_filter_wr(sc, t->idx); 7562 done: 7563 end_synchronized_op(sc, 0); 7564 7565 if (rc == 0) { 7566 mtx_lock(&sc->tids.ftid_lock); 7567 for (;;) { 7568 if (f->pending == 0) { 7569 rc = f->valid ? 
0 : EIO; 7570 break; 7571 } 7572 7573 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7574 PCATCH, "t4setfw", 0)) { 7575 rc = EINPROGRESS; 7576 break; 7577 } 7578 } 7579 mtx_unlock(&sc->tids.ftid_lock); 7580 } 7581 return (rc); 7582 } 7583 7584 static int 7585 del_filter(struct adapter *sc, struct t4_filter *t) 7586 { 7587 unsigned int nfilters; 7588 struct filter_entry *f; 7589 int rc; 7590 7591 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 7592 if (rc) 7593 return (rc); 7594 7595 nfilters = sc->tids.nftids; 7596 7597 if (nfilters == 0) { 7598 rc = ENOTSUP; 7599 goto done; 7600 } 7601 7602 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 7603 t->idx >= nfilters) { 7604 rc = EINVAL; 7605 goto done; 7606 } 7607 7608 if (!(sc->flags & FULL_INIT_DONE)) { 7609 rc = EAGAIN; 7610 goto done; 7611 } 7612 7613 f = &sc->tids.ftid_tab[t->idx]; 7614 7615 if (f->pending) { 7616 rc = EBUSY; 7617 goto done; 7618 } 7619 if (f->locked) { 7620 rc = EPERM; 7621 goto done; 7622 } 7623 7624 if (f->valid) { 7625 t->fs = f->fs; /* extra info for the caller */ 7626 rc = del_filter_wr(sc, t->idx); 7627 } 7628 7629 done: 7630 end_synchronized_op(sc, 0); 7631 7632 if (rc == 0) { 7633 mtx_lock(&sc->tids.ftid_lock); 7634 for (;;) { 7635 if (f->pending == 0) { 7636 rc = f->valid ? EIO : 0; 7637 break; 7638 } 7639 7640 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7641 PCATCH, "t4delfw", 0)) { 7642 rc = EINPROGRESS; 7643 break; 7644 } 7645 } 7646 mtx_unlock(&sc->tids.ftid_lock); 7647 } 7648 7649 return (rc); 7650 } 7651 7652 static void 7653 clear_filter(struct filter_entry *f) 7654 { 7655 if (f->l2t) 7656 t4_l2t_release(f->l2t); 7657 7658 bzero(f, sizeof (*f)); 7659 } 7660 7661 static int 7662 set_filter_wr(struct adapter *sc, int fidx) 7663 { 7664 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 7665 struct fw_filter_wr *fwr; 7666 unsigned int ftid, vnic_vld, vnic_vld_mask; 7667 struct wrq_cookie cookie; 7668 7669 ASSERT_SYNCHRONIZED_OP(sc); 7670 7671 if (f->fs.newdmac || f->fs.newvlan) { 7672 /* This filter needs an L2T entry; allocate one. 
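		 * A switching L2T entry carries the rewrite state (the new
		 * destination MAC and/or VLAN tag, plus the egress port) that
		 * the hardware applies to packets hitting this filter.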
*/ 7673 f->l2t = t4_l2t_alloc_switching(sc->l2t); 7674 if (f->l2t == NULL) 7675 return (EAGAIN); 7676 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 7677 f->fs.dmac)) { 7678 t4_l2t_release(f->l2t); 7679 f->l2t = NULL; 7680 return (ENOMEM); 7681 } 7682 } 7683 7684 /* Already validated against fconf, iconf */ 7685 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 7686 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 7687 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 7688 vnic_vld = 1; 7689 else 7690 vnic_vld = 0; 7691 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 7692 vnic_vld_mask = 1; 7693 else 7694 vnic_vld_mask = 0; 7695 7696 ftid = sc->tids.ftid_base + fidx; 7697 7698 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 7699 if (fwr == NULL) 7700 return (ENOMEM); 7701 bzero(fwr, sizeof(*fwr)); 7702 7703 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 7704 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 7705 fwr->tid_to_iq = 7706 htobe32(V_FW_FILTER_WR_TID(ftid) | 7707 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 7708 V_FW_FILTER_WR_NOREPLY(0) | 7709 V_FW_FILTER_WR_IQ(f->fs.iq)); 7710 fwr->del_filter_to_l2tix = 7711 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 7712 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 7713 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 7714 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 7715 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 7716 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 7717 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 7718 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 7719 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 7720 f->fs.newvlan == VLAN_REWRITE) | 7721 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 7722 f->fs.newvlan == VLAN_REWRITE) | 7723 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 7724 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 7725 V_FW_FILTER_WR_PRIO(f->fs.prio) | 7726 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 7727 fwr->ethtype = htobe16(f->fs.val.ethtype); 7728 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 7729 fwr->frag_to_ovlan_vldm = 7730 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 7731 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 7732 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 7733 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 7734 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 7735 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 7736 fwr->smac_sel = 0; 7737 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 7738 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 7739 fwr->maci_to_matchtypem = 7740 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 7741 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 7742 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 7743 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 7744 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 7745 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 7746 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 7747 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 7748 fwr->ptcl = f->fs.val.proto; 7749 fwr->ptclm = f->fs.mask.proto; 7750 fwr->ttyp = f->fs.val.tos; 7751 fwr->ttypm = f->fs.mask.tos; 7752 fwr->ivlan = htobe16(f->fs.val.vlan); 7753 fwr->ivlanm = htobe16(f->fs.mask.vlan); 7754 fwr->ovlan = htobe16(f->fs.val.vnic); 7755 fwr->ovlanm = htobe16(f->fs.mask.vnic); 7756 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 7757 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 7758 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 7759 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 7760 fwr->lp = htobe16(f->fs.val.dport); 7761 fwr->lpm = htobe16(f->fs.mask.dport); 7762 fwr->fp = htobe16(f->fs.val.sport); 7763 fwr->fpm = htobe16(f->fs.mask.sport); 7764 if (f->fs.newsmac) 7765 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 7766 7767 f->pending = 1; 7768 sc->tids.ftids_in_use++; 7769 7770 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 7771 return (0); 7772 } 7773 7774 static int 7775 del_filter_wr(struct adapter *sc, int fidx) 7776 { 7777 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 7778 struct fw_filter_wr *fwr; 7779 unsigned int ftid; 7780 struct wrq_cookie cookie; 7781 7782 ftid = sc->tids.ftid_base + fidx; 7783 7784 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 7785 if (fwr == NULL) 7786 return (ENOMEM); 7787 bzero(fwr, sizeof (*fwr)); 7788 7789 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 7790 7791 f->pending = 1; 7792 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 7793 return (0); 7794 } 7795 7796 int 7797 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 7798 { 7799 struct adapter *sc = iq->adapter; 7800 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 7801 unsigned int idx = GET_TID(rpl); 7802 unsigned int rc; 7803 struct filter_entry *f; 7804 7805 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 7806 rss->opcode)); 7807 7808 if (is_ftid(sc, idx)) { 7809 7810 idx -= sc->tids.ftid_base; 7811 f = &sc->tids.ftid_tab[idx]; 7812 rc = G_COOKIE(rpl->cookie); 7813 7814 mtx_lock(&sc->tids.ftid_lock); 7815 if (rc == FW_FILTER_WR_FLT_ADDED) { 7816 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 7817 __func__, idx)); 7818 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 7819 f->pending = 0; /* asynchronous setup completed */ 7820 f->valid = 1; 7821 } else { 7822 if (rc != FW_FILTER_WR_FLT_DELETED) { 7823 /* Add or delete failed, display an error */ 7824 log(LOG_ERR, 7825 "filter %u setup failed with error %u\n", 7826 idx, rc); 7827 } 7828 7829 clear_filter(f); 7830 
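			/*
			 * Whether this was a failed add or a completed
			 * delete, the slot is now empty, so drop it from
			 * the in-use count.
			 */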
static int
get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
{
	int rc;

	if (cntxt->cid > M_CTXTQID)
		return (EINVAL);

	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
	if (rc)
		return (rc);

	if (sc->flags & FW_OK) {
		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
		    &cntxt->data[0]);
		if (rc == 0)
			goto done;
	}

	/*
	 * Read via firmware failed or wasn't even attempted.  Read directly
	 * via the backdoor.
	 */
	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	while (remaining) {
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}

static int
read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
{
	int rc;

	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
		return (EINVAL);

	if (i2cd->len > sizeof(i2cd->data))
		return (EFBIG);

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
	if (rc)
		return (rc);
	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
	    i2cd->offset, i2cd->len, &i2cd->data[0]);
	end_synchronized_op(sc, 0);

	return (rc);
}

static int
in_range(int val, int lo, int hi)
{

	return (val < 0 || (val <= hi && val >= lo));
}
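
/*
 * Note: in_range() deliberately accepts any negative value because the
 * ioctl interfaces below use -1 for "parameter not set".  For example,
 * in_range(-1, 0, 3) is true (unset is acceptable) while in_range(5, 0, 3)
 * is false.  Callers must still reject the negative case themselves when a
 * parameter is mandatory, as set_sched_class() does below.
 */
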
static int
set_sched_class(struct adapter *sc, struct t4_sched_params *p)
{
	int fw_subcmd, fw_type, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	/*
	 * Translate the cxgbetool parameters into T4 firmware parameters.
	 * (The sub-command and type are in common locations.)
	 */
	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
		fw_subcmd = FW_SCHED_SC_CONFIG;
	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
		fw_subcmd = FW_SCHED_SC_PARAMS;
	else {
		rc = EINVAL;
		goto done;
	}
	if (p->type == SCHED_CLASS_TYPE_PACKET)
		fw_type = FW_SCHED_TYPE_PKTSCHED;
	else {
		rc = EINVAL;
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
		/* Vet our parameters ... */
		if (p->u.config.minmax < 0) {
			rc = EINVAL;
			goto done;
		}

		/* And pass the request to the firmware ... */
		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax, 1);
		goto done;
	}

	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
		int fw_level;
		int fw_mode;
		int fw_rateunit;
		int fw_ratemode;

		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
		else {
			rc = EINVAL;
			goto done;
		}

		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
		else {
			rc = EINVAL;
			goto done;
		}

		/* Vet our parameters ... */
		if (!in_range(p->u.params.channel, 0, 3) ||
		    !in_range(p->u.params.cl, 0, sc->chip_params->nsched_cls) ||
		    !in_range(p->u.params.minrate, 0, 10000000) ||
		    !in_range(p->u.params.maxrate, 0, 10000000) ||
		    !in_range(p->u.params.weight, 0, 100)) {
			rc = ERANGE;
			goto done;
		}

		/*
		 * Translate any unset parameters into the firmware's
		 * nomenclature and/or fail the call if the parameters
		 * are required ...
		 */
		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
		    p->u.params.channel < 0 || p->u.params.cl < 0) {
			rc = EINVAL;
			goto done;
		}
		if (p->u.params.minrate < 0)
			p->u.params.minrate = 0;
		if (p->u.params.maxrate < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.maxrate = 0;
		}
		if (p->u.params.weight < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.weight = 0;
		}
		if (p->u.params.pktsize < 0) {
			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
				rc = EINVAL;
				goto done;
			} else
				p->u.params.pktsize = 0;
		}

		/* See what the firmware thinks of the request ... */
		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
		    fw_rateunit, fw_ratemode, p->u.params.channel,
		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
		    p->u.params.weight, p->u.params.pktsize, 1);
		goto done;
	}

	rc = EINVAL;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
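
/*
 * A userland sketch of driving set_sched_class() above: program class 0 on
 * channel 0 as a class rate limiter with an absolute bit rate, using the
 * parameter names vetted by the code.  Illustrative only and not compiled
 * here; an open fd on the nexus device is assumed and the rate's units
 * follow the firmware's convention.
 */
#if 0
	struct t4_sched_params p;

	memset(&p, 0xff, sizeof(p));	/* every int field starts as -1 */
	p.subcmd = SCHED_CLASS_SUBCMD_PARAMS;
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel = 0;
	p.u.params.cl = 0;
	p.u.params.maxrate = 100000;
	p.u.params.pktsize = 1500;
	if (ioctl(fd, CHELSIO_T4_SCHED_CLASS, &p) != 0)
		err(1, "CHELSIO_T4_SCHED_CLASS");
#endif
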
static int
set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
{
	struct port_info *pi = NULL;
	struct vi_info *vi;
	struct sge_txq *txq;
	uint32_t fw_mnem, fw_queue, fw_class;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
	if (rc)
		return (rc);

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (p->port >= sc->params.nports) {
		rc = EINVAL;
		goto done;
	}

	/* XXX: Only supported for the main VI. */
	pi = sc->port[p->port];
	vi = &pi->vi[0];
	if (!in_range(p->queue, 0, vi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
		rc = EINVAL;
		goto done;
	}

	/*
	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
	 * Scheduling Class in this case).
	 */
	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
	fw_class = p->cl < 0 ? 0xffffffff : p->cl;

	/*
	 * If p->queue is non-negative, then we're only changing the scheduling
	 * on a single specified TX queue.
	 */
	if (p->queue >= 0) {
		txq = &sc->sge.txq[vi->first_txq + p->queue];
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		goto done;
	}

	/*
	 * Change the scheduling on all the TX queues for the interface.
	 */
	for_each_txq(vi, i, txq) {
		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
		    &fw_class);
		if (rc)
			goto done;
	}

	rc = 0;
done:
	end_synchronized_op(sc, 0);
	return (rc);
}

int
t4_os_find_pci_capability(struct adapter *sc, int cap)
{
	int i;

	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}
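
/*
 * pci_find_cap(9) returns 0 on success and stores the capability offset in
 * its last argument, so the wrapper above returns that offset, or 0 if the
 * capability is absent.  e.g. t4_os_find_pci_capability(sc, PCIY_EXPRESS)
 * yields the location of the PCI Express capability in config space.
 */
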
int
t4_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

int
t4_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

void
t4_os_portmod_changed(const struct adapter *sc, int idx)
{
	struct port_info *pi = sc->port[idx];
	struct vi_info *vi;
	struct ifnet *ifp;
	int v;
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	for_each_vi(pi, v, vi) {
		build_medialist(pi, &vi->media);
	}

	ifp = pi->vi[0].ifp;
	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		if_printf(ifp, "transceiver unplugged.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		if_printf(ifp, "unknown transceiver inserted.\n");
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		if_printf(ifp, "unsupported transceiver inserted.\n");
	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
		if_printf(ifp, "%s transceiver inserted.\n",
		    mod_str[pi->mod_type]);
	} else {
		if_printf(ifp, "transceiver (type %d) inserted.\n",
		    pi->mod_type);
	}
}

void
t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
{
	struct port_info *pi = sc->port[idx];
	struct vi_info *vi;
	struct ifnet *ifp;
	int v;

	if (link_stat)
		pi->linkdnrc = -1;
	else {
		if (reason >= 0)
			pi->linkdnrc = reason;
	}
	for_each_vi(pi, v, vi) {
		ifp = vi->ifp;
		if (ifp == NULL)
			continue;

		if (link_stat) {
			ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
			if_link_state_change(ifp, LINK_STATE_UP);
		} else {
			if_link_state_change(ifp, LINK_STATE_DOWN);
		}
	}
}

void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	sx_slock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	sx_sunlock(&t4_list_lock);
}
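
/*
 * A sketch (not compiled) of using t4_iterate(): the callback runs once for
 * every adapter on the global list, with t4_list_lock held shared for the
 * duration.  The counting callback below is hypothetical.
 */
#if 0
	static void
	count_adapters(struct adapter *sc, void *arg)
	{

		(*(int *)arg)++;
	}

	int n = 0;

	t4_iterate(count_adapters, &n);
	printf("%d adapter(s) attached\n", n);
#endif
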
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}

static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i, v;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;
		struct vi_info *vi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);
		pi->tx_parse_error = 0;
		mtx_lock(&sc->reg_lock);
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE)
				t4_clr_vi_stats(sc, vi->viid);
		}
		mtx_unlock(&sc->reg_lock);

		/*
		 * Since this command accepts a port, clear stats for
		 * all VIs on this port.
		 */
		for_each_vi(pi, v, vi) {
			if (vi->flags & VI_INIT_DONE) {
				struct sge_rxq *rxq;
				struct sge_txq *txq;
				struct sge_wrq *wrq;

				if (vi->flags & VI_NETMAP)
					continue;

				for_each_rxq(vi, i, rxq) {
#if defined(INET) || defined(INET6)
					rxq->lro.lro_queued = 0;
					rxq->lro.lro_flushed = 0;
#endif
					rxq->rxcsum = 0;
					rxq->vlan_extraction = 0;
				}

				for_each_txq(vi, i, txq) {
					txq->txcsum = 0;
					txq->tso_wrs = 0;
					txq->vlan_insertion = 0;
					txq->imm_wrs = 0;
					txq->sgl_wrs = 0;
					txq->txpkt_wrs = 0;
					txq->txpkts0_wrs = 0;
					txq->txpkts1_wrs = 0;
					txq->txpkts0_pkts = 0;
					txq->txpkts1_pkts = 0;
					mp_ring_reset_stats(txq->r);
				}

#ifdef TCP_OFFLOAD
				/* nothing to clear for each ofld_rxq */

				for_each_ofld_txq(vi, i, wrq) {
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
#endif

				if (IS_MAIN_VI(vi)) {
					wrq = &sc->sge.ctrlq[pi->port_id];
					wrq->tx_wrs_direct = 0;
					wrq->tx_wrs_copied = 0;
				}
			}
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}
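
/*
 * All of the ioctls above require PRIV_DRIVER, so only suitably privileged
 * processes can reach them.  A userland sketch of the simplest one,
 * CHELSIO_T4_GETREG (illustrative only, not compiled here; assumes an open
 * fd on the nexus device):
 */
#if 0
	struct t4_reg reg;

	reg.addr = 0x0;		/* register offset, must be 4-byte aligned */
	reg.size = 4;		/* 4 or 8 bytes */
	if (ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
		printf("reg 0x%x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
#endif
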
void
t4_db_full(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}

void
t4_db_dropped(struct adapter *sc)
{

	CXGBE_UNIMPLEMENTED(__func__);
}

#ifdef TCP_OFFLOAD
void
t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order)
{

	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
	    V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
	    V_HPZ3(pgsz_order[3]));
}
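
/*
 * toe_capability() below runs under a synchronized op when the IFCAP_TOE
 * flag on an interface is toggled (e.g. via ifconfig's "toe"/"-toe"
 * capability options).  On first enable for a port it brings up the base
 * queues and activates the TOM upper layer driver.
 */
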
static int
toe_capability(struct vi_info *vi, int enable)
{
	int rc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
			/* TOE is already enabled. */
			return (0);
		}

		/*
		 * We need the port's queues around so that we're able to send
		 * and receive CPLs to/from the TOE even if the ifnet for this
		 * port has never been UP'd administratively.
		 */
		if (!(vi->flags & VI_INIT_DONE)) {
			rc = cxgbe_init_synchronized(vi);
			if (rc)
				return (rc);
		}
		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
			rc = cxgbe_init_synchronized(&pi->vi[0]);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id)) {
			/* TOE is enabled on another VI of this port. */
			pi->uld_vis++;
			return (0);
		}

		if (!uld_active(sc, ULD_TOM)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(uld_active(sc, ULD_TOM),
			    ("%s: TOM activated but flag not set", __func__));
		}

		/* Activate iWARP and iSCSI too, if the modules are loaded. */
		if (!uld_active(sc, ULD_IWARP))
			(void) t4_activate_uld(sc, ULD_IWARP);
		if (!uld_active(sc, ULD_ISCSI))
			(void) t4_activate_uld(sc, ULD_ISCSI);

		pi->uld_vis++;
		setbit(&sc->offload_map, pi->port_id);
	} else {
		pi->uld_vis--;

		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
			return (0);

		KASSERT(uld_active(sc, ULD_TOM),
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}

/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}
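
/*
 * A sketch (not compiled) of how an upper layer driver plugs into the
 * registry above.  The callback names are placeholders; the real TOM,
 * iWARP, and iSCSI drivers do this from their module load handlers.
 */
#if 0
	static struct uld_info my_uld_info = {
		.uld_id = ULD_TOM,
		.activate = my_activate,	/* int (*)(struct adapter *) */
		.deactivate = my_deactivate,
	};

	rc = t4_register_uld(&my_uld_info);	/* EEXIST if the id is taken */
#endif
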
int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = EAGAIN;	/* kldload the module with this ULD and try again. */

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			if (!(sc->flags & FULL_INIT_DONE)) {
				rc = adapter_full_init(sc);
				if (rc != 0)
					break;
			}

			rc = ui->activate(sc);
			if (rc == 0) {
				setbit(&sc->active_ulds, id);
				ui->refcount++;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (id < 0 || id > ULD_MAX)
		return (EINVAL);
	rc = ENXIO;

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0) {
				clrbit(&sc->active_ulds, id);
				ui->refcount--;
			}
			break;
		}
	}

	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
uld_active(struct adapter *sc, int uld_id)
{

	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);

	return (isset(&sc->active_ulds, uld_id));
}
#endif
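
/*
 * Example of the tunable convention used below (illustrative; the exact
 * tunable names are established where these variables are declared earlier
 * in the file): leaving a knob unset, or setting it negative, lets
 * tweak_tunables() pick a default, while an explicit loader.conf entry
 * such as
 *
 *	hw.cxgbe.ntxq10g="8"
 *
 * is used as-is.
 */
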
/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1) {
#ifdef RSS
		t4_ntxq10g = rss_getnumbuckets();
#else
		t4_ntxq10g = min(nc, NTXQ_10G);
#endif
	}

	if (t4_ntxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_ntxq1g = rss_getnumbuckets();
#else
		t4_ntxq1g = min(nc, NTXQ_1G);
#endif
	}

	if (t4_nrxq10g < 1) {
#ifdef RSS
		t4_nrxq10g = rss_getnumbuckets();
#else
		t4_nrxq10g = min(nc, NRXQ_10G);
#endif
	}

	if (t4_nrxq1g < 1) {
#ifdef RSS
		/* XXX: way too many for 1GbE? */
		t4_nrxq1g = rss_getnumbuckets();
#else
		t4_nrxq1g = min(nc, NRXQ_1G);
#endif
	}

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

#ifdef DEV_NETMAP
	if (t4_nnmtxq10g < 1)
		t4_nnmtxq10g = min(nc, NNMTXQ_10G);

	if (t4_nnmtxq1g < 1)
		t4_nnmtxq1g = min(nc, NNMTXQ_1G);

	if (t4_nnmrxq10g < 1)
		t4_nnmrxq10g = min(nc, NNMRXQ_10G);

	if (t4_nnmrxq1g < 1)
		t4_nnmrxq1g = min(nc, NNMRXQ_1G);
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}

static struct sx mlu;	/* mod load unload */
SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		sx_xlock(&mlu);
		if (loaded++ == 0) {
			t4_sge_modload();
			sx_init(&t4_list_lock, "T4/T5 adapters");
			SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
			SLIST_INIT(&t4_uld_list);
#endif
			t4_tracer_modload();
			tweak_tunables();
		}
		sx_xunlock(&mlu);
		break;

	case MOD_UNLOAD:
		sx_xlock(&mlu);
		if (--loaded == 0) {
			int tries;

			sx_slock(&t4_list_lock);
			if (!SLIST_EMPTY(&t4_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#ifdef TCP_OFFLOAD
			sx_slock(&t4_uld_list_lock);
			if (!SLIST_EMPTY(&t4_uld_list)) {
				rc = EBUSY;
				sx_sunlock(&t4_uld_list_lock);
				sx_sunlock(&t4_list_lock);
				goto done_unload;
			}
#endif
			tries = 0;
			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
				uprintf("%ju clusters with custom free routine "
				    "still in use.\n", t4_sge_extfree_refs());
				pause("t4unload", 2 * hz);
			}
#ifdef TCP_OFFLOAD
			sx_sunlock(&t4_uld_list_lock);
#endif
			sx_sunlock(&t4_list_lock);

			if (t4_sge_extfree_refs() == 0) {
				t4_tracer_modunload();
#ifdef TCP_OFFLOAD
				sx_destroy(&t4_uld_list_lock);
#endif
				sx_destroy(&t4_list_lock);
				t4_sge_modunload();
				loaded = 0;
			} else {
				rc = EBUSY;
				loaded++;	/* undo earlier decrement */
			}
		}
done_unload:
		sx_xunlock(&mlu);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;
static devclass_t vcxgbe_devclass, vcxl_devclass;
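
/*
 * Driver attachment hierarchy established by the declarations below:
 *
 *	pci -> t4nex -> cxgbe -> vcxgbe		(T4)
 *	pci -> t5nex -> cxl -> vcxl		(T5)
 *
 * The nexus attaches to the PCI bus, one port device attaches to the nexus
 * per port, and additional virtual interfaces attach to their port device.
 */
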
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t4nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
#ifdef DEV_NETMAP
MODULE_DEPEND(t5nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);

DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
MODULE_VERSION(vcxgbe, 1);

DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
MODULE_VERSION(vcxl, 1);