/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_lock.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <linux/pci-tph.h>
#include <linux/bnxt/hsi.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_IMPORT_NS("NETDEV_INTERNAL");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom NetXtreme network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD

#define BNXT_TX_PUSH_THRESH	164

/* indexed by enum board_idx */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = {
"Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, 108 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 109 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 110 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 111 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 112 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 113 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 114 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 115 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 116 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 117 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 118 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 119 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 120 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 121 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 122 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 123 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 124 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 125 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 126 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 127 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 128 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 129 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" }, 130 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 131 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 132 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 133 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 134 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 135 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 136 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 137 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 138 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 139 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 140 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, 141 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, 142 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 143 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, 144 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" }, 145 }; 146 147 static const struct pci_device_id bnxt_pci_tbl[] = { 148 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 149 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 150 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 151 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 152 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 153 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 154 { PCI_VDEVICE(BROADCOM, 
0x16ca), .driver_data = BCM57304 }, 155 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 156 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 157 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 158 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 159 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 160 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 161 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 162 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 163 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 164 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 165 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 166 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 167 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 168 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 169 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 170 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 171 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 172 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 173 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 174 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 175 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 176 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 177 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 178 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 179 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 180 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 181 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 182 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 183 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 184 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 185 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 186 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 }, 187 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 }, 188 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 }, 189 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 }, 190 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR }, 191 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 192 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR }, 193 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR }, 194 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 195 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR }, 196 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 197 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 198 #ifdef CONFIG_BNXT_SRIOV 199 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 200 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, 201 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, 202 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 203 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, 204 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 205 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, 206 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, 207 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = 
NETXTREME_E_VF_HV }, 208 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, 209 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 210 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 211 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 212 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 213 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 214 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, 215 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 216 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 217 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, 218 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, 219 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF }, 220 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 221 #endif 222 { 0 } 223 }; 224 225 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 226 227 static const u16 bnxt_vf_req_snif[] = { 228 HWRM_FUNC_CFG, 229 HWRM_FUNC_VF_CFG, 230 HWRM_PORT_PHY_QCFG, 231 HWRM_CFA_L2_FILTER_ALLOC, 232 }; 233 234 static const u16 bnxt_async_events_arr[] = { 235 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 236 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 237 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 238 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 239 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 240 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 241 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 242 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 243 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 244 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, 245 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, 246 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, 247 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, 248 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, 249 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, 250 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE, 251 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER, 252 }; 253 254 const u16 bnxt_bstore_to_trace[] = { 255 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE, 256 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE, 257 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE, 258 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE, 259 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE, 260 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE, 261 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE, 262 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE, 263 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE, 264 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE, 265 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE, 266 }; 267 268 static struct workqueue_struct *bnxt_pf_wq; 269 270 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ 271 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} 272 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} 273 274 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = { 275 .ports = { 276 .src = 0, 277 .dst = 0, 278 }, 279 .addrs = { 280 .v6addrs = { 281 .src = BNXT_IPV6_MASK_NONE, 282 .dst = BNXT_IPV6_MASK_NONE, 283 }, 284 }, 285 }; 286 287 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = { 288 .ports = { 289 .src = cpu_to_be16(0xffff), 290 .dst = cpu_to_be16(0xffff), 291 }, 292 .addrs = { 293 .v6addrs = { 294 .src = BNXT_IPV6_MASK_ALL, 295 .dst = 
BNXT_IPV6_MASK_ALL,
		},
	},
};

const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
	.ports = {
		.src = cpu_to_be16(0xffff),
		.dst = cpu_to_be16(0xffff),
	},
	.addrs = {
		.v4addrs = {
			.src = cpu_to_be32(0xffffffff),
			.dst = cpu_to_be32(0xffffffff),
		},
	},
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\
		    (db)->doorbell)

#define BNXT_DB_NQ_P7(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM |		\
		    DB_RING_IDX(db, idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P7)
		BNXT_DB_NQ_P7(db, idx);
	else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    DB_RING_IDX(db, idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void __bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
{
	set_bit(event, &bp->sp_event);
	__bnxt_queue_sp_work(bp);
}

static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		__bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			  u16 curr)
{
	struct bnxt_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
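	/* bnapi->tx_fault is checked at the top of this function, so setting
	 * it below also keeps a burst of bad completions from re-queueing
	 * the reset; the actual ring recovery is deferred to the sp_task
	 * workqueue via BNXT_RESET_TASK_SP_EVENT instead of running in NAPI
	 * context.
	 */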
419 bnapi->tx_fault = 1; 420 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 421 } 422 423 const u16 bnxt_lhint_arr[] = { 424 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 425 TX_BD_FLAGS_LHINT_512_TO_1023, 426 TX_BD_FLAGS_LHINT_1024_TO_2047, 427 TX_BD_FLAGS_LHINT_1024_TO_2047, 428 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 429 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 430 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 431 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 432 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 433 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 434 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 435 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 436 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 437 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 438 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 439 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 440 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 441 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 442 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 443 }; 444 445 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 446 { 447 struct metadata_dst *md_dst = skb_metadata_dst(skb); 448 449 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 450 return 0; 451 452 return md_dst->u.port_info.port_id; 453 } 454 455 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 456 u16 prod) 457 { 458 /* Sync BD data before updating doorbell */ 459 wmb(); 460 bnxt_db_write(bp, &txr->tx_db, prod); 461 txr->kick_pending = 0; 462 } 463 464 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 465 { 466 struct bnxt *bp = netdev_priv(dev); 467 struct tx_bd *txbd, *txbd0; 468 struct tx_bd_ext *txbd1; 469 struct netdev_queue *txq; 470 int i; 471 dma_addr_t mapping; 472 unsigned int length, pad = 0; 473 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 474 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 475 struct pci_dev *pdev = bp->pdev; 476 u16 prod, last_frag, txts_prod; 477 struct bnxt_tx_ring_info *txr; 478 struct bnxt_sw_tx_bd *tx_buf; 479 __le32 lflags = 0; 480 skb_frag_t *frag; 481 482 i = skb_get_queue_mapping(skb); 483 if (unlikely(i >= bp->tx_nr_rings)) { 484 dev_kfree_skb_any(skb); 485 dev_core_stats_tx_dropped_inc(dev); 486 return NETDEV_TX_OK; 487 } 488 489 txq = netdev_get_tx_queue(dev, i); 490 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 491 prod = txr->tx_prod; 492 493 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS) 494 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) { 495 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. 
SKB will be linearized.\n", 496 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS); 497 if (skb_linearize(skb)) { 498 dev_kfree_skb_any(skb); 499 dev_core_stats_tx_dropped_inc(dev); 500 return NETDEV_TX_OK; 501 } 502 } 503 #endif 504 free_size = bnxt_tx_avail(bp, txr); 505 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 506 /* We must have raced with NAPI cleanup */ 507 if (net_ratelimit() && txr->kick_pending) 508 netif_warn(bp, tx_err, dev, 509 "bnxt: ring busy w/ flush pending!\n"); 510 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 511 bp->tx_wake_thresh)) 512 return NETDEV_TX_BUSY; 513 } 514 515 if (unlikely(ipv6_hopopt_jumbo_remove(skb))) 516 goto tx_free; 517 518 length = skb->len; 519 len = skb_headlen(skb); 520 last_frag = skb_shinfo(skb)->nr_frags; 521 522 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 523 524 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 525 tx_buf->skb = skb; 526 tx_buf->nr_frags = last_frag; 527 528 vlan_tag_flags = 0; 529 cfa_action = bnxt_xmit_get_cfa_action(skb); 530 if (skb_vlan_tag_present(skb)) { 531 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 532 skb_vlan_tag_get(skb); 533 /* Currently supports 8021Q, 8021AD vlan offloads 534 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 535 */ 536 if (skb->vlan_proto == htons(ETH_P_8021Q)) 537 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 538 } 539 540 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp && 541 ptp->tx_tstamp_en) { 542 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) { 543 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 544 tx_buf->is_ts_pkt = 1; 545 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 546 } else if (!skb_is_gso(skb)) { 547 u16 seq_id, hdr_off; 548 549 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) && 550 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) { 551 if (vlan_tag_flags) 552 hdr_off += VLAN_HLEN; 553 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 554 tx_buf->is_ts_pkt = 1; 555 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 556 557 ptp->txts_req[txts_prod].tx_seqid = seq_id; 558 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off; 559 tx_buf->txts_prod = txts_prod; 560 } 561 } 562 } 563 if (unlikely(skb->no_fcs)) 564 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 565 566 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && 567 skb_frags_readable(skb) && !lflags) { 568 struct tx_push_buffer *tx_push_buf = txr->tx_push; 569 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 570 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 571 void __iomem *db = txr->tx_db.doorbell; 572 void *pdata = tx_push_buf->data; 573 u64 *end; 574 int j, push_len; 575 576 /* Set COAL_NOW to be ready quickly for the next push */ 577 tx_push->tx_bd_len_flags_type = 578 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 579 TX_BD_TYPE_LONG_TX_BD | 580 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 581 TX_BD_FLAGS_COAL_NOW | 582 TX_BD_FLAGS_PACKET_END | 583 TX_BD_CNT(2)); 584 585 if (skb->ip_summed == CHECKSUM_PARTIAL) 586 tx_push1->tx_bd_hsize_lflags = 587 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 588 else 589 tx_push1->tx_bd_hsize_lflags = 0; 590 591 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 592 tx_push1->tx_bd_cfa_action = 593 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 594 595 end = pdata + length; 596 end = PTR_ALIGN(end, 8) - 1; 597 *end = 0; 598 599 skb_copy_from_linear_data(skb, pdata, len); 600 pdata += len; 601 for (j = 0; j < last_frag; j++) { 602 void *fptr; 603 604 frag = &skb_shinfo(skb)->frags[j]; 605 fptr = skb_frag_address_safe(frag); 606 if (!fptr) 607 goto normal_tx; 608 609 
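			/* Inline "push" transmit: the linear data was copied
			 * above and each fragment is copied here into the
			 * DMA-mapped push buffer (txr->data_mapping), so the
			 * whole small packet can be written through the
			 * doorbell below without mapping each fragment
			 * separately.
			 */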
memcpy(pdata, fptr, skb_frag_size(frag)); 610 pdata += skb_frag_size(frag); 611 } 612 613 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 614 txbd->tx_bd_haddr = txr->data_mapping; 615 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); 616 prod = NEXT_TX(prod); 617 tx_push->tx_bd_opaque = txbd->tx_bd_opaque; 618 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 619 memcpy(txbd, tx_push1, sizeof(*txbd)); 620 prod = NEXT_TX(prod); 621 tx_push->doorbell = 622 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | 623 DB_RING_IDX(&txr->tx_db, prod)); 624 WRITE_ONCE(txr->tx_prod, prod); 625 626 tx_buf->is_push = 1; 627 netdev_tx_sent_queue(txq, skb->len); 628 wmb(); /* Sync is_push and byte queue before pushing data */ 629 630 push_len = (length + sizeof(*tx_push) + 7) / 8; 631 if (push_len > 16) { 632 __iowrite64_copy(db, tx_push_buf, 16); 633 __iowrite32_copy(db + 4, tx_push_buf + 1, 634 (push_len - 16) << 1); 635 } else { 636 __iowrite64_copy(db, tx_push_buf, push_len); 637 } 638 639 goto tx_done; 640 } 641 642 normal_tx: 643 if (length < BNXT_MIN_PKT_SIZE) { 644 pad = BNXT_MIN_PKT_SIZE - length; 645 if (skb_pad(skb, pad)) 646 /* SKB already freed. */ 647 goto tx_kick_pending; 648 length = BNXT_MIN_PKT_SIZE; 649 } 650 651 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 652 653 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 654 goto tx_free; 655 656 dma_unmap_addr_set(tx_buf, mapping, mapping); 657 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 658 TX_BD_CNT(last_frag + 2); 659 660 txbd->tx_bd_haddr = cpu_to_le64(mapping); 661 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); 662 663 prod = NEXT_TX(prod); 664 txbd1 = (struct tx_bd_ext *) 665 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 666 667 txbd1->tx_bd_hsize_lflags = lflags; 668 if (skb_is_gso(skb)) { 669 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4); 670 u32 hdr_len; 671 672 if (skb->encapsulation) { 673 if (udp_gso) 674 hdr_len = skb_inner_transport_offset(skb) + 675 sizeof(struct udphdr); 676 else 677 hdr_len = skb_inner_tcp_all_headers(skb); 678 } else if (udp_gso) { 679 hdr_len = skb_transport_offset(skb) + 680 sizeof(struct udphdr); 681 } else { 682 hdr_len = skb_tcp_all_headers(skb); 683 } 684 685 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | 686 TX_BD_FLAGS_T_IPID | 687 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 688 length = skb_shinfo(skb)->gso_size; 689 txbd1->tx_bd_mss = cpu_to_le32(length); 690 length += hdr_len; 691 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 692 txbd1->tx_bd_hsize_lflags |= 693 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 694 txbd1->tx_bd_mss = 0; 695 } 696 697 length >>= 9; 698 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 699 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 700 skb->len); 701 i = 0; 702 goto tx_dma_error; 703 } 704 flags |= bnxt_lhint_arr[length]; 705 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 706 707 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 708 txbd1->tx_bd_cfa_action = 709 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 710 txbd0 = txbd; 711 for (i = 0; i < last_frag; i++) { 712 frag = &skb_shinfo(skb)->frags[i]; 713 prod = NEXT_TX(prod); 714 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 715 716 len = skb_frag_size(frag); 717 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 718 DMA_TO_DEVICE); 719 720 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 721 goto tx_dma_error; 722 723 tx_buf = 
&txr->tx_buf_ring[RING_TX(bp, prod)]; 724 netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf, 725 mapping, mapping); 726 727 txbd->tx_bd_haddr = cpu_to_le64(mapping); 728 729 flags = len << TX_BD_LEN_SHIFT; 730 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 731 } 732 733 flags &= ~TX_BD_LEN; 734 txbd->tx_bd_len_flags_type = 735 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 736 TX_BD_FLAGS_PACKET_END); 737 738 netdev_tx_sent_queue(txq, skb->len); 739 740 skb_tx_timestamp(skb); 741 742 prod = NEXT_TX(prod); 743 WRITE_ONCE(txr->tx_prod, prod); 744 745 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 746 bnxt_txr_db_kick(bp, txr, prod); 747 } else { 748 if (free_size >= bp->tx_wake_thresh) 749 txbd0->tx_bd_len_flags_type |= 750 cpu_to_le32(TX_BD_FLAGS_NO_CMPL); 751 txr->kick_pending = 1; 752 } 753 754 tx_done: 755 756 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 757 if (netdev_xmit_more() && !tx_buf->is_push) { 758 txbd0->tx_bd_len_flags_type &= 759 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL); 760 bnxt_txr_db_kick(bp, txr, prod); 761 } 762 763 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 764 bp->tx_wake_thresh); 765 } 766 return NETDEV_TX_OK; 767 768 tx_dma_error: 769 last_frag = i; 770 771 /* start back at beginning and unmap skb */ 772 prod = txr->tx_prod; 773 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 774 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 775 skb_headlen(skb), DMA_TO_DEVICE); 776 prod = NEXT_TX(prod); 777 778 /* unmap remaining mapped pages */ 779 for (i = 0; i < last_frag; i++) { 780 prod = NEXT_TX(prod); 781 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 782 frag = &skb_shinfo(skb)->frags[i]; 783 netmem_dma_unmap_page_attrs(&pdev->dev, 784 dma_unmap_addr(tx_buf, mapping), 785 skb_frag_size(frag), 786 DMA_TO_DEVICE, 0); 787 } 788 789 tx_free: 790 dev_kfree_skb_any(skb); 791 tx_kick_pending: 792 if (BNXT_TX_PTP_IS_SET(lflags)) { 793 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0; 794 atomic64_inc(&bp->ptp_cfg->stats.ts_err); 795 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 796 /* set SKB to err so PTP worker will clean up */ 797 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO); 798 } 799 if (txr->kick_pending) 800 bnxt_txr_db_kick(bp, txr, txr->tx_prod); 801 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL; 802 dev_core_stats_tx_dropped_inc(dev); 803 return NETDEV_TX_OK; 804 } 805 806 /* Returns true if some remaining TX packets not processed. 
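 * This happens when a TX timestamp completion is still outstanding
 * (is_ts_pkt with BNXT_FW_CAP_TX_TS_CMP); the caller then leaves
 * BNXT_TX_CMP_EVENT set so the ring is revisited.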
*/ 807 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 808 int budget) 809 { 810 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 811 struct pci_dev *pdev = bp->pdev; 812 u16 hw_cons = txr->tx_hw_cons; 813 unsigned int tx_bytes = 0; 814 u16 cons = txr->tx_cons; 815 skb_frag_t *frag; 816 int tx_pkts = 0; 817 bool rc = false; 818 819 while (RING_TX(bp, cons) != hw_cons) { 820 struct bnxt_sw_tx_bd *tx_buf; 821 struct sk_buff *skb; 822 bool is_ts_pkt; 823 int j, last; 824 825 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 826 skb = tx_buf->skb; 827 828 if (unlikely(!skb)) { 829 bnxt_sched_reset_txr(bp, txr, cons); 830 return rc; 831 } 832 833 is_ts_pkt = tx_buf->is_ts_pkt; 834 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) { 835 rc = true; 836 break; 837 } 838 839 cons = NEXT_TX(cons); 840 tx_pkts++; 841 tx_bytes += skb->len; 842 tx_buf->skb = NULL; 843 tx_buf->is_ts_pkt = 0; 844 845 if (tx_buf->is_push) { 846 tx_buf->is_push = 0; 847 goto next_tx_int; 848 } 849 850 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 851 skb_headlen(skb), DMA_TO_DEVICE); 852 last = tx_buf->nr_frags; 853 854 for (j = 0; j < last; j++) { 855 frag = &skb_shinfo(skb)->frags[j]; 856 cons = NEXT_TX(cons); 857 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 858 netmem_dma_unmap_page_attrs(&pdev->dev, 859 dma_unmap_addr(tx_buf, 860 mapping), 861 skb_frag_size(frag), 862 DMA_TO_DEVICE, 0); 863 } 864 if (unlikely(is_ts_pkt)) { 865 if (BNXT_CHIP_P5(bp)) { 866 /* PTP worker takes ownership of the skb */ 867 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod); 868 skb = NULL; 869 } 870 } 871 872 next_tx_int: 873 cons = NEXT_TX(cons); 874 875 dev_consume_skb_any(skb); 876 } 877 878 WRITE_ONCE(txr->tx_cons, cons); 879 880 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, 881 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, 882 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); 883 884 return rc; 885 } 886 887 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 888 { 889 struct bnxt_tx_ring_info *txr; 890 bool more = false; 891 int i; 892 893 bnxt_for_each_napi_tx(i, bnapi, txr) { 894 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) 895 more |= __bnxt_tx_int(bp, txr, budget); 896 } 897 if (!more) 898 bnapi->events &= ~BNXT_TX_CMP_EVENT; 899 } 900 901 static bool bnxt_separate_head_pool(struct bnxt_rx_ring_info *rxr) 902 { 903 return rxr->need_head_pool || PAGE_SIZE > BNXT_RX_PAGE_SIZE; 904 } 905 906 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 907 struct bnxt_rx_ring_info *rxr, 908 unsigned int *offset, 909 gfp_t gfp) 910 { 911 struct page *page; 912 913 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 914 page = page_pool_dev_alloc_frag(rxr->page_pool, offset, 915 BNXT_RX_PAGE_SIZE); 916 } else { 917 page = page_pool_dev_alloc_pages(rxr->page_pool); 918 *offset = 0; 919 } 920 if (!page) 921 return NULL; 922 923 *mapping = page_pool_get_dma_addr(page) + *offset; 924 return page; 925 } 926 927 static netmem_ref __bnxt_alloc_rx_netmem(struct bnxt *bp, dma_addr_t *mapping, 928 struct bnxt_rx_ring_info *rxr, 929 gfp_t gfp) 930 { 931 netmem_ref netmem; 932 933 netmem = page_pool_alloc_netmems(rxr->page_pool, gfp); 934 if (!netmem) 935 return 0; 936 937 *mapping = page_pool_get_dma_addr_netmem(netmem); 938 return netmem; 939 } 940 941 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, 942 struct bnxt_rx_ring_info *rxr, 943 gfp_t gfp) 944 { 945 unsigned int offset; 946 struct page *page; 947 948 page 
= page_pool_alloc_frag(rxr->head_pool, &offset, 949 bp->rx_buf_size, gfp); 950 if (!page) 951 return NULL; 952 953 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset; 954 return page_address(page) + offset; 955 } 956 957 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 958 u16 prod, gfp_t gfp) 959 { 960 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 961 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 962 dma_addr_t mapping; 963 964 if (BNXT_RX_PAGE_MODE(bp)) { 965 unsigned int offset; 966 struct page *page = 967 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 968 969 if (!page) 970 return -ENOMEM; 971 972 mapping += bp->rx_dma_offset; 973 rx_buf->data = page; 974 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; 975 } else { 976 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp); 977 978 if (!data) 979 return -ENOMEM; 980 981 rx_buf->data = data; 982 rx_buf->data_ptr = data + bp->rx_offset; 983 } 984 rx_buf->mapping = mapping; 985 986 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 987 return 0; 988 } 989 990 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 991 { 992 u16 prod = rxr->rx_prod; 993 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 994 struct bnxt *bp = rxr->bnapi->bp; 995 struct rx_bd *cons_bd, *prod_bd; 996 997 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 998 cons_rx_buf = &rxr->rx_buf_ring[cons]; 999 1000 prod_rx_buf->data = data; 1001 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 1002 1003 prod_rx_buf->mapping = cons_rx_buf->mapping; 1004 1005 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1006 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; 1007 1008 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 1009 } 1010 1011 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1012 { 1013 u16 next, max = rxr->rx_agg_bmap_size; 1014 1015 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 1016 if (next >= max) 1017 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 1018 return next; 1019 } 1020 1021 static int bnxt_alloc_rx_netmem(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1022 u16 prod, gfp_t gfp) 1023 { 1024 struct rx_bd *rxbd = 1025 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 1026 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 1027 u16 sw_prod = rxr->rx_sw_agg_prod; 1028 unsigned int offset = 0; 1029 dma_addr_t mapping; 1030 netmem_ref netmem; 1031 1032 netmem = __bnxt_alloc_rx_netmem(bp, &mapping, rxr, gfp); 1033 if (!netmem) 1034 return -ENOMEM; 1035 1036 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 1037 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 1038 1039 __set_bit(sw_prod, rxr->rx_agg_bmap); 1040 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 1041 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1042 1043 rx_agg_buf->netmem = netmem; 1044 rx_agg_buf->offset = offset; 1045 rx_agg_buf->mapping = mapping; 1046 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 1047 rxbd->rx_bd_opaque = sw_prod; 1048 return 0; 1049 } 1050 1051 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 1052 struct bnxt_cp_ring_info *cpr, 1053 u16 cp_cons, u16 curr) 1054 { 1055 struct rx_agg_cmp *agg; 1056 1057 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 1058 agg = (struct rx_agg_cmp *) 1059 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1060 return agg; 1061 } 1062 1063 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 1064 struct bnxt_rx_ring_info *rxr, 
1065 u16 agg_id, u16 curr) 1066 { 1067 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 1068 1069 return &tpa_info->agg_arr[curr]; 1070 } 1071 1072 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 1073 u16 start, u32 agg_bufs, bool tpa) 1074 { 1075 struct bnxt_napi *bnapi = cpr->bnapi; 1076 struct bnxt *bp = bnapi->bp; 1077 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1078 u16 prod = rxr->rx_agg_prod; 1079 u16 sw_prod = rxr->rx_sw_agg_prod; 1080 bool p5_tpa = false; 1081 u32 i; 1082 1083 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1084 p5_tpa = true; 1085 1086 for (i = 0; i < agg_bufs; i++) { 1087 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 1088 struct rx_agg_cmp *agg; 1089 struct rx_bd *prod_bd; 1090 netmem_ref netmem; 1091 u16 cons; 1092 1093 if (p5_tpa) 1094 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 1095 else 1096 agg = bnxt_get_agg(bp, cpr, idx, start + i); 1097 cons = agg->rx_agg_cmp_opaque; 1098 __clear_bit(cons, rxr->rx_agg_bmap); 1099 1100 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 1101 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 1102 1103 __set_bit(sw_prod, rxr->rx_agg_bmap); 1104 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 1105 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1106 1107 /* It is possible for sw_prod to be equal to cons, so 1108 * set cons_rx_buf->netmem to 0 first. 1109 */ 1110 netmem = cons_rx_buf->netmem; 1111 cons_rx_buf->netmem = 0; 1112 prod_rx_buf->netmem = netmem; 1113 prod_rx_buf->offset = cons_rx_buf->offset; 1114 1115 prod_rx_buf->mapping = cons_rx_buf->mapping; 1116 1117 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 1118 1119 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 1120 prod_bd->rx_bd_opaque = sw_prod; 1121 1122 prod = NEXT_RX_AGG(prod); 1123 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1124 } 1125 rxr->rx_agg_prod = prod; 1126 rxr->rx_sw_agg_prod = sw_prod; 1127 } 1128 1129 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, 1130 struct bnxt_rx_ring_info *rxr, 1131 u16 cons, void *data, u8 *data_ptr, 1132 dma_addr_t dma_addr, 1133 unsigned int offset_and_len) 1134 { 1135 unsigned int len = offset_and_len & 0xffff; 1136 struct page *page = data; 1137 u16 prod = rxr->rx_prod; 1138 struct sk_buff *skb; 1139 int err; 1140 1141 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1142 if (unlikely(err)) { 1143 bnxt_reuse_rx_data(rxr, cons, data); 1144 return NULL; 1145 } 1146 dma_addr -= bp->rx_dma_offset; 1147 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1148 bp->rx_dir); 1149 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); 1150 if (!skb) { 1151 page_pool_recycle_direct(rxr->page_pool, page); 1152 return NULL; 1153 } 1154 skb_mark_for_recycle(skb); 1155 skb_reserve(skb, bp->rx_offset); 1156 __skb_put(skb, len); 1157 1158 return skb; 1159 } 1160 1161 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 1162 struct bnxt_rx_ring_info *rxr, 1163 u16 cons, void *data, u8 *data_ptr, 1164 dma_addr_t dma_addr, 1165 unsigned int offset_and_len) 1166 { 1167 unsigned int payload = offset_and_len >> 16; 1168 unsigned int len = offset_and_len & 0xffff; 1169 skb_frag_t *frag; 1170 struct page *page = data; 1171 u16 prod = rxr->rx_prod; 1172 struct sk_buff *skb; 1173 int off, err; 1174 1175 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1176 if (unlikely(err)) { 1177 bnxt_reuse_rx_data(rxr, cons, data); 1178 return NULL; 1179 } 1180 dma_addr -= bp->rx_dma_offset; 1181 
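	/* Sync the page for the CPU, then pull only the protocol headers
	 * (the payload hint from the completion, or eth_get_headlen() when
	 * the hint is zero) into a small linear skb below; the rest of the
	 * page stays attached as a frag.
	 */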
dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1182 bp->rx_dir); 1183 1184 if (unlikely(!payload)) 1185 payload = eth_get_headlen(bp->dev, data_ptr, len); 1186 1187 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 1188 if (!skb) { 1189 page_pool_recycle_direct(rxr->page_pool, page); 1190 return NULL; 1191 } 1192 1193 skb_mark_for_recycle(skb); 1194 off = (void *)data_ptr - page_address(page); 1195 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); 1196 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 1197 payload + NET_IP_ALIGN); 1198 1199 frag = &skb_shinfo(skb)->frags[0]; 1200 skb_frag_size_sub(frag, payload); 1201 skb_frag_off_add(frag, payload); 1202 skb->data_len -= payload; 1203 skb->tail += payload; 1204 1205 return skb; 1206 } 1207 1208 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 1209 struct bnxt_rx_ring_info *rxr, u16 cons, 1210 void *data, u8 *data_ptr, 1211 dma_addr_t dma_addr, 1212 unsigned int offset_and_len) 1213 { 1214 u16 prod = rxr->rx_prod; 1215 struct sk_buff *skb; 1216 int err; 1217 1218 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1219 if (unlikely(err)) { 1220 bnxt_reuse_rx_data(rxr, cons, data); 1221 return NULL; 1222 } 1223 1224 skb = napi_build_skb(data, bp->rx_buf_size); 1225 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 1226 bp->rx_dir); 1227 if (!skb) { 1228 page_pool_free_va(rxr->head_pool, data, true); 1229 return NULL; 1230 } 1231 1232 skb_mark_for_recycle(skb); 1233 skb_reserve(skb, bp->rx_offset); 1234 skb_put(skb, offset_and_len & 0xffff); 1235 return skb; 1236 } 1237 1238 static u32 __bnxt_rx_agg_netmems(struct bnxt *bp, 1239 struct bnxt_cp_ring_info *cpr, 1240 u16 idx, u32 agg_bufs, bool tpa, 1241 struct sk_buff *skb, 1242 struct xdp_buff *xdp) 1243 { 1244 struct bnxt_napi *bnapi = cpr->bnapi; 1245 struct skb_shared_info *shinfo; 1246 struct bnxt_rx_ring_info *rxr; 1247 u32 i, total_frag_len = 0; 1248 bool p5_tpa = false; 1249 u16 prod; 1250 1251 rxr = bnapi->rx_ring; 1252 prod = rxr->rx_agg_prod; 1253 1254 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1255 p5_tpa = true; 1256 1257 if (skb) 1258 shinfo = skb_shinfo(skb); 1259 else 1260 shinfo = xdp_get_shared_info_from_buff(xdp); 1261 1262 for (i = 0; i < agg_bufs; i++) { 1263 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1264 struct rx_agg_cmp *agg; 1265 u16 cons, frag_len; 1266 netmem_ref netmem; 1267 1268 if (p5_tpa) 1269 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1270 else 1271 agg = bnxt_get_agg(bp, cpr, idx, i); 1272 cons = agg->rx_agg_cmp_opaque; 1273 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1274 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1275 1276 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1277 if (skb) { 1278 skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem, 1279 cons_rx_buf->offset, 1280 frag_len, BNXT_RX_PAGE_SIZE); 1281 } else { 1282 skb_frag_t *frag = &shinfo->frags[i]; 1283 1284 skb_frag_fill_netmem_desc(frag, cons_rx_buf->netmem, 1285 cons_rx_buf->offset, 1286 frag_len); 1287 shinfo->nr_frags = i + 1; 1288 } 1289 __clear_bit(cons, rxr->rx_agg_bmap); 1290 1291 /* It is possible for bnxt_alloc_rx_netmem() to allocate 1292 * a sw_prod index that equals the cons index, so we 1293 * need to clear the cons entry now. 
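		 * If the replacement allocation below fails, the frag just
		 * added is backed out, the netmem is restored to the cons
		 * slot, and the remaining aggregation buffers are recycled
		 * via bnxt_reuse_rx_agg_bufs().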
1294 */ 1295 netmem = cons_rx_buf->netmem; 1296 cons_rx_buf->netmem = 0; 1297 1298 if (xdp && netmem_is_pfmemalloc(netmem)) 1299 xdp_buff_set_frag_pfmemalloc(xdp); 1300 1301 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_ATOMIC) != 0) { 1302 if (skb) { 1303 skb->len -= frag_len; 1304 skb->data_len -= frag_len; 1305 skb->truesize -= BNXT_RX_PAGE_SIZE; 1306 } 1307 1308 --shinfo->nr_frags; 1309 cons_rx_buf->netmem = netmem; 1310 1311 /* Update prod since possibly some netmems have been 1312 * allocated already. 1313 */ 1314 rxr->rx_agg_prod = prod; 1315 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1316 return 0; 1317 } 1318 1319 page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0, 1320 BNXT_RX_PAGE_SIZE); 1321 1322 total_frag_len += frag_len; 1323 prod = NEXT_RX_AGG(prod); 1324 } 1325 rxr->rx_agg_prod = prod; 1326 return total_frag_len; 1327 } 1328 1329 static struct sk_buff *bnxt_rx_agg_netmems_skb(struct bnxt *bp, 1330 struct bnxt_cp_ring_info *cpr, 1331 struct sk_buff *skb, u16 idx, 1332 u32 agg_bufs, bool tpa) 1333 { 1334 u32 total_frag_len = 0; 1335 1336 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa, 1337 skb, NULL); 1338 if (!total_frag_len) { 1339 skb_mark_for_recycle(skb); 1340 dev_kfree_skb(skb); 1341 return NULL; 1342 } 1343 1344 return skb; 1345 } 1346 1347 static u32 bnxt_rx_agg_netmems_xdp(struct bnxt *bp, 1348 struct bnxt_cp_ring_info *cpr, 1349 struct xdp_buff *xdp, u16 idx, 1350 u32 agg_bufs, bool tpa) 1351 { 1352 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); 1353 u32 total_frag_len = 0; 1354 1355 if (!xdp_buff_has_frags(xdp)) 1356 shinfo->nr_frags = 0; 1357 1358 total_frag_len = __bnxt_rx_agg_netmems(bp, cpr, idx, agg_bufs, tpa, 1359 NULL, xdp); 1360 if (total_frag_len) { 1361 xdp_buff_set_frags_flag(xdp); 1362 shinfo->nr_frags = agg_bufs; 1363 shinfo->xdp_frags_size = total_frag_len; 1364 } 1365 return total_frag_len; 1366 } 1367 1368 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1369 u8 agg_bufs, u32 *raw_cons) 1370 { 1371 u16 last; 1372 struct rx_agg_cmp *agg; 1373 1374 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1375 last = RING_CMP(*raw_cons); 1376 agg = (struct rx_agg_cmp *) 1377 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1378 return RX_AGG_CMP_VALID(agg, *raw_cons); 1379 } 1380 1381 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, 1382 unsigned int len, 1383 dma_addr_t mapping) 1384 { 1385 struct bnxt *bp = bnapi->bp; 1386 struct pci_dev *pdev = bp->pdev; 1387 struct sk_buff *skb; 1388 1389 skb = napi_alloc_skb(&bnapi->napi, len); 1390 if (!skb) 1391 return NULL; 1392 1393 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak, 1394 bp->rx_dir); 1395 1396 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1397 len + NET_IP_ALIGN); 1398 1399 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak, 1400 bp->rx_dir); 1401 1402 skb_put(skb, len); 1403 1404 return skb; 1405 } 1406 1407 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1408 unsigned int len, 1409 dma_addr_t mapping) 1410 { 1411 return bnxt_copy_data(bnapi, data, len, mapping); 1412 } 1413 1414 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, 1415 struct xdp_buff *xdp, 1416 unsigned int len, 1417 dma_addr_t mapping) 1418 { 1419 unsigned int metasize = 0; 1420 u8 *data = xdp->data; 1421 struct sk_buff *skb; 1422 1423 len = xdp->data_end - xdp->data_meta; 1424 metasize = xdp->data - xdp->data_meta; 1425 data = xdp->data_meta; 1426 1427 
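	/* Copy from xdp->data_meta so any metadata prepended by the XDP
	 * program is preserved; it is accounted for with skb_metadata_set()
	 * and then pulled so skb->data points at the packet headers again.
	 */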
skb = bnxt_copy_data(bnapi, data, len, mapping); 1428 if (!skb) 1429 return skb; 1430 1431 if (metasize) { 1432 skb_metadata_set(skb, metasize); 1433 __skb_pull(skb, metasize); 1434 } 1435 1436 return skb; 1437 } 1438 1439 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1440 u32 *raw_cons, void *cmp) 1441 { 1442 struct rx_cmp *rxcmp = cmp; 1443 u32 tmp_raw_cons = *raw_cons; 1444 u8 cmp_type, agg_bufs = 0; 1445 1446 cmp_type = RX_CMP_TYPE(rxcmp); 1447 1448 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1449 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1450 RX_CMP_AGG_BUFS) >> 1451 RX_CMP_AGG_BUFS_SHIFT; 1452 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1453 struct rx_tpa_end_cmp *tpa_end = cmp; 1454 1455 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1456 return 0; 1457 1458 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1459 } 1460 1461 if (agg_bufs) { 1462 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1463 return -EBUSY; 1464 } 1465 *raw_cons = tmp_raw_cons; 1466 return 0; 1467 } 1468 1469 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1470 { 1471 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1472 u16 idx = agg_id & MAX_TPA_P5_MASK; 1473 1474 if (test_bit(idx, map->agg_idx_bmap)) 1475 idx = find_first_zero_bit(map->agg_idx_bmap, 1476 BNXT_AGG_IDX_BMAP_SIZE); 1477 __set_bit(idx, map->agg_idx_bmap); 1478 map->agg_id_tbl[agg_id] = idx; 1479 return idx; 1480 } 1481 1482 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1483 { 1484 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1485 1486 __clear_bit(idx, map->agg_idx_bmap); 1487 } 1488 1489 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1490 { 1491 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1492 1493 return map->agg_id_tbl[agg_id]; 1494 } 1495 1496 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info, 1497 struct rx_tpa_start_cmp *tpa_start, 1498 struct rx_tpa_start_cmp_ext *tpa_start1) 1499 { 1500 tpa_info->cfa_code_valid = 1; 1501 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1502 tpa_info->vlan_valid = 0; 1503 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { 1504 tpa_info->vlan_valid = 1; 1505 tpa_info->metadata = 1506 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1507 } 1508 } 1509 1510 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info, 1511 struct rx_tpa_start_cmp *tpa_start, 1512 struct rx_tpa_start_cmp_ext *tpa_start1) 1513 { 1514 tpa_info->vlan_valid = 0; 1515 if (TPA_START_VLAN_VALID(tpa_start)) { 1516 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start); 1517 u32 vlan_proto = ETH_P_8021Q; 1518 1519 tpa_info->vlan_valid = 1; 1520 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD) 1521 vlan_proto = ETH_P_8021AD; 1522 tpa_info->metadata = vlan_proto << 16 | 1523 TPA_START_METADATA0_TCI(tpa_start1); 1524 } 1525 } 1526 1527 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1528 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start, 1529 struct rx_tpa_start_cmp_ext *tpa_start1) 1530 { 1531 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1532 struct bnxt_tpa_info *tpa_info; 1533 u16 cons, prod, agg_id; 1534 struct rx_bd *prod_bd; 1535 dma_addr_t mapping; 1536 1537 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1538 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1539 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1540 } else { 1541 agg_id = TPA_START_AGG_ID(tpa_start); 1542 } 1543 cons = tpa_start->rx_tpa_start_cmp_opaque; 1544 prod = rxr->rx_prod; 1545 cons_rx_buf = 
&rxr->rx_buf_ring[cons]; 1546 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 1547 tpa_info = &rxr->rx_tpa[agg_id]; 1548 1549 if (unlikely(cons != rxr->rx_next_cons || 1550 TPA_START_ERROR(tpa_start))) { 1551 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1552 cons, rxr->rx_next_cons, 1553 TPA_START_ERROR_CODE(tpa_start1)); 1554 bnxt_sched_reset_rxr(bp, rxr); 1555 return; 1556 } 1557 prod_rx_buf->data = tpa_info->data; 1558 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1559 1560 mapping = tpa_info->mapping; 1561 prod_rx_buf->mapping = mapping; 1562 1563 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1564 1565 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1566 1567 tpa_info->data = cons_rx_buf->data; 1568 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1569 cons_rx_buf->data = NULL; 1570 tpa_info->mapping = cons_rx_buf->mapping; 1571 1572 tpa_info->len = 1573 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1574 RX_TPA_START_CMP_LEN_SHIFT; 1575 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1576 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1577 tpa_info->gso_type = SKB_GSO_TCPV4; 1578 if (TPA_START_IS_IPV6(tpa_start1)) 1579 tpa_info->gso_type = SKB_GSO_TCPV6; 1580 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1581 else if (!BNXT_CHIP_P4_PLUS(bp) && 1582 TPA_START_HASH_TYPE(tpa_start) == 3) 1583 tpa_info->gso_type = SKB_GSO_TCPV6; 1584 tpa_info->rss_hash = 1585 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1586 } else { 1587 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1588 tpa_info->gso_type = 0; 1589 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); 1590 } 1591 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1592 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1593 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) 1594 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); 1595 else 1596 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); 1597 tpa_info->agg_count = 0; 1598 1599 rxr->rx_prod = NEXT_RX(prod); 1600 cons = RING_RX(bp, NEXT_RX(cons)); 1601 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 1602 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1603 1604 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1605 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1606 cons_rx_buf->data = NULL; 1607 } 1608 1609 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1610 { 1611 if (agg_bufs) 1612 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1613 } 1614 1615 #ifdef CONFIG_INET 1616 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1617 { 1618 struct udphdr *uh = NULL; 1619 1620 if (ip_proto == htons(ETH_P_IP)) { 1621 struct iphdr *iph = (struct iphdr *)skb->data; 1622 1623 if (iph->protocol == IPPROTO_UDP) 1624 uh = (struct udphdr *)(iph + 1); 1625 } else { 1626 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1627 1628 if (iph->nexthdr == IPPROTO_UDP) 1629 uh = (struct udphdr *)(iph + 1); 1630 } 1631 if (uh) { 1632 if (uh->check) 1633 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1634 else 1635 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1636 } 1637 } 1638 #endif 1639 1640 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1641 int payload_off, int tcp_ts, 1642 struct sk_buff *skb) 1643 { 1644 #ifdef CONFIG_INET 1645 struct tcphdr *th; 1646 int len, nw_off; 1647 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1648 u32 hdr_info = tpa_info->hdr_info; 1649 bool loopback = false; 1650 1651 inner_ip_off = 
BNXT_TPA_INNER_L3_OFF(hdr_info); 1652 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1653 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1654 1655 /* If the packet is an internal loopback packet, the offsets will 1656 * have an extra 4 bytes. 1657 */ 1658 if (inner_mac_off == 4) { 1659 loopback = true; 1660 } else if (inner_mac_off > 4) { 1661 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1662 ETH_HLEN - 2)); 1663 1664 /* We only support inner iPv4/ipv6. If we don't see the 1665 * correct protocol ID, it must be a loopback packet where 1666 * the offsets are off by 4. 1667 */ 1668 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1669 loopback = true; 1670 } 1671 if (loopback) { 1672 /* internal loopback packet, subtract all offsets by 4 */ 1673 inner_ip_off -= 4; 1674 inner_mac_off -= 4; 1675 outer_ip_off -= 4; 1676 } 1677 1678 nw_off = inner_ip_off - ETH_HLEN; 1679 skb_set_network_header(skb, nw_off); 1680 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1681 struct ipv6hdr *iph = ipv6_hdr(skb); 1682 1683 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1684 len = skb->len - skb_transport_offset(skb); 1685 th = tcp_hdr(skb); 1686 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1687 } else { 1688 struct iphdr *iph = ip_hdr(skb); 1689 1690 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1691 len = skb->len - skb_transport_offset(skb); 1692 th = tcp_hdr(skb); 1693 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1694 } 1695 1696 if (inner_mac_off) { /* tunnel */ 1697 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1698 ETH_HLEN - 2)); 1699 1700 bnxt_gro_tunnel(skb, proto); 1701 } 1702 #endif 1703 return skb; 1704 } 1705 1706 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1707 int payload_off, int tcp_ts, 1708 struct sk_buff *skb) 1709 { 1710 #ifdef CONFIG_INET 1711 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1712 u32 hdr_info = tpa_info->hdr_info; 1713 int iphdr_len, nw_off; 1714 1715 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1716 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1717 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1718 1719 nw_off = inner_ip_off - ETH_HLEN; 1720 skb_set_network_header(skb, nw_off); 1721 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
1722 sizeof(struct ipv6hdr) : sizeof(struct iphdr); 1723 skb_set_transport_header(skb, nw_off + iphdr_len); 1724 1725 if (inner_mac_off) { /* tunnel */ 1726 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1727 ETH_HLEN - 2)); 1728 1729 bnxt_gro_tunnel(skb, proto); 1730 } 1731 #endif 1732 return skb; 1733 } 1734 1735 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1736 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1737 1738 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1739 int payload_off, int tcp_ts, 1740 struct sk_buff *skb) 1741 { 1742 #ifdef CONFIG_INET 1743 struct tcphdr *th; 1744 int len, nw_off, tcp_opt_len = 0; 1745 1746 if (tcp_ts) 1747 tcp_opt_len = 12; 1748 1749 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1750 struct iphdr *iph; 1751 1752 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1753 ETH_HLEN; 1754 skb_set_network_header(skb, nw_off); 1755 iph = ip_hdr(skb); 1756 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1757 len = skb->len - skb_transport_offset(skb); 1758 th = tcp_hdr(skb); 1759 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1760 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1761 struct ipv6hdr *iph; 1762 1763 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1764 ETH_HLEN; 1765 skb_set_network_header(skb, nw_off); 1766 iph = ipv6_hdr(skb); 1767 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1768 len = skb->len - skb_transport_offset(skb); 1769 th = tcp_hdr(skb); 1770 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1771 } else { 1772 dev_kfree_skb_any(skb); 1773 return NULL; 1774 } 1775 1776 if (nw_off) /* tunnel */ 1777 bnxt_gro_tunnel(skb, skb->protocol); 1778 #endif 1779 return skb; 1780 } 1781 1782 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1783 struct bnxt_tpa_info *tpa_info, 1784 struct rx_tpa_end_cmp *tpa_end, 1785 struct rx_tpa_end_cmp_ext *tpa_end1, 1786 struct sk_buff *skb) 1787 { 1788 #ifdef CONFIG_INET 1789 int payload_off; 1790 u16 segs; 1791 1792 segs = TPA_END_TPA_SEGS(tpa_end); 1793 if (segs == 1) 1794 return skb; 1795 1796 NAPI_GRO_CB(skb)->count = segs; 1797 skb_shinfo(skb)->gso_size = 1798 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1799 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1800 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1801 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); 1802 else 1803 payload_off = TPA_END_PAYLOAD_OFF(tpa_end); 1804 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1805 if (likely(skb)) 1806 tcp_gro_complete(skb); 1807 #endif 1808 return skb; 1809 } 1810 1811 /* Given the cfa_code of a received packet determine which 1812 * netdev (vf-rep or PF) the packet is destined to. 1813 */ 1814 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) 1815 { 1816 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); 1817 1818 /* if vf-rep dev is NULL, it must belong to the PF */ 1819 return dev ? 
dev : bp->dev; 1820 } 1821 1822 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1823 struct bnxt_cp_ring_info *cpr, 1824 u32 *raw_cons, 1825 struct rx_tpa_end_cmp *tpa_end, 1826 struct rx_tpa_end_cmp_ext *tpa_end1, 1827 u8 *event) 1828 { 1829 struct bnxt_napi *bnapi = cpr->bnapi; 1830 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1831 struct net_device *dev = bp->dev; 1832 u8 *data_ptr, agg_bufs; 1833 unsigned int len; 1834 struct bnxt_tpa_info *tpa_info; 1835 dma_addr_t mapping; 1836 struct sk_buff *skb; 1837 u16 idx = 0, agg_id; 1838 void *data; 1839 bool gro; 1840 1841 if (unlikely(bnapi->in_reset)) { 1842 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1843 1844 if (rc < 0) 1845 return ERR_PTR(-EBUSY); 1846 return NULL; 1847 } 1848 1849 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1850 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1851 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1852 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1853 tpa_info = &rxr->rx_tpa[agg_id]; 1854 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1855 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1856 agg_bufs, tpa_info->agg_count); 1857 agg_bufs = tpa_info->agg_count; 1858 } 1859 tpa_info->agg_count = 0; 1860 *event |= BNXT_AGG_EVENT; 1861 bnxt_free_agg_idx(rxr, agg_id); 1862 idx = agg_id; 1863 gro = !!(bp->flags & BNXT_FLAG_GRO); 1864 } else { 1865 agg_id = TPA_END_AGG_ID(tpa_end); 1866 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1867 tpa_info = &rxr->rx_tpa[agg_id]; 1868 idx = RING_CMP(*raw_cons); 1869 if (agg_bufs) { 1870 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1871 return ERR_PTR(-EBUSY); 1872 1873 *event |= BNXT_AGG_EVENT; 1874 idx = NEXT_CMP(idx); 1875 } 1876 gro = !!TPA_END_GRO(tpa_end); 1877 } 1878 data = tpa_info->data; 1879 data_ptr = tpa_info->data_ptr; 1880 prefetch(data_ptr); 1881 len = tpa_info->len; 1882 mapping = tpa_info->mapping; 1883 1884 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1885 bnxt_abort_tpa(cpr, idx, agg_bufs); 1886 if (agg_bufs > MAX_SKB_FRAGS) 1887 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1888 agg_bufs, (int)MAX_SKB_FRAGS); 1889 return NULL; 1890 } 1891 1892 if (len <= bp->rx_copybreak) { 1893 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1894 if (!skb) { 1895 bnxt_abort_tpa(cpr, idx, agg_bufs); 1896 cpr->sw_stats->rx.rx_oom_discards += 1; 1897 return NULL; 1898 } 1899 } else { 1900 u8 *new_data; 1901 dma_addr_t new_mapping; 1902 1903 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr, 1904 GFP_ATOMIC); 1905 if (!new_data) { 1906 bnxt_abort_tpa(cpr, idx, agg_bufs); 1907 cpr->sw_stats->rx.rx_oom_discards += 1; 1908 return NULL; 1909 } 1910 1911 tpa_info->data = new_data; 1912 tpa_info->data_ptr = new_data + bp->rx_offset; 1913 tpa_info->mapping = new_mapping; 1914 1915 skb = napi_build_skb(data, bp->rx_buf_size); 1916 dma_sync_single_for_cpu(&bp->pdev->dev, mapping, 1917 bp->rx_buf_use_size, bp->rx_dir); 1918 1919 if (!skb) { 1920 page_pool_free_va(rxr->head_pool, data, true); 1921 bnxt_abort_tpa(cpr, idx, agg_bufs); 1922 cpr->sw_stats->rx.rx_oom_discards += 1; 1923 return NULL; 1924 } 1925 skb_mark_for_recycle(skb); 1926 skb_reserve(skb, bp->rx_offset); 1927 skb_put(skb, len); 1928 } 1929 1930 if (agg_bufs) { 1931 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, idx, agg_bufs, 1932 true); 1933 if (!skb) { 1934 /* Page reuse already handled by bnxt_rx_pages(). 
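* Just count the OOM discard here and drop the aggregated packet.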
*/ 1935 cpr->sw_stats->rx.rx_oom_discards += 1; 1936 return NULL; 1937 } 1938 } 1939 1940 if (tpa_info->cfa_code_valid) 1941 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); 1942 skb->protocol = eth_type_trans(skb, dev); 1943 1944 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1945 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1946 1947 if (tpa_info->vlan_valid && 1948 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1949 __be16 vlan_proto = htons(tpa_info->metadata >> 1950 RX_CMP_FLAGS2_METADATA_TPID_SFT); 1951 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1952 1953 if (eth_type_vlan(vlan_proto)) { 1954 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1955 } else { 1956 dev_kfree_skb(skb); 1957 return NULL; 1958 } 1959 } 1960 1961 skb_checksum_none_assert(skb); 1962 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1963 skb->ip_summed = CHECKSUM_UNNECESSARY; 1964 skb->csum_level = 1965 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1966 } 1967 1968 if (gro) 1969 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1970 1971 return skb; 1972 } 1973 1974 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1975 struct rx_agg_cmp *rx_agg) 1976 { 1977 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1978 struct bnxt_tpa_info *tpa_info; 1979 1980 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1981 tpa_info = &rxr->rx_tpa[agg_id]; 1982 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1983 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1984 } 1985 1986 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1987 struct sk_buff *skb) 1988 { 1989 skb_mark_for_recycle(skb); 1990 1991 if (skb->dev != bp->dev) { 1992 /* this packet belongs to a vf-rep */ 1993 bnxt_vf_rep_rx(bp, skb); 1994 return; 1995 } 1996 skb_record_rx_queue(skb, bnapi->index); 1997 napi_gro_receive(&bnapi->napi, skb); 1998 } 1999 2000 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, 2001 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) 2002 { 2003 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); 2004 2005 if (BNXT_PTP_RX_TS_VALID(flags)) 2006 goto ts_valid; 2007 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) 2008 return false; 2009 2010 ts_valid: 2011 *cmpl_ts = ts; 2012 return true; 2013 } 2014 2015 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, 2016 struct rx_cmp *rxcmp, 2017 struct rx_cmp_ext *rxcmp1) 2018 { 2019 __be16 vlan_proto; 2020 u16 vtag; 2021 2022 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 2023 __le32 flags2 = rxcmp1->rx_cmp_flags2; 2024 u32 meta_data; 2025 2026 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) 2027 return skb; 2028 2029 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 2030 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 2031 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); 2032 if (eth_type_vlan(vlan_proto)) 2033 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 2034 else 2035 goto vlan_err; 2036 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2037 if (RX_CMP_VLAN_VALID(rxcmp)) { 2038 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); 2039 2040 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) 2041 vlan_proto = htons(ETH_P_8021Q); 2042 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) 2043 vlan_proto = htons(ETH_P_8021AD); 2044 else 2045 goto vlan_err; 2046 vtag = RX_CMP_METADATA0_TCI(rxcmp1); 2047 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 2048 } 2049 } 2050 return skb; 2051 vlan_err: 2052 skb_mark_for_recycle(skb); 2053 dev_kfree_skb(skb); 2054 return NULL; 
2055 } 2056 2057 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, 2058 struct rx_cmp *rxcmp) 2059 { 2060 u8 ext_op; 2061 2062 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); 2063 switch (ext_op) { 2064 case EXT_OP_INNER_4: 2065 case EXT_OP_OUTER_4: 2066 case EXT_OP_INNFL_3: 2067 case EXT_OP_OUTFL_3: 2068 return PKT_HASH_TYPE_L4; 2069 default: 2070 return PKT_HASH_TYPE_L3; 2071 } 2072 } 2073 2074 /* returns the following: 2075 * 1 - 1 packet successfully received 2076 * 0 - successful TPA_START, packet not completed yet 2077 * -EBUSY - completion ring does not have all the agg buffers yet 2078 * -ENOMEM - packet aborted due to out of memory 2079 * -EIO - packet aborted due to hw error indicated in BD 2080 */ 2081 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2082 u32 *raw_cons, u8 *event) 2083 { 2084 struct bnxt_napi *bnapi = cpr->bnapi; 2085 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2086 struct net_device *dev = bp->dev; 2087 struct rx_cmp *rxcmp; 2088 struct rx_cmp_ext *rxcmp1; 2089 u32 tmp_raw_cons = *raw_cons; 2090 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 2091 struct skb_shared_info *sinfo; 2092 struct bnxt_sw_rx_bd *rx_buf; 2093 unsigned int len; 2094 u8 *data_ptr, agg_bufs, cmp_type; 2095 bool xdp_active = false; 2096 dma_addr_t dma_addr; 2097 struct sk_buff *skb; 2098 struct xdp_buff xdp; 2099 u32 flags, misc; 2100 u32 cmpl_ts; 2101 void *data; 2102 int rc = 0; 2103 2104 rxcmp = (struct rx_cmp *) 2105 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2106 2107 cmp_type = RX_CMP_TYPE(rxcmp); 2108 2109 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 2110 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 2111 goto next_rx_no_prod_no_len; 2112 } 2113 2114 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2115 cp_cons = RING_CMP(tmp_raw_cons); 2116 rxcmp1 = (struct rx_cmp_ext *) 2117 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2118 2119 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2120 return -EBUSY; 2121 2122 /* The valid test of the entry must be done first before 2123 * reading any further. 
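* The dma_rmb() below provides that ordering: the rest of the completion is read only after the valid bit has been observed.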
2124 */ 2125 dma_rmb(); 2126 prod = rxr->rx_prod; 2127 2128 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || 2129 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2130 bnxt_tpa_start(bp, rxr, cmp_type, 2131 (struct rx_tpa_start_cmp *)rxcmp, 2132 (struct rx_tpa_start_cmp_ext *)rxcmp1); 2133 2134 *event |= BNXT_RX_EVENT; 2135 goto next_rx_no_prod_no_len; 2136 2137 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2138 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 2139 (struct rx_tpa_end_cmp *)rxcmp, 2140 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 2141 2142 if (IS_ERR(skb)) 2143 return -EBUSY; 2144 2145 rc = -ENOMEM; 2146 if (likely(skb)) { 2147 bnxt_deliver_skb(bp, bnapi, skb); 2148 rc = 1; 2149 } 2150 *event |= BNXT_RX_EVENT; 2151 goto next_rx_no_prod_no_len; 2152 } 2153 2154 cons = rxcmp->rx_cmp_opaque; 2155 if (unlikely(cons != rxr->rx_next_cons)) { 2156 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 2157 2158 /* 0xffff is forced error, don't print it */ 2159 if (rxr->rx_next_cons != 0xffff) 2160 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 2161 cons, rxr->rx_next_cons); 2162 bnxt_sched_reset_rxr(bp, rxr); 2163 if (rc1) 2164 return rc1; 2165 goto next_rx_no_prod_no_len; 2166 } 2167 rx_buf = &rxr->rx_buf_ring[cons]; 2168 data = rx_buf->data; 2169 data_ptr = rx_buf->data_ptr; 2170 prefetch(data_ptr); 2171 2172 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 2173 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 2174 2175 if (agg_bufs) { 2176 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 2177 return -EBUSY; 2178 2179 cp_cons = NEXT_CMP(cp_cons); 2180 *event |= BNXT_AGG_EVENT; 2181 } 2182 *event |= BNXT_RX_EVENT; 2183 2184 rx_buf->data = NULL; 2185 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 2186 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 2187 2188 bnxt_reuse_rx_data(rxr, cons, data); 2189 if (agg_bufs) 2190 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 2191 false); 2192 2193 rc = -EIO; 2194 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 2195 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++; 2196 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 2197 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 2198 netdev_warn_once(bp->dev, "RX buffer error %x\n", 2199 rx_err); 2200 bnxt_sched_reset_rxr(bp, rxr); 2201 } 2202 } 2203 goto next_rx_no_len; 2204 } 2205 2206 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); 2207 len = flags >> RX_CMP_LEN_SHIFT; 2208 dma_addr = rx_buf->mapping; 2209 2210 if (bnxt_xdp_attached(bp, rxr)) { 2211 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); 2212 if (agg_bufs) { 2213 u32 frag_len = bnxt_rx_agg_netmems_xdp(bp, cpr, &xdp, 2214 cp_cons, 2215 agg_bufs, 2216 false); 2217 if (!frag_len) 2218 goto oom_next_rx; 2219 2220 } 2221 xdp_active = true; 2222 } 2223 2224 if (xdp_active) { 2225 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { 2226 rc = 1; 2227 goto next_rx; 2228 } 2229 if (xdp_buff_has_frags(&xdp)) { 2230 sinfo = xdp_get_shared_info_from_buff(&xdp); 2231 agg_bufs = sinfo->nr_frags; 2232 } else { 2233 agg_bufs = 0; 2234 } 2235 } 2236 2237 if (len <= bp->rx_copybreak) { 2238 if (!xdp_active) 2239 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 2240 else 2241 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr); 2242 bnxt_reuse_rx_data(rxr, cons, data); 2243 if (!skb) { 2244 if (agg_bufs) { 2245 if (!xdp_active) 2246 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 2247 agg_bufs, false); 2248 else 2249 bnxt_xdp_buff_frags_free(rxr, &xdp); 2250 } 2251 goto oom_next_rx; 2252 } 
2253 } else { 2254 u32 payload; 2255 2256 if (rx_buf->data_ptr == data_ptr) 2257 payload = misc & RX_CMP_PAYLOAD_OFFSET; 2258 else 2259 payload = 0; 2260 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 2261 payload | len); 2262 if (!skb) 2263 goto oom_next_rx; 2264 } 2265 2266 if (agg_bufs) { 2267 if (!xdp_active) { 2268 skb = bnxt_rx_agg_netmems_skb(bp, cpr, skb, cp_cons, 2269 agg_bufs, false); 2270 if (!skb) 2271 goto oom_next_rx; 2272 } else { 2273 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, 2274 rxr->page_pool, &xdp); 2275 if (!skb) { 2276 /* we should be able to free the old skb here */ 2277 bnxt_xdp_buff_frags_free(rxr, &xdp); 2278 goto oom_next_rx; 2279 } 2280 } 2281 } 2282 2283 if (RX_CMP_HASH_VALID(rxcmp)) { 2284 enum pkt_hash_types type; 2285 2286 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2287 type = bnxt_rss_ext_op(bp, rxcmp); 2288 } else { 2289 u32 itypes = RX_CMP_ITYPES(rxcmp); 2290 2291 if (itypes == RX_CMP_FLAGS_ITYPE_TCP || 2292 itypes == RX_CMP_FLAGS_ITYPE_UDP) 2293 type = PKT_HASH_TYPE_L4; 2294 else 2295 type = PKT_HASH_TYPE_L3; 2296 } 2297 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 2298 } 2299 2300 if (cmp_type == CMP_TYPE_RX_L2_CMP) 2301 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); 2302 skb->protocol = eth_type_trans(skb, dev); 2303 2304 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { 2305 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); 2306 if (!skb) 2307 goto next_rx; 2308 } 2309 2310 skb_checksum_none_assert(skb); 2311 if (RX_CMP_L4_CS_OK(rxcmp1)) { 2312 if (dev->features & NETIF_F_RXCSUM) { 2313 skb->ip_summed = CHECKSUM_UNNECESSARY; 2314 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 2315 } 2316 } else { 2317 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 2318 if (dev->features & NETIF_F_RXCSUM) 2319 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++; 2320 } 2321 } 2322 2323 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { 2324 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 2325 u64 ns, ts; 2326 2327 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { 2328 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2329 2330 ns = bnxt_timecounter_cyc2time(ptp, ts); 2331 memset(skb_hwtstamps(skb), 0, 2332 sizeof(*skb_hwtstamps(skb))); 2333 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 2334 } 2335 } 2336 } 2337 bnxt_deliver_skb(bp, bnapi, skb); 2338 rc = 1; 2339 2340 next_rx: 2341 cpr->rx_packets += 1; 2342 cpr->rx_bytes += len; 2343 2344 next_rx_no_len: 2345 rxr->rx_prod = NEXT_RX(prod); 2346 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 2347 2348 next_rx_no_prod_no_len: 2349 *raw_cons = tmp_raw_cons; 2350 2351 return rc; 2352 2353 oom_next_rx: 2354 cpr->sw_stats->rx.rx_oom_discards += 1; 2355 rc = -ENOMEM; 2356 goto next_rx; 2357 } 2358 2359 /* In netpoll mode, if we are using a combined completion ring, we need to 2360 * discard the rx packets and recycle the buffers. 
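* This is done by flagging the completion as an error so that bnxt_rx_pkt() recycles the buffer instead of passing the packet up the stack.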
2361 */ 2362 static int bnxt_force_rx_discard(struct bnxt *bp, 2363 struct bnxt_cp_ring_info *cpr, 2364 u32 *raw_cons, u8 *event) 2365 { 2366 u32 tmp_raw_cons = *raw_cons; 2367 struct rx_cmp_ext *rxcmp1; 2368 struct rx_cmp *rxcmp; 2369 u16 cp_cons; 2370 u8 cmp_type; 2371 int rc; 2372 2373 cp_cons = RING_CMP(tmp_raw_cons); 2374 rxcmp = (struct rx_cmp *) 2375 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2376 2377 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2378 cp_cons = RING_CMP(tmp_raw_cons); 2379 rxcmp1 = (struct rx_cmp_ext *) 2380 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2381 2382 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2383 return -EBUSY; 2384 2385 /* The valid test of the entry must be done first before 2386 * reading any further. 2387 */ 2388 dma_rmb(); 2389 cmp_type = RX_CMP_TYPE(rxcmp); 2390 if (cmp_type == CMP_TYPE_RX_L2_CMP || 2391 cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2392 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2393 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2394 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2395 struct rx_tpa_end_cmp_ext *tpa_end1; 2396 2397 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 2398 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 2399 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 2400 } 2401 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); 2402 if (rc && rc != -EBUSY) 2403 cpr->sw_stats->rx.rx_netpoll_discards += 1; 2404 return rc; 2405 } 2406 2407 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 2408 { 2409 struct bnxt_fw_health *fw_health = bp->fw_health; 2410 u32 reg = fw_health->regs[reg_idx]; 2411 u32 reg_type, reg_off, val = 0; 2412 2413 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 2414 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 2415 switch (reg_type) { 2416 case BNXT_FW_HEALTH_REG_TYPE_CFG: 2417 pci_read_config_dword(bp->pdev, reg_off, &val); 2418 break; 2419 case BNXT_FW_HEALTH_REG_TYPE_GRC: 2420 reg_off = fw_health->mapped_regs[reg_idx]; 2421 fallthrough; 2422 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 2423 val = readl(bp->bar0 + reg_off); 2424 break; 2425 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 2426 val = readl(bp->bar1 + reg_off); 2427 break; 2428 } 2429 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 2430 val &= fw_health->fw_reset_inprog_reg_mask; 2431 return val; 2432 } 2433 2434 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 2435 { 2436 int i; 2437 2438 for (i = 0; i < bp->rx_nr_rings; i++) { 2439 u16 grp_idx = bp->rx_ring[i].bnapi->index; 2440 struct bnxt_ring_grp_info *grp_info; 2441 2442 grp_info = &bp->grp_info[grp_idx]; 2443 if (grp_info->agg_fw_ring_id == ring_id) 2444 return grp_idx; 2445 } 2446 return INVALID_HW_RING_ID; 2447 } 2448 2449 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) 2450 { 2451 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2452 2453 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 2454 return link_info->force_link_speed2; 2455 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) 2456 return link_info->force_pam4_link_speed; 2457 return link_info->force_link_speed; 2458 } 2459 2460 static void bnxt_set_force_speed(struct bnxt_link_info *link_info) 2461 { 2462 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2463 2464 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2465 link_info->req_link_speed = link_info->force_link_speed2; 2466 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2467 switch (link_info->req_link_speed) { 2468 case BNXT_LINK_SPEED_50GB_PAM4: 2469 case BNXT_LINK_SPEED_100GB_PAM4: 2470 case BNXT_LINK_SPEED_200GB_PAM4: 2471 case BNXT_LINK_SPEED_400GB_PAM4: 
2472 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2473 break; 2474 case BNXT_LINK_SPEED_100GB_PAM4_112: 2475 case BNXT_LINK_SPEED_200GB_PAM4_112: 2476 case BNXT_LINK_SPEED_400GB_PAM4_112: 2477 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; 2478 break; 2479 default: 2480 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2481 } 2482 return; 2483 } 2484 link_info->req_link_speed = link_info->force_link_speed; 2485 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2486 if (link_info->force_pam4_link_speed) { 2487 link_info->req_link_speed = link_info->force_pam4_link_speed; 2488 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2489 } 2490 } 2491 2492 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) 2493 { 2494 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2495 2496 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2497 link_info->advertising = link_info->auto_link_speeds2; 2498 return; 2499 } 2500 link_info->advertising = link_info->auto_link_speeds; 2501 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 2502 } 2503 2504 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) 2505 { 2506 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2507 2508 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2509 if (link_info->req_link_speed != link_info->force_link_speed2) 2510 return true; 2511 return false; 2512 } 2513 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 2514 link_info->req_link_speed != link_info->force_link_speed) 2515 return true; 2516 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 2517 link_info->req_link_speed != link_info->force_pam4_link_speed) 2518 return true; 2519 return false; 2520 } 2521 2522 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) 2523 { 2524 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2525 2526 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2527 if (link_info->advertising != link_info->auto_link_speeds2) 2528 return true; 2529 return false; 2530 } 2531 if (link_info->advertising != link_info->auto_link_speeds || 2532 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 2533 return true; 2534 return false; 2535 } 2536 2537 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type) 2538 { 2539 u32 flags = bp->ctx->ctx_arr[type].flags; 2540 2541 return (flags & BNXT_CTX_MEM_TYPE_VALID) && 2542 ((flags & BNXT_CTX_MEM_FW_TRACE) || 2543 (flags & BNXT_CTX_MEM_FW_BIN_TRACE)); 2544 } 2545 2546 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm) 2547 { 2548 u32 mem_size, pages, rem_bytes, magic_byte_offset; 2549 u16 trace_type = bnxt_bstore_to_trace[ctxm->type]; 2550 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 2551 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl; 2552 struct bnxt_bs_trace_info *bs_trace; 2553 int last_pg; 2554 2555 if (ctxm->instance_bmap && ctxm->instance_bmap > 1) 2556 return; 2557 2558 mem_size = ctxm->max_entries * ctxm->entry_size; 2559 rem_bytes = mem_size % BNXT_PAGE_SIZE; 2560 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 2561 2562 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1); 2563 magic_byte_offset = (rem_bytes ? 
rem_bytes : BNXT_PAGE_SIZE) - 1; 2564 2565 rmem = &ctx_pg[0].ring_mem; 2566 bs_trace = &bp->bs_trace[trace_type]; 2567 bs_trace->ctx_type = ctxm->type; 2568 bs_trace->trace_type = trace_type; 2569 if (pages > MAX_CTX_PAGES) { 2570 int last_pg_dir = rmem->nr_pages - 1; 2571 2572 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem; 2573 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg]; 2574 } else { 2575 bs_trace->magic_byte = rmem->pg_arr[last_pg]; 2576 } 2577 bs_trace->magic_byte += magic_byte_offset; 2578 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE; 2579 } 2580 2581 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \ 2582 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\ 2583 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT) 2584 2585 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \ 2586 (((data2) & \ 2587 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\ 2588 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT) 2589 2590 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ 2591 ((data2) & \ 2592 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) 2593 2594 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ 2595 (((data2) & \ 2596 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ 2597 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) 2598 2599 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ 2600 ((data1) & \ 2601 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) 2602 2603 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ 2604 (((data1) & \ 2605 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ 2606 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) 2607 2608 /* Return true if the workqueue has to be scheduled */ 2609 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) 2610 { 2611 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); 2612 2613 switch (err_type) { 2614 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: 2615 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", 2616 BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); 2617 break; 2618 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: 2619 netdev_warn(bp->dev, "Pause Storm detected!\n"); 2620 break; 2621 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 2622 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); 2623 break; 2624 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { 2625 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); 2626 char *threshold_type; 2627 bool notify = false; 2628 char *dir_str; 2629 2630 switch (type) { 2631 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: 2632 threshold_type = "warning"; 2633 break; 2634 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: 2635 threshold_type = "critical"; 2636 break; 2637 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: 2638 threshold_type = "fatal"; 2639 break; 2640 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: 2641 threshold_type = "shutdown"; 2642 break; 2643 default: 2644 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); 2645 return false; 2646 } 2647 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { 2648 dir_str = "above"; 2649 notify = true; 2650 } else { 2651 dir_str = "below"; 2652 } 2653 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", 2654 dir_str, threshold_type); 2655 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", 2656 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), 2657 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); 2658 if (notify) { 2659 bp->thermal_threshold_type = type; 2660 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); 2661 return true; 2662 } 2663 return false; 2664 } 2665 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: 2666 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); 2667 break; 2668 default: 2669 netdev_err(bp->dev, "FW reported unknown error type %u\n", 2670 err_type); 2671 break; 2672 } 2673 return false; 2674 } 2675 2676 #define BNXT_GET_EVENT_PORT(data) \ 2677 ((data) & \ 2678 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 2679 2680 #define BNXT_EVENT_RING_TYPE(data2) \ 2681 ((data2) & \ 2682 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 2683 2684 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 2685 (BNXT_EVENT_RING_TYPE(data2) == \ 2686 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 2687 2688 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ 2689 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ 2690 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) 2691 2692 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ 2693 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ 2694 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) 2695 2696 #define BNXT_PHC_BITS 48 2697 2698 static int bnxt_async_event_process(struct bnxt *bp, 2699 struct hwrm_async_event_cmpl *cmpl) 2700 { 2701 u16 event_id = le16_to_cpu(cmpl->event_id); 2702 u32 data1 = le32_to_cpu(cmpl->event_data1); 2703 u32 data2 = le32_to_cpu(cmpl->event_data2); 2704 2705 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", 2706 event_id, data1, data2); 2707 2708 /* TODO CHIMP_FW: Define event id's for link change, error etc 
*/ 2709 switch (event_id) { 2710 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2711 struct bnxt_link_info *link_info = &bp->link_info; 2712 2713 if (BNXT_VF(bp)) 2714 goto async_event_process_exit; 2715 2716 /* print unsupported speed warning in forced speed mode only */ 2717 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2718 (data1 & 0x20000)) { 2719 u16 fw_speed = bnxt_get_force_speed(link_info); 2720 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2721 2722 if (speed != SPEED_UNKNOWN) 2723 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2724 speed); 2725 } 2726 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2727 } 2728 fallthrough; 2729 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2730 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2731 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2732 fallthrough; 2733 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2734 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2735 break; 2736 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2737 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2738 break; 2739 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2740 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2741 2742 if (BNXT_VF(bp)) 2743 break; 2744 2745 if (bp->pf.port_id != port_id) 2746 break; 2747 2748 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2749 break; 2750 } 2751 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2752 if (BNXT_PF(bp)) 2753 goto async_event_process_exit; 2754 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2755 break; 2756 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2757 char *type_str = "Solicited"; 2758 2759 if (!bp->fw_health) 2760 goto async_event_process_exit; 2761 2762 bp->fw_reset_timestamp = jiffies; 2763 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2764 if (!bp->fw_reset_min_dsecs) 2765 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2766 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2767 if (!bp->fw_reset_max_dsecs) 2768 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2769 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { 2770 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); 2771 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2772 type_str = "Fatal"; 2773 bp->fw_health->fatalities++; 2774 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2775 } else if (data2 && BNXT_FW_STATUS_HEALTHY != 2776 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { 2777 type_str = "Non-fatal"; 2778 bp->fw_health->survivals++; 2779 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 2780 } 2781 netif_warn(bp, hw, bp->dev, 2782 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2783 type_str, data1, data2, 2784 bp->fw_reset_min_dsecs * 100, 2785 bp->fw_reset_max_dsecs * 100); 2786 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2787 break; 2788 } 2789 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2790 struct bnxt_fw_health *fw_health = bp->fw_health; 2791 char *status_desc = "healthy"; 2792 u32 status; 2793 2794 if (!fw_health) 2795 goto async_event_process_exit; 2796 2797 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { 2798 fw_health->enabled = false; 2799 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); 2800 break; 2801 } 2802 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2803 fw_health->tmr_multiplier = 2804 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2805 bp->current_interval * 10); 2806 fw_health->tmr_counter = fw_health->tmr_multiplier; 2807 if 
(!fw_health->enabled) 2808 fw_health->last_fw_heartbeat = 2809 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2810 fw_health->last_fw_reset_cnt = 2811 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2812 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 2813 if (status != BNXT_FW_STATUS_HEALTHY) 2814 status_desc = "unhealthy"; 2815 netif_info(bp, drv, bp->dev, 2816 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", 2817 fw_health->primary ? "primary" : "backup", status, 2818 status_desc, fw_health->last_fw_reset_cnt); 2819 if (!fw_health->enabled) { 2820 /* Make sure tmr_counter is set and visible to 2821 * bnxt_health_check() before setting enabled to true. 2822 */ 2823 smp_wmb(); 2824 fw_health->enabled = true; 2825 } 2826 goto async_event_process_exit; 2827 } 2828 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2829 netif_notice(bp, hw, bp->dev, 2830 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2831 data1, data2); 2832 goto async_event_process_exit; 2833 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2834 struct bnxt_rx_ring_info *rxr; 2835 u16 grp_idx; 2836 2837 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 2838 goto async_event_process_exit; 2839 2840 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2841 BNXT_EVENT_RING_TYPE(data2), data1); 2842 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2843 goto async_event_process_exit; 2844 2845 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2846 if (grp_idx == INVALID_HW_RING_ID) { 2847 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2848 data1); 2849 goto async_event_process_exit; 2850 } 2851 rxr = bp->bnapi[grp_idx]->rx_ring; 2852 bnxt_sched_reset_rxr(bp, rxr); 2853 goto async_event_process_exit; 2854 } 2855 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2856 struct bnxt_fw_health *fw_health = bp->fw_health; 2857 2858 netif_notice(bp, hw, bp->dev, 2859 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2860 data1, data2); 2861 if (fw_health) { 2862 fw_health->echo_req_data1 = data1; 2863 fw_health->echo_req_data2 = data2; 2864 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2865 break; 2866 } 2867 goto async_event_process_exit; 2868 } 2869 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { 2870 bnxt_ptp_pps_event(bp, data1, data2); 2871 goto async_event_process_exit; 2872 } 2873 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { 2874 if (bnxt_event_error_report(bp, data1, data2)) 2875 break; 2876 goto async_event_process_exit; 2877 } 2878 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { 2879 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { 2880 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: 2881 if (BNXT_PTP_USE_RTC(bp)) { 2882 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2883 unsigned long flags; 2884 u64 ns; 2885 2886 if (!ptp) 2887 goto async_event_process_exit; 2888 2889 bnxt_ptp_update_current_time(bp); 2890 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << 2891 BNXT_PHC_BITS) | ptp->current_time); 2892 write_seqlock_irqsave(&ptp->ptp_lock, flags); 2893 bnxt_ptp_rtc_timecounter_init(ptp, ns); 2894 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 2895 } 2896 break; 2897 } 2898 goto async_event_process_exit; 2899 } 2900 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { 2901 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; 2902 2903 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); 2904 goto async_event_process_exit; 2905 } 2906 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: { 2907 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1); 
2908 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2); 2909 2910 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset); 2911 goto async_event_process_exit; 2912 } 2913 default: 2914 goto async_event_process_exit; 2915 } 2916 __bnxt_queue_sp_work(bp); 2917 async_event_process_exit: 2918 bnxt_ulp_async_events(bp, cmpl); 2919 return 0; 2920 } 2921 2922 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2923 { 2924 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2925 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2926 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2927 (struct hwrm_fwd_req_cmpl *)txcmp; 2928 2929 switch (cmpl_type) { 2930 case CMPL_BASE_TYPE_HWRM_DONE: 2931 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2932 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); 2933 break; 2934 2935 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2936 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2937 2938 if ((vf_id < bp->pf.first_vf_id) || 2939 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2940 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2941 vf_id); 2942 return -EINVAL; 2943 } 2944 2945 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2946 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); 2947 break; 2948 2949 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2950 bnxt_async_event_process(bp, 2951 (struct hwrm_async_event_cmpl *)txcmp); 2952 break; 2953 2954 default: 2955 break; 2956 } 2957 2958 return 0; 2959 } 2960 2961 static bool bnxt_vnic_is_active(struct bnxt *bp) 2962 { 2963 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 2964 2965 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0; 2966 } 2967 2968 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2969 { 2970 struct bnxt_napi *bnapi = dev_instance; 2971 struct bnxt *bp = bnapi->bp; 2972 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2973 u32 cons = RING_CMP(cpr->cp_raw_cons); 2974 2975 cpr->event_ctr++; 2976 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2977 napi_schedule(&bnapi->napi); 2978 return IRQ_HANDLED; 2979 } 2980 2981 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2982 { 2983 u32 raw_cons = cpr->cp_raw_cons; 2984 u16 cons = RING_CMP(raw_cons); 2985 struct tx_cmp *txcmp; 2986 2987 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2988 2989 return TX_CMP_VALID(txcmp, raw_cons); 2990 } 2991 2992 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2993 int budget) 2994 { 2995 struct bnxt_napi *bnapi = cpr->bnapi; 2996 u32 raw_cons = cpr->cp_raw_cons; 2997 bool flush_xdp = false; 2998 u32 cons; 2999 int rx_pkts = 0; 3000 u8 event = 0; 3001 struct tx_cmp *txcmp; 3002 3003 cpr->has_more_work = 0; 3004 cpr->had_work_done = 1; 3005 while (1) { 3006 u8 cmp_type; 3007 int rc; 3008 3009 cons = RING_CMP(raw_cons); 3010 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3011 3012 if (!TX_CMP_VALID(txcmp, raw_cons)) 3013 break; 3014 3015 /* The valid test of the entry must be done first before 3016 * reading any further. 
3017 */ 3018 dma_rmb(); 3019 cmp_type = TX_CMP_TYPE(txcmp); 3020 if (cmp_type == CMP_TYPE_TX_L2_CMP || 3021 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { 3022 u32 opaque = txcmp->tx_cmp_opaque; 3023 struct bnxt_tx_ring_info *txr; 3024 u16 tx_freed; 3025 3026 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 3027 event |= BNXT_TX_CMP_EVENT; 3028 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) 3029 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); 3030 else 3031 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); 3032 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & 3033 bp->tx_ring_mask; 3034 /* return full budget so NAPI will complete. */ 3035 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { 3036 rx_pkts = budget; 3037 raw_cons = NEXT_RAW_CMP(raw_cons); 3038 if (budget) 3039 cpr->has_more_work = 1; 3040 break; 3041 } 3042 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) { 3043 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp); 3044 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && 3045 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 3046 if (likely(budget)) 3047 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 3048 else 3049 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 3050 &event); 3051 if (event & BNXT_REDIRECT_EVENT) 3052 flush_xdp = true; 3053 if (likely(rc >= 0)) 3054 rx_pkts += rc; 3055 /* Increment rx_pkts when rc is -ENOMEM to count towards 3056 * the NAPI budget. Otherwise, we may potentially loop 3057 * here forever if we consistently cannot allocate 3058 * buffers. 3059 */ 3060 else if (rc == -ENOMEM && budget) 3061 rx_pkts++; 3062 else if (rc == -EBUSY) /* partial completion */ 3063 break; 3064 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || 3065 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || 3066 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { 3067 bnxt_hwrm_handler(bp, txcmp); 3068 } 3069 raw_cons = NEXT_RAW_CMP(raw_cons); 3070 3071 if (rx_pkts && rx_pkts == budget) { 3072 cpr->has_more_work = 1; 3073 break; 3074 } 3075 } 3076 3077 if (flush_xdp) { 3078 xdp_do_flush(); 3079 event &= ~BNXT_REDIRECT_EVENT; 3080 } 3081 3082 if (event & BNXT_TX_EVENT) { 3083 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; 3084 u16 prod = txr->tx_prod; 3085 3086 /* Sync BD data before updating doorbell */ 3087 wmb(); 3088 3089 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 3090 event &= ~BNXT_TX_EVENT; 3091 } 3092 3093 cpr->cp_raw_cons = raw_cons; 3094 bnapi->events |= event; 3095 return rx_pkts; 3096 } 3097 3098 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3099 int budget) 3100 { 3101 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) 3102 bnapi->tx_int(bp, bnapi, budget); 3103 3104 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 3105 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3106 3107 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 3108 bnapi->events &= ~BNXT_RX_EVENT; 3109 } 3110 if (bnapi->events & BNXT_AGG_EVENT) { 3111 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3112 3113 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 3114 bnapi->events &= ~BNXT_AGG_EVENT; 3115 } 3116 } 3117 3118 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3119 int budget) 3120 { 3121 struct bnxt_napi *bnapi = cpr->bnapi; 3122 int rx_pkts; 3123 3124 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 3125 3126 /* ACK completion ring before freeing tx ring and producing new 3127 * buffers in rx/agg rings to prevent overflowing the completion 3128 * ring. 
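* The CQ doorbell write below performs that ACK; TX completion handling and the RX/agg producer doorbells follow in __bnxt_poll_work_done().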
3129 */ 3130 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 3131 3132 __bnxt_poll_work_done(bp, bnapi, budget); 3133 return rx_pkts; 3134 } 3135 3136 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 3137 { 3138 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3139 struct bnxt *bp = bnapi->bp; 3140 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3141 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3142 struct tx_cmp *txcmp; 3143 struct rx_cmp_ext *rxcmp1; 3144 u32 cp_cons, tmp_raw_cons; 3145 u32 raw_cons = cpr->cp_raw_cons; 3146 bool flush_xdp = false; 3147 u32 rx_pkts = 0; 3148 u8 event = 0; 3149 3150 while (1) { 3151 int rc; 3152 3153 cp_cons = RING_CMP(raw_cons); 3154 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3155 3156 if (!TX_CMP_VALID(txcmp, raw_cons)) 3157 break; 3158 3159 /* The valid test of the entry must be done first before 3160 * reading any further. 3161 */ 3162 dma_rmb(); 3163 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 3164 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 3165 cp_cons = RING_CMP(tmp_raw_cons); 3166 rxcmp1 = (struct rx_cmp_ext *) 3167 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3168 3169 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 3170 break; 3171 3172 /* force an error to recycle the buffer */ 3173 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 3174 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 3175 3176 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 3177 if (likely(rc == -EIO) && budget) 3178 rx_pkts++; 3179 else if (rc == -EBUSY) /* partial completion */ 3180 break; 3181 if (event & BNXT_REDIRECT_EVENT) 3182 flush_xdp = true; 3183 } else if (unlikely(TX_CMP_TYPE(txcmp) == 3184 CMPL_BASE_TYPE_HWRM_DONE)) { 3185 bnxt_hwrm_handler(bp, txcmp); 3186 } else { 3187 netdev_err(bp->dev, 3188 "Invalid completion received on special ring\n"); 3189 } 3190 raw_cons = NEXT_RAW_CMP(raw_cons); 3191 3192 if (rx_pkts == budget) 3193 break; 3194 } 3195 3196 cpr->cp_raw_cons = raw_cons; 3197 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 3198 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 3199 3200 if (event & BNXT_AGG_EVENT) 3201 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 3202 if (flush_xdp) 3203 xdp_do_flush(); 3204 3205 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 3206 napi_complete_done(napi, rx_pkts); 3207 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3208 } 3209 return rx_pkts; 3210 } 3211 3212 static int bnxt_poll(struct napi_struct *napi, int budget) 3213 { 3214 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3215 struct bnxt *bp = bnapi->bp; 3216 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3217 int work_done = 0; 3218 3219 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3220 napi_complete(napi); 3221 return 0; 3222 } 3223 while (1) { 3224 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 3225 3226 if (work_done >= budget) { 3227 if (!budget) 3228 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3229 break; 3230 } 3231 3232 if (!bnxt_has_work(bp, cpr)) { 3233 if (napi_complete_done(napi, work_done)) 3234 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3235 break; 3236 } 3237 } 3238 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { 3239 struct dim_sample dim_sample = {}; 3240 3241 dim_update_sample(cpr->event_ctr, 3242 cpr->rx_packets, 3243 cpr->rx_bytes, 3244 &dim_sample); 3245 net_dim(&cpr->dim, &dim_sample); 3246 } 3247 return work_done; 3248 } 3249 3250 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 3251 { 3252 struct 
bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3253 int i, work_done = 0; 3254 3255 for (i = 0; i < cpr->cp_ring_count; i++) { 3256 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3257 3258 if (cpr2->had_nqe_notify) { 3259 work_done += __bnxt_poll_work(bp, cpr2, 3260 budget - work_done); 3261 cpr->has_more_work |= cpr2->has_more_work; 3262 } 3263 } 3264 return work_done; 3265 } 3266 3267 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3268 u64 dbr_type, int budget) 3269 { 3270 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3271 int i; 3272 3273 for (i = 0; i < cpr->cp_ring_count; i++) { 3274 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3275 struct bnxt_db_info *db; 3276 3277 if (cpr2->had_work_done) { 3278 u32 tgl = 0; 3279 3280 if (dbr_type == DBR_TYPE_CQ_ARMALL) { 3281 cpr2->had_nqe_notify = 0; 3282 tgl = cpr2->toggle; 3283 } 3284 db = &cpr2->cp_db; 3285 bnxt_writeq(bp, 3286 db->db_key64 | dbr_type | DB_TOGGLE(tgl) | 3287 DB_RING_IDX(db, cpr2->cp_raw_cons), 3288 db->doorbell); 3289 cpr2->had_work_done = 0; 3290 } 3291 } 3292 __bnxt_poll_work_done(bp, bnapi, budget); 3293 } 3294 3295 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 3296 { 3297 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3298 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3299 struct bnxt_cp_ring_info *cpr_rx; 3300 u32 raw_cons = cpr->cp_raw_cons; 3301 struct bnxt *bp = bnapi->bp; 3302 struct nqe_cn *nqcmp; 3303 int work_done = 0; 3304 u32 cons; 3305 3306 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3307 napi_complete(napi); 3308 return 0; 3309 } 3310 if (cpr->has_more_work) { 3311 cpr->has_more_work = 0; 3312 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 3313 } 3314 while (1) { 3315 u16 type; 3316 3317 cons = RING_CMP(raw_cons); 3318 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3319 3320 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 3321 if (cpr->has_more_work) 3322 break; 3323 3324 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 3325 budget); 3326 cpr->cp_raw_cons = raw_cons; 3327 if (napi_complete_done(napi, work_done)) 3328 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 3329 cpr->cp_raw_cons); 3330 goto poll_done; 3331 } 3332 3333 /* The valid test of the entry must be done first before 3334 * reading any further. 
3335 */ 3336 dma_rmb(); 3337 3338 type = le16_to_cpu(nqcmp->type); 3339 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { 3340 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 3341 u32 cq_type = BNXT_NQ_HDL_TYPE(idx); 3342 struct bnxt_cp_ring_info *cpr2; 3343 3344 /* No more budget for RX work */ 3345 if (budget && work_done >= budget && 3346 cq_type == BNXT_NQ_HDL_TYPE_RX) 3347 break; 3348 3349 idx = BNXT_NQ_HDL_IDX(idx); 3350 cpr2 = &cpr->cp_ring_arr[idx]; 3351 cpr2->had_nqe_notify = 1; 3352 cpr2->toggle = NQE_CN_TOGGLE(type); 3353 work_done += __bnxt_poll_work(bp, cpr2, 3354 budget - work_done); 3355 cpr->has_more_work |= cpr2->has_more_work; 3356 } else { 3357 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 3358 } 3359 raw_cons = NEXT_RAW_CMP(raw_cons); 3360 } 3361 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); 3362 if (raw_cons != cpr->cp_raw_cons) { 3363 cpr->cp_raw_cons = raw_cons; 3364 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 3365 } 3366 poll_done: 3367 cpr_rx = &cpr->cp_ring_arr[0]; 3368 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && 3369 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { 3370 struct dim_sample dim_sample = {}; 3371 3372 dim_update_sample(cpr->event_ctr, 3373 cpr_rx->rx_packets, 3374 cpr_rx->rx_bytes, 3375 &dim_sample); 3376 net_dim(&cpr->dim, &dim_sample); 3377 } 3378 return work_done; 3379 } 3380 3381 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp, 3382 struct bnxt_tx_ring_info *txr, int idx) 3383 { 3384 int i, max_idx; 3385 struct pci_dev *pdev = bp->pdev; 3386 3387 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 3388 3389 for (i = 0; i < max_idx;) { 3390 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i]; 3391 struct sk_buff *skb; 3392 int j, last; 3393 3394 if (idx < bp->tx_nr_rings_xdp && 3395 tx_buf->action == XDP_REDIRECT) { 3396 dma_unmap_single(&pdev->dev, 3397 dma_unmap_addr(tx_buf, mapping), 3398 dma_unmap_len(tx_buf, len), 3399 DMA_TO_DEVICE); 3400 xdp_return_frame(tx_buf->xdpf); 3401 tx_buf->action = 0; 3402 tx_buf->xdpf = NULL; 3403 i++; 3404 continue; 3405 } 3406 3407 skb = tx_buf->skb; 3408 if (!skb) { 3409 i++; 3410 continue; 3411 } 3412 3413 tx_buf->skb = NULL; 3414 3415 if (tx_buf->is_push) { 3416 dev_kfree_skb(skb); 3417 i += 2; 3418 continue; 3419 } 3420 3421 dma_unmap_single(&pdev->dev, 3422 dma_unmap_addr(tx_buf, mapping), 3423 skb_headlen(skb), 3424 DMA_TO_DEVICE); 3425 3426 last = tx_buf->nr_frags; 3427 i += 2; 3428 for (j = 0; j < last; j++, i++) { 3429 int ring_idx = i & bp->tx_ring_mask; 3430 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 3431 3432 tx_buf = &txr->tx_buf_ring[ring_idx]; 3433 netmem_dma_unmap_page_attrs(&pdev->dev, 3434 dma_unmap_addr(tx_buf, 3435 mapping), 3436 skb_frag_size(frag), 3437 DMA_TO_DEVICE, 0); 3438 } 3439 dev_kfree_skb(skb); 3440 } 3441 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx)); 3442 } 3443 3444 static void bnxt_free_tx_skbs(struct bnxt *bp) 3445 { 3446 int i; 3447 3448 if (!bp->tx_ring) 3449 return; 3450 3451 for (i = 0; i < bp->tx_nr_rings; i++) { 3452 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3453 3454 if (!txr->tx_buf_ring) 3455 continue; 3456 3457 bnxt_free_one_tx_ring_skbs(bp, txr, i); 3458 } 3459 3460 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 3461 bnxt_ptp_free_txts_skbs(bp->ptp_cfg); 3462 } 3463 3464 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3465 { 3466 int i, max_idx; 3467 3468 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 3469 3470 for (i = 0; i < max_idx; i++) { 3471 struct bnxt_sw_rx_bd *rx_buf = 
&rxr->rx_buf_ring[i]; 3472 void *data = rx_buf->data; 3473 3474 if (!data) 3475 continue; 3476 3477 rx_buf->data = NULL; 3478 if (BNXT_RX_PAGE_MODE(bp)) 3479 page_pool_recycle_direct(rxr->page_pool, data); 3480 else 3481 page_pool_free_va(rxr->head_pool, data, true); 3482 } 3483 } 3484 3485 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3486 { 3487 int i, max_idx; 3488 3489 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 3490 3491 for (i = 0; i < max_idx; i++) { 3492 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 3493 netmem_ref netmem = rx_agg_buf->netmem; 3494 3495 if (!netmem) 3496 continue; 3497 3498 rx_agg_buf->netmem = 0; 3499 __clear_bit(i, rxr->rx_agg_bmap); 3500 3501 page_pool_recycle_direct_netmem(rxr->page_pool, netmem); 3502 } 3503 } 3504 3505 static void bnxt_free_one_tpa_info_data(struct bnxt *bp, 3506 struct bnxt_rx_ring_info *rxr) 3507 { 3508 int i; 3509 3510 for (i = 0; i < bp->max_tpa; i++) { 3511 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 3512 u8 *data = tpa_info->data; 3513 3514 if (!data) 3515 continue; 3516 3517 tpa_info->data = NULL; 3518 page_pool_free_va(rxr->head_pool, data, false); 3519 } 3520 } 3521 3522 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, 3523 struct bnxt_rx_ring_info *rxr) 3524 { 3525 struct bnxt_tpa_idx_map *map; 3526 3527 if (!rxr->rx_tpa) 3528 goto skip_rx_tpa_free; 3529 3530 bnxt_free_one_tpa_info_data(bp, rxr); 3531 3532 skip_rx_tpa_free: 3533 if (!rxr->rx_buf_ring) 3534 goto skip_rx_buf_free; 3535 3536 bnxt_free_one_rx_ring(bp, rxr); 3537 3538 skip_rx_buf_free: 3539 if (!rxr->rx_agg_ring) 3540 goto skip_rx_agg_free; 3541 3542 bnxt_free_one_rx_agg_ring(bp, rxr); 3543 3544 skip_rx_agg_free: 3545 map = rxr->rx_tpa_idx_map; 3546 if (map) 3547 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 3548 } 3549 3550 static void bnxt_free_rx_skbs(struct bnxt *bp) 3551 { 3552 int i; 3553 3554 if (!bp->rx_ring) 3555 return; 3556 3557 for (i = 0; i < bp->rx_nr_rings; i++) 3558 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]); 3559 } 3560 3561 static void bnxt_free_skbs(struct bnxt *bp) 3562 { 3563 bnxt_free_tx_skbs(bp); 3564 bnxt_free_rx_skbs(bp); 3565 } 3566 3567 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) 3568 { 3569 u8 init_val = ctxm->init_value; 3570 u16 offset = ctxm->init_offset; 3571 u8 *p2 = p; 3572 int i; 3573 3574 if (!init_val) 3575 return; 3576 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 3577 memset(p, init_val, len); 3578 return; 3579 } 3580 for (i = 0; i < len; i += ctxm->entry_size) 3581 *(p2 + i + offset) = init_val; 3582 } 3583 3584 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem, 3585 void *buf, size_t offset, size_t head, 3586 size_t tail) 3587 { 3588 int i, head_page, start_idx, source_offset; 3589 size_t len, rem_len, total_len, max_bytes; 3590 3591 head_page = head / rmem->page_size; 3592 source_offset = head % rmem->page_size; 3593 total_len = (tail - head) & MAX_CTX_BYTES_MASK; 3594 if (!total_len) 3595 total_len = MAX_CTX_BYTES; 3596 start_idx = head_page % MAX_CTX_PAGES; 3597 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size - 3598 source_offset; 3599 total_len = min(total_len, max_bytes); 3600 rem_len = total_len; 3601 3602 for (i = start_idx; rem_len; i++, source_offset = 0) { 3603 len = min((size_t)(rmem->page_size - source_offset), rem_len); 3604 if (buf) 3605 memcpy(buf + offset, rmem->pg_arr[i] + source_offset, 3606 len); 3607 offset += len; 3608 rem_len -= len; 3609 } 3610 
return total_len; 3611 } 3612 3613 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3614 { 3615 struct pci_dev *pdev = bp->pdev; 3616 int i; 3617 3618 if (!rmem->pg_arr) 3619 goto skip_pages; 3620 3621 for (i = 0; i < rmem->nr_pages; i++) { 3622 if (!rmem->pg_arr[i]) 3623 continue; 3624 3625 dma_free_coherent(&pdev->dev, rmem->page_size, 3626 rmem->pg_arr[i], rmem->dma_arr[i]); 3627 3628 rmem->pg_arr[i] = NULL; 3629 } 3630 skip_pages: 3631 if (rmem->pg_tbl) { 3632 size_t pg_tbl_size = rmem->nr_pages * 8; 3633 3634 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3635 pg_tbl_size = rmem->page_size; 3636 dma_free_coherent(&pdev->dev, pg_tbl_size, 3637 rmem->pg_tbl, rmem->pg_tbl_map); 3638 rmem->pg_tbl = NULL; 3639 } 3640 if (rmem->vmem_size && *rmem->vmem) { 3641 vfree(*rmem->vmem); 3642 *rmem->vmem = NULL; 3643 } 3644 } 3645 3646 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3647 { 3648 struct pci_dev *pdev = bp->pdev; 3649 u64 valid_bit = 0; 3650 int i; 3651 3652 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 3653 valid_bit = PTU_PTE_VALID; 3654 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 3655 size_t pg_tbl_size = rmem->nr_pages * 8; 3656 3657 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3658 pg_tbl_size = rmem->page_size; 3659 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 3660 &rmem->pg_tbl_map, 3661 GFP_KERNEL); 3662 if (!rmem->pg_tbl) 3663 return -ENOMEM; 3664 } 3665 3666 for (i = 0; i < rmem->nr_pages; i++) { 3667 u64 extra_bits = valid_bit; 3668 3669 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 3670 rmem->page_size, 3671 &rmem->dma_arr[i], 3672 GFP_KERNEL); 3673 if (!rmem->pg_arr[i]) 3674 return -ENOMEM; 3675 3676 if (rmem->ctx_mem) 3677 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], 3678 rmem->page_size); 3679 if (rmem->nr_pages > 1 || rmem->depth > 0) { 3680 if (i == rmem->nr_pages - 2 && 3681 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3682 extra_bits |= PTU_PTE_NEXT_TO_LAST; 3683 else if (i == rmem->nr_pages - 1 && 3684 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3685 extra_bits |= PTU_PTE_LAST; 3686 rmem->pg_tbl[i] = 3687 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 3688 } 3689 } 3690 3691 if (rmem->vmem_size) { 3692 *rmem->vmem = vzalloc(rmem->vmem_size); 3693 if (!(*rmem->vmem)) 3694 return -ENOMEM; 3695 } 3696 return 0; 3697 } 3698 3699 static void bnxt_free_one_tpa_info(struct bnxt *bp, 3700 struct bnxt_rx_ring_info *rxr) 3701 { 3702 int i; 3703 3704 kfree(rxr->rx_tpa_idx_map); 3705 rxr->rx_tpa_idx_map = NULL; 3706 if (rxr->rx_tpa) { 3707 for (i = 0; i < bp->max_tpa; i++) { 3708 kfree(rxr->rx_tpa[i].agg_arr); 3709 rxr->rx_tpa[i].agg_arr = NULL; 3710 } 3711 } 3712 kfree(rxr->rx_tpa); 3713 rxr->rx_tpa = NULL; 3714 } 3715 3716 static void bnxt_free_tpa_info(struct bnxt *bp) 3717 { 3718 int i; 3719 3720 for (i = 0; i < bp->rx_nr_rings; i++) { 3721 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3722 3723 bnxt_free_one_tpa_info(bp, rxr); 3724 } 3725 } 3726 3727 static int bnxt_alloc_one_tpa_info(struct bnxt *bp, 3728 struct bnxt_rx_ring_info *rxr) 3729 { 3730 struct rx_agg_cmp *agg; 3731 int i; 3732 3733 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 3734 GFP_KERNEL); 3735 if (!rxr->rx_tpa) 3736 return -ENOMEM; 3737 3738 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3739 return 0; 3740 for (i = 0; i < bp->max_tpa; i++) { 3741 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); 3742 if (!agg) 3743 return -ENOMEM; 3744 rxr->rx_tpa[i].agg_arr = 
agg; 3745 } 3746 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), 3747 GFP_KERNEL); 3748 if (!rxr->rx_tpa_idx_map) 3749 return -ENOMEM; 3750 3751 return 0; 3752 } 3753 3754 static int bnxt_alloc_tpa_info(struct bnxt *bp) 3755 { 3756 int i, rc; 3757 3758 bp->max_tpa = MAX_TPA; 3759 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 3760 if (!bp->max_tpa_v2) 3761 return 0; 3762 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 3763 } 3764 3765 for (i = 0; i < bp->rx_nr_rings; i++) { 3766 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3767 3768 rc = bnxt_alloc_one_tpa_info(bp, rxr); 3769 if (rc) 3770 return rc; 3771 } 3772 return 0; 3773 } 3774 3775 static void bnxt_free_rx_rings(struct bnxt *bp) 3776 { 3777 int i; 3778 3779 if (!bp->rx_ring) 3780 return; 3781 3782 bnxt_free_tpa_info(bp); 3783 for (i = 0; i < bp->rx_nr_rings; i++) { 3784 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3785 struct bnxt_ring_struct *ring; 3786 3787 if (rxr->xdp_prog) 3788 bpf_prog_put(rxr->xdp_prog); 3789 3790 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 3791 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3792 3793 page_pool_destroy(rxr->page_pool); 3794 if (bnxt_separate_head_pool(rxr)) 3795 page_pool_destroy(rxr->head_pool); 3796 rxr->page_pool = rxr->head_pool = NULL; 3797 3798 kfree(rxr->rx_agg_bmap); 3799 rxr->rx_agg_bmap = NULL; 3800 3801 ring = &rxr->rx_ring_struct; 3802 bnxt_free_ring(bp, &ring->ring_mem); 3803 3804 ring = &rxr->rx_agg_ring_struct; 3805 bnxt_free_ring(bp, &ring->ring_mem); 3806 } 3807 } 3808 3809 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 3810 struct bnxt_rx_ring_info *rxr, 3811 int numa_node) 3812 { 3813 const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE; 3814 const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K; 3815 struct page_pool_params pp = { 0 }; 3816 struct page_pool *pool; 3817 3818 pp.pool_size = bp->rx_agg_ring_size / agg_size_fac; 3819 if (BNXT_RX_PAGE_MODE(bp)) 3820 pp.pool_size += bp->rx_ring_size / rx_size_fac; 3821 pp.nid = numa_node; 3822 pp.netdev = bp->dev; 3823 pp.dev = &bp->pdev->dev; 3824 pp.dma_dir = bp->rx_dir; 3825 pp.max_len = PAGE_SIZE; 3826 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | 3827 PP_FLAG_ALLOW_UNREADABLE_NETMEM; 3828 pp.queue_idx = rxr->bnapi->index; 3829 3830 pool = page_pool_create(&pp); 3831 if (IS_ERR(pool)) 3832 return PTR_ERR(pool); 3833 rxr->page_pool = pool; 3834 3835 rxr->need_head_pool = page_pool_is_unreadable(pool); 3836 if (bnxt_separate_head_pool(rxr)) { 3837 pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024); 3838 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 3839 pool = page_pool_create(&pp); 3840 if (IS_ERR(pool)) 3841 goto err_destroy_pp; 3842 } 3843 rxr->head_pool = pool; 3844 3845 return 0; 3846 3847 err_destroy_pp: 3848 page_pool_destroy(rxr->page_pool); 3849 rxr->page_pool = NULL; 3850 return PTR_ERR(pool); 3851 } 3852 3853 static void bnxt_enable_rx_page_pool(struct bnxt_rx_ring_info *rxr) 3854 { 3855 page_pool_enable_direct_recycling(rxr->head_pool, &rxr->bnapi->napi); 3856 page_pool_enable_direct_recycling(rxr->page_pool, &rxr->bnapi->napi); 3857 } 3858 3859 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3860 { 3861 u16 mem_size; 3862 3863 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 3864 mem_size = rxr->rx_agg_bmap_size / 8; 3865 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 3866 if (!rxr->rx_agg_bmap) 3867 return -ENOMEM; 3868 3869 return 0; 3870 } 3871 3872 static int bnxt_alloc_rx_rings(struct bnxt *bp) 3873 { 3874 int numa_node = 
dev_to_node(&bp->pdev->dev); 3875 int i, rc = 0, agg_rings = 0, cpu; 3876 3877 if (!bp->rx_ring) 3878 return -ENOMEM; 3879 3880 if (bp->flags & BNXT_FLAG_AGG_RINGS) 3881 agg_rings = 1; 3882 3883 for (i = 0; i < bp->rx_nr_rings; i++) { 3884 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3885 struct bnxt_ring_struct *ring; 3886 int cpu_node; 3887 3888 ring = &rxr->rx_ring_struct; 3889 3890 cpu = cpumask_local_spread(i, numa_node); 3891 cpu_node = cpu_to_node(cpu); 3892 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", 3893 i, cpu_node); 3894 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); 3895 if (rc) 3896 return rc; 3897 bnxt_enable_rx_page_pool(rxr); 3898 3899 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3900 if (rc < 0) 3901 return rc; 3902 3903 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 3904 MEM_TYPE_PAGE_POOL, 3905 rxr->page_pool); 3906 if (rc) { 3907 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3908 return rc; 3909 } 3910 3911 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3912 if (rc) 3913 return rc; 3914 3915 ring->grp_idx = i; 3916 if (agg_rings) { 3917 ring = &rxr->rx_agg_ring_struct; 3918 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3919 if (rc) 3920 return rc; 3921 3922 ring->grp_idx = i; 3923 rc = bnxt_alloc_rx_agg_bmap(bp, rxr); 3924 if (rc) 3925 return rc; 3926 } 3927 } 3928 if (bp->flags & BNXT_FLAG_TPA) 3929 rc = bnxt_alloc_tpa_info(bp); 3930 return rc; 3931 } 3932 3933 static void bnxt_free_tx_rings(struct bnxt *bp) 3934 { 3935 int i; 3936 struct pci_dev *pdev = bp->pdev; 3937 3938 if (!bp->tx_ring) 3939 return; 3940 3941 for (i = 0; i < bp->tx_nr_rings; i++) { 3942 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3943 struct bnxt_ring_struct *ring; 3944 3945 if (txr->tx_push) { 3946 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3947 txr->tx_push, txr->tx_push_mapping); 3948 txr->tx_push = NULL; 3949 } 3950 3951 ring = &txr->tx_ring_struct; 3952 3953 bnxt_free_ring(bp, &ring->ring_mem); 3954 } 3955 } 3956 3957 #define BNXT_TC_TO_RING_BASE(bp, tc) \ 3958 ((tc) * (bp)->tx_nr_rings_per_tc) 3959 3960 #define BNXT_RING_TO_TC_OFF(bp, tx) \ 3961 ((tx) % (bp)->tx_nr_rings_per_tc) 3962 3963 #define BNXT_RING_TO_TC(bp, tx) \ 3964 ((tx) / (bp)->tx_nr_rings_per_tc) 3965 3966 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3967 { 3968 int i, j, rc; 3969 struct pci_dev *pdev = bp->pdev; 3970 3971 bp->tx_push_size = 0; 3972 if (bp->tx_push_thresh) { 3973 int push_size; 3974 3975 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3976 bp->tx_push_thresh); 3977 3978 if (push_size > 256) { 3979 push_size = 0; 3980 bp->tx_push_thresh = 0; 3981 } 3982 3983 bp->tx_push_size = push_size; 3984 } 3985 3986 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 3987 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3988 struct bnxt_ring_struct *ring; 3989 u8 qidx; 3990 3991 ring = &txr->tx_ring_struct; 3992 3993 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3994 if (rc) 3995 return rc; 3996 3997 ring->grp_idx = txr->bnapi->index; 3998 if (bp->tx_push_size) { 3999 dma_addr_t mapping; 4000 4001 /* One pre-allocated DMA buffer to backup 4002 * TX push operation 4003 */ 4004 txr->tx_push = dma_alloc_coherent(&pdev->dev, 4005 bp->tx_push_size, 4006 &txr->tx_push_mapping, 4007 GFP_KERNEL); 4008 4009 if (!txr->tx_push) 4010 return -ENOMEM; 4011 4012 mapping = txr->tx_push_mapping + 4013 sizeof(struct tx_push_bd); 4014 txr->data_mapping = cpu_to_le64(mapping); 4015 } 4016 qidx = bp->tc_to_qidx[j]; 4017 ring->queue_id = bp->q_info[qidx].queue_id; 4018 spin_lock_init(&txr->xdp_tx_lock); 
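		/* XDP TX rings occupy the first tx_nr_rings_xdp slots and
		 * never advance j, so they inherit TC 0's hardware queue.
		 * For the remaining rings, j moves to the next TC after the
		 * last ring of the current TC.
		 */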
4019 if (i < bp->tx_nr_rings_xdp) 4020 continue; 4021 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) 4022 j++; 4023 } 4024 return 0; 4025 } 4026 4027 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 4028 { 4029 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4030 4031 kfree(cpr->cp_desc_ring); 4032 cpr->cp_desc_ring = NULL; 4033 ring->ring_mem.pg_arr = NULL; 4034 kfree(cpr->cp_desc_mapping); 4035 cpr->cp_desc_mapping = NULL; 4036 ring->ring_mem.dma_arr = NULL; 4037 } 4038 4039 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) 4040 { 4041 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); 4042 if (!cpr->cp_desc_ring) 4043 return -ENOMEM; 4044 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), 4045 GFP_KERNEL); 4046 if (!cpr->cp_desc_mapping) 4047 return -ENOMEM; 4048 return 0; 4049 } 4050 4051 static void bnxt_free_all_cp_arrays(struct bnxt *bp) 4052 { 4053 int i; 4054 4055 if (!bp->bnapi) 4056 return; 4057 for (i = 0; i < bp->cp_nr_rings; i++) { 4058 struct bnxt_napi *bnapi = bp->bnapi[i]; 4059 4060 if (!bnapi) 4061 continue; 4062 bnxt_free_cp_arrays(&bnapi->cp_ring); 4063 } 4064 } 4065 4066 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) 4067 { 4068 int i, n = bp->cp_nr_pages; 4069 4070 for (i = 0; i < bp->cp_nr_rings; i++) { 4071 struct bnxt_napi *bnapi = bp->bnapi[i]; 4072 int rc; 4073 4074 if (!bnapi) 4075 continue; 4076 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); 4077 if (rc) 4078 return rc; 4079 } 4080 return 0; 4081 } 4082 4083 static void bnxt_free_cp_rings(struct bnxt *bp) 4084 { 4085 int i; 4086 4087 if (!bp->bnapi) 4088 return; 4089 4090 for (i = 0; i < bp->cp_nr_rings; i++) { 4091 struct bnxt_napi *bnapi = bp->bnapi[i]; 4092 struct bnxt_cp_ring_info *cpr; 4093 struct bnxt_ring_struct *ring; 4094 int j; 4095 4096 if (!bnapi) 4097 continue; 4098 4099 cpr = &bnapi->cp_ring; 4100 ring = &cpr->cp_ring_struct; 4101 4102 bnxt_free_ring(bp, &ring->ring_mem); 4103 4104 if (!cpr->cp_ring_arr) 4105 continue; 4106 4107 for (j = 0; j < cpr->cp_ring_count; j++) { 4108 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 4109 4110 ring = &cpr2->cp_ring_struct; 4111 bnxt_free_ring(bp, &ring->ring_mem); 4112 bnxt_free_cp_arrays(cpr2); 4113 } 4114 kfree(cpr->cp_ring_arr); 4115 cpr->cp_ring_arr = NULL; 4116 cpr->cp_ring_count = 0; 4117 } 4118 } 4119 4120 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, 4121 struct bnxt_cp_ring_info *cpr) 4122 { 4123 struct bnxt_ring_mem_info *rmem; 4124 struct bnxt_ring_struct *ring; 4125 int rc; 4126 4127 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); 4128 if (rc) { 4129 bnxt_free_cp_arrays(cpr); 4130 return -ENOMEM; 4131 } 4132 ring = &cpr->cp_ring_struct; 4133 rmem = &ring->ring_mem; 4134 rmem->nr_pages = bp->cp_nr_pages; 4135 rmem->page_size = HW_CMPD_RING_SIZE; 4136 rmem->pg_arr = (void **)cpr->cp_desc_ring; 4137 rmem->dma_arr = cpr->cp_desc_mapping; 4138 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 4139 rc = bnxt_alloc_ring(bp, rmem); 4140 if (rc) { 4141 bnxt_free_ring(bp, rmem); 4142 bnxt_free_cp_arrays(cpr); 4143 } 4144 return rc; 4145 } 4146 4147 static int bnxt_alloc_cp_rings(struct bnxt *bp) 4148 { 4149 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 4150 int i, j, rc, ulp_msix; 4151 int tcs = bp->num_tc; 4152 4153 if (!tcs) 4154 tcs = 1; 4155 ulp_msix = bnxt_get_ulp_msix_num(bp); 4156 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 4157 struct bnxt_napi *bnapi = bp->bnapi[i]; 4158 struct bnxt_cp_ring_info *cpr, *cpr2; 4159 struct bnxt_ring_struct 
*ring; 4160 int cp_count = 0, k; 4161 int rx = 0, tx = 0; 4162 4163 if (!bnapi) 4164 continue; 4165 4166 cpr = &bnapi->cp_ring; 4167 cpr->bnapi = bnapi; 4168 ring = &cpr->cp_ring_struct; 4169 4170 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 4171 if (rc) 4172 return rc; 4173 4174 ring->map_idx = ulp_msix + i; 4175 4176 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4177 continue; 4178 4179 if (i < bp->rx_nr_rings) { 4180 cp_count++; 4181 rx = 1; 4182 } 4183 if (i < bp->tx_nr_rings_xdp) { 4184 cp_count++; 4185 tx = 1; 4186 } else if ((sh && i < bp->tx_nr_rings) || 4187 (!sh && i >= bp->rx_nr_rings)) { 4188 cp_count += tcs; 4189 tx = 1; 4190 } 4191 4192 cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), 4193 GFP_KERNEL); 4194 if (!cpr->cp_ring_arr) 4195 return -ENOMEM; 4196 cpr->cp_ring_count = cp_count; 4197 4198 for (k = 0; k < cp_count; k++) { 4199 cpr2 = &cpr->cp_ring_arr[k]; 4200 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); 4201 if (rc) 4202 return rc; 4203 cpr2->bnapi = bnapi; 4204 cpr2->sw_stats = cpr->sw_stats; 4205 cpr2->cp_idx = k; 4206 if (!k && rx) { 4207 bp->rx_ring[i].rx_cpr = cpr2; 4208 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; 4209 } else { 4210 int n, tc = k - rx; 4211 4212 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; 4213 bp->tx_ring[n].tx_cpr = cpr2; 4214 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; 4215 } 4216 } 4217 if (tx) 4218 j++; 4219 } 4220 return 0; 4221 } 4222 4223 static void bnxt_init_rx_ring_struct(struct bnxt *bp, 4224 struct bnxt_rx_ring_info *rxr) 4225 { 4226 struct bnxt_ring_mem_info *rmem; 4227 struct bnxt_ring_struct *ring; 4228 4229 ring = &rxr->rx_ring_struct; 4230 rmem = &ring->ring_mem; 4231 rmem->nr_pages = bp->rx_nr_pages; 4232 rmem->page_size = HW_RXBD_RING_SIZE; 4233 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4234 rmem->dma_arr = rxr->rx_desc_mapping; 4235 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4236 rmem->vmem = (void **)&rxr->rx_buf_ring; 4237 4238 ring = &rxr->rx_agg_ring_struct; 4239 rmem = &ring->ring_mem; 4240 rmem->nr_pages = bp->rx_agg_nr_pages; 4241 rmem->page_size = HW_RXBD_RING_SIZE; 4242 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4243 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4244 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4245 rmem->vmem = (void **)&rxr->rx_agg_ring; 4246 } 4247 4248 static void bnxt_reset_rx_ring_struct(struct bnxt *bp, 4249 struct bnxt_rx_ring_info *rxr) 4250 { 4251 struct bnxt_ring_mem_info *rmem; 4252 struct bnxt_ring_struct *ring; 4253 int i; 4254 4255 rxr->page_pool->p.napi = NULL; 4256 rxr->page_pool = NULL; 4257 rxr->head_pool->p.napi = NULL; 4258 rxr->head_pool = NULL; 4259 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info)); 4260 4261 ring = &rxr->rx_ring_struct; 4262 rmem = &ring->ring_mem; 4263 rmem->pg_tbl = NULL; 4264 rmem->pg_tbl_map = 0; 4265 for (i = 0; i < rmem->nr_pages; i++) { 4266 rmem->pg_arr[i] = NULL; 4267 rmem->dma_arr[i] = 0; 4268 } 4269 *rmem->vmem = NULL; 4270 4271 ring = &rxr->rx_agg_ring_struct; 4272 rmem = &ring->ring_mem; 4273 rmem->pg_tbl = NULL; 4274 rmem->pg_tbl_map = 0; 4275 for (i = 0; i < rmem->nr_pages; i++) { 4276 rmem->pg_arr[i] = NULL; 4277 rmem->dma_arr[i] = 0; 4278 } 4279 *rmem->vmem = NULL; 4280 } 4281 4282 static void bnxt_init_ring_struct(struct bnxt *bp) 4283 { 4284 int i, j; 4285 4286 for (i = 0; i < bp->cp_nr_rings; i++) { 4287 struct bnxt_napi *bnapi = bp->bnapi[i]; 4288 struct bnxt_ring_mem_info *rmem; 4289 struct bnxt_cp_ring_info *cpr; 4290 struct bnxt_rx_ring_info *rxr; 4291 struct bnxt_tx_ring_info *txr; 4292 struct bnxt_ring_struct *ring; 4293 
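		/* Point each ring's generic ring_mem descriptor at the page
		 * and DMA address arrays embedded in the per-ring info
		 * structures; bnxt_alloc_ring() populates those arrays later.
		 */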
4294 if (!bnapi) 4295 continue; 4296 4297 cpr = &bnapi->cp_ring; 4298 ring = &cpr->cp_ring_struct; 4299 rmem = &ring->ring_mem; 4300 rmem->nr_pages = bp->cp_nr_pages; 4301 rmem->page_size = HW_CMPD_RING_SIZE; 4302 rmem->pg_arr = (void **)cpr->cp_desc_ring; 4303 rmem->dma_arr = cpr->cp_desc_mapping; 4304 rmem->vmem_size = 0; 4305 4306 rxr = bnapi->rx_ring; 4307 if (!rxr) 4308 goto skip_rx; 4309 4310 ring = &rxr->rx_ring_struct; 4311 rmem = &ring->ring_mem; 4312 rmem->nr_pages = bp->rx_nr_pages; 4313 rmem->page_size = HW_RXBD_RING_SIZE; 4314 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4315 rmem->dma_arr = rxr->rx_desc_mapping; 4316 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4317 rmem->vmem = (void **)&rxr->rx_buf_ring; 4318 4319 ring = &rxr->rx_agg_ring_struct; 4320 rmem = &ring->ring_mem; 4321 rmem->nr_pages = bp->rx_agg_nr_pages; 4322 rmem->page_size = HW_RXBD_RING_SIZE; 4323 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4324 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4325 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4326 rmem->vmem = (void **)&rxr->rx_agg_ring; 4327 4328 skip_rx: 4329 bnxt_for_each_napi_tx(j, bnapi, txr) { 4330 ring = &txr->tx_ring_struct; 4331 rmem = &ring->ring_mem; 4332 rmem->nr_pages = bp->tx_nr_pages; 4333 rmem->page_size = HW_TXBD_RING_SIZE; 4334 rmem->pg_arr = (void **)txr->tx_desc_ring; 4335 rmem->dma_arr = txr->tx_desc_mapping; 4336 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 4337 rmem->vmem = (void **)&txr->tx_buf_ring; 4338 } 4339 } 4340 } 4341 4342 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 4343 { 4344 int i; 4345 u32 prod; 4346 struct rx_bd **rx_buf_ring; 4347 4348 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 4349 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 4350 int j; 4351 struct rx_bd *rxbd; 4352 4353 rxbd = rx_buf_ring[i]; 4354 if (!rxbd) 4355 continue; 4356 4357 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 4358 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 4359 rxbd->rx_bd_opaque = prod; 4360 } 4361 } 4362 } 4363 4364 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp, 4365 struct bnxt_rx_ring_info *rxr, 4366 int ring_nr) 4367 { 4368 u32 prod; 4369 int i; 4370 4371 prod = rxr->rx_prod; 4372 for (i = 0; i < bp->rx_ring_size; i++) { 4373 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 4374 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", 4375 ring_nr, i, bp->rx_ring_size); 4376 break; 4377 } 4378 prod = NEXT_RX(prod); 4379 } 4380 rxr->rx_prod = prod; 4381 } 4382 4383 static void bnxt_alloc_one_rx_ring_netmem(struct bnxt *bp, 4384 struct bnxt_rx_ring_info *rxr, 4385 int ring_nr) 4386 { 4387 u32 prod; 4388 int i; 4389 4390 prod = rxr->rx_agg_prod; 4391 for (i = 0; i < bp->rx_agg_ring_size; i++) { 4392 if (bnxt_alloc_rx_netmem(bp, rxr, prod, GFP_KERNEL)) { 4393 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n", 4394 ring_nr, i, bp->rx_ring_size); 4395 break; 4396 } 4397 prod = NEXT_RX_AGG(prod); 4398 } 4399 rxr->rx_agg_prod = prod; 4400 } 4401 4402 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp, 4403 struct bnxt_rx_ring_info *rxr) 4404 { 4405 dma_addr_t mapping; 4406 u8 *data; 4407 int i; 4408 4409 for (i = 0; i < bp->max_tpa; i++) { 4410 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, 4411 GFP_KERNEL); 4412 if (!data) 4413 return -ENOMEM; 4414 4415 rxr->rx_tpa[i].data = data; 4416 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 4417 rxr->rx_tpa[i].mapping = mapping; 4418 } 4419 4420 return 0; 4421 } 4422 4423 
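/* Populate one RX ring: fill the normal ring first, then the aggregation
 * ring when aggregation rings are enabled, and finally the per-TPA static
 * buffers.  The ring fills tolerate partial allocation (a warning is
 * logged), but a TPA buffer allocation failure is returned to the caller.
 */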
static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 4424 { 4425 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 4426 int rc; 4427 4428 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr); 4429 4430 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 4431 return 0; 4432 4433 bnxt_alloc_one_rx_ring_netmem(bp, rxr, ring_nr); 4434 4435 if (rxr->rx_tpa) { 4436 rc = bnxt_alloc_one_tpa_info_data(bp, rxr); 4437 if (rc) 4438 return rc; 4439 } 4440 return 0; 4441 } 4442 4443 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp, 4444 struct bnxt_rx_ring_info *rxr) 4445 { 4446 struct bnxt_ring_struct *ring; 4447 u32 type; 4448 4449 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 4450 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; 4451 4452 if (NET_IP_ALIGN == 2) 4453 type |= RX_BD_FLAGS_SOP; 4454 4455 ring = &rxr->rx_ring_struct; 4456 bnxt_init_rxbd_pages(ring, type); 4457 ring->fw_ring_id = INVALID_HW_RING_ID; 4458 } 4459 4460 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp, 4461 struct bnxt_rx_ring_info *rxr) 4462 { 4463 struct bnxt_ring_struct *ring; 4464 u32 type; 4465 4466 ring = &rxr->rx_agg_ring_struct; 4467 ring->fw_ring_id = INVALID_HW_RING_ID; 4468 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 4469 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 4470 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 4471 4472 bnxt_init_rxbd_pages(ring, type); 4473 } 4474 } 4475 4476 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 4477 { 4478 struct bnxt_rx_ring_info *rxr; 4479 4480 rxr = &bp->rx_ring[ring_nr]; 4481 bnxt_init_one_rx_ring_rxbd(bp, rxr); 4482 4483 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, 4484 &rxr->bnapi->napi); 4485 4486 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 4487 bpf_prog_add(bp->xdp_prog, 1); 4488 rxr->xdp_prog = bp->xdp_prog; 4489 } 4490 4491 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr); 4492 4493 return bnxt_alloc_one_rx_ring(bp, ring_nr); 4494 } 4495 4496 static void bnxt_init_cp_rings(struct bnxt *bp) 4497 { 4498 int i, j; 4499 4500 for (i = 0; i < bp->cp_nr_rings; i++) { 4501 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 4502 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4503 4504 ring->fw_ring_id = INVALID_HW_RING_ID; 4505 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4506 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4507 if (!cpr->cp_ring_arr) 4508 continue; 4509 for (j = 0; j < cpr->cp_ring_count; j++) { 4510 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 4511 4512 ring = &cpr2->cp_ring_struct; 4513 ring->fw_ring_id = INVALID_HW_RING_ID; 4514 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4515 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4516 } 4517 } 4518 } 4519 4520 static int bnxt_init_rx_rings(struct bnxt *bp) 4521 { 4522 int i, rc = 0; 4523 4524 if (BNXT_RX_PAGE_MODE(bp)) { 4525 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 4526 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 4527 } else { 4528 bp->rx_offset = BNXT_RX_OFFSET; 4529 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 4530 } 4531 4532 for (i = 0; i < bp->rx_nr_rings; i++) { 4533 rc = bnxt_init_one_rx_ring(bp, i); 4534 if (rc) 4535 break; 4536 } 4537 4538 return rc; 4539 } 4540 4541 static int bnxt_init_tx_rings(struct bnxt *bp) 4542 { 4543 u16 i; 4544 4545 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 4546 BNXT_MIN_TX_DESC_CNT); 4547 4548 for (i = 0; i < bp->tx_nr_rings; i++) { 4549 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4550 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4551 4552 ring->fw_ring_id = 
INVALID_HW_RING_ID; 4553 4554 if (i >= bp->tx_nr_rings_xdp) 4555 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp, 4556 NETDEV_QUEUE_TYPE_TX, 4557 &txr->bnapi->napi); 4558 } 4559 4560 return 0; 4561 } 4562 4563 static void bnxt_free_ring_grps(struct bnxt *bp) 4564 { 4565 kfree(bp->grp_info); 4566 bp->grp_info = NULL; 4567 } 4568 4569 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 4570 { 4571 int i; 4572 4573 if (irq_re_init) { 4574 bp->grp_info = kcalloc(bp->cp_nr_rings, 4575 sizeof(struct bnxt_ring_grp_info), 4576 GFP_KERNEL); 4577 if (!bp->grp_info) 4578 return -ENOMEM; 4579 } 4580 for (i = 0; i < bp->cp_nr_rings; i++) { 4581 if (irq_re_init) 4582 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; 4583 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4584 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 4585 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 4586 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4587 } 4588 return 0; 4589 } 4590 4591 static void bnxt_free_vnics(struct bnxt *bp) 4592 { 4593 kfree(bp->vnic_info); 4594 bp->vnic_info = NULL; 4595 bp->nr_vnics = 0; 4596 } 4597 4598 static int bnxt_alloc_vnics(struct bnxt *bp) 4599 { 4600 int num_vnics = 1; 4601 4602 #ifdef CONFIG_RFS_ACCEL 4603 if (bp->flags & BNXT_FLAG_RFS) { 4604 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 4605 num_vnics++; 4606 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4607 num_vnics += bp->rx_nr_rings; 4608 } 4609 #endif 4610 4611 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4612 num_vnics++; 4613 4614 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 4615 GFP_KERNEL); 4616 if (!bp->vnic_info) 4617 return -ENOMEM; 4618 4619 bp->nr_vnics = num_vnics; 4620 return 0; 4621 } 4622 4623 static void bnxt_init_vnics(struct bnxt *bp) 4624 { 4625 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 4626 int i; 4627 4628 for (i = 0; i < bp->nr_vnics; i++) { 4629 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4630 int j; 4631 4632 vnic->fw_vnic_id = INVALID_HW_RING_ID; 4633 vnic->vnic_id = i; 4634 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 4635 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 4636 4637 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 4638 4639 if (bp->vnic_info[i].rss_hash_key) { 4640 if (i == BNXT_VNIC_DEFAULT) { 4641 u8 *key = (void *)vnic->rss_hash_key; 4642 int k; 4643 4644 if (!bp->rss_hash_key_valid && 4645 !bp->rss_hash_key_updated) { 4646 get_random_bytes(bp->rss_hash_key, 4647 HW_HASH_KEY_SIZE); 4648 bp->rss_hash_key_updated = true; 4649 } 4650 4651 memcpy(vnic->rss_hash_key, bp->rss_hash_key, 4652 HW_HASH_KEY_SIZE); 4653 4654 if (!bp->rss_hash_key_updated) 4655 continue; 4656 4657 bp->rss_hash_key_updated = false; 4658 bp->rss_hash_key_valid = true; 4659 4660 bp->toeplitz_prefix = 0; 4661 for (k = 0; k < 8; k++) { 4662 bp->toeplitz_prefix <<= 8; 4663 bp->toeplitz_prefix |= key[k]; 4664 } 4665 } else { 4666 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, 4667 HW_HASH_KEY_SIZE); 4668 } 4669 } 4670 } 4671 } 4672 4673 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 4674 { 4675 int pages; 4676 4677 pages = ring_size / desc_per_pg; 4678 4679 if (!pages) 4680 return 1; 4681 4682 pages++; 4683 4684 while (pages & (pages - 1)) 4685 pages++; 4686 4687 return pages; 4688 } 4689 4690 void bnxt_set_tpa_flags(struct bnxt *bp) 4691 { 4692 bp->flags &= ~BNXT_FLAG_TPA; 4693 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 4694 return; 4695 if (bp->dev->features & NETIF_F_LRO) 4696 bp->flags |= BNXT_FLAG_LRO; 4697 else if (bp->dev->features & NETIF_F_GRO_HW) 
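		/* hardware GRO is used only when LRO was not requested above */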
4698 bp->flags |= BNXT_FLAG_GRO; 4699 } 4700 4701 static void bnxt_init_ring_params(struct bnxt *bp) 4702 { 4703 unsigned int rx_size; 4704 4705 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK; 4706 /* Try to fit 4 chunks into a 4k page */ 4707 rx_size = SZ_1K - 4708 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4709 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size); 4710 } 4711 4712 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 4713 * be set on entry. 4714 */ 4715 void bnxt_set_ring_params(struct bnxt *bp) 4716 { 4717 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 4718 u32 agg_factor = 0, agg_ring_size = 0; 4719 4720 /* 8 for CRC and VLAN */ 4721 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 4722 4723 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + 4724 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4725 4726 ring_size = bp->rx_ring_size; 4727 bp->rx_agg_ring_size = 0; 4728 bp->rx_agg_nr_pages = 0; 4729 4730 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS) 4731 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 4732 4733 bp->flags &= ~BNXT_FLAG_JUMBO; 4734 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 4735 u32 jumbo_factor; 4736 4737 bp->flags |= BNXT_FLAG_JUMBO; 4738 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 4739 if (jumbo_factor > agg_factor) 4740 agg_factor = jumbo_factor; 4741 } 4742 if (agg_factor) { 4743 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { 4744 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 4745 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", 4746 bp->rx_ring_size, ring_size); 4747 bp->rx_ring_size = ring_size; 4748 } 4749 agg_ring_size = ring_size * agg_factor; 4750 4751 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 4752 RX_DESC_CNT); 4753 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 4754 u32 tmp = agg_ring_size; 4755 4756 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 4757 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 4758 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 4759 tmp, agg_ring_size); 4760 } 4761 bp->rx_agg_ring_size = agg_ring_size; 4762 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 4763 4764 if (BNXT_RX_PAGE_MODE(bp)) { 4765 rx_space = PAGE_SIZE; 4766 rx_size = PAGE_SIZE - 4767 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - 4768 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4769 } else { 4770 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK, 4771 bp->rx_copybreak, 4772 bp->dev->cfg_pending->hds_thresh); 4773 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN); 4774 rx_space = rx_size + NET_SKB_PAD + 4775 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4776 } 4777 } 4778 4779 bp->rx_buf_use_size = rx_size; 4780 bp->rx_buf_size = rx_space; 4781 4782 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 4783 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 4784 4785 ring_size = bp->tx_ring_size; 4786 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 4787 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 4788 4789 max_rx_cmpl = bp->rx_ring_size; 4790 /* MAX TPA needs to be added because TPA_START completions are 4791 * immediately recycled, so the TPA completions are not bound by 4792 * the RX ring size. 
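	 * For example, with a 2048-entry RX ring and 256 TPA slots,
	 * max_rx_cmpl would be 2304, accounting for 4608 of the 16-byte
	 * completion slots summed below.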
4793 */ 4794 if (bp->flags & BNXT_FLAG_TPA) 4795 max_rx_cmpl += bp->max_tpa; 4796 /* RX and TPA completions are 32-byte, all others are 16-byte */ 4797 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 4798 bp->cp_ring_size = ring_size; 4799 4800 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 4801 if (bp->cp_nr_pages > MAX_CP_PAGES) { 4802 bp->cp_nr_pages = MAX_CP_PAGES; 4803 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 4804 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 4805 ring_size, bp->cp_ring_size); 4806 } 4807 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 4808 bp->cp_ring_mask = bp->cp_bit - 1; 4809 } 4810 4811 /* Changing allocation mode of RX rings. 4812 * TODO: Update when extending xdp_rxq_info to support allocation modes. 4813 */ 4814 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4815 { 4816 struct net_device *dev = bp->dev; 4817 4818 if (page_mode) { 4819 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS); 4820 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; 4821 4822 if (bp->xdp_prog->aux->xdp_has_frags) 4823 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); 4824 else 4825 dev->max_mtu = 4826 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 4827 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { 4828 bp->flags |= BNXT_FLAG_JUMBO; 4829 bp->rx_skb_func = bnxt_rx_multi_page_skb; 4830 } else { 4831 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 4832 bp->rx_skb_func = bnxt_rx_page_skb; 4833 } 4834 bp->rx_dir = DMA_BIDIRECTIONAL; 4835 } else { 4836 dev->max_mtu = bp->max_mtu; 4837 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 4838 bp->rx_dir = DMA_FROM_DEVICE; 4839 bp->rx_skb_func = bnxt_rx_skb; 4840 } 4841 } 4842 4843 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4844 { 4845 __bnxt_set_rx_skb_mode(bp, page_mode); 4846 4847 if (!page_mode) { 4848 int rx, tx; 4849 4850 bnxt_get_max_rings(bp, &rx, &tx, true); 4851 if (rx > 1) { 4852 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; 4853 bp->dev->hw_features |= NETIF_F_LRO; 4854 } 4855 } 4856 4857 /* Update LRO and GRO_HW availability */ 4858 netdev_update_features(bp->dev); 4859 } 4860 4861 static void bnxt_free_vnic_attributes(struct bnxt *bp) 4862 { 4863 int i; 4864 struct bnxt_vnic_info *vnic; 4865 struct pci_dev *pdev = bp->pdev; 4866 4867 if (!bp->vnic_info) 4868 return; 4869 4870 for (i = 0; i < bp->nr_vnics; i++) { 4871 vnic = &bp->vnic_info[i]; 4872 4873 kfree(vnic->fw_grp_ids); 4874 vnic->fw_grp_ids = NULL; 4875 4876 kfree(vnic->uc_list); 4877 vnic->uc_list = NULL; 4878 4879 if (vnic->mc_list) { 4880 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 4881 vnic->mc_list, vnic->mc_list_mapping); 4882 vnic->mc_list = NULL; 4883 } 4884 4885 if (vnic->rss_table) { 4886 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 4887 vnic->rss_table, 4888 vnic->rss_table_dma_addr); 4889 vnic->rss_table = NULL; 4890 } 4891 4892 vnic->rss_hash_key = NULL; 4893 vnic->flags = 0; 4894 } 4895 } 4896 4897 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 4898 { 4899 int i, rc = 0, size; 4900 struct bnxt_vnic_info *vnic; 4901 struct pci_dev *pdev = bp->pdev; 4902 int max_rings; 4903 4904 for (i = 0; i < bp->nr_vnics; i++) { 4905 vnic = &bp->vnic_info[i]; 4906 4907 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 4908 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 4909 4910 if (mem_size > 0) { 4911 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 4912 if (!vnic->uc_list) { 4913 rc = -ENOMEM; 4914 goto out; 4915 } 4916 } 4917 } 4918 4919 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 4920 
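			/* The MC address table is DMA-coherent; its bus
			 * address is passed to the firmware in
			 * HWRM_CFA_L2_SET_RX_MASK (see
			 * bnxt_hwrm_cfa_l2_set_rx_mask()).
			 */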
vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 4921 vnic->mc_list = 4922 dma_alloc_coherent(&pdev->dev, 4923 vnic->mc_list_size, 4924 &vnic->mc_list_mapping, 4925 GFP_KERNEL); 4926 if (!vnic->mc_list) { 4927 rc = -ENOMEM; 4928 goto out; 4929 } 4930 } 4931 4932 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4933 goto vnic_skip_grps; 4934 4935 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4936 max_rings = bp->rx_nr_rings; 4937 else 4938 max_rings = 1; 4939 4940 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 4941 if (!vnic->fw_grp_ids) { 4942 rc = -ENOMEM; 4943 goto out; 4944 } 4945 vnic_skip_grps: 4946 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 4947 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 4948 continue; 4949 4950 /* Allocate rss table and hash key */ 4951 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 4952 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4953 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 4954 4955 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 4956 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 4957 vnic->rss_table_size, 4958 &vnic->rss_table_dma_addr, 4959 GFP_KERNEL); 4960 if (!vnic->rss_table) { 4961 rc = -ENOMEM; 4962 goto out; 4963 } 4964 4965 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 4966 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 4967 } 4968 return 0; 4969 4970 out: 4971 return rc; 4972 } 4973 4974 static void bnxt_free_hwrm_resources(struct bnxt *bp) 4975 { 4976 struct bnxt_hwrm_wait_token *token; 4977 4978 dma_pool_destroy(bp->hwrm_dma_pool); 4979 bp->hwrm_dma_pool = NULL; 4980 4981 rcu_read_lock(); 4982 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) 4983 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); 4984 rcu_read_unlock(); 4985 } 4986 4987 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 4988 { 4989 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, 4990 BNXT_HWRM_DMA_SIZE, 4991 BNXT_HWRM_DMA_ALIGN, 0); 4992 if (!bp->hwrm_dma_pool) 4993 return -ENOMEM; 4994 4995 INIT_HLIST_HEAD(&bp->hwrm_pending_list); 4996 4997 return 0; 4998 } 4999 5000 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 5001 { 5002 kfree(stats->hw_masks); 5003 stats->hw_masks = NULL; 5004 kfree(stats->sw_stats); 5005 stats->sw_stats = NULL; 5006 if (stats->hw_stats) { 5007 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 5008 stats->hw_stats_map); 5009 stats->hw_stats = NULL; 5010 } 5011 } 5012 5013 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 5014 bool alloc_masks) 5015 { 5016 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 5017 &stats->hw_stats_map, GFP_KERNEL); 5018 if (!stats->hw_stats) 5019 return -ENOMEM; 5020 5021 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 5022 if (!stats->sw_stats) 5023 goto stats_mem_err; 5024 5025 if (alloc_masks) { 5026 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 5027 if (!stats->hw_masks) 5028 goto stats_mem_err; 5029 } 5030 return 0; 5031 5032 stats_mem_err: 5033 bnxt_free_stats_mem(bp, stats); 5034 return -ENOMEM; 5035 } 5036 5037 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 5038 { 5039 int i; 5040 5041 for (i = 0; i < count; i++) 5042 mask_arr[i] = mask; 5043 } 5044 5045 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 5046 { 5047 int i; 5048 5049 for (i = 0; i < count; i++) 5050 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 5051 } 5052 5053 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 5054 struct bnxt_stats_mem *stats) 
5055 { 5056 struct hwrm_func_qstats_ext_output *resp; 5057 struct hwrm_func_qstats_ext_input *req; 5058 __le64 *hw_masks; 5059 int rc; 5060 5061 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 5062 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5063 return -EOPNOTSUPP; 5064 5065 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); 5066 if (rc) 5067 return rc; 5068 5069 req->fid = cpu_to_le16(0xffff); 5070 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 5071 5072 resp = hwrm_req_hold(bp, req); 5073 rc = hwrm_req_send(bp, req); 5074 if (!rc) { 5075 hw_masks = &resp->rx_ucast_pkts; 5076 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 5077 } 5078 hwrm_req_drop(bp, req); 5079 return rc; 5080 } 5081 5082 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 5083 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 5084 5085 static void bnxt_init_stats(struct bnxt *bp) 5086 { 5087 struct bnxt_napi *bnapi = bp->bnapi[0]; 5088 struct bnxt_cp_ring_info *cpr; 5089 struct bnxt_stats_mem *stats; 5090 __le64 *rx_stats, *tx_stats; 5091 int rc, rx_count, tx_count; 5092 u64 *rx_masks, *tx_masks; 5093 u64 mask; 5094 u8 flags; 5095 5096 cpr = &bnapi->cp_ring; 5097 stats = &cpr->stats; 5098 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 5099 if (rc) { 5100 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5101 mask = (1ULL << 48) - 1; 5102 else 5103 mask = -1ULL; 5104 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 5105 } 5106 if (bp->flags & BNXT_FLAG_PORT_STATS) { 5107 stats = &bp->port_stats; 5108 rx_stats = stats->hw_stats; 5109 rx_masks = stats->hw_masks; 5110 rx_count = sizeof(struct rx_port_stats) / 8; 5111 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5112 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5113 tx_count = sizeof(struct tx_port_stats) / 8; 5114 5115 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 5116 rc = bnxt_hwrm_port_qstats(bp, flags); 5117 if (rc) { 5118 mask = (1ULL << 40) - 1; 5119 5120 bnxt_fill_masks(rx_masks, mask, rx_count); 5121 bnxt_fill_masks(tx_masks, mask, tx_count); 5122 } else { 5123 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 5124 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 5125 bnxt_hwrm_port_qstats(bp, 0); 5126 } 5127 } 5128 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 5129 stats = &bp->rx_port_stats_ext; 5130 rx_stats = stats->hw_stats; 5131 rx_masks = stats->hw_masks; 5132 rx_count = sizeof(struct rx_port_stats_ext) / 8; 5133 stats = &bp->tx_port_stats_ext; 5134 tx_stats = stats->hw_stats; 5135 tx_masks = stats->hw_masks; 5136 tx_count = sizeof(struct tx_port_stats_ext) / 8; 5137 5138 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 5139 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 5140 if (rc) { 5141 mask = (1ULL << 40) - 1; 5142 5143 bnxt_fill_masks(rx_masks, mask, rx_count); 5144 if (tx_stats) 5145 bnxt_fill_masks(tx_masks, mask, tx_count); 5146 } else { 5147 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 5148 if (tx_stats) 5149 bnxt_copy_hw_masks(tx_masks, tx_stats, 5150 tx_count); 5151 bnxt_hwrm_port_qstats_ext(bp, 0); 5152 } 5153 } 5154 } 5155 5156 static void bnxt_free_port_stats(struct bnxt *bp) 5157 { 5158 bp->flags &= ~BNXT_FLAG_PORT_STATS; 5159 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 5160 5161 bnxt_free_stats_mem(bp, &bp->port_stats); 5162 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 5163 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 5164 } 5165 5166 static void bnxt_free_ring_stats(struct bnxt *bp) 5167 { 5168 int i; 5169 5170 if (!bp->bnapi) 5171 return; 5172 5173 for (i = 0; i < 
bp->cp_nr_rings; i++) { 5174 struct bnxt_napi *bnapi = bp->bnapi[i]; 5175 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5176 5177 bnxt_free_stats_mem(bp, &cpr->stats); 5178 5179 kfree(cpr->sw_stats); 5180 cpr->sw_stats = NULL; 5181 } 5182 } 5183 5184 static int bnxt_alloc_stats(struct bnxt *bp) 5185 { 5186 u32 size, i; 5187 int rc; 5188 5189 size = bp->hw_ring_stats_size; 5190 5191 for (i = 0; i < bp->cp_nr_rings; i++) { 5192 struct bnxt_napi *bnapi = bp->bnapi[i]; 5193 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5194 5195 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL); 5196 if (!cpr->sw_stats) 5197 return -ENOMEM; 5198 5199 cpr->stats.len = size; 5200 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 5201 if (rc) 5202 return rc; 5203 5204 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 5205 } 5206 5207 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 5208 return 0; 5209 5210 if (bp->port_stats.hw_stats) 5211 goto alloc_ext_stats; 5212 5213 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 5214 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 5215 if (rc) 5216 return rc; 5217 5218 bp->flags |= BNXT_FLAG_PORT_STATS; 5219 5220 alloc_ext_stats: 5221 /* Display extended statistics only if FW supports it */ 5222 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 5223 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 5224 return 0; 5225 5226 if (bp->rx_port_stats_ext.hw_stats) 5227 goto alloc_tx_ext_stats; 5228 5229 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 5230 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 5231 /* Extended stats are optional */ 5232 if (rc) 5233 return 0; 5234 5235 alloc_tx_ext_stats: 5236 if (bp->tx_port_stats_ext.hw_stats) 5237 return 0; 5238 5239 if (bp->hwrm_spec_code >= 0x10902 || 5240 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 5241 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 5242 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 5243 /* Extended stats are optional */ 5244 if (rc) 5245 return 0; 5246 } 5247 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 5248 return 0; 5249 } 5250 5251 static void bnxt_clear_ring_indices(struct bnxt *bp) 5252 { 5253 int i, j; 5254 5255 if (!bp->bnapi) 5256 return; 5257 5258 for (i = 0; i < bp->cp_nr_rings; i++) { 5259 struct bnxt_napi *bnapi = bp->bnapi[i]; 5260 struct bnxt_cp_ring_info *cpr; 5261 struct bnxt_rx_ring_info *rxr; 5262 struct bnxt_tx_ring_info *txr; 5263 5264 if (!bnapi) 5265 continue; 5266 5267 cpr = &bnapi->cp_ring; 5268 cpr->cp_raw_cons = 0; 5269 5270 bnxt_for_each_napi_tx(j, bnapi, txr) { 5271 txr->tx_prod = 0; 5272 txr->tx_cons = 0; 5273 txr->tx_hw_cons = 0; 5274 } 5275 5276 rxr = bnapi->rx_ring; 5277 if (rxr) { 5278 rxr->rx_prod = 0; 5279 rxr->rx_agg_prod = 0; 5280 rxr->rx_sw_agg_prod = 0; 5281 rxr->rx_next_cons = 0; 5282 } 5283 bnapi->events = 0; 5284 } 5285 } 5286 5287 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5288 { 5289 u8 type = fltr->type, flags = fltr->flags; 5290 5291 INIT_LIST_HEAD(&fltr->list); 5292 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) || 5293 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING)) 5294 list_add_tail(&fltr->list, &bp->usr_fltr_list); 5295 } 5296 5297 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5298 { 5299 if (!list_empty(&fltr->list)) 5300 list_del_init(&fltr->list); 5301 } 5302 5303 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) 5304 { 5305 struct bnxt_filter_base *usr_fltr, *tmp; 5306 5307 
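	/* L2 filters stay on the user list unless 'all' is set; ntuple
	 * entries are always unlinked.
	 */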
list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 5308 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2) 5309 continue; 5310 bnxt_del_one_usr_fltr(bp, usr_fltr); 5311 } 5312 } 5313 5314 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5315 { 5316 hlist_del(&fltr->hash); 5317 bnxt_del_one_usr_fltr(bp, fltr); 5318 if (fltr->flags) { 5319 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 5320 bp->ntp_fltr_count--; 5321 } 5322 kfree(fltr); 5323 } 5324 5325 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) 5326 { 5327 int i; 5328 5329 netdev_assert_locked(bp->dev); 5330 5331 /* Under netdev instance lock and all our NAPIs have been disabled. 5332 * It's safe to delete the hash table. 5333 */ 5334 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 5335 struct hlist_head *head; 5336 struct hlist_node *tmp; 5337 struct bnxt_ntuple_filter *fltr; 5338 5339 head = &bp->ntp_fltr_hash_tbl[i]; 5340 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5341 bnxt_del_l2_filter(bp, fltr->l2_fltr); 5342 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5343 !list_empty(&fltr->base.list))) 5344 continue; 5345 bnxt_del_fltr(bp, &fltr->base); 5346 } 5347 } 5348 if (!all) 5349 return; 5350 5351 bitmap_free(bp->ntp_fltr_bmap); 5352 bp->ntp_fltr_bmap = NULL; 5353 bp->ntp_fltr_count = 0; 5354 } 5355 5356 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 5357 { 5358 int i, rc = 0; 5359 5360 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) 5361 return 0; 5362 5363 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 5364 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 5365 5366 bp->ntp_fltr_count = 0; 5367 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL); 5368 5369 if (!bp->ntp_fltr_bmap) 5370 rc = -ENOMEM; 5371 5372 return rc; 5373 } 5374 5375 static void bnxt_free_l2_filters(struct bnxt *bp, bool all) 5376 { 5377 int i; 5378 5379 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { 5380 struct hlist_head *head; 5381 struct hlist_node *tmp; 5382 struct bnxt_l2_filter *fltr; 5383 5384 head = &bp->l2_fltr_hash_tbl[i]; 5385 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5386 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5387 !list_empty(&fltr->base.list))) 5388 continue; 5389 bnxt_del_fltr(bp, &fltr->base); 5390 } 5391 } 5392 } 5393 5394 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) 5395 { 5396 int i; 5397 5398 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) 5399 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); 5400 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); 5401 } 5402 5403 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 5404 { 5405 bnxt_free_vnic_attributes(bp); 5406 bnxt_free_tx_rings(bp); 5407 bnxt_free_rx_rings(bp); 5408 bnxt_free_cp_rings(bp); 5409 bnxt_free_all_cp_arrays(bp); 5410 bnxt_free_ntp_fltrs(bp, false); 5411 bnxt_free_l2_filters(bp, false); 5412 if (irq_re_init) { 5413 bnxt_free_ring_stats(bp); 5414 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 5415 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 5416 bnxt_free_port_stats(bp); 5417 bnxt_free_ring_grps(bp); 5418 bnxt_free_vnics(bp); 5419 kfree(bp->tx_ring_map); 5420 bp->tx_ring_map = NULL; 5421 kfree(bp->tx_ring); 5422 bp->tx_ring = NULL; 5423 kfree(bp->rx_ring); 5424 bp->rx_ring = NULL; 5425 kfree(bp->bnapi); 5426 bp->bnapi = NULL; 5427 } else { 5428 bnxt_clear_ring_indices(bp); 5429 } 5430 } 5431 5432 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 5433 { 5434 int i, j, rc, size, arr_size; 5435 void *bnapi; 5436 5437 if (irq_re_init) { 5438 
/* Allocate bnapi mem pointer array and mem block for 5439 * all queues 5440 */ 5441 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 5442 bp->cp_nr_rings); 5443 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 5444 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 5445 if (!bnapi) 5446 return -ENOMEM; 5447 5448 bp->bnapi = bnapi; 5449 bnapi += arr_size; 5450 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 5451 bp->bnapi[i] = bnapi; 5452 bp->bnapi[i]->index = i; 5453 bp->bnapi[i]->bp = bp; 5454 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5455 struct bnxt_cp_ring_info *cpr = 5456 &bp->bnapi[i]->cp_ring; 5457 5458 cpr->cp_ring_struct.ring_mem.flags = 5459 BNXT_RMEM_RING_PTE_FLAG; 5460 } 5461 } 5462 5463 bp->rx_ring = kcalloc(bp->rx_nr_rings, 5464 sizeof(struct bnxt_rx_ring_info), 5465 GFP_KERNEL); 5466 if (!bp->rx_ring) 5467 return -ENOMEM; 5468 5469 for (i = 0; i < bp->rx_nr_rings; i++) { 5470 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5471 5472 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5473 rxr->rx_ring_struct.ring_mem.flags = 5474 BNXT_RMEM_RING_PTE_FLAG; 5475 rxr->rx_agg_ring_struct.ring_mem.flags = 5476 BNXT_RMEM_RING_PTE_FLAG; 5477 } else { 5478 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; 5479 } 5480 rxr->bnapi = bp->bnapi[i]; 5481 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 5482 } 5483 5484 bp->tx_ring = kcalloc(bp->tx_nr_rings, 5485 sizeof(struct bnxt_tx_ring_info), 5486 GFP_KERNEL); 5487 if (!bp->tx_ring) 5488 return -ENOMEM; 5489 5490 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 5491 GFP_KERNEL); 5492 5493 if (!bp->tx_ring_map) 5494 return -ENOMEM; 5495 5496 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5497 j = 0; 5498 else 5499 j = bp->rx_nr_rings; 5500 5501 for (i = 0; i < bp->tx_nr_rings; i++) { 5502 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5503 struct bnxt_napi *bnapi2; 5504 5505 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5506 txr->tx_ring_struct.ring_mem.flags = 5507 BNXT_RMEM_RING_PTE_FLAG; 5508 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 5509 if (i >= bp->tx_nr_rings_xdp) { 5510 int k = j + BNXT_RING_TO_TC_OFF(bp, i); 5511 5512 bnapi2 = bp->bnapi[k]; 5513 txr->txq_index = i - bp->tx_nr_rings_xdp; 5514 txr->tx_napi_idx = 5515 BNXT_RING_TO_TC(bp, txr->txq_index); 5516 bnapi2->tx_ring[txr->tx_napi_idx] = txr; 5517 bnapi2->tx_int = bnxt_tx_int; 5518 } else { 5519 bnapi2 = bp->bnapi[j]; 5520 bnapi2->flags |= BNXT_NAPI_FLAG_XDP; 5521 bnapi2->tx_ring[0] = txr; 5522 bnapi2->tx_int = bnxt_tx_int_xdp; 5523 j++; 5524 } 5525 txr->bnapi = bnapi2; 5526 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5527 txr->tx_cpr = &bnapi2->cp_ring; 5528 } 5529 5530 rc = bnxt_alloc_stats(bp); 5531 if (rc) 5532 goto alloc_mem_err; 5533 bnxt_init_stats(bp); 5534 5535 rc = bnxt_alloc_ntp_fltrs(bp); 5536 if (rc) 5537 goto alloc_mem_err; 5538 5539 rc = bnxt_alloc_vnics(bp); 5540 if (rc) 5541 goto alloc_mem_err; 5542 } 5543 5544 rc = bnxt_alloc_all_cp_arrays(bp); 5545 if (rc) 5546 goto alloc_mem_err; 5547 5548 bnxt_init_ring_struct(bp); 5549 5550 rc = bnxt_alloc_rx_rings(bp); 5551 if (rc) 5552 goto alloc_mem_err; 5553 5554 rc = bnxt_alloc_tx_rings(bp); 5555 if (rc) 5556 goto alloc_mem_err; 5557 5558 rc = bnxt_alloc_cp_rings(bp); 5559 if (rc) 5560 goto alloc_mem_err; 5561 5562 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | 5563 BNXT_VNIC_MCAST_FLAG | 5564 BNXT_VNIC_UCAST_FLAG; 5565 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) 5566 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= 5567 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; 5568 5569 rc = 
bnxt_alloc_vnic_attributes(bp); 5570 if (rc) 5571 goto alloc_mem_err; 5572 return 0; 5573 5574 alloc_mem_err: 5575 bnxt_free_mem(bp, true); 5576 return rc; 5577 } 5578 5579 static void bnxt_disable_int(struct bnxt *bp) 5580 { 5581 int i; 5582 5583 if (!bp->bnapi) 5584 return; 5585 5586 for (i = 0; i < bp->cp_nr_rings; i++) { 5587 struct bnxt_napi *bnapi = bp->bnapi[i]; 5588 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5589 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5590 5591 if (ring->fw_ring_id != INVALID_HW_RING_ID) 5592 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5593 } 5594 } 5595 5596 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 5597 { 5598 struct bnxt_napi *bnapi = bp->bnapi[n]; 5599 struct bnxt_cp_ring_info *cpr; 5600 5601 cpr = &bnapi->cp_ring; 5602 return cpr->cp_ring_struct.map_idx; 5603 } 5604 5605 static void bnxt_disable_int_sync(struct bnxt *bp) 5606 { 5607 int i; 5608 5609 if (!bp->irq_tbl) 5610 return; 5611 5612 atomic_inc(&bp->intr_sem); 5613 5614 bnxt_disable_int(bp); 5615 for (i = 0; i < bp->cp_nr_rings; i++) { 5616 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 5617 5618 synchronize_irq(bp->irq_tbl[map_idx].vector); 5619 } 5620 } 5621 5622 static void bnxt_enable_int(struct bnxt *bp) 5623 { 5624 int i; 5625 5626 atomic_set(&bp->intr_sem, 0); 5627 for (i = 0; i < bp->cp_nr_rings; i++) { 5628 struct bnxt_napi *bnapi = bp->bnapi[i]; 5629 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5630 5631 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 5632 } 5633 } 5634 5635 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 5636 bool async_only) 5637 { 5638 DECLARE_BITMAP(async_events_bmap, 256); 5639 u32 *events = (u32 *)async_events_bmap; 5640 struct hwrm_func_drv_rgtr_output *resp; 5641 struct hwrm_func_drv_rgtr_input *req; 5642 u32 flags; 5643 int rc, i; 5644 5645 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); 5646 if (rc) 5647 return rc; 5648 5649 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 5650 FUNC_DRV_RGTR_REQ_ENABLES_VER | 5651 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5652 5653 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 5654 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 5655 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 5656 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 5657 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 5658 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 5659 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 5660 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2) 5661 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT; 5662 req->flags = cpu_to_le32(flags); 5663 req->ver_maj_8b = DRV_VER_MAJ; 5664 req->ver_min_8b = DRV_VER_MIN; 5665 req->ver_upd_8b = DRV_VER_UPD; 5666 req->ver_maj = cpu_to_le16(DRV_VER_MAJ); 5667 req->ver_min = cpu_to_le16(DRV_VER_MIN); 5668 req->ver_upd = cpu_to_le16(DRV_VER_UPD); 5669 5670 if (BNXT_PF(bp)) { 5671 u32 data[8]; 5672 int i; 5673 5674 memset(data, 0, sizeof(data)); 5675 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 5676 u16 cmd = bnxt_vf_req_snif[i]; 5677 unsigned int bit, idx; 5678 5679 idx = cmd / 32; 5680 bit = cmd % 32; 5681 data[idx] |= 1 << bit; 5682 } 5683 5684 for (i = 0; i < 8; i++) 5685 req->vf_req_fwd[i] = cpu_to_le32(data[i]); 5686 5687 req->enables |= 5688 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 5689 } 5690 5691 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 5692 req->flags |= cpu_to_le32( 5693 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 5694 5695 memset(async_events_bmap, 0, 
sizeof(async_events_bmap)); 5696 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 5697 u16 event_id = bnxt_async_events_arr[i]; 5698 5699 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 5700 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5701 continue; 5702 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 5703 !bp->ptp_cfg) 5704 continue; 5705 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 5706 } 5707 if (bmap && bmap_size) { 5708 for (i = 0; i < bmap_size; i++) { 5709 if (test_bit(i, bmap)) 5710 __set_bit(i, async_events_bmap); 5711 } 5712 } 5713 for (i = 0; i < 8; i++) 5714 req->async_event_fwd[i] |= cpu_to_le32(events[i]); 5715 5716 if (async_only) 5717 req->enables = 5718 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5719 5720 resp = hwrm_req_hold(bp, req); 5721 rc = hwrm_req_send(bp, req); 5722 if (!rc) { 5723 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 5724 if (resp->flags & 5725 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 5726 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 5727 } 5728 hwrm_req_drop(bp, req); 5729 return rc; 5730 } 5731 5732 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 5733 { 5734 struct hwrm_func_drv_unrgtr_input *req; 5735 int rc; 5736 5737 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 5738 return 0; 5739 5740 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); 5741 if (rc) 5742 return rc; 5743 return hwrm_req_send(bp, req); 5744 } 5745 5746 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa); 5747 5748 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 5749 { 5750 struct hwrm_tunnel_dst_port_free_input *req; 5751 int rc; 5752 5753 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && 5754 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) 5755 return 0; 5756 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && 5757 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) 5758 return 0; 5759 5760 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); 5761 if (rc) 5762 return rc; 5763 5764 req->tunnel_type = tunnel_type; 5765 5766 switch (tunnel_type) { 5767 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 5768 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 5769 bp->vxlan_port = 0; 5770 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 5771 break; 5772 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 5773 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 5774 bp->nge_port = 0; 5775 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 5776 break; 5777 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE: 5778 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); 5779 bp->vxlan_gpe_port = 0; 5780 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID; 5781 break; 5782 default: 5783 break; 5784 } 5785 5786 rc = hwrm_req_send(bp, req); 5787 if (rc) 5788 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 5789 rc); 5790 if (bp->flags & BNXT_FLAG_TPA) 5791 bnxt_set_tpa(bp, true); 5792 return rc; 5793 } 5794 5795 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 5796 u8 tunnel_type) 5797 { 5798 struct hwrm_tunnel_dst_port_alloc_output *resp; 5799 struct hwrm_tunnel_dst_port_alloc_input *req; 5800 int rc; 5801 5802 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); 5803 if (rc) 5804 return rc; 5805 5806 req->tunnel_type = tunnel_type; 5807 req->tunnel_dst_port_val = port; 5808 5809 resp = hwrm_req_hold(bp, req); 5810 rc = hwrm_req_send(bp, req); 5811 if (rc) { 5812 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 5813 rc); 5814 goto err_out; 5815 } 5816 5817 switch (tunnel_type) { 5818 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 5819 bp->vxlan_port = port; 5820 bp->vxlan_fw_dst_port_id = 5821 le16_to_cpu(resp->tunnel_dst_port_id); 5822 break; 5823 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 5824 bp->nge_port = port; 5825 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 5826 break; 5827 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE: 5828 bp->vxlan_gpe_port = port; 5829 bp->vxlan_gpe_fw_dst_port_id = 5830 le16_to_cpu(resp->tunnel_dst_port_id); 5831 break; 5832 default: 5833 break; 5834 } 5835 if (bp->flags & BNXT_FLAG_TPA) 5836 bnxt_set_tpa(bp, true); 5837 5838 err_out: 5839 hwrm_req_drop(bp, req); 5840 return rc; 5841 } 5842 5843 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 5844 { 5845 struct hwrm_cfa_l2_set_rx_mask_input *req; 5846 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5847 int rc; 5848 5849 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); 5850 if (rc) 5851 return rc; 5852 5853 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5854 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 5855 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 5856 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 5857 } 5858 req->mask = cpu_to_le32(vnic->rx_mask); 5859 return hwrm_req_send_silent(bp, req); 5860 } 5861 5862 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5863 { 5864 if (!atomic_dec_and_test(&fltr->refcnt)) 5865 return; 5866 spin_lock_bh(&bp->ntp_fltr_lock); 5867 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 5868 spin_unlock_bh(&bp->ntp_fltr_lock); 5869 return; 5870 } 5871 hlist_del_rcu(&fltr->base.hash); 5872 bnxt_del_one_usr_fltr(bp, &fltr->base); 5873 if (fltr->base.flags) { 5874 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 5875 bp->ntp_fltr_count--; 5876 } 5877 spin_unlock_bh(&bp->ntp_fltr_lock); 5878 kfree_rcu(fltr, base.rcu); 5879 } 5880 5881 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, 5882 struct bnxt_l2_key *key, 5883 u32 idx) 5884 { 5885 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; 5886 struct bnxt_l2_filter *fltr; 5887 5888 hlist_for_each_entry_rcu(fltr, head, base.hash) { 5889 struct bnxt_l2_key *l2_key = &fltr->l2_key; 5890 5891 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && 5892 l2_key->vlan == key->vlan) 5893 return fltr; 5894 } 5895 return NULL; 5896 } 5897 5898 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, 5899 struct bnxt_l2_key *key, 5900 u32 idx) 5901 { 5902 struct bnxt_l2_filter *fltr = NULL; 5903 5904 rcu_read_lock(); 5905 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 5906 if (fltr) 5907 atomic_inc(&fltr->refcnt); 5908 rcu_read_unlock(); 5909 return fltr; 5910 } 5911 5912 #define BNXT_IPV4_4TUPLE(bp, fkeys) \ 5913 
(((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5914 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \ 5915 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5916 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)) 5917 5918 #define BNXT_IPV6_4TUPLE(bp, fkeys) \ 5919 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5920 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \ 5921 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5922 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)) 5923 5924 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys) 5925 { 5926 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5927 if (BNXT_IPV4_4TUPLE(bp, fkeys)) 5928 return sizeof(fkeys->addrs.v4addrs) + 5929 sizeof(fkeys->ports); 5930 5931 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 5932 return sizeof(fkeys->addrs.v4addrs); 5933 } 5934 5935 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 5936 if (BNXT_IPV6_4TUPLE(bp, fkeys)) 5937 return sizeof(fkeys->addrs.v6addrs) + 5938 sizeof(fkeys->ports); 5939 5940 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 5941 return sizeof(fkeys->addrs.v6addrs); 5942 } 5943 5944 return 0; 5945 } 5946 5947 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, 5948 const unsigned char *key) 5949 { 5950 u64 prefix = bp->toeplitz_prefix, hash = 0; 5951 struct bnxt_ipv4_tuple tuple4; 5952 struct bnxt_ipv6_tuple tuple6; 5953 int i, j, len = 0; 5954 u8 *four_tuple; 5955 5956 len = bnxt_get_rss_flow_tuple_len(bp, fkeys); 5957 if (!len) 5958 return 0; 5959 5960 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5961 tuple4.v4addrs = fkeys->addrs.v4addrs; 5962 tuple4.ports = fkeys->ports; 5963 four_tuple = (unsigned char *)&tuple4; 5964 } else { 5965 tuple6.v6addrs = fkeys->addrs.v6addrs; 5966 tuple6.ports = fkeys->ports; 5967 four_tuple = (unsigned char *)&tuple6; 5968 } 5969 5970 for (i = 0, j = 8; i < len; i++, j++) { 5971 u8 byte = four_tuple[i]; 5972 int bit; 5973 5974 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { 5975 if (byte & 0x80) 5976 hash ^= prefix; 5977 } 5978 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; 5979 } 5980 5981 /* The valid part of the hash is in the upper 32 bits. 
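This is the standard Toeplitz hash done in software: 'prefix' is a 64-bit sliding window over the RSS hash key (it is expected to hold the first 8 key bytes on entry, which is why 'j' starts at 8). For every set bit of the input tuple the current window is XORed into 'hash'; the window shifts left one bit per input bit, and its low byte is refilled from the next key byte after each input byte. Taking the upper 32 bits gives the conventional 32-bit Toeplitz result, which is then masked for the ntuple filter hash table.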
*/ 5982 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK; 5983 } 5984 5985 #ifdef CONFIG_RFS_ACCEL 5986 static struct bnxt_l2_filter * 5987 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key) 5988 { 5989 struct bnxt_l2_filter *fltr; 5990 u32 idx; 5991 5992 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5993 BNXT_L2_FLTR_HASH_MASK; 5994 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5995 return fltr; 5996 } 5997 #endif 5998 5999 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, 6000 struct bnxt_l2_key *key, u32 idx) 6001 { 6002 struct hlist_head *head; 6003 6004 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); 6005 fltr->l2_key.vlan = key->vlan; 6006 fltr->base.type = BNXT_FLTR_TYPE_L2; 6007 if (fltr->base.flags) { 6008 int bit_id; 6009 6010 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 6011 bp->max_fltr, 0); 6012 if (bit_id < 0) 6013 return -ENOMEM; 6014 fltr->base.sw_id = (u16)bit_id; 6015 bp->ntp_fltr_count++; 6016 } 6017 head = &bp->l2_fltr_hash_tbl[idx]; 6018 hlist_add_head_rcu(&fltr->base.hash, head); 6019 bnxt_insert_usr_fltr(bp, &fltr->base); 6020 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 6021 atomic_set(&fltr->refcnt, 1); 6022 return 0; 6023 } 6024 6025 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, 6026 struct bnxt_l2_key *key, 6027 gfp_t gfp) 6028 { 6029 struct bnxt_l2_filter *fltr; 6030 u32 idx; 6031 int rc; 6032 6033 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 6034 BNXT_L2_FLTR_HASH_MASK; 6035 fltr = bnxt_lookup_l2_filter(bp, key, idx); 6036 if (fltr) 6037 return fltr; 6038 6039 fltr = kzalloc(sizeof(*fltr), gfp); 6040 if (!fltr) 6041 return ERR_PTR(-ENOMEM); 6042 spin_lock_bh(&bp->ntp_fltr_lock); 6043 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 6044 spin_unlock_bh(&bp->ntp_fltr_lock); 6045 if (rc) { 6046 bnxt_del_l2_filter(bp, fltr); 6047 fltr = ERR_PTR(rc); 6048 } 6049 return fltr; 6050 } 6051 6052 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, 6053 struct bnxt_l2_key *key, 6054 u16 flags) 6055 { 6056 struct bnxt_l2_filter *fltr; 6057 u32 idx; 6058 int rc; 6059 6060 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 6061 BNXT_L2_FLTR_HASH_MASK; 6062 spin_lock_bh(&bp->ntp_fltr_lock); 6063 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 6064 if (fltr) { 6065 fltr = ERR_PTR(-EEXIST); 6066 goto l2_filter_exit; 6067 } 6068 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); 6069 if (!fltr) { 6070 fltr = ERR_PTR(-ENOMEM); 6071 goto l2_filter_exit; 6072 } 6073 fltr->base.flags = flags; 6074 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 6075 if (rc) { 6076 spin_unlock_bh(&bp->ntp_fltr_lock); 6077 bnxt_del_l2_filter(bp, fltr); 6078 return ERR_PTR(rc); 6079 } 6080 6081 l2_filter_exit: 6082 spin_unlock_bh(&bp->ntp_fltr_lock); 6083 return fltr; 6084 } 6085 6086 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) 6087 { 6088 #ifdef CONFIG_BNXT_SRIOV 6089 struct bnxt_vf_info *vf = &pf->vf[vf_idx]; 6090 6091 return vf->fw_fid; 6092 #else 6093 return INVALID_HW_RING_ID; 6094 #endif 6095 } 6096 6097 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) 6098 { 6099 struct hwrm_cfa_l2_filter_free_input *req; 6100 u16 target_id = 0xffff; 6101 int rc; 6102 6103 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 6104 struct bnxt_pf_info *pf = &bp->pf; 6105 6106 if (fltr->base.vf_idx >= pf->active_vfs) 6107 return -EINVAL; 6108 6109 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 6110 if (target_id == 
INVALID_HW_RING_ID) 6111 return -EINVAL; 6112 } 6113 6114 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 6115 if (rc) 6116 return rc; 6117 6118 req->target_id = cpu_to_le16(target_id); 6119 req->l2_filter_id = fltr->base.filter_id; 6120 return hwrm_req_send(bp, req); 6121 } 6122 6123 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) 6124 { 6125 struct hwrm_cfa_l2_filter_alloc_output *resp; 6126 struct hwrm_cfa_l2_filter_alloc_input *req; 6127 u16 target_id = 0xffff; 6128 int rc; 6129 6130 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 6131 struct bnxt_pf_info *pf = &bp->pf; 6132 6133 if (fltr->base.vf_idx >= pf->active_vfs) 6134 return -EINVAL; 6135 6136 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 6137 } 6138 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); 6139 if (rc) 6140 return rc; 6141 6142 req->target_id = cpu_to_le16(target_id); 6143 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 6144 6145 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 6146 req->flags |= 6147 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 6148 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); 6149 req->enables = 6150 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 6151 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 6152 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 6153 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); 6154 eth_broadcast_addr(req->l2_addr_mask); 6155 6156 if (fltr->l2_key.vlan) { 6157 req->enables |= 6158 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | 6159 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | 6160 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); 6161 req->num_vlans = 1; 6162 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan); 6163 req->l2_ivlan_mask = cpu_to_le16(0xfff); 6164 } 6165 6166 resp = hwrm_req_hold(bp, req); 6167 rc = hwrm_req_send(bp, req); 6168 if (!rc) { 6169 fltr->base.filter_id = resp->l2_filter_id; 6170 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 6171 } 6172 hwrm_req_drop(bp, req); 6173 return rc; 6174 } 6175 6176 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 6177 struct bnxt_ntuple_filter *fltr) 6178 { 6179 struct hwrm_cfa_ntuple_filter_free_input *req; 6180 int rc; 6181 6182 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); 6183 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 6184 if (rc) 6185 return rc; 6186 6187 req->ntuple_filter_id = fltr->base.filter_id; 6188 return hwrm_req_send(bp, req); 6189 } 6190 6191 #define BNXT_NTP_FLTR_FLAGS \ 6192 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 6193 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 6194 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 6195 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 6196 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 6197 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 6198 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 6199 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 6200 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 6201 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 6202 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 6203 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 6204 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 6205 6206 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 6207 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 6208 6209 void bnxt_fill_ipv6_mask(__be32 mask[4]) 6210 { 6211 int i; 6212 6213 for (i = 0; i < 4; i++) 6214 mask[i] = cpu_to_be32(~0); 6215 } 6216 6217 static void 6218 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, 6219 struct 
hwrm_cfa_ntuple_filter_alloc_input *req, 6220 struct bnxt_ntuple_filter *fltr) 6221 { 6222 u16 rxq = fltr->base.rxq; 6223 6224 if (fltr->base.flags & BNXT_ACT_RSS_CTX) { 6225 struct ethtool_rxfh_context *ctx; 6226 struct bnxt_rss_ctx *rss_ctx; 6227 struct bnxt_vnic_info *vnic; 6228 6229 ctx = xa_load(&bp->dev->ethtool->rss_ctx, 6230 fltr->base.fw_vnic_id); 6231 if (ctx) { 6232 rss_ctx = ethtool_rxfh_context_priv(ctx); 6233 vnic = &rss_ctx->vnic; 6234 6235 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6236 } 6237 return; 6238 } 6239 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 6240 struct bnxt_vnic_info *vnic; 6241 u32 enables; 6242 6243 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 6244 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6245 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; 6246 req->enables |= cpu_to_le32(enables); 6247 req->rfs_ring_tbl_idx = cpu_to_le16(rxq); 6248 } else { 6249 u32 flags; 6250 6251 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 6252 req->flags |= cpu_to_le32(flags); 6253 req->dst_id = cpu_to_le16(rxq); 6254 } 6255 } 6256 6257 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 6258 struct bnxt_ntuple_filter *fltr) 6259 { 6260 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 6261 struct hwrm_cfa_ntuple_filter_alloc_input *req; 6262 struct bnxt_flow_masks *masks = &fltr->fmasks; 6263 struct flow_keys *keys = &fltr->fkeys; 6264 struct bnxt_l2_filter *l2_fltr; 6265 struct bnxt_vnic_info *vnic; 6266 int rc; 6267 6268 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); 6269 if (rc) 6270 return rc; 6271 6272 l2_fltr = fltr->l2_fltr; 6273 req->l2_filter_id = l2_fltr->base.filter_id; 6274 6275 if (fltr->base.flags & BNXT_ACT_DROP) { 6276 req->flags = 6277 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); 6278 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 6279 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); 6280 } else { 6281 vnic = &bp->vnic_info[fltr->base.rxq + 1]; 6282 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6283 } 6284 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 6285 6286 req->ethertype = htons(ETH_P_IP); 6287 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 6288 req->ip_protocol = keys->basic.ip_proto; 6289 6290 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 6291 req->ethertype = htons(ETH_P_IPV6); 6292 req->ip_addr_type = 6293 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 6294 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; 6295 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; 6296 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; 6297 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; 6298 } else { 6299 req->src_ipaddr[0] = keys->addrs.v4addrs.src; 6300 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; 6301 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; 6302 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; 6303 } 6304 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 6305 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 6306 req->tunnel_type = 6307 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 6308 } 6309 6310 req->src_port = keys->ports.src; 6311 req->src_port_mask = masks->ports.src; 6312 req->dst_port = keys->ports.dst; 6313 req->dst_port_mask = masks->ports.dst; 6314 6315 resp = hwrm_req_hold(bp, req); 6316 rc = hwrm_req_send(bp, req); 6317 if (!rc) 6318 fltr->base.filter_id = resp->ntuple_filter_id; 6319 hwrm_req_drop(bp, req); 6320 return rc; 6321 } 6322 6323 static int 
bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 6324 const u8 *mac_addr) 6325 { 6326 struct bnxt_l2_filter *fltr; 6327 struct bnxt_l2_key key; 6328 int rc; 6329 6330 ether_addr_copy(key.dst_mac_addr, mac_addr); 6331 key.vlan = 0; 6332 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); 6333 if (IS_ERR(fltr)) 6334 return PTR_ERR(fltr); 6335 6336 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; 6337 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); 6338 if (rc) 6339 bnxt_del_l2_filter(bp, fltr); 6340 else 6341 bp->vnic_info[vnic_id].l2_filters[idx] = fltr; 6342 return rc; 6343 } 6344 6345 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 6346 { 6347 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 6348 6349 /* Any associated ntuple filters will also be cleared by firmware. */ 6350 for (i = 0; i < num_of_vnics; i++) { 6351 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6352 6353 for (j = 0; j < vnic->uc_filter_count; j++) { 6354 struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; 6355 6356 bnxt_hwrm_l2_filter_free(bp, fltr); 6357 bnxt_del_l2_filter(bp, fltr); 6358 } 6359 vnic->uc_filter_count = 0; 6360 } 6361 } 6362 6363 #define BNXT_DFLT_TUNL_TPA_BMAP \ 6364 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \ 6365 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \ 6366 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6) 6367 6368 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, 6369 struct hwrm_vnic_tpa_cfg_input *req) 6370 { 6371 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP; 6372 6373 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) 6374 return; 6375 6376 if (bp->vxlan_port) 6377 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN; 6378 if (bp->vxlan_gpe_port) 6379 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE; 6380 if (bp->nge_port) 6381 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE; 6382 6383 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN); 6384 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); 6385 } 6386 6387 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6388 u32 tpa_flags) 6389 { 6390 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 6391 struct hwrm_vnic_tpa_cfg_input *req; 6392 int rc; 6393 6394 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 6395 return 0; 6396 6397 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); 6398 if (rc) 6399 return rc; 6400 6401 if (tpa_flags) { 6402 u16 mss = bp->dev->mtu - 40; 6403 u32 nsegs, n, segs = 0, flags; 6404 6405 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 6406 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 6407 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 6408 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 6409 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 6410 if (tpa_flags & BNXT_FLAG_GRO) 6411 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 6412 6413 req->flags = cpu_to_le32(flags); 6414 6415 req->enables = 6416 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 6417 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 6418 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 6419 6420 /* Number of segs are log2 units, and first packet is not 6421 * included as part of this units. 
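As an example, assuming a 1500 byte MTU (mss = 1460), a 4K BNXT_RX_PAGE_SIZE and the default MAX_SKB_FRAGS of 17: n = 4096 / 1460 = 2 and nsegs = (17 - 1) * 2 = 32, so the non-P5 path below reports ilog2(32) = 5, i.e. up to 2^5 = 32 aggregated segments in addition to the first packet.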
6422 */ 6423 if (mss <= BNXT_RX_PAGE_SIZE) { 6424 n = BNXT_RX_PAGE_SIZE / mss; 6425 nsegs = (MAX_SKB_FRAGS - 1) * n; 6426 } else { 6427 n = mss / BNXT_RX_PAGE_SIZE; 6428 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 6429 n++; 6430 nsegs = (MAX_SKB_FRAGS - n) / n; 6431 } 6432 6433 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6434 segs = MAX_TPA_SEGS_P5; 6435 max_aggs = bp->max_tpa; 6436 } else { 6437 segs = ilog2(nsegs); 6438 } 6439 req->max_agg_segs = cpu_to_le16(segs); 6440 req->max_aggs = cpu_to_le16(max_aggs); 6441 6442 req->min_agg_len = cpu_to_le32(512); 6443 bnxt_hwrm_vnic_update_tunl_tpa(bp, req); 6444 } 6445 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6446 6447 return hwrm_req_send(bp, req); 6448 } 6449 6450 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 6451 { 6452 struct bnxt_ring_grp_info *grp_info; 6453 6454 grp_info = &bp->grp_info[ring->grp_idx]; 6455 return grp_info->cp_fw_ring_id; 6456 } 6457 6458 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 6459 { 6460 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6461 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; 6462 else 6463 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 6464 } 6465 6466 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 6467 { 6468 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6469 return txr->tx_cpr->cp_ring_struct.fw_ring_id; 6470 else 6471 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 6472 } 6473 6474 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 6475 { 6476 int entries; 6477 6478 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6479 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 6480 else 6481 entries = HW_HASH_INDEX_SIZE; 6482 6483 bp->rss_indir_tbl_entries = entries; 6484 bp->rss_indir_tbl = 6485 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); 6486 if (!bp->rss_indir_tbl) 6487 return -ENOMEM; 6488 6489 return 0; 6490 } 6491 6492 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, 6493 struct ethtool_rxfh_context *rss_ctx) 6494 { 6495 u16 max_rings, max_entries, pad, i; 6496 u32 *rss_indir_tbl; 6497 6498 if (!bp->rx_nr_rings) 6499 return; 6500 6501 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6502 max_rings = bp->rx_nr_rings - 1; 6503 else 6504 max_rings = bp->rx_nr_rings; 6505 6506 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 6507 if (rss_ctx) 6508 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx); 6509 else 6510 rss_indir_tbl = &bp->rss_indir_tbl[0]; 6511 6512 for (i = 0; i < max_entries; i++) 6513 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 6514 6515 pad = bp->rss_indir_tbl_entries - max_entries; 6516 if (pad) 6517 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); 6518 } 6519 6520 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 6521 { 6522 u32 i, tbl_size, max_ring = 0; 6523 6524 if (!bp->rss_indir_tbl) 6525 return 0; 6526 6527 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6528 for (i = 0; i < tbl_size; i++) 6529 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 6530 return max_ring; 6531 } 6532 6533 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 6534 { 6535 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6536 if (!rx_rings) 6537 return 0; 6538 return bnxt_calc_nr_ring_pages(rx_rings - 1, 6539 BNXT_RSS_TABLE_ENTRIES_P5); 6540 } 6541 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6542 return 2; 6543 return 1; 6544 } 6545 6546 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6547 { 6548 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 6549 u16 i, j; 6550 6551 /* Fill the RSS 
indirection table with ring group ids */ 6552 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 6553 if (!no_rss) 6554 j = bp->rss_indir_tbl[i]; 6555 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 6556 } 6557 } 6558 6559 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 6560 struct bnxt_vnic_info *vnic) 6561 { 6562 __le16 *ring_tbl = vnic->rss_table; 6563 struct bnxt_rx_ring_info *rxr; 6564 u16 tbl_size, i; 6565 6566 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6567 6568 for (i = 0; i < tbl_size; i++) { 6569 u16 ring_id, j; 6570 6571 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) 6572 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); 6573 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) 6574 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; 6575 else 6576 j = bp->rss_indir_tbl[i]; 6577 rxr = &bp->rx_ring[j]; 6578 6579 ring_id = rxr->rx_ring_struct.fw_ring_id; 6580 *ring_tbl++ = cpu_to_le16(ring_id); 6581 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6582 *ring_tbl++ = cpu_to_le16(ring_id); 6583 } 6584 } 6585 6586 static void 6587 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, 6588 struct bnxt_vnic_info *vnic) 6589 { 6590 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6591 bnxt_fill_hw_rss_tbl_p5(bp, vnic); 6592 if (bp->flags & BNXT_FLAG_CHIP_P7) 6593 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT; 6594 } else { 6595 bnxt_fill_hw_rss_tbl(bp, vnic); 6596 } 6597 6598 if (bp->rss_hash_delta) { 6599 req->hash_type = cpu_to_le32(bp->rss_hash_delta); 6600 if (bp->rss_hash_cfg & bp->rss_hash_delta) 6601 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; 6602 else 6603 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; 6604 } else { 6605 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); 6606 } 6607 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 6608 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 6609 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 6610 } 6611 6612 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6613 bool set_rss) 6614 { 6615 struct hwrm_vnic_rss_cfg_input *req; 6616 int rc; 6617 6618 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || 6619 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 6620 return 0; 6621 6622 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6623 if (rc) 6624 return rc; 6625 6626 if (set_rss) 6627 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6628 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6629 return hwrm_req_send(bp, req); 6630 } 6631 6632 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, 6633 struct bnxt_vnic_info *vnic, bool set_rss) 6634 { 6635 struct hwrm_vnic_rss_cfg_input *req; 6636 dma_addr_t ring_tbl_map; 6637 u32 i, nr_ctxs; 6638 int rc; 6639 6640 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6641 if (rc) 6642 return rc; 6643 6644 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6645 if (!set_rss) 6646 return hwrm_req_send(bp, req); 6647 6648 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6649 ring_tbl_map = vnic->rss_table_dma_addr; 6650 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 6651 6652 hwrm_req_hold(bp, req); 6653 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 6654 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 6655 req->ring_table_pair_index = i; 6656 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 6657 rc = hwrm_req_send(bp, req); 6658 if (rc) 6659 goto exit; 6660 } 6661 6662 exit: 6663 hwrm_req_drop(bp, req); 6664 return rc; 6665 } 6666 6667 static void 
bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 6668 { 6669 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6670 struct hwrm_vnic_rss_qcfg_output *resp; 6671 struct hwrm_vnic_rss_qcfg_input *req; 6672 6673 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 6674 return; 6675 6676 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6677 /* all contexts configured to same hash_type, zero always exists */ 6678 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6679 resp = hwrm_req_hold(bp, req); 6680 if (!hwrm_req_send(bp, req)) { 6681 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; 6682 bp->rss_hash_delta = 0; 6683 } 6684 hwrm_req_drop(bp, req); 6685 } 6686 6687 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6688 { 6689 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh; 6690 struct hwrm_vnic_plcmodes_cfg_input *req; 6691 int rc; 6692 6693 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); 6694 if (rc) 6695 return rc; 6696 6697 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); 6698 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); 6699 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); 6700 6701 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 6702 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 6703 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 6704 req->enables |= 6705 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 6706 req->hds_threshold = cpu_to_le16(hds_thresh); 6707 } 6708 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6709 return hwrm_req_send(bp, req); 6710 } 6711 6712 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, 6713 struct bnxt_vnic_info *vnic, 6714 u16 ctx_idx) 6715 { 6716 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; 6717 6718 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) 6719 return; 6720 6721 req->rss_cos_lb_ctx_id = 6722 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); 6723 6724 hwrm_req_send(bp, req); 6725 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 6726 } 6727 6728 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 6729 { 6730 int i, j; 6731 6732 for (i = 0; i < bp->nr_vnics; i++) { 6733 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6734 6735 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 6736 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 6737 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); 6738 } 6739 } 6740 bp->rsscos_nr_ctxs = 0; 6741 } 6742 6743 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, 6744 struct bnxt_vnic_info *vnic, u16 ctx_idx) 6745 { 6746 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; 6747 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; 6748 int rc; 6749 6750 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); 6751 if (rc) 6752 return rc; 6753 6754 resp = hwrm_req_hold(bp, req); 6755 rc = hwrm_req_send(bp, req); 6756 if (!rc) 6757 vnic->fw_rss_cos_lb_ctx[ctx_idx] = 6758 le16_to_cpu(resp->rss_cos_lb_ctx_id); 6759 hwrm_req_drop(bp, req); 6760 6761 return rc; 6762 } 6763 6764 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 6765 { 6766 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 6767 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 6768 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 6769 } 6770 6771 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6772 { 6773 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6774 struct hwrm_vnic_cfg_input *req; 6775 unsigned int ring = 0, grp_idx; 
6776 u16 def_vlan = 0; 6777 int rc; 6778 6779 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); 6780 if (rc) 6781 return rc; 6782 6783 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6784 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 6785 6786 req->default_rx_ring_id = 6787 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 6788 req->default_cmpl_ring_id = 6789 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 6790 req->enables = 6791 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 6792 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 6793 goto vnic_mru; 6794 } 6795 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 6796 /* Only RSS support for now TBD: COS & LB */ 6797 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 6798 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6799 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6800 VNIC_CFG_REQ_ENABLES_MRU); 6801 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 6802 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); 6803 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6804 VNIC_CFG_REQ_ENABLES_MRU); 6805 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 6806 } else { 6807 req->rss_rule = cpu_to_le16(0xffff); 6808 } 6809 6810 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 6811 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 6812 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 6813 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 6814 } else { 6815 req->cos_rule = cpu_to_le16(0xffff); 6816 } 6817 6818 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 6819 ring = 0; 6820 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 6821 ring = vnic->vnic_id - 1; 6822 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 6823 ring = bp->rx_nr_rings - 1; 6824 6825 grp_idx = bp->rx_ring[ring].bnapi->index; 6826 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 6827 req->lb_rule = cpu_to_le16(0xffff); 6828 vnic_mru: 6829 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 6830 req->mru = cpu_to_le16(vnic->mru); 6831 6832 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6833 #ifdef CONFIG_BNXT_SRIOV 6834 if (BNXT_VF(bp)) 6835 def_vlan = bp->vf.vlan; 6836 #endif 6837 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 6838 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 6839 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) 6840 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 6841 6842 return hwrm_req_send(bp, req); 6843 } 6844 6845 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, 6846 struct bnxt_vnic_info *vnic) 6847 { 6848 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { 6849 struct hwrm_vnic_free_input *req; 6850 6851 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) 6852 return; 6853 6854 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6855 6856 hwrm_req_send(bp, req); 6857 vnic->fw_vnic_id = INVALID_HW_RING_ID; 6858 } 6859 } 6860 6861 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 6862 { 6863 u16 i; 6864 6865 for (i = 0; i < bp->nr_vnics; i++) 6866 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); 6867 } 6868 6869 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6870 unsigned int start_rx_ring_idx, 6871 unsigned int nr_rings) 6872 { 6873 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 6874 struct hwrm_vnic_alloc_output *resp; 6875 struct hwrm_vnic_alloc_input *req; 6876 int rc; 6877 6878 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); 6879 if (rc) 6880 return rc; 6881 6882 if (bp->flags & 
BNXT_FLAG_CHIP_P5_PLUS) 6883 goto vnic_no_ring_grps; 6884 6885 /* map ring groups to this vnic */ 6886 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 6887 grp_idx = bp->rx_ring[i].bnapi->index; 6888 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 6889 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 6890 j, nr_rings); 6891 break; 6892 } 6893 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 6894 } 6895 6896 vnic_no_ring_grps: 6897 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 6898 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 6899 if (vnic->vnic_id == BNXT_VNIC_DEFAULT) 6900 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 6901 6902 resp = hwrm_req_hold(bp, req); 6903 rc = hwrm_req_send(bp, req); 6904 if (!rc) 6905 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 6906 hwrm_req_drop(bp, req); 6907 return rc; 6908 } 6909 6910 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 6911 { 6912 struct hwrm_vnic_qcaps_output *resp; 6913 struct hwrm_vnic_qcaps_input *req; 6914 int rc; 6915 6916 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 6917 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; 6918 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; 6919 if (bp->hwrm_spec_code < 0x10600) 6920 return 0; 6921 6922 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); 6923 if (rc) 6924 return rc; 6925 6926 resp = hwrm_req_hold(bp, req); 6927 rc = hwrm_req_send(bp, req); 6928 if (!rc) { 6929 u32 flags = le32_to_cpu(resp->flags); 6930 6931 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 6932 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 6933 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; 6934 if (flags & 6935 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 6936 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 6937 6938 /* Older P5 fw before EXT_HW_STATS support did not set 6939 * VLAN_STRIP_CAP properly. 
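Work around this by assuming VLAN RX stripping is supported on P5 chips whenever EXT_HW_STATS support is absent.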
6940 */ 6941 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 6942 (BNXT_CHIP_P5(bp) && 6943 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 6944 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 6945 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) 6946 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; 6947 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) 6948 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; 6949 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 6950 if (bp->max_tpa_v2) { 6951 if (BNXT_CHIP_P5(bp)) 6952 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 6953 else 6954 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; 6955 } 6956 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) 6957 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; 6958 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) 6959 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; 6960 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) 6961 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; 6962 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) 6963 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; 6964 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) 6965 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; 6966 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP) 6967 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH; 6968 } 6969 hwrm_req_drop(bp, req); 6970 return rc; 6971 } 6972 6973 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 6974 { 6975 struct hwrm_ring_grp_alloc_output *resp; 6976 struct hwrm_ring_grp_alloc_input *req; 6977 int rc; 6978 u16 i; 6979 6980 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6981 return 0; 6982 6983 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); 6984 if (rc) 6985 return rc; 6986 6987 resp = hwrm_req_hold(bp, req); 6988 for (i = 0; i < bp->rx_nr_rings; i++) { 6989 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 6990 6991 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 6992 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 6993 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 6994 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 6995 6996 rc = hwrm_req_send(bp, req); 6997 6998 if (rc) 6999 break; 7000 7001 bp->grp_info[grp_idx].fw_grp_id = 7002 le32_to_cpu(resp->ring_group_id); 7003 } 7004 hwrm_req_drop(bp, req); 7005 return rc; 7006 } 7007 7008 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 7009 { 7010 struct hwrm_ring_grp_free_input *req; 7011 u16 i; 7012 7013 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7014 return; 7015 7016 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) 7017 return; 7018 7019 hwrm_req_hold(bp, req); 7020 for (i = 0; i < bp->cp_nr_rings; i++) { 7021 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 7022 continue; 7023 req->ring_group_id = 7024 cpu_to_le32(bp->grp_info[i].fw_grp_id); 7025 7026 hwrm_req_send(bp, req); 7027 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 7028 } 7029 hwrm_req_drop(bp, req); 7030 } 7031 7032 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type, 7033 struct hwrm_ring_alloc_input *req, 7034 struct bnxt_ring_struct *ring) 7035 { 7036 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx]; 7037 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID | 7038 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID; 7039 7040 if (ring_type == HWRM_RING_ALLOC_AGG) { 7041 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 7042 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 7043 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 7044 
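/* rx_ring_id set above ties this aggregation ring to its parent RX ring, so mark that field as valid for firmware. */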
enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID; 7045 } else { 7046 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 7047 if (NET_IP_ALIGN == 2) 7048 req->flags = 7049 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD); 7050 } 7051 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 7052 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 7053 req->enables |= cpu_to_le32(enables); 7054 } 7055 7056 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 7057 struct bnxt_ring_struct *ring, 7058 u32 ring_type, u32 map_index) 7059 { 7060 struct hwrm_ring_alloc_output *resp; 7061 struct hwrm_ring_alloc_input *req; 7062 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 7063 struct bnxt_ring_grp_info *grp_info; 7064 int rc, err = 0; 7065 u16 ring_id; 7066 7067 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); 7068 if (rc) 7069 goto exit; 7070 7071 req->enables = 0; 7072 if (rmem->nr_pages > 1) { 7073 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 7074 /* Page size is in log2 units */ 7075 req->page_size = BNXT_PAGE_SHIFT; 7076 req->page_tbl_depth = 1; 7077 } else { 7078 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 7079 } 7080 req->fbo = 0; 7081 /* Association of ring index with doorbell index and MSIX number */ 7082 req->logical_id = cpu_to_le16(map_index); 7083 7084 switch (ring_type) { 7085 case HWRM_RING_ALLOC_TX: { 7086 struct bnxt_tx_ring_info *txr; 7087 u16 flags = 0; 7088 7089 txr = container_of(ring, struct bnxt_tx_ring_info, 7090 tx_ring_struct); 7091 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 7092 /* Association of transmit ring with completion ring */ 7093 grp_info = &bp->grp_info[ring->grp_idx]; 7094 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 7095 req->length = cpu_to_le32(bp->tx_ring_mask + 1); 7096 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 7097 req->queue_id = cpu_to_le16(ring->queue_id); 7098 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) 7099 req->cmpl_coal_cnt = 7100 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; 7101 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg) 7102 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE; 7103 req->flags = cpu_to_le16(flags); 7104 break; 7105 } 7106 case HWRM_RING_ALLOC_RX: 7107 case HWRM_RING_ALLOC_AGG: 7108 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 7109 req->length = (ring_type == HWRM_RING_ALLOC_RX) ? 
7110 cpu_to_le32(bp->rx_ring_mask + 1) : 7111 cpu_to_le32(bp->rx_agg_ring_mask + 1); 7112 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7113 bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring); 7114 break; 7115 case HWRM_RING_ALLOC_CMPL: 7116 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 7117 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 7118 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7119 /* Association of cp ring with nq */ 7120 grp_info = &bp->grp_info[map_index]; 7121 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 7122 req->cq_handle = cpu_to_le64(ring->handle); 7123 req->enables |= cpu_to_le32( 7124 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 7125 } else { 7126 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 7127 } 7128 break; 7129 case HWRM_RING_ALLOC_NQ: 7130 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 7131 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 7132 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 7133 break; 7134 default: 7135 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 7136 ring_type); 7137 return -EINVAL; 7138 } 7139 7140 resp = hwrm_req_hold(bp, req); 7141 rc = hwrm_req_send(bp, req); 7142 err = le16_to_cpu(resp->error_code); 7143 ring_id = le16_to_cpu(resp->ring_id); 7144 hwrm_req_drop(bp, req); 7145 7146 exit: 7147 if (rc || err) { 7148 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", 7149 ring_type, rc, err); 7150 return -EIO; 7151 } 7152 ring->fw_ring_id = ring_id; 7153 return rc; 7154 } 7155 7156 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 7157 { 7158 int rc; 7159 7160 if (BNXT_PF(bp)) { 7161 struct hwrm_func_cfg_input *req; 7162 7163 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 7164 if (rc) 7165 return rc; 7166 7167 req->fid = cpu_to_le16(0xffff); 7168 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 7169 req->async_event_cr = cpu_to_le16(idx); 7170 return hwrm_req_send(bp, req); 7171 } else { 7172 struct hwrm_func_vf_cfg_input *req; 7173 7174 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); 7175 if (rc) 7176 return rc; 7177 7178 req->enables = 7179 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 7180 req->async_event_cr = cpu_to_le16(idx); 7181 return hwrm_req_send(bp, req); 7182 } 7183 } 7184 7185 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, 7186 u32 ring_type) 7187 { 7188 switch (ring_type) { 7189 case HWRM_RING_ALLOC_TX: 7190 db->db_ring_mask = bp->tx_ring_mask; 7191 break; 7192 case HWRM_RING_ALLOC_RX: 7193 db->db_ring_mask = bp->rx_ring_mask; 7194 break; 7195 case HWRM_RING_ALLOC_AGG: 7196 db->db_ring_mask = bp->rx_agg_ring_mask; 7197 break; 7198 case HWRM_RING_ALLOC_CMPL: 7199 case HWRM_RING_ALLOC_NQ: 7200 db->db_ring_mask = bp->cp_ring_mask; 7201 break; 7202 } 7203 if (bp->flags & BNXT_FLAG_CHIP_P7) { 7204 db->db_epoch_mask = db->db_ring_mask + 1; 7205 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); 7206 } 7207 } 7208 7209 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 7210 u32 map_idx, u32 xid) 7211 { 7212 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7213 switch (ring_type) { 7214 case HWRM_RING_ALLOC_TX: 7215 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 7216 break; 7217 case HWRM_RING_ALLOC_RX: 7218 case HWRM_RING_ALLOC_AGG: 7219 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 7220 break; 7221 case HWRM_RING_ALLOC_CMPL: 7222 db->db_key64 = DBR_PATH_L2; 7223 break; 7224 case HWRM_RING_ALLOC_NQ: 7225 db->db_key64 = DBR_PATH_L2; 7226 break; 7227 } 7228 db->db_key64 |= (u64)xid << DBR_XID_SFT; 7229 7230 if 
(bp->flags & BNXT_FLAG_CHIP_P7) 7231 db->db_key64 |= DBR_VALID; 7232 7233 db->doorbell = bp->bar1 + bp->db_offset; 7234 } else { 7235 db->doorbell = bp->bar1 + map_idx * 0x80; 7236 switch (ring_type) { 7237 case HWRM_RING_ALLOC_TX: 7238 db->db_key32 = DB_KEY_TX; 7239 break; 7240 case HWRM_RING_ALLOC_RX: 7241 case HWRM_RING_ALLOC_AGG: 7242 db->db_key32 = DB_KEY_RX; 7243 break; 7244 case HWRM_RING_ALLOC_CMPL: 7245 db->db_key32 = DB_KEY_CP; 7246 break; 7247 } 7248 } 7249 bnxt_set_db_mask(bp, db, ring_type); 7250 } 7251 7252 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, 7253 struct bnxt_rx_ring_info *rxr) 7254 { 7255 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 7256 struct bnxt_napi *bnapi = rxr->bnapi; 7257 u32 type = HWRM_RING_ALLOC_RX; 7258 u32 map_idx = bnapi->index; 7259 int rc; 7260 7261 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7262 if (rc) 7263 return rc; 7264 7265 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 7266 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 7267 7268 return 0; 7269 } 7270 7271 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp, 7272 struct bnxt_rx_ring_info *rxr) 7273 { 7274 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 7275 u32 type = HWRM_RING_ALLOC_AGG; 7276 u32 grp_idx = ring->grp_idx; 7277 u32 map_idx; 7278 int rc; 7279 7280 map_idx = grp_idx + bp->rx_nr_rings; 7281 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7282 if (rc) 7283 return rc; 7284 7285 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 7286 ring->fw_ring_id); 7287 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 7288 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 7289 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 7290 7291 return 0; 7292 } 7293 7294 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp, 7295 struct bnxt_cp_ring_info *cpr) 7296 { 7297 const u32 type = HWRM_RING_ALLOC_CMPL; 7298 struct bnxt_napi *bnapi = cpr->bnapi; 7299 struct bnxt_ring_struct *ring; 7300 u32 map_idx = bnapi->index; 7301 int rc; 7302 7303 ring = &cpr->cp_ring_struct; 7304 ring->handle = BNXT_SET_NQ_HDL(cpr); 7305 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7306 if (rc) 7307 return rc; 7308 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 7309 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 7310 return 0; 7311 } 7312 7313 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp, 7314 struct bnxt_tx_ring_info *txr, u32 tx_idx) 7315 { 7316 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 7317 const u32 type = HWRM_RING_ALLOC_TX; 7318 int rc; 7319 7320 rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx); 7321 if (rc) 7322 return rc; 7323 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id); 7324 return 0; 7325 } 7326 7327 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 7328 { 7329 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 7330 int i, rc = 0; 7331 u32 type; 7332 7333 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7334 type = HWRM_RING_ALLOC_NQ; 7335 else 7336 type = HWRM_RING_ALLOC_CMPL; 7337 for (i = 0; i < bp->cp_nr_rings; i++) { 7338 struct bnxt_napi *bnapi = bp->bnapi[i]; 7339 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7340 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 7341 u32 map_idx = ring->map_idx; 7342 unsigned int vector; 7343 7344 vector = bp->irq_tbl[map_idx].vector; 7345 disable_irq_nosync(vector); 7346 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7347 if (rc) { 7348 enable_irq(vector); 7349 goto err_out; 7350 } 7351 bnxt_set_db(bp, &cpr->cp_db, type, 
map_idx, ring->fw_ring_id); 7352 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 7353 enable_irq(vector); 7354 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 7355 7356 if (!i) { 7357 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 7358 if (rc) 7359 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 7360 } 7361 } 7362 7363 for (i = 0; i < bp->tx_nr_rings; i++) { 7364 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 7365 7366 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7367 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); 7368 if (rc) 7369 goto err_out; 7370 } 7371 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i); 7372 if (rc) 7373 goto err_out; 7374 } 7375 7376 for (i = 0; i < bp->rx_nr_rings; i++) { 7377 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7378 7379 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); 7380 if (rc) 7381 goto err_out; 7382 /* If we have agg rings, post agg buffers first. */ 7383 if (!agg_rings) 7384 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 7385 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7386 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); 7387 if (rc) 7388 goto err_out; 7389 } 7390 } 7391 7392 if (agg_rings) { 7393 for (i = 0; i < bp->rx_nr_rings; i++) { 7394 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]); 7395 if (rc) 7396 goto err_out; 7397 } 7398 } 7399 err_out: 7400 return rc; 7401 } 7402 7403 static void bnxt_cancel_dim(struct bnxt *bp) 7404 { 7405 int i; 7406 7407 /* DIM work is initialized in bnxt_enable_napi(). Proceed only 7408 * if NAPI is enabled. 7409 */ 7410 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 7411 return; 7412 7413 /* Make sure NAPI sees that the VNIC is disabled */ 7414 synchronize_net(); 7415 for (i = 0; i < bp->rx_nr_rings; i++) { 7416 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7417 struct bnxt_napi *bnapi = rxr->bnapi; 7418 7419 cancel_work_sync(&bnapi->cp_ring.dim.work); 7420 } 7421 } 7422 7423 static int hwrm_ring_free_send_msg(struct bnxt *bp, 7424 struct bnxt_ring_struct *ring, 7425 u32 ring_type, int cmpl_ring_id) 7426 { 7427 struct hwrm_ring_free_output *resp; 7428 struct hwrm_ring_free_input *req; 7429 u16 error_code = 0; 7430 int rc; 7431 7432 if (BNXT_NO_FW_ACCESS(bp)) 7433 return 0; 7434 7435 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); 7436 if (rc) 7437 goto exit; 7438 7439 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); 7440 req->ring_type = ring_type; 7441 req->ring_id = cpu_to_le16(ring->fw_ring_id); 7442 7443 resp = hwrm_req_hold(bp, req); 7444 rc = hwrm_req_send(bp, req); 7445 error_code = le16_to_cpu(resp->error_code); 7446 hwrm_req_drop(bp, req); 7447 exit: 7448 if (rc || error_code) { 7449 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 7450 ring_type, rc, error_code); 7451 return -EIO; 7452 } 7453 return 0; 7454 } 7455 7456 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, 7457 struct bnxt_tx_ring_info *txr, 7458 bool close_path) 7459 { 7460 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 7461 u32 cmpl_ring_id; 7462 7463 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7464 return; 7465 7466 cmpl_ring_id = close_path ? 
bnxt_cp_ring_for_tx(bp, txr) : 7467 INVALID_HW_RING_ID; 7468 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, 7469 cmpl_ring_id); 7470 ring->fw_ring_id = INVALID_HW_RING_ID; 7471 } 7472 7473 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp, 7474 struct bnxt_rx_ring_info *rxr, 7475 bool close_path) 7476 { 7477 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 7478 u32 grp_idx = rxr->bnapi->index; 7479 u32 cmpl_ring_id; 7480 7481 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7482 return; 7483 7484 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7485 hwrm_ring_free_send_msg(bp, ring, 7486 RING_FREE_REQ_RING_TYPE_RX, 7487 close_path ? cmpl_ring_id : 7488 INVALID_HW_RING_ID); 7489 ring->fw_ring_id = INVALID_HW_RING_ID; 7490 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; 7491 } 7492 7493 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp, 7494 struct bnxt_rx_ring_info *rxr, 7495 bool close_path) 7496 { 7497 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 7498 u32 grp_idx = rxr->bnapi->index; 7499 u32 type, cmpl_ring_id; 7500 7501 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7502 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 7503 else 7504 type = RING_FREE_REQ_RING_TYPE_RX; 7505 7506 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7507 return; 7508 7509 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7510 hwrm_ring_free_send_msg(bp, ring, type, 7511 close_path ? cmpl_ring_id : 7512 INVALID_HW_RING_ID); 7513 ring->fw_ring_id = INVALID_HW_RING_ID; 7514 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; 7515 } 7516 7517 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp, 7518 struct bnxt_cp_ring_info *cpr) 7519 { 7520 struct bnxt_ring_struct *ring; 7521 7522 ring = &cpr->cp_ring_struct; 7523 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7524 return; 7525 7526 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL, 7527 INVALID_HW_RING_ID); 7528 ring->fw_ring_id = INVALID_HW_RING_ID; 7529 } 7530 7531 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 7532 { 7533 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 7534 int i, size = ring->ring_mem.page_size; 7535 7536 cpr->cp_raw_cons = 0; 7537 cpr->toggle = 0; 7538 7539 for (i = 0; i < bp->cp_nr_pages; i++) 7540 if (cpr->cp_desc_ring[i]) 7541 memset(cpr->cp_desc_ring[i], 0, size); 7542 } 7543 7544 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 7545 { 7546 u32 type; 7547 int i; 7548 7549 if (!bp->bnapi) 7550 return; 7551 7552 for (i = 0; i < bp->tx_nr_rings; i++) 7553 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path); 7554 7555 bnxt_cancel_dim(bp); 7556 for (i = 0; i < bp->rx_nr_rings; i++) { 7557 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); 7558 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path); 7559 } 7560 7561 /* The completion rings are about to be freed. After that the 7562 * IRQ doorbell will not work anymore. So we need to disable 7563 * IRQ here. 
7564 */ 7565 bnxt_disable_int_sync(bp); 7566 7567 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7568 type = RING_FREE_REQ_RING_TYPE_NQ; 7569 else 7570 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 7571 for (i = 0; i < bp->cp_nr_rings; i++) { 7572 struct bnxt_napi *bnapi = bp->bnapi[i]; 7573 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7574 struct bnxt_ring_struct *ring; 7575 int j; 7576 7577 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) 7578 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]); 7579 7580 ring = &cpr->cp_ring_struct; 7581 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7582 hwrm_ring_free_send_msg(bp, ring, type, 7583 INVALID_HW_RING_ID); 7584 ring->fw_ring_id = INVALID_HW_RING_ID; 7585 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 7586 } 7587 } 7588 } 7589 7590 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7591 bool shared); 7592 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7593 bool shared); 7594 7595 static int bnxt_hwrm_get_rings(struct bnxt *bp) 7596 { 7597 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7598 struct hwrm_func_qcfg_output *resp; 7599 struct hwrm_func_qcfg_input *req; 7600 int rc; 7601 7602 if (bp->hwrm_spec_code < 0x10601) 7603 return 0; 7604 7605 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7606 if (rc) 7607 return rc; 7608 7609 req->fid = cpu_to_le16(0xffff); 7610 resp = hwrm_req_hold(bp, req); 7611 rc = hwrm_req_send(bp, req); 7612 if (rc) { 7613 hwrm_req_drop(bp, req); 7614 return rc; 7615 } 7616 7617 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7618 if (BNXT_NEW_RM(bp)) { 7619 u16 cp, stats; 7620 7621 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 7622 hw_resc->resv_hw_ring_grps = 7623 le32_to_cpu(resp->alloc_hw_ring_grps); 7624 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 7625 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); 7626 cp = le16_to_cpu(resp->alloc_cmpl_rings); 7627 stats = le16_to_cpu(resp->alloc_stat_ctx); 7628 hw_resc->resv_irqs = cp; 7629 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7630 int rx = hw_resc->resv_rx_rings; 7631 int tx = hw_resc->resv_tx_rings; 7632 7633 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7634 rx >>= 1; 7635 if (cp < (rx + tx)) { 7636 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); 7637 if (rc) 7638 goto get_rings_exit; 7639 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7640 rx <<= 1; 7641 hw_resc->resv_rx_rings = rx; 7642 hw_resc->resv_tx_rings = tx; 7643 } 7644 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 7645 hw_resc->resv_hw_ring_grps = rx; 7646 } 7647 hw_resc->resv_cp_rings = cp; 7648 hw_resc->resv_stat_ctxs = stats; 7649 } 7650 get_rings_exit: 7651 hwrm_req_drop(bp, req); 7652 return rc; 7653 } 7654 7655 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 7656 { 7657 struct hwrm_func_qcfg_output *resp; 7658 struct hwrm_func_qcfg_input *req; 7659 int rc; 7660 7661 if (bp->hwrm_spec_code < 0x10601) 7662 return 0; 7663 7664 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7665 if (rc) 7666 return rc; 7667 7668 req->fid = cpu_to_le16(fid); 7669 resp = hwrm_req_hold(bp, req); 7670 rc = hwrm_req_send(bp, req); 7671 if (!rc) 7672 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7673 7674 hwrm_req_drop(bp, req); 7675 return rc; 7676 } 7677 7678 static bool bnxt_rfs_supported(struct bnxt *bp); 7679 7680 static struct hwrm_func_cfg_input * 7681 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7682 { 7683 struct hwrm_func_cfg_input *req; 7684 u32 enables = 0; 7685 7686 if 
(bnxt_hwrm_func_cfg_short_req_init(bp, &req)) 7687 return NULL; 7688 7689 req->fid = cpu_to_le16(0xffff); 7690 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7691 req->num_tx_rings = cpu_to_le16(hwr->tx); 7692 if (BNXT_NEW_RM(bp)) { 7693 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 7694 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7695 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7696 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 7697 enables |= hwr->cp_p5 ? 7698 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7699 } else { 7700 enables |= hwr->cp ? 7701 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7702 enables |= hwr->grp ? 7703 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7704 } 7705 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 7706 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 7707 0; 7708 req->num_rx_rings = cpu_to_le16(hwr->rx); 7709 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7710 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7711 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7712 req->num_msix = cpu_to_le16(hwr->cp); 7713 } else { 7714 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7715 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7716 } 7717 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7718 req->num_vnics = cpu_to_le16(hwr->vnic); 7719 } 7720 req->enables = cpu_to_le32(enables); 7721 return req; 7722 } 7723 7724 static struct hwrm_func_vf_cfg_input * 7725 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7726 { 7727 struct hwrm_func_vf_cfg_input *req; 7728 u32 enables = 0; 7729 7730 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 7731 return NULL; 7732 7733 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7734 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 7735 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7736 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7737 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7738 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7739 enables |= hwr->cp_p5 ? 7740 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7741 } else { 7742 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7743 enables |= hwr->grp ? 7744 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7745 } 7746 enables |= hwr->vnic ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 7747 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 7748 7749 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 7750 req->num_tx_rings = cpu_to_le16(hwr->tx); 7751 req->num_rx_rings = cpu_to_le16(hwr->rx); 7752 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7753 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7754 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7755 } else { 7756 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7757 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7758 } 7759 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7760 req->num_vnics = cpu_to_le16(hwr->vnic); 7761 7762 req->enables = cpu_to_le32(enables); 7763 return req; 7764 } 7765 7766 static int 7767 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7768 { 7769 struct hwrm_func_cfg_input *req; 7770 int rc; 7771 7772 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7773 if (!req) 7774 return -ENOMEM; 7775 7776 if (!req->enables) { 7777 hwrm_req_drop(bp, req); 7778 return 0; 7779 } 7780 7781 rc = hwrm_req_send(bp, req); 7782 if (rc) 7783 return rc; 7784 7785 if (bp->hwrm_spec_code < 0x10601) 7786 bp->hw_resc.resv_tx_rings = hwr->tx; 7787 7788 return bnxt_hwrm_get_rings(bp); 7789 } 7790 7791 static int 7792 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7793 { 7794 struct hwrm_func_vf_cfg_input *req; 7795 int rc; 7796 7797 if (!BNXT_NEW_RM(bp)) { 7798 bp->hw_resc.resv_tx_rings = hwr->tx; 7799 return 0; 7800 } 7801 7802 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7803 if (!req) 7804 return -ENOMEM; 7805 7806 rc = hwrm_req_send(bp, req); 7807 if (rc) 7808 return rc; 7809 7810 return bnxt_hwrm_get_rings(bp); 7811 } 7812 7813 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7814 { 7815 if (BNXT_PF(bp)) 7816 return bnxt_hwrm_reserve_pf_rings(bp, hwr); 7817 else 7818 return bnxt_hwrm_reserve_vf_rings(bp, hwr); 7819 } 7820 7821 int bnxt_nq_rings_in_use(struct bnxt *bp) 7822 { 7823 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); 7824 } 7825 7826 static int bnxt_cp_rings_in_use(struct bnxt *bp) 7827 { 7828 int cp; 7829 7830 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7831 return bnxt_nq_rings_in_use(bp); 7832 7833 cp = bp->tx_nr_rings + bp->rx_nr_rings; 7834 return cp; 7835 } 7836 7837 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 7838 { 7839 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); 7840 } 7841 7842 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7843 { 7844 if (!hwr->grp) 7845 return 0; 7846 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7847 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); 7848 7849 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7850 rss_ctx *= hwr->vnic; 7851 return rss_ctx; 7852 } 7853 if (BNXT_VF(bp)) 7854 return BNXT_VF_MAX_RSS_CTX; 7855 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) 7856 return hwr->grp + 1; 7857 return 1; 7858 } 7859 7860 /* Check if a default RSS map needs to be setup. This function is only 7861 * used on older firmware that does not require reserving RX rings. 
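 * The default RSS indirection table is refreshed here whenever the
 * RX ring count changes, unless the user has explicitly configured
 * the RSS map.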
7862 */ 7863 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 7864 { 7865 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7866 7867 /* The RSS map is valid for RX rings set to resv_rx_rings */ 7868 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 7869 hw_resc->resv_rx_rings = bp->rx_nr_rings; 7870 if (!netif_is_rxfh_configured(bp->dev)) 7871 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7872 } 7873 } 7874 7875 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) 7876 { 7877 if (bp->flags & BNXT_FLAG_RFS) { 7878 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7879 return 2 + bp->num_rss_ctx; 7880 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7881 return rx_rings + 1; 7882 } 7883 return 1; 7884 } 7885 7886 static bool bnxt_need_reserve_rings(struct bnxt *bp) 7887 { 7888 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7889 int cp = bnxt_cp_rings_in_use(bp); 7890 int nq = bnxt_nq_rings_in_use(bp); 7891 int rx = bp->rx_nr_rings, stat; 7892 int vnic, grp = rx; 7893 7894 /* Old firmware does not need RX ring reservations but we still 7895 * need to setup a default RSS map when needed. With new firmware 7896 * we go through RX ring reservations first and then set up the 7897 * RSS map for the successfully reserved RX rings when needed. 7898 */ 7899 if (!BNXT_NEW_RM(bp)) 7900 bnxt_check_rss_tbl_no_rmgr(bp); 7901 7902 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7903 bp->hwrm_spec_code >= 0x10601) 7904 return true; 7905 7906 if (!BNXT_NEW_RM(bp)) 7907 return false; 7908 7909 vnic = bnxt_get_total_vnics(bp, rx); 7910 7911 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7912 rx <<= 1; 7913 stat = bnxt_get_func_stat_ctxs(bp); 7914 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 7915 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 7916 (hw_resc->resv_hw_ring_grps != grp && 7917 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) 7918 return true; 7919 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && 7920 hw_resc->resv_irqs != nq) 7921 return true; 7922 return false; 7923 } 7924 7925 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7926 { 7927 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7928 7929 hwr->tx = hw_resc->resv_tx_rings; 7930 if (BNXT_NEW_RM(bp)) { 7931 hwr->rx = hw_resc->resv_rx_rings; 7932 hwr->cp = hw_resc->resv_irqs; 7933 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7934 hwr->cp_p5 = hw_resc->resv_cp_rings; 7935 hwr->grp = hw_resc->resv_hw_ring_grps; 7936 hwr->vnic = hw_resc->resv_vnics; 7937 hwr->stat = hw_resc->resv_stat_ctxs; 7938 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; 7939 } 7940 } 7941 7942 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7943 { 7944 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && 7945 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); 7946 } 7947 7948 static int bnxt_get_avail_msix(struct bnxt *bp, int num); 7949 7950 static int __bnxt_reserve_rings(struct bnxt *bp) 7951 { 7952 struct bnxt_hw_rings hwr = {0}; 7953 int rx_rings, old_rx_rings, rc; 7954 int cp = bp->cp_nr_rings; 7955 int ulp_msix = 0; 7956 bool sh = false; 7957 int tx_cp; 7958 7959 if (!bnxt_need_reserve_rings(bp)) 7960 return 0; 7961 7962 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 7963 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 7964 if (!ulp_msix) 7965 bnxt_set_ulp_stat_ctxs(bp, 0); 7966 7967 if (ulp_msix > bp->ulp_num_msix_want) 7968 ulp_msix = bp->ulp_num_msix_want; 7969 hwr.cp = cp + ulp_msix; 7970 } else { 7971 hwr.cp = bnxt_nq_rings_in_use(bp); 7972 } 7973 7974 hwr.tx 
= bp->tx_nr_rings; 7975 hwr.rx = bp->rx_nr_rings; 7976 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7977 sh = true; 7978 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7979 hwr.cp_p5 = hwr.rx + hwr.tx; 7980 7981 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); 7982 7983 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7984 hwr.rx <<= 1; 7985 hwr.grp = bp->rx_nr_rings; 7986 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 7987 hwr.stat = bnxt_get_func_stat_ctxs(bp); 7988 old_rx_rings = bp->hw_resc.resv_rx_rings; 7989 7990 rc = bnxt_hwrm_reserve_rings(bp, &hwr); 7991 if (rc) 7992 return rc; 7993 7994 bnxt_copy_reserved_rings(bp, &hwr); 7995 7996 rx_rings = hwr.rx; 7997 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7998 if (hwr.rx >= 2) { 7999 rx_rings = hwr.rx >> 1; 8000 } else { 8001 if (netif_running(bp->dev)) 8002 return -ENOMEM; 8003 8004 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 8005 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 8006 bp->dev->hw_features &= ~NETIF_F_LRO; 8007 bp->dev->features &= ~NETIF_F_LRO; 8008 bnxt_set_ring_params(bp); 8009 } 8010 } 8011 rx_rings = min_t(int, rx_rings, hwr.grp); 8012 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); 8013 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) 8014 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); 8015 hwr.cp = min_t(int, hwr.cp, hwr.stat); 8016 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); 8017 if (bp->flags & BNXT_FLAG_AGG_RINGS) 8018 hwr.rx = rx_rings << 1; 8019 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); 8020 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 8021 bp->tx_nr_rings = hwr.tx; 8022 8023 /* If we cannot reserve all the RX rings, reset the RSS map only 8024 * if absolutely necessary 8025 */ 8026 if (rx_rings != bp->rx_nr_rings) { 8027 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 8028 rx_rings, bp->rx_nr_rings); 8029 if (netif_is_rxfh_configured(bp->dev) && 8030 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 8031 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 8032 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 8033 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 8034 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 8035 } 8036 } 8037 bp->rx_nr_rings = rx_rings; 8038 bp->cp_nr_rings = hwr.cp; 8039 8040 if (!bnxt_rings_ok(bp, &hwr)) 8041 return -ENOMEM; 8042 8043 if (old_rx_rings != bp->hw_resc.resv_rx_rings && 8044 !netif_is_rxfh_configured(bp->dev)) 8045 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 8046 8047 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { 8048 int resv_msix, resv_ctx, ulp_ctxs; 8049 struct bnxt_hw_resc *hw_resc; 8050 8051 hw_resc = &bp->hw_resc; 8052 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; 8053 ulp_msix = min_t(int, resv_msix, ulp_msix); 8054 bnxt_set_ulp_msix_num(bp, ulp_msix); 8055 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; 8056 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); 8057 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); 8058 } 8059 8060 return rc; 8061 } 8062 8063 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8064 { 8065 struct hwrm_func_vf_cfg_input *req; 8066 u32 flags; 8067 8068 if (!BNXT_NEW_RM(bp)) 8069 return 0; 8070 8071 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 8072 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 8073 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 8074 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 8075 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 8076 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 8077 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 8078 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8079 flags |= 
FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 8080 8081 req->flags = cpu_to_le32(flags); 8082 return hwrm_req_send_silent(bp, req); 8083 } 8084 8085 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8086 { 8087 struct hwrm_func_cfg_input *req; 8088 u32 flags; 8089 8090 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 8091 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 8092 if (BNXT_NEW_RM(bp)) { 8093 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 8094 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 8095 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 8096 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 8097 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 8098 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 8099 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 8100 else 8101 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 8102 } 8103 8104 req->flags = cpu_to_le32(flags); 8105 return hwrm_req_send_silent(bp, req); 8106 } 8107 8108 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8109 { 8110 if (bp->hwrm_spec_code < 0x10801) 8111 return 0; 8112 8113 if (BNXT_PF(bp)) 8114 return bnxt_hwrm_check_pf_rings(bp, hwr); 8115 8116 return bnxt_hwrm_check_vf_rings(bp, hwr); 8117 } 8118 8119 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 8120 { 8121 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8122 struct hwrm_ring_aggint_qcaps_output *resp; 8123 struct hwrm_ring_aggint_qcaps_input *req; 8124 int rc; 8125 8126 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 8127 coal_cap->num_cmpl_dma_aggr_max = 63; 8128 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 8129 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 8130 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 8131 coal_cap->int_lat_tmr_min_max = 65535; 8132 coal_cap->int_lat_tmr_max_max = 65535; 8133 coal_cap->num_cmpl_aggr_int_max = 65535; 8134 coal_cap->timer_units = 80; 8135 8136 if (bp->hwrm_spec_code < 0x10902) 8137 return; 8138 8139 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) 8140 return; 8141 8142 resp = hwrm_req_hold(bp, req); 8143 rc = hwrm_req_send_silent(bp, req); 8144 if (!rc) { 8145 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 8146 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 8147 coal_cap->num_cmpl_dma_aggr_max = 8148 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 8149 coal_cap->num_cmpl_dma_aggr_during_int_max = 8150 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 8151 coal_cap->cmpl_aggr_dma_tmr_max = 8152 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 8153 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 8154 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 8155 coal_cap->int_lat_tmr_min_max = 8156 le16_to_cpu(resp->int_lat_tmr_min_max); 8157 coal_cap->int_lat_tmr_max_max = 8158 le16_to_cpu(resp->int_lat_tmr_max_max); 8159 coal_cap->num_cmpl_aggr_int_max = 8160 le16_to_cpu(resp->num_cmpl_aggr_int_max); 8161 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 8162 } 8163 hwrm_req_drop(bp, req); 8164 } 8165 8166 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 8167 { 8168 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8169 8170 return usec * 1000 / coal_cap->timer_units; 8171 } 8172 8173 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 8174 struct bnxt_coal *hw_coal, 8175 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8176 { 8177 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8178 u16 val, tmr, max, flags = hw_coal->flags; 8179 u32 cmpl_params = coal_cap->cmpl_params; 8180 8181 max = hw_coal->bufs_per_record * 128; 8182 if (hw_coal->budget) 8183 max = 
hw_coal->bufs_per_record * hw_coal->budget; 8184 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 8185 8186 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 8187 req->num_cmpl_aggr_int = cpu_to_le16(val); 8188 8189 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 8190 req->num_cmpl_dma_aggr = cpu_to_le16(val); 8191 8192 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 8193 coal_cap->num_cmpl_dma_aggr_during_int_max); 8194 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 8195 8196 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 8197 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 8198 req->int_lat_tmr_max = cpu_to_le16(tmr); 8199 8200 /* min timer set to 1/2 of interrupt timer */ 8201 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 8202 val = tmr / 2; 8203 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 8204 req->int_lat_tmr_min = cpu_to_le16(val); 8205 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 8206 } 8207 8208 /* buf timer set to 1/4 of interrupt timer */ 8209 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 8210 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 8211 8212 if (cmpl_params & 8213 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 8214 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 8215 val = clamp_t(u16, tmr, 1, 8216 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 8217 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 8218 req->enables |= 8219 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 8220 } 8221 8222 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 8223 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 8224 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 8225 req->flags = cpu_to_le16(flags); 8226 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 8227 } 8228 8229 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 8230 struct bnxt_coal *hw_coal) 8231 { 8232 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; 8233 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8234 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8235 u32 nq_params = coal_cap->nq_params; 8236 u16 tmr; 8237 int rc; 8238 8239 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 8240 return 0; 8241 8242 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8243 if (rc) 8244 return rc; 8245 8246 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 8247 req->flags = 8248 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 8249 8250 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 8251 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 8252 req->int_lat_tmr_min = cpu_to_le16(tmr); 8253 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 8254 return hwrm_req_send(bp, req); 8255 } 8256 8257 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 8258 { 8259 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; 8260 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8261 struct bnxt_coal coal; 8262 int rc; 8263 8264 /* Tick values in micro seconds. 8265 * 1 coal_buf x bufs_per_record = 1 completion record. 
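 * Start from the global RX coalescing settings and override only the
 * ticks and bufs with this ring's per-ring values before programming
 * the firmware.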
8266 */ 8267 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 8268 8269 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 8270 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 8271 8272 if (!bnapi->rx_ring) 8273 return -ENODEV; 8274 8275 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8276 if (rc) 8277 return rc; 8278 8279 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); 8280 8281 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 8282 8283 return hwrm_req_send(bp, req_rx); 8284 } 8285 8286 static int 8287 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 8288 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8289 { 8290 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 8291 8292 req->ring_id = cpu_to_le16(ring_id); 8293 return hwrm_req_send(bp, req); 8294 } 8295 8296 static int 8297 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 8298 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8299 { 8300 struct bnxt_tx_ring_info *txr; 8301 int i, rc; 8302 8303 bnxt_for_each_napi_tx(i, bnapi, txr) { 8304 u16 ring_id; 8305 8306 ring_id = bnxt_cp_ring_for_tx(bp, txr); 8307 req->ring_id = cpu_to_le16(ring_id); 8308 rc = hwrm_req_send(bp, req); 8309 if (rc) 8310 return rc; 8311 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8312 return 0; 8313 } 8314 return 0; 8315 } 8316 8317 int bnxt_hwrm_set_coal(struct bnxt *bp) 8318 { 8319 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; 8320 int i, rc; 8321 8322 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8323 if (rc) 8324 return rc; 8325 8326 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8327 if (rc) { 8328 hwrm_req_drop(bp, req_rx); 8329 return rc; 8330 } 8331 8332 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); 8333 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); 8334 8335 hwrm_req_hold(bp, req_rx); 8336 hwrm_req_hold(bp, req_tx); 8337 for (i = 0; i < bp->cp_nr_rings; i++) { 8338 struct bnxt_napi *bnapi = bp->bnapi[i]; 8339 struct bnxt_coal *hw_coal; 8340 8341 if (!bnapi->rx_ring) 8342 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 8343 else 8344 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); 8345 if (rc) 8346 break; 8347 8348 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8349 continue; 8350 8351 if (bnapi->rx_ring && bnapi->tx_ring[0]) { 8352 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 8353 if (rc) 8354 break; 8355 } 8356 if (bnapi->rx_ring) 8357 hw_coal = &bp->rx_coal; 8358 else 8359 hw_coal = &bp->tx_coal; 8360 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 8361 } 8362 hwrm_req_drop(bp, req_rx); 8363 hwrm_req_drop(bp, req_tx); 8364 return rc; 8365 } 8366 8367 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 8368 { 8369 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; 8370 struct hwrm_stat_ctx_free_input *req; 8371 int i; 8372 8373 if (!bp->bnapi) 8374 return; 8375 8376 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8377 return; 8378 8379 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) 8380 return; 8381 if (BNXT_FW_MAJ(bp) <= 20) { 8382 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { 8383 hwrm_req_drop(bp, req); 8384 return; 8385 } 8386 hwrm_req_hold(bp, req0); 8387 } 8388 hwrm_req_hold(bp, req); 8389 for (i = 0; i < bp->cp_nr_rings; i++) { 8390 struct bnxt_napi *bnapi = bp->bnapi[i]; 8391 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8392 8393 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 8394 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 8395 if (req0) { 8396 
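/* On older firmware (major version <= 20, where req0 was set up
 * above), clear the stats for this context before freeing it.
 */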
req0->stat_ctx_id = req->stat_ctx_id; 8397 hwrm_req_send(bp, req0); 8398 } 8399 hwrm_req_send(bp, req); 8400 8401 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 8402 } 8403 } 8404 hwrm_req_drop(bp, req); 8405 if (req0) 8406 hwrm_req_drop(bp, req0); 8407 } 8408 8409 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 8410 { 8411 struct hwrm_stat_ctx_alloc_output *resp; 8412 struct hwrm_stat_ctx_alloc_input *req; 8413 int rc, i; 8414 8415 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8416 return 0; 8417 8418 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); 8419 if (rc) 8420 return rc; 8421 8422 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 8423 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 8424 8425 resp = hwrm_req_hold(bp, req); 8426 for (i = 0; i < bp->cp_nr_rings; i++) { 8427 struct bnxt_napi *bnapi = bp->bnapi[i]; 8428 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8429 8430 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 8431 8432 rc = hwrm_req_send(bp, req); 8433 if (rc) 8434 break; 8435 8436 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 8437 8438 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 8439 } 8440 hwrm_req_drop(bp, req); 8441 return rc; 8442 } 8443 8444 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 8445 { 8446 struct hwrm_func_qcfg_output *resp; 8447 struct hwrm_func_qcfg_input *req; 8448 u16 flags; 8449 int rc; 8450 8451 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 8452 if (rc) 8453 return rc; 8454 8455 req->fid = cpu_to_le16(0xffff); 8456 resp = hwrm_req_hold(bp, req); 8457 rc = hwrm_req_send(bp, req); 8458 if (rc) 8459 goto func_qcfg_exit; 8460 8461 flags = le16_to_cpu(resp->flags); 8462 #ifdef CONFIG_BNXT_SRIOV 8463 if (BNXT_VF(bp)) { 8464 struct bnxt_vf_info *vf = &bp->vf; 8465 8466 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 8467 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF) 8468 vf->flags |= BNXT_VF_TRUST; 8469 else 8470 vf->flags &= ~BNXT_VF_TRUST; 8471 } else { 8472 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 8473 } 8474 #endif 8475 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 8476 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 8477 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 8478 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 8479 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 8480 } 8481 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 8482 bp->flags |= BNXT_FLAG_MULTI_HOST; 8483 8484 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 8485 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 8486 8487 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV) 8488 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV; 8489 8490 switch (resp->port_partition_type) { 8491 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 8492 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2: 8493 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 8494 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 8495 bp->port_partition_type = resp->port_partition_type; 8496 break; 8497 } 8498 if (bp->hwrm_spec_code < 0x10707 || 8499 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 8500 bp->br_mode = BRIDGE_MODE_VEB; 8501 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 8502 bp->br_mode = BRIDGE_MODE_VEPA; 8503 else 8504 bp->br_mode = BRIDGE_MODE_UNDEF; 8505 8506 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 8507 if (!bp->max_mtu) 8508 bp->max_mtu = BNXT_MAX_MTU; 8509 8510 if (bp->db_size) 8511 goto func_qcfg_exit; 8512 8513 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; 8514 if (BNXT_CHIP_P5(bp)) { 8515 if 
(BNXT_PF(bp)) 8516 bp->db_offset = DB_PF_OFFSET_P5; 8517 else 8518 bp->db_offset = DB_VF_OFFSET_P5; 8519 } 8520 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 8521 1024); 8522 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 8523 bp->db_size <= bp->db_offset) 8524 bp->db_size = pci_resource_len(bp->pdev, 2); 8525 8526 func_qcfg_exit: 8527 hwrm_req_drop(bp, req); 8528 return rc; 8529 } 8530 8531 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, 8532 u8 init_val, u8 init_offset, 8533 bool init_mask_set) 8534 { 8535 ctxm->init_value = init_val; 8536 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; 8537 if (init_mask_set) 8538 ctxm->init_offset = init_offset * 4; 8539 else 8540 ctxm->init_value = 0; 8541 } 8542 8543 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) 8544 { 8545 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8546 u16 type; 8547 8548 for (type = 0; type < ctx_max; type++) { 8549 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8550 int n = 1; 8551 8552 if (!ctxm->max_entries || ctxm->pg_info) 8553 continue; 8554 8555 if (ctxm->instance_bmap) 8556 n = hweight32(ctxm->instance_bmap); 8557 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); 8558 if (!ctxm->pg_info) 8559 return -ENOMEM; 8560 } 8561 return 0; 8562 } 8563 8564 static void bnxt_free_one_ctx_mem(struct bnxt *bp, 8565 struct bnxt_ctx_mem_type *ctxm, bool force); 8566 8567 #define BNXT_CTX_INIT_VALID(flags) \ 8568 (!!((flags) & \ 8569 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) 8570 8571 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) 8572 { 8573 struct hwrm_func_backing_store_qcaps_v2_output *resp; 8574 struct hwrm_func_backing_store_qcaps_v2_input *req; 8575 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8576 u16 type; 8577 int rc; 8578 8579 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); 8580 if (rc) 8581 return rc; 8582 8583 if (!ctx) { 8584 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8585 if (!ctx) 8586 return -ENOMEM; 8587 bp->ctx = ctx; 8588 } 8589 8590 resp = hwrm_req_hold(bp, req); 8591 8592 for (type = 0; type < BNXT_CTX_V2_MAX; ) { 8593 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8594 u8 init_val, init_off, i; 8595 u32 max_entries; 8596 u16 entry_size; 8597 __le32 *p; 8598 u32 flags; 8599 8600 req->type = cpu_to_le16(type); 8601 rc = hwrm_req_send(bp, req); 8602 if (rc) 8603 goto ctx_done; 8604 flags = le32_to_cpu(resp->flags); 8605 type = le16_to_cpu(resp->next_valid_type); 8606 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) { 8607 bnxt_free_one_ctx_mem(bp, ctxm, true); 8608 continue; 8609 } 8610 entry_size = le16_to_cpu(resp->entry_size); 8611 max_entries = le32_to_cpu(resp->max_num_entries); 8612 if (ctxm->mem_valid) { 8613 if (!(flags & BNXT_CTX_MEM_PERSIST) || 8614 ctxm->entry_size != entry_size || 8615 ctxm->max_entries != max_entries) 8616 bnxt_free_one_ctx_mem(bp, ctxm, true); 8617 else 8618 continue; 8619 } 8620 ctxm->type = le16_to_cpu(resp->type); 8621 ctxm->entry_size = entry_size; 8622 ctxm->flags = flags; 8623 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); 8624 ctxm->entry_multiple = resp->entry_multiple; 8625 ctxm->max_entries = max_entries; 8626 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); 8627 init_val = resp->ctx_init_value; 8628 init_off = resp->ctx_init_offset; 8629 bnxt_init_ctx_initializer(ctxm, init_val, init_off, 8630 BNXT_CTX_INIT_VALID(flags)); 8631 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, 8632 
BNXT_MAX_SPLIT_ENTRY); 8633 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; 8634 i++, p++) 8635 ctxm->split[i] = le32_to_cpu(*p); 8636 } 8637 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); 8638 8639 ctx_done: 8640 hwrm_req_drop(bp, req); 8641 return rc; 8642 } 8643 8644 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 8645 { 8646 struct hwrm_func_backing_store_qcaps_output *resp; 8647 struct hwrm_func_backing_store_qcaps_input *req; 8648 int rc; 8649 8650 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || 8651 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 8652 return 0; 8653 8654 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8655 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); 8656 8657 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); 8658 if (rc) 8659 return rc; 8660 8661 resp = hwrm_req_hold(bp, req); 8662 rc = hwrm_req_send_silent(bp, req); 8663 if (!rc) { 8664 struct bnxt_ctx_mem_type *ctxm; 8665 struct bnxt_ctx_mem_info *ctx; 8666 u8 init_val, init_idx = 0; 8667 u16 init_mask; 8668 8669 ctx = bp->ctx; 8670 if (!ctx) { 8671 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8672 if (!ctx) { 8673 rc = -ENOMEM; 8674 goto ctx_err; 8675 } 8676 bp->ctx = ctx; 8677 } 8678 init_val = resp->ctx_kind_initializer; 8679 init_mask = le16_to_cpu(resp->ctx_init_mask); 8680 8681 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8682 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); 8683 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 8684 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 8685 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); 8686 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); 8687 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, 8688 (init_mask & (1 << init_idx++)) != 0); 8689 8690 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8691 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 8692 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); 8693 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); 8694 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, 8695 (init_mask & (1 << init_idx++)) != 0); 8696 8697 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8698 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 8699 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); 8700 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); 8701 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, 8702 (init_mask & (1 << init_idx++)) != 0); 8703 8704 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8705 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); 8706 ctxm->max_entries = ctxm->vnic_entries + 8707 le16_to_cpu(resp->vnic_max_ring_table_entries); 8708 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); 8709 bnxt_init_ctx_initializer(ctxm, init_val, 8710 resp->vnic_init_offset, 8711 (init_mask & (1 << init_idx++)) != 0); 8712 8713 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8714 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); 8715 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); 8716 bnxt_init_ctx_initializer(ctxm, init_val, 8717 resp->stat_init_offset, 8718 (init_mask & (1 << init_idx++)) != 0); 8719 8720 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8721 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); 8722 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); 8723 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); 8724 ctxm->entry_multiple = resp->tqm_entries_multiple; 8725 if (!ctxm->entry_multiple) 8726 ctxm->entry_multiple = 1; 8727 
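/* Use the slow-path TQM parameters queried above as the template for
 * the fast-path TQM context type; the copy is redone further below
 * with the proper instance bitmap once tqm_fp_rings_count is known.
 */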
8728 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); 8729 8730 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8731 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); 8732 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); 8733 ctxm->mrav_num_entries_units = 8734 le16_to_cpu(resp->mrav_num_entries_units); 8735 bnxt_init_ctx_initializer(ctxm, init_val, 8736 resp->mrav_init_offset, 8737 (init_mask & (1 << init_idx++)) != 0); 8738 8739 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8740 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); 8741 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); 8742 8743 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 8744 if (!ctx->tqm_fp_rings_count) 8745 ctx->tqm_fp_rings_count = bp->max_q; 8746 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 8747 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 8748 8749 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8750 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); 8751 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; 8752 8753 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); 8754 } else { 8755 rc = 0; 8756 } 8757 ctx_err: 8758 hwrm_req_drop(bp, req); 8759 return rc; 8760 } 8761 8762 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 8763 __le64 *pg_dir) 8764 { 8765 if (!rmem->nr_pages) 8766 return; 8767 8768 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 8769 if (rmem->depth >= 1) { 8770 if (rmem->depth == 2) 8771 *pg_attr |= 2; 8772 else 8773 *pg_attr |= 1; 8774 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 8775 } else { 8776 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 8777 } 8778 } 8779 8780 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 8781 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 8782 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 8783 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 8784 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 8785 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 8786 8787 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 8788 { 8789 struct hwrm_func_backing_store_cfg_input *req; 8790 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8791 struct bnxt_ctx_pg_info *ctx_pg; 8792 struct bnxt_ctx_mem_type *ctxm; 8793 void **__req = (void **)&req; 8794 u32 req_len = sizeof(*req); 8795 __le32 *num_entries; 8796 __le64 *pg_dir; 8797 u32 flags = 0; 8798 u8 *pg_attr; 8799 u32 ena; 8800 int rc; 8801 int i; 8802 8803 if (!ctx) 8804 return 0; 8805 8806 if (req_len > bp->hwrm_max_ext_req_len) 8807 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 8808 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); 8809 if (rc) 8810 return rc; 8811 8812 req->enables = cpu_to_le32(enables); 8813 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 8814 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8815 ctx_pg = ctxm->pg_info; 8816 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); 8817 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); 8818 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); 8819 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); 8820 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8821 &req->qpc_pg_size_qpc_lvl, 8822 &req->qpc_page_dir); 8823 8824 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD) 8825 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); 8826 } 8827 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 8828 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8829 ctx_pg = ctxm->pg_info; 8830 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); 8831 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); 
8832 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); 8833 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8834 &req->srq_pg_size_srq_lvl, 8835 &req->srq_page_dir); 8836 } 8837 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 8838 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8839 ctx_pg = ctxm->pg_info; 8840 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); 8841 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); 8842 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); 8843 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8844 &req->cq_pg_size_cq_lvl, 8845 &req->cq_page_dir); 8846 } 8847 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 8848 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8849 ctx_pg = ctxm->pg_info; 8850 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); 8851 req->vnic_num_ring_table_entries = 8852 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries); 8853 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); 8854 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8855 &req->vnic_pg_size_vnic_lvl, 8856 &req->vnic_page_dir); 8857 } 8858 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 8859 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8860 ctx_pg = ctxm->pg_info; 8861 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); 8862 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); 8863 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8864 &req->stat_pg_size_stat_lvl, 8865 &req->stat_page_dir); 8866 } 8867 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 8868 u32 units; 8869 8870 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8871 ctx_pg = ctxm->pg_info; 8872 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); 8873 units = ctxm->mrav_num_entries_units; 8874 if (units) { 8875 u32 num_mr, num_ah = ctxm->mrav_av_entries; 8876 u32 entries; 8877 8878 num_mr = ctx_pg->entries - num_ah; 8879 entries = ((num_mr / units) << 16) | (num_ah / units); 8880 req->mrav_num_entries = cpu_to_le32(entries); 8881 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 8882 } 8883 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); 8884 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8885 &req->mrav_pg_size_mrav_lvl, 8886 &req->mrav_page_dir); 8887 } 8888 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 8889 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8890 ctx_pg = ctxm->pg_info; 8891 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); 8892 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); 8893 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8894 &req->tim_pg_size_tim_lvl, 8895 &req->tim_page_dir); 8896 } 8897 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8898 for (i = 0, num_entries = &req->tqm_sp_num_entries, 8899 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, 8900 pg_dir = &req->tqm_sp_page_dir, 8901 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, 8902 ctx_pg = ctxm->pg_info; 8903 i < BNXT_MAX_TQM_RINGS; 8904 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], 8905 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 8906 if (!(enables & ena)) 8907 continue; 8908 8909 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); 8910 *num_entries = cpu_to_le32(ctx_pg->entries); 8911 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 8912 } 8913 req->flags = cpu_to_le32(flags); 8914 return hwrm_req_send(bp, req); 8915 } 8916 8917 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 8918 struct bnxt_ctx_pg_info *ctx_pg) 8919 { 8920 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8921 8922 rmem->page_size = BNXT_PAGE_SIZE; 8923 rmem->pg_arr = ctx_pg->ctx_pg_arr; 8924 rmem->dma_arr = ctx_pg->ctx_dma_arr; 8925 rmem->flags = 
BNXT_RMEM_VALID_PTE_FLAG; 8926 if (rmem->depth >= 1) 8927 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 8928 return bnxt_alloc_ring(bp, rmem); 8929 } 8930 8931 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 8932 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 8933 u8 depth, struct bnxt_ctx_mem_type *ctxm) 8934 { 8935 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8936 int rc; 8937 8938 if (!mem_size) 8939 return -EINVAL; 8940 8941 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8942 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 8943 ctx_pg->nr_pages = 0; 8944 return -EINVAL; 8945 } 8946 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 8947 int nr_tbls, i; 8948 8949 rmem->depth = 2; 8950 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 8951 GFP_KERNEL); 8952 if (!ctx_pg->ctx_pg_tbl) 8953 return -ENOMEM; 8954 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 8955 rmem->nr_pages = nr_tbls; 8956 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8957 if (rc) 8958 return rc; 8959 for (i = 0; i < nr_tbls; i++) { 8960 struct bnxt_ctx_pg_info *pg_tbl; 8961 8962 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 8963 if (!pg_tbl) 8964 return -ENOMEM; 8965 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 8966 rmem = &pg_tbl->ring_mem; 8967 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 8968 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 8969 rmem->depth = 1; 8970 rmem->nr_pages = MAX_CTX_PAGES; 8971 rmem->ctx_mem = ctxm; 8972 if (i == (nr_tbls - 1)) { 8973 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 8974 8975 if (rem) 8976 rmem->nr_pages = rem; 8977 } 8978 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 8979 if (rc) 8980 break; 8981 } 8982 } else { 8983 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8984 if (rmem->nr_pages > 1 || depth) 8985 rmem->depth = 1; 8986 rmem->ctx_mem = ctxm; 8987 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8988 } 8989 return rc; 8990 } 8991 8992 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp, 8993 struct bnxt_ctx_pg_info *ctx_pg, 8994 void *buf, size_t offset, size_t head, 8995 size_t tail) 8996 { 8997 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8998 size_t nr_pages = ctx_pg->nr_pages; 8999 int page_size = rmem->page_size; 9000 size_t len = 0, total_len = 0; 9001 u16 depth = rmem->depth; 9002 9003 tail %= nr_pages * page_size; 9004 do { 9005 if (depth > 1) { 9006 int i = head / (page_size * MAX_CTX_PAGES); 9007 struct bnxt_ctx_pg_info *pg_tbl; 9008 9009 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 9010 rmem = &pg_tbl->ring_mem; 9011 } 9012 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail); 9013 head += len; 9014 offset += len; 9015 total_len += len; 9016 if (head >= nr_pages * page_size) 9017 head = 0; 9018 } while (head != tail); 9019 return total_len; 9020 } 9021 9022 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 9023 struct bnxt_ctx_pg_info *ctx_pg) 9024 { 9025 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 9026 9027 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 9028 ctx_pg->ctx_pg_tbl) { 9029 int i, nr_tbls = rmem->nr_pages; 9030 9031 for (i = 0; i < nr_tbls; i++) { 9032 struct bnxt_ctx_pg_info *pg_tbl; 9033 struct bnxt_ring_mem_info *rmem2; 9034 9035 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 9036 if (!pg_tbl) 9037 continue; 9038 rmem2 = &pg_tbl->ring_mem; 9039 bnxt_free_ring(bp, rmem2); 9040 ctx_pg->ctx_pg_arr[i] = NULL; 9041 kfree(pg_tbl); 9042 ctx_pg->ctx_pg_tbl[i] = NULL; 9043 } 9044 kfree(ctx_pg->ctx_pg_tbl); 9045 ctx_pg->ctx_pg_tbl = NULL; 9046 } 9047 bnxt_free_ring(bp, rmem); 9048 ctx_pg->nr_pages = 0; 9049 } 9050 9051 static int 
bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, 9052 struct bnxt_ctx_mem_type *ctxm, u32 entries, 9053 u8 pg_lvl) 9054 { 9055 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 9056 int i, rc = 0, n = 1; 9057 u32 mem_size; 9058 9059 if (!ctxm->entry_size || !ctx_pg) 9060 return -EINVAL; 9061 if (ctxm->instance_bmap) 9062 n = hweight32(ctxm->instance_bmap); 9063 if (ctxm->entry_multiple) 9064 entries = roundup(entries, ctxm->entry_multiple); 9065 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); 9066 mem_size = entries * ctxm->entry_size; 9067 for (i = 0; i < n && !rc; i++) { 9068 ctx_pg[i].entries = entries; 9069 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, 9070 ctxm->init_value ? ctxm : NULL); 9071 } 9072 if (!rc) 9073 ctxm->mem_valid = 1; 9074 return rc; 9075 } 9076 9077 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, 9078 struct bnxt_ctx_mem_type *ctxm, 9079 bool last) 9080 { 9081 struct hwrm_func_backing_store_cfg_v2_input *req; 9082 u32 instance_bmap = ctxm->instance_bmap; 9083 int i, j, rc = 0, n = 1; 9084 __le32 *p; 9085 9086 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) 9087 return 0; 9088 9089 if (instance_bmap) 9090 n = hweight32(ctxm->instance_bmap); 9091 else 9092 instance_bmap = 1; 9093 9094 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); 9095 if (rc) 9096 return rc; 9097 hwrm_req_hold(bp, req); 9098 req->type = cpu_to_le16(ctxm->type); 9099 req->entry_size = cpu_to_le16(ctxm->entry_size); 9100 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) && 9101 bnxt_bs_trace_avail(bp, ctxm->type)) { 9102 struct bnxt_bs_trace_info *bs_trace; 9103 u32 enables; 9104 9105 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET; 9106 req->enables = cpu_to_le32(enables); 9107 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]]; 9108 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset); 9109 } 9110 req->subtype_valid_cnt = ctxm->split_entry_cnt; 9111 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) 9112 p[i] = cpu_to_le32(ctxm->split[i]); 9113 for (i = 0, j = 0; j < n && !rc; i++) { 9114 struct bnxt_ctx_pg_info *ctx_pg; 9115 9116 if (!(instance_bmap & (1 << i))) 9117 continue; 9118 req->instance = cpu_to_le16(i); 9119 ctx_pg = &ctxm->pg_info[j++]; 9120 if (!ctx_pg->entries) 9121 continue; 9122 req->num_entries = cpu_to_le32(ctx_pg->entries); 9123 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 9124 &req->page_size_pbl_level, 9125 &req->page_dir); 9126 if (last && j == n) 9127 req->flags = 9128 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); 9129 rc = hwrm_req_send(bp, req); 9130 } 9131 hwrm_req_drop(bp, req); 9132 return rc; 9133 } 9134 9135 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) 9136 { 9137 struct bnxt_ctx_mem_info *ctx = bp->ctx; 9138 struct bnxt_ctx_mem_type *ctxm; 9139 u16 last_type = BNXT_CTX_INV; 9140 int rc = 0; 9141 u16 type; 9142 9143 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) { 9144 ctxm = &ctx->ctx_arr[type]; 9145 if (!bnxt_bs_trace_avail(bp, type)) 9146 continue; 9147 if (!ctxm->mem_valid) { 9148 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, 9149 ctxm->max_entries, 1); 9150 if (rc) { 9151 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n", 9152 type); 9153 continue; 9154 } 9155 bnxt_bs_trace_init(bp, ctxm); 9156 } 9157 last_type = type; 9158 } 9159 9160 if (last_type == BNXT_CTX_INV) { 9161 if (!ena) 9162 return 0; 9163 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) 9164 last_type = BNXT_CTX_MAX - 1; 9165 else 9166 
last_type = BNXT_CTX_L2_MAX - 1; 9167 } 9168 ctx->ctx_arr[last_type].last = 1; 9169 9170 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { 9171 ctxm = &ctx->ctx_arr[type]; 9172 9173 if (!ctxm->mem_valid) 9174 continue; 9175 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); 9176 if (rc) 9177 return rc; 9178 } 9179 return 0; 9180 } 9181 9182 /** 9183 * __bnxt_copy_ctx_mem - copy host context memory 9184 * @bp: The driver context 9185 * @ctxm: The pointer to the context memory type 9186 * @buf: The destination buffer or NULL to just obtain the length 9187 * @offset: The buffer offset to copy the data to 9188 * @head: The head offset of context memory to copy from 9189 * @tail: The tail offset (last byte + 1) of context memory to end the copy 9190 * 9191 * This function is called for debugging purposes to dump the host context 9192 * used by the chip. 9193 * 9194 * Return: Length of memory copied 9195 */ 9196 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp, 9197 struct bnxt_ctx_mem_type *ctxm, void *buf, 9198 size_t offset, size_t head, size_t tail) 9199 { 9200 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 9201 size_t len = 0, total_len = 0; 9202 int i, n = 1; 9203 9204 if (!ctx_pg) 9205 return 0; 9206 9207 if (ctxm->instance_bmap) 9208 n = hweight32(ctxm->instance_bmap); 9209 for (i = 0; i < n; i++) { 9210 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head, 9211 tail); 9212 offset += len; 9213 total_len += len; 9214 } 9215 return total_len; 9216 } 9217 9218 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm, 9219 void *buf, size_t offset) 9220 { 9221 size_t tail = ctxm->max_entries * ctxm->entry_size; 9222 9223 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail); 9224 } 9225 9226 static void bnxt_free_one_ctx_mem(struct bnxt *bp, 9227 struct bnxt_ctx_mem_type *ctxm, bool force) 9228 { 9229 struct bnxt_ctx_pg_info *ctx_pg; 9230 int i, n = 1; 9231 9232 ctxm->last = 0; 9233 9234 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST)) 9235 return; 9236 9237 ctx_pg = ctxm->pg_info; 9238 if (ctx_pg) { 9239 if (ctxm->instance_bmap) 9240 n = hweight32(ctxm->instance_bmap); 9241 for (i = 0; i < n; i++) 9242 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); 9243 9244 kfree(ctx_pg); 9245 ctxm->pg_info = NULL; 9246 ctxm->mem_valid = 0; 9247 } 9248 memset(ctxm, 0, sizeof(*ctxm)); 9249 } 9250 9251 void bnxt_free_ctx_mem(struct bnxt *bp, bool force) 9252 { 9253 struct bnxt_ctx_mem_info *ctx = bp->ctx; 9254 u16 type; 9255 9256 if (!ctx) 9257 return; 9258 9259 for (type = 0; type < BNXT_CTX_V2_MAX; type++) 9260 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force); 9261 9262 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 9263 if (force) { 9264 kfree(ctx); 9265 bp->ctx = NULL; 9266 } 9267 } 9268 9269 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 9270 { 9271 struct bnxt_ctx_mem_type *ctxm; 9272 struct bnxt_ctx_mem_info *ctx; 9273 u32 l2_qps, qp1_qps, max_qps; 9274 u32 ena, entries_sp, entries; 9275 u32 srqs, max_srqs, min; 9276 u32 num_mr, num_ah; 9277 u32 extra_srqs = 0; 9278 u32 extra_qps = 0; 9279 u32 fast_qpmd_qps; 9280 u8 pg_lvl = 1; 9281 int i, rc; 9282 9283 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 9284 if (rc) { 9285 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 9286 rc); 9287 return rc; 9288 } 9289 ctx = bp->ctx; 9290 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 9291 return 0; 9292 9293 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 9294 l2_qps = ctxm->qp_l2_entries; 9295 qp1_qps = ctxm->qp_qp1_entries; 9296 fast_qpmd_qps = 
ctxm->qp_fast_qpmd_entries; 9297 max_qps = ctxm->max_entries; 9298 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 9299 srqs = ctxm->srq_l2_entries; 9300 max_srqs = ctxm->max_entries; 9301 ena = 0; 9302 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 9303 pg_lvl = 2; 9304 if (BNXT_SW_RES_LMT(bp)) { 9305 extra_qps = max_qps - l2_qps - qp1_qps; 9306 extra_srqs = max_srqs - srqs; 9307 } else { 9308 extra_qps = min_t(u32, 65536, 9309 max_qps - l2_qps - qp1_qps); 9310 /* allocate extra qps if fw supports RoCE fast qp 9311 * destroy feature 9312 */ 9313 extra_qps += fast_qpmd_qps; 9314 extra_srqs = min_t(u32, 8192, max_srqs - srqs); 9315 } 9316 if (fast_qpmd_qps) 9317 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; 9318 } 9319 9320 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 9321 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 9322 pg_lvl); 9323 if (rc) 9324 return rc; 9325 9326 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 9327 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); 9328 if (rc) 9329 return rc; 9330 9331 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 9332 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + 9333 extra_qps * 2, pg_lvl); 9334 if (rc) 9335 return rc; 9336 9337 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 9338 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 9339 if (rc) 9340 return rc; 9341 9342 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 9343 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 9344 if (rc) 9345 return rc; 9346 9347 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 9348 goto skip_rdma; 9349 9350 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 9351 if (BNXT_SW_RES_LMT(bp) && 9352 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) { 9353 num_ah = ctxm->mrav_av_entries; 9354 num_mr = ctxm->max_entries - num_ah; 9355 } else { 9356 /* 128K extra is needed to accommodate static AH context 9357 * allocation by f/w. 
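 * Up to 256K entries are reserved for MRs plus up to 128K for AHs,
 * and the AH count is recorded as the MRAV AV split entry.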
9358 */ 9359 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); 9360 num_ah = min_t(u32, num_mr, 1024 * 128); 9361 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; 9362 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) 9363 ctxm->mrav_av_entries = num_ah; 9364 } 9365 9366 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); 9367 if (rc) 9368 return rc; 9369 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 9370 9371 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 9372 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); 9373 if (rc) 9374 return rc; 9375 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 9376 9377 skip_rdma: 9378 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 9379 min = ctxm->min_entries; 9380 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + 9381 2 * (extra_qps + qp1_qps) + min; 9382 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); 9383 if (rc) 9384 return rc; 9385 9386 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 9387 entries = l2_qps + 2 * (extra_qps + qp1_qps); 9388 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); 9389 if (rc) 9390 return rc; 9391 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 9392 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 9393 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 9394 9395 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 9396 rc = bnxt_backing_store_cfg_v2(bp, ena); 9397 else 9398 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 9399 if (rc) { 9400 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 9401 rc); 9402 return rc; 9403 } 9404 ctx->flags |= BNXT_CTX_FLAG_INITED; 9405 return 0; 9406 } 9407 9408 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp) 9409 { 9410 struct hwrm_dbg_crashdump_medium_cfg_input *req; 9411 u16 page_attr; 9412 int rc; 9413 9414 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) 9415 return 0; 9416 9417 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG); 9418 if (rc) 9419 return rc; 9420 9421 if (BNXT_PAGE_SIZE == 0x2000) 9422 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K; 9423 else if (BNXT_PAGE_SIZE == 0x10000) 9424 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K; 9425 else 9426 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K; 9427 req->pg_size_lvl = cpu_to_le16(page_attr | 9428 bp->fw_crash_mem->ring_mem.depth); 9429 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map); 9430 req->size = cpu_to_le32(bp->fw_crash_len); 9431 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR); 9432 return hwrm_req_send(bp, req); 9433 } 9434 9435 static void bnxt_free_crash_dump_mem(struct bnxt *bp) 9436 { 9437 if (bp->fw_crash_mem) { 9438 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); 9439 kfree(bp->fw_crash_mem); 9440 bp->fw_crash_mem = NULL; 9441 } 9442 } 9443 9444 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp) 9445 { 9446 u32 mem_size = 0; 9447 int rc; 9448 9449 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) 9450 return 0; 9451 9452 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size); 9453 if (rc) 9454 return rc; 9455 9456 mem_size = round_up(mem_size, 4); 9457 9458 /* keep and use the existing pages */ 9459 if (bp->fw_crash_mem && 9460 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE) 9461 goto alloc_done; 9462 9463 if (bp->fw_crash_mem) 9464 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); 9465 else 9466 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem), 9467 GFP_KERNEL); 9468 if (!bp->fw_crash_mem) 9469 return -ENOMEM; 9470 9471 rc = 
bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL); 9472 if (rc) { 9473 bnxt_free_crash_dump_mem(bp); 9474 return rc; 9475 } 9476 9477 alloc_done: 9478 bp->fw_crash_len = mem_size; 9479 return 0; 9480 } 9481 9482 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 9483 { 9484 struct hwrm_func_resource_qcaps_output *resp; 9485 struct hwrm_func_resource_qcaps_input *req; 9486 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9487 int rc; 9488 9489 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); 9490 if (rc) 9491 return rc; 9492 9493 req->fid = cpu_to_le16(0xffff); 9494 resp = hwrm_req_hold(bp, req); 9495 rc = hwrm_req_send_silent(bp, req); 9496 if (rc) 9497 goto hwrm_func_resc_qcaps_exit; 9498 9499 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 9500 if (!all) 9501 goto hwrm_func_resc_qcaps_exit; 9502 9503 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 9504 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 9505 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 9506 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 9507 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 9508 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 9509 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 9510 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 9511 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 9512 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 9513 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 9514 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 9515 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 9516 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 9517 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 9518 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 9519 9520 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 9521 u16 max_msix = le16_to_cpu(resp->max_msix); 9522 9523 hw_resc->max_nqs = max_msix; 9524 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 9525 } 9526 9527 if (BNXT_PF(bp)) { 9528 struct bnxt_pf_info *pf = &bp->pf; 9529 9530 pf->vf_resv_strategy = 9531 le16_to_cpu(resp->vf_reservation_strategy); 9532 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 9533 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 9534 } 9535 hwrm_func_resc_qcaps_exit: 9536 hwrm_req_drop(bp, req); 9537 return rc; 9538 } 9539 9540 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) 9541 { 9542 struct hwrm_port_mac_ptp_qcfg_output *resp; 9543 struct hwrm_port_mac_ptp_qcfg_input *req; 9544 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 9545 u8 flags; 9546 int rc; 9547 9548 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { 9549 rc = -ENODEV; 9550 goto no_ptp; 9551 } 9552 9553 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); 9554 if (rc) 9555 goto no_ptp; 9556 9557 req->port_id = cpu_to_le16(bp->pf.port_id); 9558 resp = hwrm_req_hold(bp, req); 9559 rc = hwrm_req_send(bp, req); 9560 if (rc) 9561 goto exit; 9562 9563 flags = resp->flags; 9564 if (BNXT_CHIP_P5_AND_MINUS(bp) && 9565 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { 9566 rc = -ENODEV; 9567 goto exit; 9568 } 9569 if (!ptp) { 9570 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 9571 if (!ptp) { 9572 rc = -ENOMEM; 9573 goto exit; 9574 } 9575 ptp->bp = bp; 9576 bp->ptp_cfg = ptp; 9577 } 9578 9579 if (flags & 9580 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK | 9581 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) { 9582 ptp->refclk_regs[0] = 
le32_to_cpu(resp->ts_ref_clock_reg_lower); 9583 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); 9584 } else if (BNXT_CHIP_P5(bp)) { 9585 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; 9586 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; 9587 } else { 9588 rc = -ENODEV; 9589 goto exit; 9590 } 9591 ptp->rtc_configured = 9592 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; 9593 rc = bnxt_ptp_init(bp); 9594 if (rc) 9595 netdev_warn(bp->dev, "PTP initialization failed.\n"); 9596 exit: 9597 hwrm_req_drop(bp, req); 9598 if (!rc) 9599 return 0; 9600 9601 no_ptp: 9602 bnxt_ptp_clear(bp); 9603 kfree(ptp); 9604 bp->ptp_cfg = NULL; 9605 return rc; 9606 } 9607 9608 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 9609 { 9610 struct hwrm_func_qcaps_output *resp; 9611 struct hwrm_func_qcaps_input *req; 9612 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9613 u32 flags, flags_ext, flags_ext2; 9614 int rc; 9615 9616 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); 9617 if (rc) 9618 return rc; 9619 9620 req->fid = cpu_to_le16(0xffff); 9621 resp = hwrm_req_hold(bp, req); 9622 rc = hwrm_req_send(bp, req); 9623 if (rc) 9624 goto hwrm_func_qcaps_exit; 9625 9626 flags = le32_to_cpu(resp->flags); 9627 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 9628 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 9629 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 9630 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 9631 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 9632 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 9633 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 9634 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 9635 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 9636 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 9637 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 9638 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 9639 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 9640 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 9641 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 9642 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 9643 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) 9644 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; 9645 9646 flags_ext = le32_to_cpu(resp->flags_ext); 9647 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 9648 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 9649 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) 9650 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; 9651 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) 9652 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; 9653 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) 9654 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; 9655 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) 9656 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; 9657 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED) 9658 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2; 9659 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED)) 9660 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP; 9661 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) 9662 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; 9663 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) 9664 bp->flags |= BNXT_FLAG_TX_COAL_CMPL; 9665 9666 flags_ext2 = le32_to_cpu(resp->flags_ext2); 9667 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) 9668 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; 9669 if (flags_ext2 & 
FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) 9670 bp->flags |= BNXT_FLAG_UDP_GSO_CAP; 9671 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) 9672 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; 9673 if (flags_ext2 & 9674 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED) 9675 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS; 9676 if (BNXT_PF(bp) && 9677 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED)) 9678 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED; 9679 9680 bp->tx_push_thresh = 0; 9681 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 9682 BNXT_FW_MAJ(bp) > 217) 9683 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 9684 9685 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 9686 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 9687 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 9688 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 9689 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 9690 if (!hw_resc->max_hw_ring_grps) 9691 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 9692 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 9693 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 9694 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 9695 9696 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); 9697 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); 9698 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 9699 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 9700 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 9701 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 9702 9703 if (BNXT_PF(bp)) { 9704 struct bnxt_pf_info *pf = &bp->pf; 9705 9706 pf->fw_fid = le16_to_cpu(resp->fid); 9707 pf->port_id = le16_to_cpu(resp->port_id); 9708 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 9709 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 9710 pf->max_vfs = le16_to_cpu(resp->max_vfs); 9711 bp->flags &= ~BNXT_FLAG_WOL_CAP; 9712 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 9713 bp->flags |= BNXT_FLAG_WOL_CAP; 9714 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { 9715 bp->fw_cap |= BNXT_FW_CAP_PTP; 9716 } else { 9717 bnxt_ptp_clear(bp); 9718 kfree(bp->ptp_cfg); 9719 bp->ptp_cfg = NULL; 9720 } 9721 } else { 9722 #ifdef CONFIG_BNXT_SRIOV 9723 struct bnxt_vf_info *vf = &bp->vf; 9724 9725 vf->fw_fid = le16_to_cpu(resp->fid); 9726 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 9727 #endif 9728 } 9729 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs); 9730 9731 hwrm_func_qcaps_exit: 9732 hwrm_req_drop(bp, req); 9733 return rc; 9734 } 9735 9736 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) 9737 { 9738 struct hwrm_dbg_qcaps_output *resp; 9739 struct hwrm_dbg_qcaps_input *req; 9740 int rc; 9741 9742 bp->fw_dbg_cap = 0; 9743 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) 9744 return; 9745 9746 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); 9747 if (rc) 9748 return; 9749 9750 req->fid = cpu_to_le16(0xffff); 9751 resp = hwrm_req_hold(bp, req); 9752 rc = hwrm_req_send(bp, req); 9753 if (rc) 9754 goto hwrm_dbg_qcaps_exit; 9755 9756 bp->fw_dbg_cap = le32_to_cpu(resp->flags); 9757 9758 hwrm_dbg_qcaps_exit: 9759 hwrm_req_drop(bp, req); 9760 } 9761 9762 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 9763 9764 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 9765 { 9766 int rc; 9767 9768 rc = __bnxt_hwrm_func_qcaps(bp); 9769 if (rc) 9770 return rc; 9771 9772 bnxt_hwrm_dbg_qcaps(bp); 
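	/* Descriptive note (added): finish capability discovery by querying
	 * the queue/port configuration; on HWRM spec 1.8.3+ also allocate
	 * backing store context memory and probe resource manager (RM)
	 * capabilities, as done by the code below.
	 */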
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
		return rc;
	}
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc)
			return rc;
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
	}
	return 0;
}

static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
{
	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
	struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
	u32 flags;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto hwrm_cfa_adv_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;

	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;

	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;

hwrm_cfa_adv_qcaps_exit:
	hwrm_req_drop(bp, req);
	return rc;
}

static int __bnxt_alloc_fw_health(struct bnxt *bp)
{
	if (bp->fw_health)
		return 0;

	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
	if (!bp->fw_health)
		return -ENOMEM;

	mutex_init(&bp->fw_health->lock);
	return 0;
}

static int bnxt_alloc_fw_health(struct bnxt *bp)
{
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	rc = __bnxt_alloc_fw_health(bp);
	if (rc) {
		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
		return rc;
	}

	return 0;
}

static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
{
	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
	       BNXT_GRCPF_REG_WINDOW_BASE_OUT +
	       BNXT_FW_HEALTH_WIN_MAP_OFF);
}

static void bnxt_inv_fw_health_reg(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_type;

	if (!fw_health)
		return;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
		fw_health->status_reliable = false;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
		fw_health->resets_reliable = false;
}

static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
{
	void __iomem *hs;
	u32 status_loc;
	u32 reg_type;
	u32 sig;

	if (bp->fw_health)
		bp->fw_health->status_reliable = false;

	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);

	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
		if (!bp->chip_num) {
			__bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
			bp->chip_num = readl(bp->bar0 +
					     BNXT_FW_HEALTH_WIN_BASE +
					     BNXT_GRC_REG_CHIP_NUM);
		}
		if (!BNXT_CHIP_P5_PLUS(bp))
			return;

		status_loc = BNXT_GRC_REG_STATUS_P5 |
			     BNXT_FW_HEALTH_REG_TYPE_BAR0;
	} else {
		status_loc = readl(hs + offsetof(struct hcomm_status,
						 fw_status_loc));
	}

	if (__bnxt_alloc_fw_health(bp)) {
		netdev_warn(bp->dev, "no memory for firmware status checks\n");
		return;
	}

	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
		__bnxt_map_fw_health_reg(bp, status_loc);
		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
			BNXT_FW_HEALTH_WIN_OFF(status_loc);
	}

	bp->fw_health->status_reliable = true;
}

static int bnxt_map_fw_health_regs(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_base = 0xffffffff;
	int i;

	bp->fw_health->status_reliable = false;
	bp->fw_health->resets_reliable = false;
	/* Only pre-map the monitoring GRC registers using window 3 */
	for (i = 0; i < 4; i++) {
		u32 reg = fw_health->regs[i];

		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
		if (reg_base == 0xffffffff)
			reg_base = reg & BNXT_GRC_BASE_MASK;
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			return -ERANGE;
		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
	}
	bp->fw_health->status_reliable = true;
	bp->fw_health->resets_reliable = true;
	if (reg_base == 0xffffffff)
		return 0;

	__bnxt_map_fw_health_reg(bp, reg_base);
	return 0;
}

static void bnxt_remap_fw_health_regs(struct bnxt *bp)
{
	if (!bp->fw_health)
		return;

	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
		bp->fw_health->status_reliable = true;
		bp->fw_health->resets_reliable = true;
	} else {
		bnxt_try_map_fw_health_reg(bp);
	}
}

static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_output *resp;
	struct hwrm_error_recovery_qcfg_input *req;
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
	if (rc)
		return rc;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32_to_cpu(resp->flags);
	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
		le32_to_cpu(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
		le32_to_cpu(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
		le32_to_cpu(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
		le32_to_cpu(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
		le32_to_cpu(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
		le32_to_cpu(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
		le32_to_cpu(resp->fw_reset_cnt_reg);
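	/* Descriptive note (added): the remaining recovery parameters follow
	 * below -- the reset-in-progress register and mask, plus the firmware
	 * reset register write sequence, whose count must stay below 16.
	 */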
fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 10011 le32_to_cpu(resp->reset_inprogress_reg); 10012 fw_health->fw_reset_inprog_reg_mask = 10013 le32_to_cpu(resp->reset_inprogress_reg_mask); 10014 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 10015 if (fw_health->fw_reset_seq_cnt >= 16) { 10016 rc = -EINVAL; 10017 goto err_recovery_out; 10018 } 10019 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 10020 fw_health->fw_reset_seq_regs[i] = 10021 le32_to_cpu(resp->reset_reg[i]); 10022 fw_health->fw_reset_seq_vals[i] = 10023 le32_to_cpu(resp->reset_reg_val[i]); 10024 fw_health->fw_reset_seq_delay_msec[i] = 10025 resp->delay_after_reset[i]; 10026 } 10027 err_recovery_out: 10028 hwrm_req_drop(bp, req); 10029 if (!rc) 10030 rc = bnxt_map_fw_health_regs(bp); 10031 if (rc) 10032 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 10033 return rc; 10034 } 10035 10036 static int bnxt_hwrm_func_reset(struct bnxt *bp) 10037 { 10038 struct hwrm_func_reset_input *req; 10039 int rc; 10040 10041 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); 10042 if (rc) 10043 return rc; 10044 10045 req->enables = 0; 10046 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); 10047 return hwrm_req_send(bp, req); 10048 } 10049 10050 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 10051 { 10052 struct hwrm_nvm_get_dev_info_output nvm_info; 10053 10054 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 10055 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 10056 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 10057 nvm_info.nvm_cfg_ver_upd); 10058 } 10059 10060 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 10061 { 10062 struct hwrm_queue_qportcfg_output *resp; 10063 struct hwrm_queue_qportcfg_input *req; 10064 u8 i, j, *qptr; 10065 bool no_rdma; 10066 int rc = 0; 10067 10068 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); 10069 if (rc) 10070 return rc; 10071 10072 resp = hwrm_req_hold(bp, req); 10073 rc = hwrm_req_send(bp, req); 10074 if (rc) 10075 goto qportcfg_exit; 10076 10077 if (!resp->max_configurable_queues) { 10078 rc = -EINVAL; 10079 goto qportcfg_exit; 10080 } 10081 bp->max_tc = resp->max_configurable_queues; 10082 bp->max_lltc = resp->max_configurable_lossless_queues; 10083 if (bp->max_tc > BNXT_MAX_QUEUE) 10084 bp->max_tc = BNXT_MAX_QUEUE; 10085 10086 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 10087 qptr = &resp->queue_id0; 10088 for (i = 0, j = 0; i < bp->max_tc; i++) { 10089 bp->q_info[j].queue_id = *qptr; 10090 bp->q_ids[i] = *qptr++; 10091 bp->q_info[j].queue_profile = *qptr++; 10092 bp->tc_to_qidx[j] = j; 10093 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 10094 (no_rdma && BNXT_PF(bp))) 10095 j++; 10096 } 10097 bp->max_q = bp->max_tc; 10098 bp->max_tc = max_t(u8, j, 1); 10099 10100 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 10101 bp->max_tc = 1; 10102 10103 if (bp->max_lltc > bp->max_tc) 10104 bp->max_lltc = bp->max_tc; 10105 10106 qportcfg_exit: 10107 hwrm_req_drop(bp, req); 10108 return rc; 10109 } 10110 10111 static int bnxt_hwrm_poll(struct bnxt *bp) 10112 { 10113 struct hwrm_ver_get_input *req; 10114 int rc; 10115 10116 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 10117 if (rc) 10118 return rc; 10119 10120 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 10121 req->hwrm_intf_min = HWRM_VERSION_MINOR; 10122 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 10123 10124 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); 10125 rc = hwrm_req_send(bp, req); 10126 return rc; 10127 } 10128 10129 static int bnxt_hwrm_ver_get(struct bnxt *bp) 10130 { 10131 struct 
hwrm_ver_get_output *resp; 10132 struct hwrm_ver_get_input *req; 10133 u16 fw_maj, fw_min, fw_bld, fw_rsv; 10134 u32 dev_caps_cfg, hwrm_ver; 10135 int rc, len, max_tmo_secs; 10136 10137 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 10138 if (rc) 10139 return rc; 10140 10141 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 10142 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 10143 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 10144 req->hwrm_intf_min = HWRM_VERSION_MINOR; 10145 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 10146 10147 resp = hwrm_req_hold(bp, req); 10148 rc = hwrm_req_send(bp, req); 10149 if (rc) 10150 goto hwrm_ver_get_exit; 10151 10152 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 10153 10154 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 10155 resp->hwrm_intf_min_8b << 8 | 10156 resp->hwrm_intf_upd_8b; 10157 if (resp->hwrm_intf_maj_8b < 1) { 10158 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 10159 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 10160 resp->hwrm_intf_upd_8b); 10161 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 10162 } 10163 10164 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 10165 HWRM_VERSION_UPDATE; 10166 10167 if (bp->hwrm_spec_code > hwrm_ver) 10168 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 10169 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 10170 HWRM_VERSION_UPDATE); 10171 else 10172 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 10173 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 10174 resp->hwrm_intf_upd_8b); 10175 10176 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 10177 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 10178 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 10179 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 10180 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 10181 len = FW_VER_STR_LEN; 10182 } else { 10183 fw_maj = resp->hwrm_fw_maj_8b; 10184 fw_min = resp->hwrm_fw_min_8b; 10185 fw_bld = resp->hwrm_fw_bld_8b; 10186 fw_rsv = resp->hwrm_fw_rsvd_8b; 10187 len = BC_HWRM_STR_LEN; 10188 } 10189 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 10190 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 10191 fw_rsv); 10192 10193 if (strlen(resp->active_pkg_name)) { 10194 int fw_ver_len = strlen(bp->fw_ver_str); 10195 10196 snprintf(bp->fw_ver_str + fw_ver_len, 10197 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 10198 resp->active_pkg_name); 10199 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 10200 } 10201 10202 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 10203 if (!bp->hwrm_cmd_timeout) 10204 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 10205 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; 10206 if (!bp->hwrm_cmd_max_timeout) 10207 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; 10208 max_tmo_secs = bp->hwrm_cmd_max_timeout / 1000; 10209 #ifdef CONFIG_DETECT_HUNG_TASK 10210 if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT || 10211 max_tmo_secs > CONFIG_DEFAULT_HUNG_TASK_TIMEOUT) { 10212 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog (kernel default %ds)\n", 10213 max_tmo_secs, CONFIG_DEFAULT_HUNG_TASK_TIMEOUT); 10214 } 10215 #endif 10216 10217 if (resp->hwrm_intf_maj_8b >= 1) { 10218 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 10219 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 10220 } 10221 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 10222 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 10223 10224 
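	/* Descriptive note (added): the code below records the chip
	 * identification and parses device capability flags (short commands,
	 * Kong mailbox channel, 64-bit flow handles, trusted VF and advanced
	 * flow management support).
	 */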
bp->chip_num = le16_to_cpu(resp->chip_num); 10225 bp->chip_rev = resp->chip_rev; 10226 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 10227 !resp->chip_metal) 10228 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 10229 10230 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 10231 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 10232 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 10233 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 10234 10235 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 10236 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 10237 10238 if (dev_caps_cfg & 10239 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 10240 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 10241 10242 if (dev_caps_cfg & 10243 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 10244 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 10245 10246 if (dev_caps_cfg & 10247 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 10248 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 10249 10250 hwrm_ver_get_exit: 10251 hwrm_req_drop(bp, req); 10252 return rc; 10253 } 10254 10255 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 10256 { 10257 struct hwrm_fw_set_time_input *req; 10258 struct tm tm; 10259 time64_t now = ktime_get_real_seconds(); 10260 int rc; 10261 10262 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 10263 bp->hwrm_spec_code < 0x10400) 10264 return -EOPNOTSUPP; 10265 10266 time64_to_tm(now, 0, &tm); 10267 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); 10268 if (rc) 10269 return rc; 10270 10271 req->year = cpu_to_le16(1900 + tm.tm_year); 10272 req->month = 1 + tm.tm_mon; 10273 req->day = tm.tm_mday; 10274 req->hour = tm.tm_hour; 10275 req->minute = tm.tm_min; 10276 req->second = tm.tm_sec; 10277 return hwrm_req_send(bp, req); 10278 } 10279 10280 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 10281 { 10282 u64 sw_tmp; 10283 10284 hw &= mask; 10285 sw_tmp = (*sw & ~mask) | hw; 10286 if (hw < (*sw & mask)) 10287 sw_tmp += mask + 1; 10288 WRITE_ONCE(*sw, sw_tmp); 10289 } 10290 10291 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 10292 int count, bool ignore_zero) 10293 { 10294 int i; 10295 10296 for (i = 0; i < count; i++) { 10297 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 10298 10299 if (ignore_zero && !hw) 10300 continue; 10301 10302 if (masks[i] == -1ULL) 10303 sw_stats[i] = hw; 10304 else 10305 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 10306 } 10307 } 10308 10309 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 10310 { 10311 if (!stats->hw_stats) 10312 return; 10313 10314 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 10315 stats->hw_masks, stats->len / 8, false); 10316 } 10317 10318 static void bnxt_accumulate_all_stats(struct bnxt *bp) 10319 { 10320 struct bnxt_stats_mem *ring0_stats; 10321 bool ignore_zero = false; 10322 int i; 10323 10324 /* Chip bug. Counter intermittently becomes 0. 
*/ 10325 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10326 ignore_zero = true; 10327 10328 for (i = 0; i < bp->cp_nr_rings; i++) { 10329 struct bnxt_napi *bnapi = bp->bnapi[i]; 10330 struct bnxt_cp_ring_info *cpr; 10331 struct bnxt_stats_mem *stats; 10332 10333 cpr = &bnapi->cp_ring; 10334 stats = &cpr->stats; 10335 if (!i) 10336 ring0_stats = stats; 10337 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 10338 ring0_stats->hw_masks, 10339 ring0_stats->len / 8, ignore_zero); 10340 } 10341 if (bp->flags & BNXT_FLAG_PORT_STATS) { 10342 struct bnxt_stats_mem *stats = &bp->port_stats; 10343 __le64 *hw_stats = stats->hw_stats; 10344 u64 *sw_stats = stats->sw_stats; 10345 u64 *masks = stats->hw_masks; 10346 int cnt; 10347 10348 cnt = sizeof(struct rx_port_stats) / 8; 10349 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 10350 10351 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10352 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10353 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10354 cnt = sizeof(struct tx_port_stats) / 8; 10355 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 10356 } 10357 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 10358 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 10359 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 10360 } 10361 } 10362 10363 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 10364 { 10365 struct hwrm_port_qstats_input *req; 10366 struct bnxt_pf_info *pf = &bp->pf; 10367 int rc; 10368 10369 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 10370 return 0; 10371 10372 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 10373 return -EOPNOTSUPP; 10374 10375 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); 10376 if (rc) 10377 return rc; 10378 10379 req->flags = flags; 10380 req->port_id = cpu_to_le16(pf->port_id); 10381 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 10382 BNXT_TX_PORT_STATS_BYTE_OFFSET); 10383 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 10384 return hwrm_req_send(bp, req); 10385 } 10386 10387 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 10388 { 10389 struct hwrm_queue_pri2cos_qcfg_output *resp_qc; 10390 struct hwrm_queue_pri2cos_qcfg_input *req_qc; 10391 struct hwrm_port_qstats_ext_output *resp_qs; 10392 struct hwrm_port_qstats_ext_input *req_qs; 10393 struct bnxt_pf_info *pf = &bp->pf; 10394 u32 tx_stat_size; 10395 int rc; 10396 10397 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 10398 return 0; 10399 10400 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 10401 return -EOPNOTSUPP; 10402 10403 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); 10404 if (rc) 10405 return rc; 10406 10407 req_qs->flags = flags; 10408 req_qs->port_id = cpu_to_le16(pf->port_id); 10409 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 10410 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 10411 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 
10412 sizeof(struct tx_port_stats_ext) : 0; 10413 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); 10414 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); 10415 resp_qs = hwrm_req_hold(bp, req_qs); 10416 rc = hwrm_req_send(bp, req_qs); 10417 if (!rc) { 10418 bp->fw_rx_stats_ext_size = 10419 le16_to_cpu(resp_qs->rx_stat_size) / 8; 10420 if (BNXT_FW_MAJ(bp) < 220 && 10421 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) 10422 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; 10423 10424 bp->fw_tx_stats_ext_size = tx_stat_size ? 10425 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; 10426 } else { 10427 bp->fw_rx_stats_ext_size = 0; 10428 bp->fw_tx_stats_ext_size = 0; 10429 } 10430 hwrm_req_drop(bp, req_qs); 10431 10432 if (flags) 10433 return rc; 10434 10435 if (bp->fw_tx_stats_ext_size <= 10436 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { 10437 bp->pri2cos_valid = 0; 10438 return rc; 10439 } 10440 10441 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); 10442 if (rc) 10443 return rc; 10444 10445 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); 10446 10447 resp_qc = hwrm_req_hold(bp, req_qc); 10448 rc = hwrm_req_send(bp, req_qc); 10449 if (!rc) { 10450 u8 *pri2cos; 10451 int i, j; 10452 10453 pri2cos = &resp_qc->pri0_cos_queue_id; 10454 for (i = 0; i < 8; i++) { 10455 u8 queue_id = pri2cos[i]; 10456 u8 queue_idx; 10457 10458 /* Per port queue IDs start from 0, 10, 20, etc */ 10459 queue_idx = queue_id % 10; 10460 if (queue_idx > BNXT_MAX_QUEUE) { 10461 bp->pri2cos_valid = false; 10462 hwrm_req_drop(bp, req_qc); 10463 return rc; 10464 } 10465 for (j = 0; j < bp->max_q; j++) { 10466 if (bp->q_ids[j] == queue_id) 10467 bp->pri2cos_idx[i] = queue_idx; 10468 } 10469 } 10470 bp->pri2cos_valid = true; 10471 } 10472 hwrm_req_drop(bp, req_qc); 10473 10474 return rc; 10475 } 10476 10477 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) 10478 { 10479 bnxt_hwrm_tunnel_dst_port_free(bp, 10480 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 10481 bnxt_hwrm_tunnel_dst_port_free(bp, 10482 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); 10483 } 10484 10485 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) 10486 { 10487 int rc, i; 10488 u32 tpa_flags = 0; 10489 10490 if (set_tpa) 10491 tpa_flags = bp->flags & BNXT_FLAG_TPA; 10492 else if (BNXT_NO_FW_ACCESS(bp)) 10493 return 0; 10494 for (i = 0; i < bp->nr_vnics; i++) { 10495 rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); 10496 if (rc) { 10497 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", 10498 i, rc); 10499 return rc; 10500 } 10501 } 10502 return 0; 10503 } 10504 10505 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) 10506 { 10507 int i; 10508 10509 for (i = 0; i < bp->nr_vnics; i++) 10510 bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); 10511 } 10512 10513 static void bnxt_clear_vnic(struct bnxt *bp) 10514 { 10515 if (!bp->vnic_info) 10516 return; 10517 10518 bnxt_hwrm_clear_vnic_filter(bp); 10519 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { 10520 /* clear all RSS setting before free vnic ctx */ 10521 bnxt_hwrm_clear_vnic_rss(bp); 10522 bnxt_hwrm_vnic_ctx_free(bp); 10523 } 10524 /* before free the vnic, undo the vnic tpa settings */ 10525 if (bp->flags & BNXT_FLAG_TPA) 10526 bnxt_set_tpa(bp, false); 10527 bnxt_hwrm_vnic_free(bp); 10528 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10529 bnxt_hwrm_vnic_ctx_free(bp); 10530 } 10531 10532 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 10533 bool irq_re_init) 
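/* Descriptive note (added): teardown order below is vnic filters and
 * contexts first, then rings and ring groups; stat contexts and tunnel
 * ports are freed only when the IRQs are being re-initialized.
 */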
10534 { 10535 bnxt_clear_vnic(bp); 10536 bnxt_hwrm_ring_free(bp, close_path); 10537 bnxt_hwrm_ring_grp_free(bp); 10538 if (irq_re_init) { 10539 bnxt_hwrm_stat_ctx_free(bp); 10540 bnxt_hwrm_free_tunnel_ports(bp); 10541 } 10542 } 10543 10544 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 10545 { 10546 struct hwrm_func_cfg_input *req; 10547 u8 evb_mode; 10548 int rc; 10549 10550 if (br_mode == BRIDGE_MODE_VEB) 10551 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 10552 else if (br_mode == BRIDGE_MODE_VEPA) 10553 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 10554 else 10555 return -EINVAL; 10556 10557 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 10558 if (rc) 10559 return rc; 10560 10561 req->fid = cpu_to_le16(0xffff); 10562 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 10563 req->evb_mode = evb_mode; 10564 return hwrm_req_send(bp, req); 10565 } 10566 10567 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 10568 { 10569 struct hwrm_func_cfg_input *req; 10570 int rc; 10571 10572 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 10573 return 0; 10574 10575 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 10576 if (rc) 10577 return rc; 10578 10579 req->fid = cpu_to_le16(0xffff); 10580 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 10581 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 10582 if (size == 128) 10583 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 10584 10585 return hwrm_req_send(bp, req); 10586 } 10587 10588 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10589 { 10590 int rc; 10591 10592 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 10593 goto skip_rss_ctx; 10594 10595 /* allocate context for vnic */ 10596 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 10597 if (rc) { 10598 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 10599 vnic->vnic_id, rc); 10600 goto vnic_setup_err; 10601 } 10602 bp->rsscos_nr_ctxs++; 10603 10604 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10605 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); 10606 if (rc) { 10607 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 10608 vnic->vnic_id, rc); 10609 goto vnic_setup_err; 10610 } 10611 bp->rsscos_nr_ctxs++; 10612 } 10613 10614 skip_rss_ctx: 10615 /* configure default vnic, ring grp */ 10616 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10617 if (rc) { 10618 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 10619 vnic->vnic_id, rc); 10620 goto vnic_setup_err; 10621 } 10622 10623 /* Enable RSS hashing on vnic */ 10624 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); 10625 if (rc) { 10626 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 10627 vnic->vnic_id, rc); 10628 goto vnic_setup_err; 10629 } 10630 10631 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 10632 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 10633 if (rc) { 10634 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 10635 vnic->vnic_id, rc); 10636 } 10637 } 10638 10639 vnic_setup_err: 10640 return rc; 10641 } 10642 10643 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, 10644 u8 valid) 10645 { 10646 struct hwrm_vnic_update_input *req; 10647 int rc; 10648 10649 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE); 10650 if (rc) 10651 return rc; 10652 10653 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 10654 10655 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID) 10656 req->mru = cpu_to_le16(vnic->mru); 10657 10658 req->enables = cpu_to_le32(valid); 10659 10660 return hwrm_req_send(bp, req); 10661 } 10662 10663 int 
bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10664 { 10665 int rc; 10666 10667 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); 10668 if (rc) { 10669 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 10670 vnic->vnic_id, rc); 10671 return rc; 10672 } 10673 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10674 if (rc) 10675 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 10676 vnic->vnic_id, rc); 10677 return rc; 10678 } 10679 10680 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10681 { 10682 int rc, i, nr_ctxs; 10683 10684 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 10685 for (i = 0; i < nr_ctxs; i++) { 10686 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); 10687 if (rc) { 10688 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 10689 vnic->vnic_id, i, rc); 10690 break; 10691 } 10692 bp->rsscos_nr_ctxs++; 10693 } 10694 if (i < nr_ctxs) 10695 return -ENOMEM; 10696 10697 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); 10698 if (rc) 10699 return rc; 10700 10701 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 10702 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 10703 if (rc) { 10704 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 10705 vnic->vnic_id, rc); 10706 } 10707 } 10708 return rc; 10709 } 10710 10711 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10712 { 10713 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10714 return __bnxt_setup_vnic_p5(bp, vnic); 10715 else 10716 return __bnxt_setup_vnic(bp, vnic); 10717 } 10718 10719 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, 10720 struct bnxt_vnic_info *vnic, 10721 u16 start_rx_ring_idx, int rx_rings) 10722 { 10723 int rc; 10724 10725 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); 10726 if (rc) { 10727 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 10728 vnic->vnic_id, rc); 10729 return rc; 10730 } 10731 return bnxt_setup_vnic(bp, vnic); 10732 } 10733 10734 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 10735 { 10736 struct bnxt_vnic_info *vnic; 10737 int i, rc = 0; 10738 10739 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 10740 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 10741 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); 10742 } 10743 10744 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10745 return 0; 10746 10747 for (i = 0; i < bp->rx_nr_rings; i++) { 10748 u16 vnic_id = i + 1; 10749 u16 ring_id = i; 10750 10751 if (vnic_id >= bp->nr_vnics) 10752 break; 10753 10754 vnic = &bp->vnic_info[vnic_id]; 10755 vnic->flags |= BNXT_VNIC_RFS_FLAG; 10756 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 10757 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 10758 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) 10759 break; 10760 } 10761 return rc; 10762 } 10763 10764 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, 10765 bool all) 10766 { 10767 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10768 struct bnxt_filter_base *usr_fltr, *tmp; 10769 struct bnxt_ntuple_filter *ntp_fltr; 10770 int i; 10771 10772 if (netif_running(bp->dev)) { 10773 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); 10774 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { 10775 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) 10776 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); 10777 } 10778 } 10779 if (!all) 10780 return; 10781 10782 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 10783 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && 10784 usr_fltr->fw_vnic_id == rss_ctx->index) { 10785 ntp_fltr = 
container_of(usr_fltr, 10786 struct bnxt_ntuple_filter, 10787 base); 10788 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); 10789 bnxt_del_ntp_filter(bp, ntp_fltr); 10790 bnxt_del_one_usr_fltr(bp, usr_fltr); 10791 } 10792 } 10793 10794 if (vnic->rss_table) 10795 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, 10796 vnic->rss_table, 10797 vnic->rss_table_dma_addr); 10798 bp->num_rss_ctx--; 10799 } 10800 10801 static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic, 10802 int rxr_id) 10803 { 10804 u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 10805 int i, vnic_rx; 10806 10807 /* Ntuple VNIC always has all the rx rings. Any change of ring id 10808 * must be updated because a future filter may use it. 10809 */ 10810 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) 10811 return true; 10812 10813 for (i = 0; i < tbl_size; i++) { 10814 if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) 10815 vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; 10816 else 10817 vnic_rx = bp->rss_indir_tbl[i]; 10818 10819 if (rxr_id == vnic_rx) 10820 return true; 10821 } 10822 10823 return false; 10824 } 10825 10826 static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic, 10827 u16 mru, int rxr_id) 10828 { 10829 int rc; 10830 10831 if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id)) 10832 return 0; 10833 10834 if (mru) { 10835 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); 10836 if (rc) { 10837 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 10838 vnic->vnic_id, rc); 10839 return rc; 10840 } 10841 } 10842 vnic->mru = mru; 10843 bnxt_hwrm_vnic_update(bp, vnic, 10844 VNIC_UPDATE_REQ_ENABLES_MRU_VALID); 10845 10846 return 0; 10847 } 10848 10849 static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id) 10850 { 10851 struct ethtool_rxfh_context *ctx; 10852 unsigned long context; 10853 int rc; 10854 10855 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 10856 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10857 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10858 10859 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id); 10860 if (rc) 10861 return rc; 10862 } 10863 10864 return 0; 10865 } 10866 10867 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) 10868 { 10869 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); 10870 struct ethtool_rxfh_context *ctx; 10871 unsigned long context; 10872 10873 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 10874 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10875 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10876 10877 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || 10878 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || 10879 __bnxt_setup_vnic_p5(bp, vnic)) { 10880 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", 10881 rss_ctx->index); 10882 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 10883 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index); 10884 } 10885 } 10886 } 10887 10888 static void bnxt_clear_rss_ctxs(struct bnxt *bp) 10889 { 10890 struct ethtool_rxfh_context *ctx; 10891 unsigned long context; 10892 10893 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 10894 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10895 10896 bnxt_del_one_rss_ctx(bp, rss_ctx, false); 10897 } 10898 } 10899 10900 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 10901 static bool bnxt_promisc_ok(struct bnxt *bp) 10902 { 10903 #ifdef CONFIG_BNXT_SRIOV 10904 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 10905 
return false; 10906 #endif 10907 return true; 10908 } 10909 10910 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 10911 { 10912 struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; 10913 unsigned int rc = 0; 10914 10915 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); 10916 if (rc) { 10917 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10918 rc); 10919 return rc; 10920 } 10921 10922 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10923 if (rc) { 10924 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10925 rc); 10926 return rc; 10927 } 10928 return rc; 10929 } 10930 10931 static int bnxt_cfg_rx_mode(struct bnxt *); 10932 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 10933 10934 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 10935 { 10936 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 10937 int rc = 0; 10938 unsigned int rx_nr_rings = bp->rx_nr_rings; 10939 10940 if (irq_re_init) { 10941 rc = bnxt_hwrm_stat_ctx_alloc(bp); 10942 if (rc) { 10943 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 10944 rc); 10945 goto err_out; 10946 } 10947 } 10948 10949 rc = bnxt_hwrm_ring_alloc(bp); 10950 if (rc) { 10951 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 10952 goto err_out; 10953 } 10954 10955 rc = bnxt_hwrm_ring_grp_alloc(bp); 10956 if (rc) { 10957 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 10958 goto err_out; 10959 } 10960 10961 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 10962 rx_nr_rings--; 10963 10964 /* default vnic 0 */ 10965 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); 10966 if (rc) { 10967 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 10968 goto err_out; 10969 } 10970 10971 if (BNXT_VF(bp)) 10972 bnxt_hwrm_func_qcfg(bp); 10973 10974 rc = bnxt_setup_vnic(bp, vnic); 10975 if (rc) 10976 goto err_out; 10977 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 10978 bnxt_hwrm_update_rss_hash_cfg(bp); 10979 10980 if (bp->flags & BNXT_FLAG_RFS) { 10981 rc = bnxt_alloc_rfs_vnics(bp); 10982 if (rc) 10983 goto err_out; 10984 } 10985 10986 if (bp->flags & BNXT_FLAG_TPA) { 10987 rc = bnxt_set_tpa(bp, true); 10988 if (rc) 10989 goto err_out; 10990 } 10991 10992 if (BNXT_VF(bp)) 10993 bnxt_update_vf_mac(bp); 10994 10995 /* Filter for default vnic 0 */ 10996 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 10997 if (rc) { 10998 if (BNXT_VF(bp) && rc == -ENODEV) 10999 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); 11000 else 11001 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 11002 goto err_out; 11003 } 11004 vnic->uc_filter_count = 1; 11005 11006 vnic->rx_mask = 0; 11007 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 11008 goto skip_rx_mask; 11009 11010 if (bp->dev->flags & IFF_BROADCAST) 11011 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 11012 11013 if (bp->dev->flags & IFF_PROMISC) 11014 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 11015 11016 if (bp->dev->flags & IFF_ALLMULTI) { 11017 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 11018 vnic->mc_list_count = 0; 11019 } else if (bp->dev->flags & IFF_MULTICAST) { 11020 u32 mask = 0; 11021 11022 bnxt_mc_list_updated(bp, &mask); 11023 vnic->rx_mask |= mask; 11024 } 11025 11026 rc = bnxt_cfg_rx_mode(bp); 11027 if (rc) 11028 goto err_out; 11029 11030 skip_rx_mask: 11031 rc = bnxt_hwrm_set_coal(bp); 11032 if (rc) 11033 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 11034 rc); 11035 11036 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 
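		/* Descriptive note (added): Nitro A0 needs a second vnic on
		 * the last RX ring, which was excluded from the default vnic
		 * above.
		 */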
11037 rc = bnxt_setup_nitroa0_vnic(bp); 11038 if (rc) 11039 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 11040 rc); 11041 } 11042 11043 if (BNXT_VF(bp)) { 11044 bnxt_hwrm_func_qcfg(bp); 11045 netdev_update_features(bp->dev); 11046 } 11047 11048 return 0; 11049 11050 err_out: 11051 bnxt_hwrm_resource_free(bp, 0, true); 11052 11053 return rc; 11054 } 11055 11056 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 11057 { 11058 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 11059 return 0; 11060 } 11061 11062 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 11063 { 11064 bnxt_init_cp_rings(bp); 11065 bnxt_init_rx_rings(bp); 11066 bnxt_init_tx_rings(bp); 11067 bnxt_init_ring_grps(bp, irq_re_init); 11068 bnxt_init_vnics(bp); 11069 11070 return bnxt_init_chip(bp, irq_re_init); 11071 } 11072 11073 static int bnxt_set_real_num_queues(struct bnxt *bp) 11074 { 11075 int rc; 11076 struct net_device *dev = bp->dev; 11077 11078 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 11079 bp->tx_nr_rings_xdp); 11080 if (rc) 11081 return rc; 11082 11083 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 11084 if (rc) 11085 return rc; 11086 11087 #ifdef CONFIG_RFS_ACCEL 11088 if (bp->flags & BNXT_FLAG_RFS) 11089 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 11090 #endif 11091 11092 return rc; 11093 } 11094 11095 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 11096 bool shared) 11097 { 11098 int _rx = *rx, _tx = *tx; 11099 11100 if (shared) { 11101 *rx = min_t(int, _rx, max); 11102 *tx = min_t(int, _tx, max); 11103 } else { 11104 if (max < 2) 11105 return -ENOMEM; 11106 11107 while (_rx + _tx > max) { 11108 if (_rx > _tx && _rx > 1) 11109 _rx--; 11110 else if (_tx > 1) 11111 _tx--; 11112 } 11113 *rx = _rx; 11114 *tx = _tx; 11115 } 11116 return 0; 11117 } 11118 11119 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) 11120 { 11121 return (tx - tx_xdp) / tx_sets + tx_xdp; 11122 } 11123 11124 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 11125 { 11126 int tcs = bp->num_tc; 11127 11128 if (!tcs) 11129 tcs = 1; 11130 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); 11131 } 11132 11133 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 11134 { 11135 int tcs = bp->num_tc; 11136 11137 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 11138 bp->tx_nr_rings_xdp; 11139 } 11140 11141 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 11142 bool sh) 11143 { 11144 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); 11145 11146 if (tx_cp != *tx) { 11147 int tx_saved = tx_cp, rc; 11148 11149 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); 11150 if (rc) 11151 return rc; 11152 if (tx_cp != tx_saved) 11153 *tx = bnxt_num_cp_to_tx(bp, tx_cp); 11154 return 0; 11155 } 11156 return __bnxt_trim_rings(bp, rx, tx, max, sh); 11157 } 11158 11159 static void bnxt_setup_msix(struct bnxt *bp) 11160 { 11161 const int len = sizeof(bp->irq_tbl[0].name); 11162 struct net_device *dev = bp->dev; 11163 int tcs, i; 11164 11165 tcs = bp->num_tc; 11166 if (tcs) { 11167 int i, off, count; 11168 11169 for (i = 0; i < tcs; i++) { 11170 count = bp->tx_nr_rings_per_tc; 11171 off = BNXT_TC_TO_RING_BASE(bp, i); 11172 netdev_set_tc_queue(dev, i, count, off); 11173 } 11174 } 11175 11176 for (i = 0; i < bp->cp_nr_rings; i++) { 11177 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11178 char *attr; 11179 11180 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 11181 attr = "TxRx"; 11182 else if (i < bp->rx_nr_rings) 11183 attr = 
"rx"; 11184 else 11185 attr = "tx"; 11186 11187 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 11188 attr, i); 11189 bp->irq_tbl[map_idx].handler = bnxt_msix; 11190 } 11191 } 11192 11193 static int bnxt_init_int_mode(struct bnxt *bp); 11194 11195 static int bnxt_change_msix(struct bnxt *bp, int total) 11196 { 11197 struct msi_map map; 11198 int i; 11199 11200 /* add MSIX to the end if needed */ 11201 for (i = bp->total_irqs; i < total; i++) { 11202 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL); 11203 if (map.index < 0) 11204 return bp->total_irqs; 11205 bp->irq_tbl[i].vector = map.virq; 11206 bp->total_irqs++; 11207 } 11208 11209 /* trim MSIX from the end if needed */ 11210 for (i = bp->total_irqs; i > total; i--) { 11211 map.index = i - 1; 11212 map.virq = bp->irq_tbl[i - 1].vector; 11213 pci_msix_free_irq(bp->pdev, map); 11214 bp->total_irqs--; 11215 } 11216 return bp->total_irqs; 11217 } 11218 11219 static int bnxt_setup_int_mode(struct bnxt *bp) 11220 { 11221 int rc; 11222 11223 if (!bp->irq_tbl) { 11224 rc = bnxt_init_int_mode(bp); 11225 if (rc || !bp->irq_tbl) 11226 return rc ?: -ENODEV; 11227 } 11228 11229 bnxt_setup_msix(bp); 11230 11231 rc = bnxt_set_real_num_queues(bp); 11232 return rc; 11233 } 11234 11235 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 11236 { 11237 return bp->hw_resc.max_rsscos_ctxs; 11238 } 11239 11240 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 11241 { 11242 return bp->hw_resc.max_vnics; 11243 } 11244 11245 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 11246 { 11247 return bp->hw_resc.max_stat_ctxs; 11248 } 11249 11250 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 11251 { 11252 return bp->hw_resc.max_cp_rings; 11253 } 11254 11255 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 11256 { 11257 unsigned int cp = bp->hw_resc.max_cp_rings; 11258 11259 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 11260 cp -= bnxt_get_ulp_msix_num(bp); 11261 11262 return cp; 11263 } 11264 11265 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 11266 { 11267 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11268 11269 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11270 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 11271 11272 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 11273 } 11274 11275 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 11276 { 11277 bp->hw_resc.max_irqs = max_irqs; 11278 } 11279 11280 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 11281 { 11282 unsigned int cp; 11283 11284 cp = bnxt_get_max_func_cp_rings_for_en(bp); 11285 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11286 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 11287 else 11288 return cp - bp->cp_nr_rings; 11289 } 11290 11291 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 11292 { 11293 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 11294 } 11295 11296 static int bnxt_get_avail_msix(struct bnxt *bp, int num) 11297 { 11298 int max_irq = bnxt_get_max_func_irqs(bp); 11299 int total_req = bp->cp_nr_rings + num; 11300 11301 if (max_irq < total_req) { 11302 num = max_irq - bp->cp_nr_rings; 11303 if (num <= 0) 11304 return 0; 11305 } 11306 return num; 11307 } 11308 11309 static int bnxt_get_num_msix(struct bnxt *bp) 11310 { 11311 if (!BNXT_NEW_RM(bp)) 11312 return bnxt_get_max_func_irqs(bp); 11313 11314 return bnxt_nq_rings_in_use(bp); 11315 } 11316 11317 static int bnxt_init_int_mode(struct bnxt *bp) 11318 { 
11319 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size; 11320 11321 total_vecs = bnxt_get_num_msix(bp); 11322 max = bnxt_get_max_func_irqs(bp); 11323 if (total_vecs > max) 11324 total_vecs = max; 11325 11326 if (!total_vecs) 11327 return 0; 11328 11329 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 11330 min = 2; 11331 11332 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs, 11333 PCI_IRQ_MSIX); 11334 ulp_msix = bnxt_get_ulp_msix_num(bp); 11335 if (total_vecs < 0 || total_vecs < ulp_msix) { 11336 rc = -ENODEV; 11337 goto msix_setup_exit; 11338 } 11339 11340 tbl_size = total_vecs; 11341 if (pci_msix_can_alloc_dyn(bp->pdev)) 11342 tbl_size = max; 11343 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL); 11344 if (bp->irq_tbl) { 11345 for (i = 0; i < total_vecs; i++) 11346 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i); 11347 11348 bp->total_irqs = total_vecs; 11349 /* Trim rings based upon num of vectors allocated */ 11350 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 11351 total_vecs - ulp_msix, min == 1); 11352 if (rc) 11353 goto msix_setup_exit; 11354 11355 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 11356 bp->cp_nr_rings = (min == 1) ? 11357 max_t(int, tx_cp, bp->rx_nr_rings) : 11358 tx_cp + bp->rx_nr_rings; 11359 11360 } else { 11361 rc = -ENOMEM; 11362 goto msix_setup_exit; 11363 } 11364 return 0; 11365 11366 msix_setup_exit: 11367 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc); 11368 kfree(bp->irq_tbl); 11369 bp->irq_tbl = NULL; 11370 pci_free_irq_vectors(bp->pdev); 11371 return rc; 11372 } 11373 11374 static void bnxt_clear_int_mode(struct bnxt *bp) 11375 { 11376 pci_free_irq_vectors(bp->pdev); 11377 11378 kfree(bp->irq_tbl); 11379 bp->irq_tbl = NULL; 11380 } 11381 11382 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 11383 { 11384 bool irq_cleared = false; 11385 bool irq_change = false; 11386 int tcs = bp->num_tc; 11387 int irqs_required; 11388 int rc; 11389 11390 if (!bnxt_need_reserve_rings(bp)) 11391 return 0; 11392 11393 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 11394 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 11395 11396 if (ulp_msix > bp->ulp_num_msix_want) 11397 ulp_msix = bp->ulp_num_msix_want; 11398 irqs_required = ulp_msix + bp->cp_nr_rings; 11399 } else { 11400 irqs_required = bnxt_get_num_msix(bp); 11401 } 11402 11403 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { 11404 irq_change = true; 11405 if (!pci_msix_can_alloc_dyn(bp->pdev)) { 11406 bnxt_ulp_irq_stop(bp); 11407 bnxt_clear_int_mode(bp); 11408 irq_cleared = true; 11409 } 11410 } 11411 rc = __bnxt_reserve_rings(bp); 11412 if (irq_cleared) { 11413 if (!rc) 11414 rc = bnxt_init_int_mode(bp); 11415 bnxt_ulp_irq_restart(bp, rc); 11416 } else if (irq_change && !rc) { 11417 if (bnxt_change_msix(bp, irqs_required) != irqs_required) 11418 rc = -ENOSPC; 11419 } 11420 if (rc) { 11421 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 11422 return rc; 11423 } 11424 if (tcs && (bp->tx_nr_rings_per_tc * tcs != 11425 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 11426 netdev_err(bp->dev, "tx ring reservation failure\n"); 11427 netdev_reset_tc(bp->dev); 11428 bp->num_tc = 0; 11429 if (bp->tx_nr_rings_xdp) 11430 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 11431 else 11432 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 11433 return -ENOMEM; 11434 } 11435 return 0; 11436 } 11437 11438 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx) 11439 { 11440 struct bnxt_tx_ring_info 
*txr; 11441 struct netdev_queue *txq; 11442 struct bnxt_napi *bnapi; 11443 int i; 11444 11445 bnapi = bp->bnapi[idx]; 11446 bnxt_for_each_napi_tx(i, bnapi, txr) { 11447 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 11448 synchronize_net(); 11449 11450 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) { 11451 txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 11452 if (txq) { 11453 __netif_tx_lock_bh(txq); 11454 netif_tx_stop_queue(txq); 11455 __netif_tx_unlock_bh(txq); 11456 } 11457 } 11458 11459 if (!bp->tph_mode) 11460 continue; 11461 11462 bnxt_hwrm_tx_ring_free(bp, txr, true); 11463 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr); 11464 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index); 11465 bnxt_clear_one_cp_ring(bp, txr->tx_cpr); 11466 } 11467 } 11468 11469 static int bnxt_tx_queue_start(struct bnxt *bp, int idx) 11470 { 11471 struct bnxt_tx_ring_info *txr; 11472 struct netdev_queue *txq; 11473 struct bnxt_napi *bnapi; 11474 int rc, i; 11475 11476 bnapi = bp->bnapi[idx]; 11477 /* All rings have been reserved and previously allocated. 11478 * Reallocating with the same parameters should never fail. 11479 */ 11480 bnxt_for_each_napi_tx(i, bnapi, txr) { 11481 if (!bp->tph_mode) 11482 goto start_tx; 11483 11484 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); 11485 if (rc) 11486 return rc; 11487 11488 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false); 11489 if (rc) 11490 return rc; 11491 11492 txr->tx_prod = 0; 11493 txr->tx_cons = 0; 11494 txr->tx_hw_cons = 0; 11495 start_tx: 11496 WRITE_ONCE(txr->dev_state, 0); 11497 synchronize_net(); 11498 11499 if (bnapi->flags & BNXT_NAPI_FLAG_XDP) 11500 continue; 11501 11502 txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 11503 if (txq) 11504 netif_tx_start_queue(txq); 11505 } 11506 11507 return 0; 11508 } 11509 11510 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify, 11511 const cpumask_t *mask) 11512 { 11513 struct bnxt_irq *irq; 11514 u16 tag; 11515 int err; 11516 11517 irq = container_of(notify, struct bnxt_irq, affinity_notify); 11518 11519 if (!irq->bp->tph_mode) 11520 return; 11521 11522 cpumask_copy(irq->cpu_mask, mask); 11523 11524 if (irq->ring_nr >= irq->bp->rx_nr_rings) 11525 return; 11526 11527 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, 11528 cpumask_first(irq->cpu_mask), &tag)) 11529 return; 11530 11531 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag)) 11532 return; 11533 11534 netdev_lock(irq->bp->dev); 11535 if (netif_running(irq->bp->dev)) { 11536 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr); 11537 if (err) 11538 netdev_err(irq->bp->dev, 11539 "RX queue restart failed: err=%d\n", err); 11540 } 11541 netdev_unlock(irq->bp->dev); 11542 } 11543 11544 static void bnxt_irq_affinity_release(struct kref *ref) 11545 { 11546 struct irq_affinity_notify *notify = 11547 container_of(ref, struct irq_affinity_notify, kref); 11548 struct bnxt_irq *irq; 11549 11550 irq = container_of(notify, struct bnxt_irq, affinity_notify); 11551 11552 if (!irq->bp->tph_mode) 11553 return; 11554 11555 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) { 11556 netdev_err(irq->bp->dev, 11557 "Setting ST=0 for MSIX entry %d failed\n", 11558 irq->msix_nr); 11559 return; 11560 } 11561 } 11562 11563 static void bnxt_release_irq_notifier(struct bnxt_irq *irq) 11564 { 11565 irq_set_affinity_notifier(irq->vector, NULL); 11566 } 11567 11568 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq) 11569 { 11570 struct irq_affinity_notify *notify; 11571 11572 irq->bp = bp; 11573 11574 /* 
Nothing to do if TPH is not enabled */ 11575 if (!bp->tph_mode) 11576 return; 11577 11578 /* Register IRQ affinity notifier */ 11579 notify = &irq->affinity_notify; 11580 notify->irq = irq->vector; 11581 notify->notify = bnxt_irq_affinity_notify; 11582 notify->release = bnxt_irq_affinity_release; 11583 11584 irq_set_affinity_notifier(irq->vector, notify); 11585 } 11586 11587 static void bnxt_free_irq(struct bnxt *bp) 11588 { 11589 struct bnxt_irq *irq; 11590 int i; 11591 11592 #ifdef CONFIG_RFS_ACCEL 11593 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 11594 bp->dev->rx_cpu_rmap = NULL; 11595 #endif 11596 if (!bp->irq_tbl || !bp->bnapi) 11597 return; 11598 11599 for (i = 0; i < bp->cp_nr_rings; i++) { 11600 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11601 11602 irq = &bp->irq_tbl[map_idx]; 11603 if (irq->requested) { 11604 if (irq->have_cpumask) { 11605 irq_update_affinity_hint(irq->vector, NULL); 11606 free_cpumask_var(irq->cpu_mask); 11607 irq->have_cpumask = 0; 11608 } 11609 11610 bnxt_release_irq_notifier(irq); 11611 11612 free_irq(irq->vector, bp->bnapi[i]); 11613 } 11614 11615 irq->requested = 0; 11616 } 11617 11618 /* Disable TPH support */ 11619 pcie_disable_tph(bp->pdev); 11620 bp->tph_mode = 0; 11621 } 11622 11623 static int bnxt_request_irq(struct bnxt *bp) 11624 { 11625 struct cpu_rmap *rmap = NULL; 11626 int i, j, rc = 0; 11627 unsigned long flags = 0; 11628 11629 rc = bnxt_setup_int_mode(bp); 11630 if (rc) { 11631 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 11632 rc); 11633 return rc; 11634 } 11635 #ifdef CONFIG_RFS_ACCEL 11636 rmap = bp->dev->rx_cpu_rmap; 11637 #endif 11638 11639 /* Enable TPH support as part of IRQ request */ 11640 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE); 11641 if (!rc) 11642 bp->tph_mode = PCI_TPH_ST_IV_MODE; 11643 11644 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 11645 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11646 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 11647 11648 if (IS_ENABLED(CONFIG_RFS_ACCEL) && 11649 rmap && bp->bnapi[i]->rx_ring) { 11650 rc = irq_cpu_rmap_add(rmap, irq->vector); 11651 if (rc) 11652 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 11653 j); 11654 j++; 11655 } 11656 11657 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 11658 bp->bnapi[i]); 11659 if (rc) 11660 break; 11661 11662 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector); 11663 irq->requested = 1; 11664 11665 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 11666 int numa_node = dev_to_node(&bp->pdev->dev); 11667 u16 tag; 11668 11669 irq->have_cpumask = 1; 11670 irq->msix_nr = map_idx; 11671 irq->ring_nr = i; 11672 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 11673 irq->cpu_mask); 11674 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask); 11675 if (rc) { 11676 netdev_warn(bp->dev, 11677 "Update affinity hint failed, IRQ = %d\n", 11678 irq->vector); 11679 break; 11680 } 11681 11682 bnxt_register_irq_notifier(bp, irq); 11683 11684 /* Init ST table entry */ 11685 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, 11686 cpumask_first(irq->cpu_mask), 11687 &tag)) 11688 continue; 11689 11690 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag); 11691 } 11692 } 11693 return rc; 11694 } 11695 11696 static void bnxt_del_napi(struct bnxt *bp) 11697 { 11698 int i; 11699 11700 if (!bp->bnapi) 11701 return; 11702 11703 for (i = 0; i < bp->rx_nr_rings; i++) 11704 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); 11705 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) 11706 
netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); 11707 11708 for (i = 0; i < bp->cp_nr_rings; i++) { 11709 struct bnxt_napi *bnapi = bp->bnapi[i]; 11710 11711 __netif_napi_del_locked(&bnapi->napi); 11712 } 11713 /* We called __netif_napi_del_locked(), we need 11714 * to respect an RCU grace period before freeing napi structures. 11715 */ 11716 synchronize_net(); 11717 } 11718 11719 static void bnxt_init_napi(struct bnxt *bp) 11720 { 11721 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 11722 unsigned int cp_nr_rings = bp->cp_nr_rings; 11723 struct bnxt_napi *bnapi; 11724 int i; 11725 11726 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11727 poll_fn = bnxt_poll_p5; 11728 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 11729 cp_nr_rings--; 11730 11731 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 11732 11733 for (i = 0; i < cp_nr_rings; i++) { 11734 bnapi = bp->bnapi[i]; 11735 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn, 11736 bnapi->index); 11737 } 11738 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 11739 bnapi = bp->bnapi[cp_nr_rings]; 11740 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); 11741 } 11742 } 11743 11744 static void bnxt_disable_napi(struct bnxt *bp) 11745 { 11746 int i; 11747 11748 if (!bp->bnapi || 11749 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 11750 return; 11751 11752 for (i = 0; i < bp->cp_nr_rings; i++) { 11753 struct bnxt_napi *bnapi = bp->bnapi[i]; 11754 struct bnxt_cp_ring_info *cpr; 11755 11756 cpr = &bnapi->cp_ring; 11757 if (bnapi->tx_fault) 11758 cpr->sw_stats->tx.tx_resets++; 11759 if (bnapi->in_reset) 11760 cpr->sw_stats->rx.rx_resets++; 11761 napi_disable_locked(&bnapi->napi); 11762 } 11763 } 11764 11765 static void bnxt_enable_napi(struct bnxt *bp) 11766 { 11767 int i; 11768 11769 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 11770 for (i = 0; i < bp->cp_nr_rings; i++) { 11771 struct bnxt_napi *bnapi = bp->bnapi[i]; 11772 struct bnxt_cp_ring_info *cpr; 11773 11774 bnapi->tx_fault = 0; 11775 11776 cpr = &bnapi->cp_ring; 11777 bnapi->in_reset = false; 11778 11779 if (bnapi->rx_ring) { 11780 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 11781 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 11782 } 11783 napi_enable_locked(&bnapi->napi); 11784 } 11785 } 11786 11787 void bnxt_tx_disable(struct bnxt *bp) 11788 { 11789 int i; 11790 struct bnxt_tx_ring_info *txr; 11791 11792 if (bp->tx_ring) { 11793 for (i = 0; i < bp->tx_nr_rings; i++) { 11794 txr = &bp->tx_ring[i]; 11795 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 11796 } 11797 } 11798 /* Make sure napi polls see @dev_state change */ 11799 synchronize_net(); 11800 /* Drop carrier first to prevent TX timeout */ 11801 netif_carrier_off(bp->dev); 11802 /* Stop all TX queues */ 11803 netif_tx_disable(bp->dev); 11804 } 11805 11806 void bnxt_tx_enable(struct bnxt *bp) 11807 { 11808 int i; 11809 struct bnxt_tx_ring_info *txr; 11810 11811 for (i = 0; i < bp->tx_nr_rings; i++) { 11812 txr = &bp->tx_ring[i]; 11813 WRITE_ONCE(txr->dev_state, 0); 11814 } 11815 /* Make sure napi polls see @dev_state change */ 11816 synchronize_net(); 11817 netif_tx_wake_all_queues(bp->dev); 11818 if (BNXT_LINK_IS_UP(bp)) 11819 netif_carrier_on(bp->dev); 11820 } 11821 11822 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 11823 { 11824 u8 active_fec = link_info->active_fec_sig_mode & 11825 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 11826 11827 switch (active_fec) { 11828 default: 11829 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 11830 return "None"; 11831 case 
PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 11832 return "Clause 74 BaseR"; 11833 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 11834 return "Clause 91 RS(528,514)"; 11835 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 11836 return "Clause 91 RS544_1XN"; 11837 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 11838 return "Clause 91 RS(544,514)"; 11839 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 11840 return "Clause 91 RS272_1XN"; 11841 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 11842 return "Clause 91 RS(272,257)"; 11843 } 11844 } 11845 11846 void bnxt_report_link(struct bnxt *bp) 11847 { 11848 if (BNXT_LINK_IS_UP(bp)) { 11849 const char *signal = ""; 11850 const char *flow_ctrl; 11851 const char *duplex; 11852 u32 speed; 11853 u16 fec; 11854 11855 netif_carrier_on(bp->dev); 11856 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 11857 if (speed == SPEED_UNKNOWN) { 11858 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 11859 return; 11860 } 11861 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 11862 duplex = "full"; 11863 else 11864 duplex = "half"; 11865 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 11866 flow_ctrl = "ON - receive & transmit"; 11867 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 11868 flow_ctrl = "ON - transmit"; 11869 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 11870 flow_ctrl = "ON - receive"; 11871 else 11872 flow_ctrl = "none"; 11873 if (bp->link_info.phy_qcfg_resp.option_flags & 11874 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 11875 u8 sig_mode = bp->link_info.active_fec_sig_mode & 11876 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 11877 switch (sig_mode) { 11878 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 11879 signal = "(NRZ) "; 11880 break; 11881 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 11882 signal = "(PAM4 56Gbps) "; 11883 break; 11884 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: 11885 signal = "(PAM4 112Gbps) "; 11886 break; 11887 default: 11888 break; 11889 } 11890 } 11891 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 11892 speed, signal, duplex, flow_ctrl); 11893 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 11894 netdev_info(bp->dev, "EEE is %s\n", 11895 bp->eee.eee_active ? "active" : 11896 "not active"); 11897 fec = bp->link_info.fec_cfg; 11898 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 11899 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 11900 (fec & BNXT_FEC_AUTONEG) ? 
"on" : "off", 11901 bnxt_report_fec(&bp->link_info)); 11902 } else { 11903 netif_carrier_off(bp->dev); 11904 netdev_err(bp->dev, "NIC Link is Down\n"); 11905 } 11906 } 11907 11908 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 11909 { 11910 if (!resp->supported_speeds_auto_mode && 11911 !resp->supported_speeds_force_mode && 11912 !resp->supported_pam4_speeds_auto_mode && 11913 !resp->supported_pam4_speeds_force_mode && 11914 !resp->supported_speeds2_auto_mode && 11915 !resp->supported_speeds2_force_mode) 11916 return true; 11917 return false; 11918 } 11919 11920 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 11921 { 11922 struct bnxt_link_info *link_info = &bp->link_info; 11923 struct hwrm_port_phy_qcaps_output *resp; 11924 struct hwrm_port_phy_qcaps_input *req; 11925 int rc = 0; 11926 11927 if (bp->hwrm_spec_code < 0x10201) 11928 return 0; 11929 11930 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 11931 if (rc) 11932 return rc; 11933 11934 resp = hwrm_req_hold(bp, req); 11935 rc = hwrm_req_send(bp, req); 11936 if (rc) 11937 goto hwrm_phy_qcaps_exit; 11938 11939 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); 11940 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 11941 struct ethtool_keee *eee = &bp->eee; 11942 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 11943 11944 _bnxt_fw_to_linkmode(eee->supported, fw_speeds); 11945 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 11946 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 11947 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 11948 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 11949 } 11950 11951 if (bp->hwrm_spec_code >= 0x10a01) { 11952 if (bnxt_phy_qcaps_no_speed(resp)) { 11953 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 11954 netdev_warn(bp->dev, "Ethernet link disabled\n"); 11955 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 11956 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 11957 netdev_info(bp->dev, "Ethernet link enabled\n"); 11958 /* Phy re-enabled, reprobe the speeds */ 11959 link_info->support_auto_speeds = 0; 11960 link_info->support_pam4_auto_speeds = 0; 11961 link_info->support_auto_speeds2 = 0; 11962 } 11963 } 11964 if (resp->supported_speeds_auto_mode) 11965 link_info->support_auto_speeds = 11966 le16_to_cpu(resp->supported_speeds_auto_mode); 11967 if (resp->supported_pam4_speeds_auto_mode) 11968 link_info->support_pam4_auto_speeds = 11969 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 11970 if (resp->supported_speeds2_auto_mode) 11971 link_info->support_auto_speeds2 = 11972 le16_to_cpu(resp->supported_speeds2_auto_mode); 11973 11974 bp->port_count = resp->port_cnt; 11975 11976 hwrm_phy_qcaps_exit: 11977 hwrm_req_drop(bp, req); 11978 return rc; 11979 } 11980 11981 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp) 11982 { 11983 struct hwrm_port_mac_qcaps_output *resp; 11984 struct hwrm_port_mac_qcaps_input *req; 11985 int rc; 11986 11987 if (bp->hwrm_spec_code < 0x10a03) 11988 return; 11989 11990 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS); 11991 if (rc) 11992 return; 11993 11994 resp = hwrm_req_hold(bp, req); 11995 rc = hwrm_req_send_silent(bp, req); 11996 if (!rc) 11997 bp->mac_flags = resp->flags; 11998 hwrm_req_drop(bp, req); 11999 } 12000 12001 static bool bnxt_support_dropped(u16 advertising, u16 supported) 12002 { 12003 u16 diff = advertising ^ supported; 12004 12005 return ((supported | diff) != supported); 12006 } 12007 12008 static bool bnxt_support_speed_dropped(struct bnxt_link_info 
*link_info) 12009 { 12010 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 12011 12012 /* Check if any advertised speeds are no longer supported. The caller 12013 * holds the link_lock mutex, so we can modify link_info settings. 12014 */ 12015 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 12016 if (bnxt_support_dropped(link_info->advertising, 12017 link_info->support_auto_speeds2)) { 12018 link_info->advertising = link_info->support_auto_speeds2; 12019 return true; 12020 } 12021 return false; 12022 } 12023 if (bnxt_support_dropped(link_info->advertising, 12024 link_info->support_auto_speeds)) { 12025 link_info->advertising = link_info->support_auto_speeds; 12026 return true; 12027 } 12028 if (bnxt_support_dropped(link_info->advertising_pam4, 12029 link_info->support_pam4_auto_speeds)) { 12030 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 12031 return true; 12032 } 12033 return false; 12034 } 12035 12036 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 12037 { 12038 struct bnxt_link_info *link_info = &bp->link_info; 12039 struct hwrm_port_phy_qcfg_output *resp; 12040 struct hwrm_port_phy_qcfg_input *req; 12041 u8 link_state = link_info->link_state; 12042 bool support_changed; 12043 int rc; 12044 12045 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); 12046 if (rc) 12047 return rc; 12048 12049 resp = hwrm_req_hold(bp, req); 12050 rc = hwrm_req_send(bp, req); 12051 if (rc) { 12052 hwrm_req_drop(bp, req); 12053 if (BNXT_VF(bp) && rc == -ENODEV) { 12054 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); 12055 rc = 0; 12056 } 12057 return rc; 12058 } 12059 12060 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 12061 link_info->phy_link_status = resp->link; 12062 link_info->duplex = resp->duplex_cfg; 12063 if (bp->hwrm_spec_code >= 0x10800) 12064 link_info->duplex = resp->duplex_state; 12065 link_info->pause = resp->pause; 12066 link_info->auto_mode = resp->auto_mode; 12067 link_info->auto_pause_setting = resp->auto_pause; 12068 link_info->lp_pause = resp->link_partner_adv_pause; 12069 link_info->force_pause_setting = resp->force_pause; 12070 link_info->duplex_setting = resp->duplex_cfg; 12071 if (link_info->phy_link_status == BNXT_LINK_LINK) { 12072 link_info->link_speed = le16_to_cpu(resp->link_speed); 12073 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 12074 link_info->active_lanes = resp->active_lanes; 12075 } else { 12076 link_info->link_speed = 0; 12077 link_info->active_lanes = 0; 12078 } 12079 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 12080 link_info->force_pam4_link_speed = 12081 le16_to_cpu(resp->force_pam4_link_speed); 12082 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); 12083 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 12084 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 12085 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); 12086 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 12087 link_info->auto_pam4_link_speeds = 12088 le16_to_cpu(resp->auto_pam4_link_speed_mask); 12089 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); 12090 link_info->lp_auto_link_speeds = 12091 le16_to_cpu(resp->link_partner_adv_speeds); 12092 link_info->lp_auto_pam4_link_speeds = 12093 resp->link_partner_pam4_adv_speeds; 12094 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 12095 link_info->phy_ver[0] = resp->phy_maj; 12096 link_info->phy_ver[1] = resp->phy_min; 12097 
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->media_type = resp->media_type;
	link_info->phy_type = resp->phy_type;
	link_info->transceiver = resp->xcvr_pkg_type;
	link_info->phy_addr = resp->eee_config_phy_addr &
			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	link_info->module_status = resp->module_status;

	if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
		struct ethtool_keee *eee = &bp->eee;
		u16 fw_speeds;

		eee->eee_active = 0;
		if (resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
			eee->eee_active = 1;
			fw_speeds = le16_to_cpu(
				resp->link_partner_adv_eee_link_speed_mask);
			_bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			_bnxt_fw_to_linkmode(eee->advertised, fw_speeds);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504) {
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
	}
	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_state = BNXT_LINK_STATE_UP;
		else
			link_info->link_state = BNXT_LINK_STATE_DOWN;
		if (link_state != link_info->link_state)
			bnxt_report_link(bp);
	} else {
		/* always report link down if not required to update link state */
		link_info->link_state = BNXT_LINK_STATE_DOWN;
	}
	hwrm_req_drop(bp, req);

	if (!BNXT_PHY_CFG_ABLE(bp))
		return 0;

	support_changed = bnxt_support_speed_dropped(link_info);
	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
		bnxt_hwrm_set_link_setting(bp, true, false);
	return 0;
}

static void bnxt_get_port_module_status(struct bnxt *bp)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	u8 module_status;

	if (bnxt_update_link(bp, true))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
			netdev_warn(bp->dev, "TX is disabled\n");
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
	}
}

static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
12196 { 12197 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 12198 if (bp->hwrm_spec_code >= 0x10201) 12199 req->auto_pause = 12200 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 12201 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 12202 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 12203 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 12204 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 12205 req->enables |= 12206 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 12207 } else { 12208 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 12209 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 12210 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 12211 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 12212 req->enables |= 12213 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 12214 if (bp->hwrm_spec_code >= 0x10201) { 12215 req->auto_pause = req->force_pause; 12216 req->enables |= cpu_to_le32( 12217 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 12218 } 12219 } 12220 } 12221 12222 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 12223 { 12224 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 12225 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 12226 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 12227 req->enables |= 12228 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); 12229 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); 12230 } else if (bp->link_info.advertising) { 12231 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 12232 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 12233 } 12234 if (bp->link_info.advertising_pam4) { 12235 req->enables |= 12236 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 12237 req->auto_link_pam4_speed_mask = 12238 cpu_to_le16(bp->link_info.advertising_pam4); 12239 } 12240 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 12241 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 12242 } else { 12243 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 12244 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 12245 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); 12246 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); 12247 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", 12248 (u32)bp->link_info.req_link_speed); 12249 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 12250 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 12251 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 12252 } else { 12253 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 12254 } 12255 } 12256 12257 /* tell chimp that the setting takes effect immediately */ 12258 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 12259 } 12260 12261 int bnxt_hwrm_set_pause(struct bnxt *bp) 12262 { 12263 struct hwrm_port_phy_cfg_input *req; 12264 int rc; 12265 12266 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 12267 if (rc) 12268 return rc; 12269 12270 bnxt_hwrm_set_pause_common(bp, req); 12271 12272 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 12273 bp->link_info.force_link_chng) 12274 bnxt_hwrm_set_link_common(bp, req); 12275 12276 rc = hwrm_req_send(bp, req); 12277 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 12278 /* since changing of pause setting doesn't trigger any link 12279 * change event, the driver needs to update the 
		 * current pause result upon successful return of the phy_cfg
		 * command.
		 */
		bp->link_info.pause =
		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
		bp->link_info.auto_pause_setting = 0;
		if (!bp->link_info.force_link_chng)
			bnxt_report_link(bp);
	}
	bp->link_info.force_link_chng = false;
	return rc;
}

static void bnxt_hwrm_set_eee(struct bnxt *bp,
			      struct hwrm_port_phy_cfg_input *req)
{
	struct ethtool_keee *eee = &bp->eee;

	if (eee->eee_enabled) {
		u16 eee_speeds;
		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;

		if (eee->tx_lpi_enabled)
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
		else
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;

		req->flags |= cpu_to_le32(flags);
		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
	} else {
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
	}
}

int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	if (set_pause)
		bnxt_hwrm_set_pause_common(bp, req);

	bnxt_hwrm_set_link_common(bp, req);

	if (set_eee)
		bnxt_hwrm_set_eee(bp, req);
	return hwrm_req_send(bp, req);
}

static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input *req;
	int rc;

	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if (pci_num_vf(bp->pdev) &&
	    !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
	if (rc)
		return rc;

	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		mutex_lock(&bp->link_lock);
		/* The device is not obliged to bring the link down in certain
		 * scenarios, even when forced. Setting the state to unknown is
		 * consistent with driver startup and will force the link state
		 * to be reported during subsequent open based on PORT_PHY_QCFG.
12358 */ 12359 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; 12360 mutex_unlock(&bp->link_lock); 12361 } 12362 return rc; 12363 } 12364 12365 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 12366 { 12367 #ifdef CONFIG_TEE_BNXT_FW 12368 int rc = tee_bnxt_fw_load(); 12369 12370 if (rc) 12371 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 12372 12373 return rc; 12374 #else 12375 netdev_err(bp->dev, "OP-TEE not supported\n"); 12376 return -ENODEV; 12377 #endif 12378 } 12379 12380 static int bnxt_try_recover_fw(struct bnxt *bp) 12381 { 12382 if (bp->fw_health && bp->fw_health->status_reliable) { 12383 int retry = 0, rc; 12384 u32 sts; 12385 12386 do { 12387 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 12388 rc = bnxt_hwrm_poll(bp); 12389 if (!BNXT_FW_IS_BOOTING(sts) && 12390 !BNXT_FW_IS_RECOVERING(sts)) 12391 break; 12392 retry++; 12393 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 12394 12395 if (!BNXT_FW_IS_HEALTHY(sts)) { 12396 netdev_err(bp->dev, 12397 "Firmware not responding, status: 0x%x\n", 12398 sts); 12399 rc = -ENODEV; 12400 } 12401 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 12402 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 12403 return bnxt_fw_reset_via_optee(bp); 12404 } 12405 return rc; 12406 } 12407 12408 return -ENODEV; 12409 } 12410 12411 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 12412 { 12413 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 12414 12415 if (!BNXT_NEW_RM(bp)) 12416 return; /* no resource reservations required */ 12417 12418 hw_resc->resv_cp_rings = 0; 12419 hw_resc->resv_stat_ctxs = 0; 12420 hw_resc->resv_irqs = 0; 12421 hw_resc->resv_tx_rings = 0; 12422 hw_resc->resv_rx_rings = 0; 12423 hw_resc->resv_hw_ring_grps = 0; 12424 hw_resc->resv_vnics = 0; 12425 hw_resc->resv_rsscos_ctxs = 0; 12426 if (!fw_reset) { 12427 bp->tx_nr_rings = 0; 12428 bp->rx_nr_rings = 0; 12429 } 12430 } 12431 12432 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) 12433 { 12434 int rc; 12435 12436 if (!BNXT_NEW_RM(bp)) 12437 return 0; /* no resource reservations required */ 12438 12439 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 12440 if (rc) 12441 netdev_err(bp->dev, "resc_qcaps failed\n"); 12442 12443 bnxt_clear_reservations(bp, fw_reset); 12444 12445 return rc; 12446 } 12447 12448 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 12449 { 12450 struct hwrm_func_drv_if_change_output *resp; 12451 struct hwrm_func_drv_if_change_input *req; 12452 bool resc_reinit = false; 12453 bool caps_change = false; 12454 int rc, retry = 0; 12455 bool fw_reset; 12456 u32 flags = 0; 12457 12458 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT); 12459 bp->fw_reset_state = 0; 12460 12461 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 12462 return 0; 12463 12464 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); 12465 if (rc) 12466 return rc; 12467 12468 if (up) 12469 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 12470 resp = hwrm_req_hold(bp, req); 12471 12472 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 12473 while (retry < BNXT_FW_IF_RETRY) { 12474 rc = hwrm_req_send(bp, req); 12475 if (rc != -EAGAIN) 12476 break; 12477 12478 msleep(50); 12479 retry++; 12480 } 12481 12482 if (rc == -EAGAIN) { 12483 hwrm_req_drop(bp, req); 12484 return rc; 12485 } else if (!rc) { 12486 flags = le32_to_cpu(resp->flags); 12487 } else if (up) { 12488 rc = bnxt_try_recover_fw(bp); 12489 fw_reset = true; 12490 } 12491 hwrm_req_drop(bp, req); 12492 if (rc) 12493 return rc; 12494 12495 if (!up) { 12496 
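		/* Interface is going down: nothing further to reconcile here,
		 * just invalidate the firmware health register state via
		 * bnxt_inv_fw_health_reg() and return.
		 */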
bnxt_inv_fw_health_reg(bp); 12497 return 0; 12498 } 12499 12500 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 12501 resc_reinit = true; 12502 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || 12503 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) 12504 fw_reset = true; 12505 else 12506 bnxt_remap_fw_health_regs(bp); 12507 12508 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 12509 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 12510 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12511 return -ENODEV; 12512 } 12513 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE) 12514 caps_change = true; 12515 12516 if (resc_reinit || fw_reset || caps_change) { 12517 if (fw_reset || caps_change) { 12518 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12519 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12520 bnxt_ulp_irq_stop(bp); 12521 bnxt_free_ctx_mem(bp, false); 12522 bnxt_dcb_free(bp); 12523 rc = bnxt_fw_init_one(bp); 12524 if (rc) { 12525 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12526 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12527 return rc; 12528 } 12529 /* IRQ will be initialized later in bnxt_request_irq()*/ 12530 bnxt_clear_int_mode(bp); 12531 } 12532 rc = bnxt_cancel_reservations(bp, fw_reset); 12533 } 12534 return rc; 12535 } 12536 12537 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 12538 { 12539 struct hwrm_port_led_qcaps_output *resp; 12540 struct hwrm_port_led_qcaps_input *req; 12541 struct bnxt_pf_info *pf = &bp->pf; 12542 int rc; 12543 12544 bp->num_leds = 0; 12545 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 12546 return 0; 12547 12548 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); 12549 if (rc) 12550 return rc; 12551 12552 req->port_id = cpu_to_le16(pf->port_id); 12553 resp = hwrm_req_hold(bp, req); 12554 rc = hwrm_req_send(bp, req); 12555 if (rc) { 12556 hwrm_req_drop(bp, req); 12557 return rc; 12558 } 12559 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 12560 int i; 12561 12562 bp->num_leds = resp->num_leds; 12563 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 12564 bp->num_leds); 12565 for (i = 0; i < bp->num_leds; i++) { 12566 struct bnxt_led_info *led = &bp->leds[i]; 12567 __le16 caps = led->led_state_caps; 12568 12569 if (!led->led_group_id || 12570 !BNXT_LED_ALT_BLINK_CAP(caps)) { 12571 bp->num_leds = 0; 12572 break; 12573 } 12574 } 12575 } 12576 hwrm_req_drop(bp, req); 12577 return 0; 12578 } 12579 12580 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 12581 { 12582 struct hwrm_wol_filter_alloc_output *resp; 12583 struct hwrm_wol_filter_alloc_input *req; 12584 int rc; 12585 12586 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); 12587 if (rc) 12588 return rc; 12589 12590 req->port_id = cpu_to_le16(bp->pf.port_id); 12591 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 12592 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 12593 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); 12594 12595 resp = hwrm_req_hold(bp, req); 12596 rc = hwrm_req_send(bp, req); 12597 if (!rc) 12598 bp->wol_filter_id = resp->wol_filter_id; 12599 hwrm_req_drop(bp, req); 12600 return rc; 12601 } 12602 12603 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 12604 { 12605 struct hwrm_wol_filter_free_input *req; 12606 int rc; 12607 12608 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); 12609 if (rc) 12610 return rc; 12611 12612 req->port_id = cpu_to_le16(bp->pf.port_id); 12613 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 12614 req->wol_filter_id = 
bp->wol_filter_id; 12615 12616 return hwrm_req_send(bp, req); 12617 } 12618 12619 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 12620 { 12621 struct hwrm_wol_filter_qcfg_output *resp; 12622 struct hwrm_wol_filter_qcfg_input *req; 12623 u16 next_handle = 0; 12624 int rc; 12625 12626 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); 12627 if (rc) 12628 return rc; 12629 12630 req->port_id = cpu_to_le16(bp->pf.port_id); 12631 req->handle = cpu_to_le16(handle); 12632 resp = hwrm_req_hold(bp, req); 12633 rc = hwrm_req_send(bp, req); 12634 if (!rc) { 12635 next_handle = le16_to_cpu(resp->next_handle); 12636 if (next_handle != 0) { 12637 if (resp->wol_type == 12638 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 12639 bp->wol = 1; 12640 bp->wol_filter_id = resp->wol_filter_id; 12641 } 12642 } 12643 } 12644 hwrm_req_drop(bp, req); 12645 return next_handle; 12646 } 12647 12648 static void bnxt_get_wol_settings(struct bnxt *bp) 12649 { 12650 u16 handle = 0; 12651 12652 bp->wol = 0; 12653 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 12654 return; 12655 12656 do { 12657 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 12658 } while (handle && handle != 0xffff); 12659 } 12660 12661 static bool bnxt_eee_config_ok(struct bnxt *bp) 12662 { 12663 struct ethtool_keee *eee = &bp->eee; 12664 struct bnxt_link_info *link_info = &bp->link_info; 12665 12666 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 12667 return true; 12668 12669 if (eee->eee_enabled) { 12670 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 12671 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); 12672 12673 _bnxt_fw_to_linkmode(advertising, link_info->advertising); 12674 12675 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 12676 eee->eee_enabled = 0; 12677 return false; 12678 } 12679 if (linkmode_andnot(tmp, eee->advertised, advertising)) { 12680 linkmode_and(eee->advertised, advertising, 12681 eee->supported); 12682 return false; 12683 } 12684 } 12685 return true; 12686 } 12687 12688 static int bnxt_update_phy_setting(struct bnxt *bp) 12689 { 12690 int rc; 12691 bool update_link = false; 12692 bool update_pause = false; 12693 bool update_eee = false; 12694 struct bnxt_link_info *link_info = &bp->link_info; 12695 12696 rc = bnxt_update_link(bp, true); 12697 if (rc) { 12698 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 12699 rc); 12700 return rc; 12701 } 12702 if (!BNXT_SINGLE_PF(bp)) 12703 return 0; 12704 12705 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 12706 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 12707 link_info->req_flow_ctrl) 12708 update_pause = true; 12709 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 12710 link_info->force_pause_setting != link_info->req_flow_ctrl) 12711 update_pause = true; 12712 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 12713 if (BNXT_AUTO_MODE(link_info->auto_mode)) 12714 update_link = true; 12715 if (bnxt_force_speed_updated(link_info)) 12716 update_link = true; 12717 if (link_info->req_duplex != link_info->duplex_setting) 12718 update_link = true; 12719 } else { 12720 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 12721 update_link = true; 12722 if (bnxt_auto_speed_updated(link_info)) 12723 update_link = true; 12724 } 12725 12726 /* The last close may have shutdown the link, so need to call 12727 * PHY_CFG to bring it back up. 
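	 * That is why update_link is forced below whenever the link is not
	 * currently reported as up.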
12728 */ 12729 if (!BNXT_LINK_IS_UP(bp)) 12730 update_link = true; 12731 12732 if (!bnxt_eee_config_ok(bp)) 12733 update_eee = true; 12734 12735 if (update_link) 12736 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 12737 else if (update_pause) 12738 rc = bnxt_hwrm_set_pause(bp); 12739 if (rc) { 12740 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 12741 rc); 12742 return rc; 12743 } 12744 12745 return rc; 12746 } 12747 12748 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 12749 12750 static int bnxt_reinit_after_abort(struct bnxt *bp) 12751 { 12752 int rc; 12753 12754 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12755 return -EBUSY; 12756 12757 if (bp->dev->reg_state == NETREG_UNREGISTERED) 12758 return -ENODEV; 12759 12760 rc = bnxt_fw_init_one(bp); 12761 if (!rc) { 12762 bnxt_clear_int_mode(bp); 12763 rc = bnxt_init_int_mode(bp); 12764 if (!rc) { 12765 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12766 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12767 } 12768 } 12769 return rc; 12770 } 12771 12772 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 12773 { 12774 struct bnxt_ntuple_filter *ntp_fltr; 12775 struct bnxt_l2_filter *l2_fltr; 12776 12777 if (list_empty(&fltr->list)) 12778 return; 12779 12780 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { 12781 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); 12782 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 12783 atomic_inc(&l2_fltr->refcnt); 12784 ntp_fltr->l2_fltr = l2_fltr; 12785 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { 12786 bnxt_del_ntp_filter(bp, ntp_fltr); 12787 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", 12788 fltr->sw_id); 12789 } 12790 } else if (fltr->type == BNXT_FLTR_TYPE_L2) { 12791 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); 12792 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { 12793 bnxt_del_l2_filter(bp, l2_fltr); 12794 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", 12795 fltr->sw_id); 12796 } 12797 } 12798 } 12799 12800 static void bnxt_cfg_usr_fltrs(struct bnxt *bp) 12801 { 12802 struct bnxt_filter_base *usr_fltr, *tmp; 12803 12804 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) 12805 bnxt_cfg_one_usr_fltr(bp, usr_fltr); 12806 } 12807 12808 static int bnxt_set_xps_mapping(struct bnxt *bp) 12809 { 12810 int numa_node = dev_to_node(&bp->pdev->dev); 12811 unsigned int q_idx, map_idx, cpu, i; 12812 const struct cpumask *cpu_mask_ptr; 12813 int nr_cpus = num_online_cpus(); 12814 cpumask_t *q_map; 12815 int rc = 0; 12816 12817 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); 12818 if (!q_map) 12819 return -ENOMEM; 12820 12821 /* Create CPU mask for all TX queues across MQPRIO traffic classes. 12822 * Each TC has the same number of TX queues. The nth TX queue for each 12823 * TC will have the same CPU mask. 
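	 * For example, with two TCs and tx_nr_rings_per_tc == 4, TX queues 0
	 * and 4 share q_map[0], queues 1 and 5 share q_map[1], and so on
	 * (map_idx = q_idx % tx_nr_rings_per_tc below).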
12824 */ 12825 for (i = 0; i < nr_cpus; i++) { 12826 map_idx = i % bp->tx_nr_rings_per_tc; 12827 cpu = cpumask_local_spread(i, numa_node); 12828 cpu_mask_ptr = get_cpu_mask(cpu); 12829 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); 12830 } 12831 12832 /* Register CPU mask for each TX queue except the ones marked for XDP */ 12833 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { 12834 map_idx = q_idx % bp->tx_nr_rings_per_tc; 12835 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); 12836 if (rc) { 12837 netdev_warn(bp->dev, "Error setting XPS for q:%d\n", 12838 q_idx); 12839 break; 12840 } 12841 } 12842 12843 kfree(q_map); 12844 12845 return rc; 12846 } 12847 12848 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12849 { 12850 int rc = 0; 12851 12852 netif_carrier_off(bp->dev); 12853 if (irq_re_init) { 12854 /* Reserve rings now if none were reserved at driver probe. */ 12855 rc = bnxt_init_dflt_ring_mode(bp); 12856 if (rc) { 12857 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 12858 return rc; 12859 } 12860 } 12861 rc = bnxt_reserve_rings(bp, irq_re_init); 12862 if (rc) 12863 return rc; 12864 12865 rc = bnxt_alloc_mem(bp, irq_re_init); 12866 if (rc) { 12867 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12868 goto open_err_free_mem; 12869 } 12870 12871 if (irq_re_init) { 12872 bnxt_init_napi(bp); 12873 rc = bnxt_request_irq(bp); 12874 if (rc) { 12875 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 12876 goto open_err_irq; 12877 } 12878 } 12879 12880 rc = bnxt_init_nic(bp, irq_re_init); 12881 if (rc) { 12882 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12883 goto open_err_irq; 12884 } 12885 12886 bnxt_enable_napi(bp); 12887 bnxt_debug_dev_init(bp); 12888 12889 if (link_re_init) { 12890 mutex_lock(&bp->link_lock); 12891 rc = bnxt_update_phy_setting(bp); 12892 mutex_unlock(&bp->link_lock); 12893 if (rc) { 12894 netdev_warn(bp->dev, "failed to update phy settings\n"); 12895 if (BNXT_SINGLE_PF(bp)) { 12896 bp->link_info.phy_retry = true; 12897 bp->link_info.phy_retry_expires = 12898 jiffies + 5 * HZ; 12899 } 12900 } 12901 } 12902 12903 if (irq_re_init) { 12904 udp_tunnel_nic_reset_ntf(bp->dev); 12905 rc = bnxt_set_xps_mapping(bp); 12906 if (rc) 12907 netdev_warn(bp->dev, "failed to set xps mapping\n"); 12908 } 12909 12910 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { 12911 if (!static_key_enabled(&bnxt_xdp_locking_key)) 12912 static_branch_enable(&bnxt_xdp_locking_key); 12913 } else if (static_key_enabled(&bnxt_xdp_locking_key)) { 12914 static_branch_disable(&bnxt_xdp_locking_key); 12915 } 12916 set_bit(BNXT_STATE_OPEN, &bp->state); 12917 bnxt_enable_int(bp); 12918 /* Enable TX queues */ 12919 bnxt_tx_enable(bp); 12920 mod_timer(&bp->timer, jiffies + bp->current_interval); 12921 /* Poll link status and check for SFP+ module status */ 12922 mutex_lock(&bp->link_lock); 12923 bnxt_get_port_module_status(bp); 12924 mutex_unlock(&bp->link_lock); 12925 12926 /* VF-reps may need to be re-opened after the PF is re-opened */ 12927 if (BNXT_PF(bp)) 12928 bnxt_vf_reps_open(bp); 12929 bnxt_ptp_init_rtc(bp, true); 12930 bnxt_ptp_cfg_tstamp_filters(bp); 12931 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 12932 bnxt_hwrm_realloc_rss_ctx_vnic(bp); 12933 bnxt_cfg_usr_fltrs(bp); 12934 return 0; 12935 12936 open_err_irq: 12937 bnxt_del_napi(bp); 12938 12939 open_err_free_mem: 12940 bnxt_free_skbs(bp); 12941 bnxt_free_irq(bp); 12942 bnxt_free_mem(bp, true); 12943 return rc; 12944 } 12945 12946 int 
bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12947 { 12948 int rc = 0; 12949 12950 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 12951 rc = -EIO; 12952 if (!rc) 12953 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 12954 if (rc) { 12955 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 12956 netif_close(bp->dev); 12957 } 12958 return rc; 12959 } 12960 12961 /* netdev instance lock held, open the NIC half way by allocating all 12962 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used 12963 * for offline self tests. 12964 */ 12965 int bnxt_half_open_nic(struct bnxt *bp) 12966 { 12967 int rc = 0; 12968 12969 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12970 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); 12971 rc = -ENODEV; 12972 goto half_open_err; 12973 } 12974 12975 rc = bnxt_alloc_mem(bp, true); 12976 if (rc) { 12977 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12978 goto half_open_err; 12979 } 12980 bnxt_init_napi(bp); 12981 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12982 rc = bnxt_init_nic(bp, true); 12983 if (rc) { 12984 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12985 bnxt_del_napi(bp); 12986 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12987 goto half_open_err; 12988 } 12989 return 0; 12990 12991 half_open_err: 12992 bnxt_free_skbs(bp); 12993 bnxt_free_mem(bp, true); 12994 netif_close(bp->dev); 12995 return rc; 12996 } 12997 12998 /* netdev instance lock held, this call can only be made after a previous 12999 * successful call to bnxt_half_open_nic(). 13000 */ 13001 void bnxt_half_close_nic(struct bnxt *bp) 13002 { 13003 bnxt_hwrm_resource_free(bp, false, true); 13004 bnxt_del_napi(bp); 13005 bnxt_free_skbs(bp); 13006 bnxt_free_mem(bp, true); 13007 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 13008 } 13009 13010 void bnxt_reenable_sriov(struct bnxt *bp) 13011 { 13012 if (BNXT_PF(bp)) { 13013 struct bnxt_pf_info *pf = &bp->pf; 13014 int n = pf->active_vfs; 13015 13016 if (n) 13017 bnxt_cfg_hw_sriov(bp, &n, true); 13018 } 13019 } 13020 13021 static int bnxt_open(struct net_device *dev) 13022 { 13023 struct bnxt *bp = netdev_priv(dev); 13024 int rc; 13025 13026 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 13027 rc = bnxt_reinit_after_abort(bp); 13028 if (rc) { 13029 if (rc == -EBUSY) 13030 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 13031 else 13032 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 13033 return -ENODEV; 13034 } 13035 } 13036 13037 rc = bnxt_hwrm_if_change(bp, true); 13038 if (rc) 13039 return rc; 13040 13041 rc = __bnxt_open_nic(bp, true, true); 13042 if (rc) { 13043 bnxt_hwrm_if_change(bp, false); 13044 } else { 13045 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 13046 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 13047 bnxt_queue_sp_work(bp, 13048 BNXT_RESTART_ULP_SP_EVENT); 13049 } 13050 } 13051 13052 return rc; 13053 } 13054 13055 static bool bnxt_drv_busy(struct bnxt *bp) 13056 { 13057 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 13058 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 13059 } 13060 13061 static void bnxt_get_ring_stats(struct bnxt *bp, 13062 struct rtnl_link_stats64 *stats); 13063 13064 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 13065 bool link_re_init) 13066 { 13067 /* Close the VF-reps before closing PF */ 13068 if (BNXT_PF(bp)) 13069 bnxt_vf_reps_close(bp); 13070 13071 /* Change device state to avoid TX queue 
wake-ups */
	bnxt_tx_disable(bp);

	clear_bit(BNXT_STATE_OPEN, &bp->state);
	smp_mb__after_atomic();
	while (bnxt_drv_busy(bp))
		msleep(20);

	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
		bnxt_clear_rss_ctxs(bp);
	/* Flush rings and disable interrupts */
	bnxt_shutdown_nic(bp, irq_re_init);

	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */

	bnxt_debug_dev_exit(bp);
	bnxt_disable_napi(bp);
	timer_delete_sync(&bp->timer);
	bnxt_free_skbs(bp);

	/* Save ring stats before shutdown */
	if (bp->bnapi && irq_re_init) {
		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
		bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
	}
	if (irq_re_init) {
		bnxt_free_irq(bp);
		bnxt_del_napi(bp);
	}
	bnxt_free_mem(bp, irq_re_init);
}

void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		/* If we get here, it means firmware reset is in progress
		 * while we are trying to close. We can safely proceed with
		 * the close because we are holding netdev instance lock.
		 * Some firmware messages may fail as we proceed to close.
		 * We set the ABORT_ERR flag here so that the FW reset thread
		 * will later abort when it gets the netdev instance lock
		 * and sees the flag.
		 */
		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
	}

#ifdef CONFIG_BNXT_SRIOV
	if (bp->sriov_cfg) {
		int rc;

		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
						      !bp->sriov_cfg,
						      BNXT_SRIOV_CFG_WAIT_TMO);
		if (!rc)
			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n");
		else if (rc < 0)
			netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n");
	}
#endif
	__bnxt_close_nic(bp, irq_re_init, link_re_init);
}

static int bnxt_close(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	bnxt_close_nic(bp, true, true);
	bnxt_hwrm_shutdown_link(bp);
	bnxt_hwrm_if_change(bp, false);
	return 0;
}

static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
				   u16 *val)
{
	struct hwrm_port_phy_mdio_read_output *resp;
	struct hwrm_port_phy_mdio_read_input *req;
	int rc;

	if (bp->hwrm_spec_code < 0x10a00)
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
	if (rc)
		return rc;

	req->port_id = cpu_to_le16(bp->pf.port_id);
	req->phy_addr = phy_addr;
	req->reg_addr = cpu_to_le16(reg & 0x1f);
	if (mdio_phy_id_is_c45(phy_addr)) {
		req->cl45_mdio = 1;
		req->phy_addr = mdio_phy_id_prtad(phy_addr);
		req->dev_addr = mdio_phy_id_devad(phy_addr);
		req->reg_addr = cpu_to_le16(reg);
	}

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*val = le16_to_cpu(resp->reg_data);
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
				    u16 val)
{
	struct hwrm_port_phy_mdio_write_input *req;
	int rc;

	if (bp->hwrm_spec_code < 0x10a00)
		return
-EOPNOTSUPP; 13184 13185 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); 13186 if (rc) 13187 return rc; 13188 13189 req->port_id = cpu_to_le16(bp->pf.port_id); 13190 req->phy_addr = phy_addr; 13191 req->reg_addr = cpu_to_le16(reg & 0x1f); 13192 if (mdio_phy_id_is_c45(phy_addr)) { 13193 req->cl45_mdio = 1; 13194 req->phy_addr = mdio_phy_id_prtad(phy_addr); 13195 req->dev_addr = mdio_phy_id_devad(phy_addr); 13196 req->reg_addr = cpu_to_le16(reg); 13197 } 13198 req->reg_data = cpu_to_le16(val); 13199 13200 return hwrm_req_send(bp, req); 13201 } 13202 13203 /* netdev instance lock held */ 13204 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13205 { 13206 struct mii_ioctl_data *mdio = if_mii(ifr); 13207 struct bnxt *bp = netdev_priv(dev); 13208 int rc; 13209 13210 switch (cmd) { 13211 case SIOCGMIIPHY: 13212 mdio->phy_id = bp->link_info.phy_addr; 13213 13214 fallthrough; 13215 case SIOCGMIIREG: { 13216 u16 mii_regval = 0; 13217 13218 if (!netif_running(dev)) 13219 return -EAGAIN; 13220 13221 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 13222 &mii_regval); 13223 mdio->val_out = mii_regval; 13224 return rc; 13225 } 13226 13227 case SIOCSMIIREG: 13228 if (!netif_running(dev)) 13229 return -EAGAIN; 13230 13231 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 13232 mdio->val_in); 13233 13234 case SIOCSHWTSTAMP: 13235 return bnxt_hwtstamp_set(dev, ifr); 13236 13237 case SIOCGHWTSTAMP: 13238 return bnxt_hwtstamp_get(dev, ifr); 13239 13240 default: 13241 /* do nothing */ 13242 break; 13243 } 13244 return -EOPNOTSUPP; 13245 } 13246 13247 static void bnxt_get_ring_stats(struct bnxt *bp, 13248 struct rtnl_link_stats64 *stats) 13249 { 13250 int i; 13251 13252 for (i = 0; i < bp->cp_nr_rings; i++) { 13253 struct bnxt_napi *bnapi = bp->bnapi[i]; 13254 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 13255 u64 *sw = cpr->stats.sw_stats; 13256 13257 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 13258 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 13259 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 13260 13261 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 13262 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 13263 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 13264 13265 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 13266 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 13267 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 13268 13269 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 13270 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 13271 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 13272 13273 stats->rx_missed_errors += 13274 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 13275 13276 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 13277 13278 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 13279 13280 stats->rx_dropped += 13281 cpr->sw_stats->rx.rx_netpoll_discards + 13282 cpr->sw_stats->rx.rx_oom_discards; 13283 } 13284 } 13285 13286 static void bnxt_add_prev_stats(struct bnxt *bp, 13287 struct rtnl_link_stats64 *stats) 13288 { 13289 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 13290 13291 stats->rx_packets += prev_stats->rx_packets; 13292 stats->tx_packets += prev_stats->tx_packets; 13293 stats->rx_bytes += prev_stats->rx_bytes; 13294 stats->tx_bytes += prev_stats->tx_bytes; 13295 stats->rx_missed_errors += 
prev_stats->rx_missed_errors; 13296 stats->multicast += prev_stats->multicast; 13297 stats->rx_dropped += prev_stats->rx_dropped; 13298 stats->tx_dropped += prev_stats->tx_dropped; 13299 } 13300 13301 static void 13302 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 13303 { 13304 struct bnxt *bp = netdev_priv(dev); 13305 13306 set_bit(BNXT_STATE_READ_STATS, &bp->state); 13307 /* Make sure bnxt_close_nic() sees that we are reading stats before 13308 * we check the BNXT_STATE_OPEN flag. 13309 */ 13310 smp_mb__after_atomic(); 13311 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13312 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 13313 *stats = bp->net_stats_prev; 13314 return; 13315 } 13316 13317 bnxt_get_ring_stats(bp, stats); 13318 bnxt_add_prev_stats(bp, stats); 13319 13320 if (bp->flags & BNXT_FLAG_PORT_STATS) { 13321 u64 *rx = bp->port_stats.sw_stats; 13322 u64 *tx = bp->port_stats.sw_stats + 13323 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 13324 13325 stats->rx_crc_errors = 13326 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 13327 stats->rx_frame_errors = 13328 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 13329 stats->rx_length_errors = 13330 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 13331 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 13332 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 13333 stats->rx_errors = 13334 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 13335 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 13336 stats->collisions = 13337 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 13338 stats->tx_fifo_errors = 13339 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 13340 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 13341 } 13342 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 13343 } 13344 13345 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, 13346 struct bnxt_total_ring_err_stats *stats, 13347 struct bnxt_cp_ring_info *cpr) 13348 { 13349 struct bnxt_sw_stats *sw_stats = cpr->sw_stats; 13350 u64 *hw_stats = cpr->stats.sw_stats; 13351 13352 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; 13353 stats->rx_total_resets += sw_stats->rx.rx_resets; 13354 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; 13355 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; 13356 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; 13357 stats->rx_total_ring_discards += 13358 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); 13359 stats->tx_total_resets += sw_stats->tx.tx_resets; 13360 stats->tx_total_ring_discards += 13361 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); 13362 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; 13363 } 13364 13365 void bnxt_get_ring_err_stats(struct bnxt *bp, 13366 struct bnxt_total_ring_err_stats *stats) 13367 { 13368 int i; 13369 13370 for (i = 0; i < bp->cp_nr_rings; i++) 13371 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); 13372 } 13373 13374 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 13375 { 13376 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13377 struct net_device *dev = bp->dev; 13378 struct netdev_hw_addr *ha; 13379 u8 *haddr; 13380 int mc_count = 0; 13381 bool update = false; 13382 int off = 0; 13383 13384 netdev_for_each_mc_addr(ha, dev) { 13385 if (mc_count >= BNXT_MAX_MC_ADDRS) { 13386 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 13387 vnic->mc_list_count = 0; 13388 return false; 13389 } 13390 haddr = ha->addr; 13391 if (!ether_addr_equal(haddr, vnic->mc_list + off)) 
{ 13392 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 13393 update = true; 13394 } 13395 off += ETH_ALEN; 13396 mc_count++; 13397 } 13398 if (mc_count) 13399 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 13400 13401 if (mc_count != vnic->mc_list_count) { 13402 vnic->mc_list_count = mc_count; 13403 update = true; 13404 } 13405 return update; 13406 } 13407 13408 static bool bnxt_uc_list_updated(struct bnxt *bp) 13409 { 13410 struct net_device *dev = bp->dev; 13411 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13412 struct netdev_hw_addr *ha; 13413 int off = 0; 13414 13415 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 13416 return true; 13417 13418 netdev_for_each_uc_addr(ha, dev) { 13419 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 13420 return true; 13421 13422 off += ETH_ALEN; 13423 } 13424 return false; 13425 } 13426 13427 static void bnxt_set_rx_mode(struct net_device *dev) 13428 { 13429 struct bnxt *bp = netdev_priv(dev); 13430 struct bnxt_vnic_info *vnic; 13431 bool mc_update = false; 13432 bool uc_update; 13433 u32 mask; 13434 13435 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 13436 return; 13437 13438 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13439 mask = vnic->rx_mask; 13440 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 13441 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 13442 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 13443 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 13444 13445 if (dev->flags & IFF_PROMISC) 13446 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 13447 13448 uc_update = bnxt_uc_list_updated(bp); 13449 13450 if (dev->flags & IFF_BROADCAST) 13451 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 13452 if (dev->flags & IFF_ALLMULTI) { 13453 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 13454 vnic->mc_list_count = 0; 13455 } else if (dev->flags & IFF_MULTICAST) { 13456 mc_update = bnxt_mc_list_updated(bp, &mask); 13457 } 13458 13459 if (mask != vnic->rx_mask || uc_update || mc_update) { 13460 vnic->rx_mask = mask; 13461 13462 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 13463 } 13464 } 13465 13466 static int bnxt_cfg_rx_mode(struct bnxt *bp) 13467 { 13468 struct net_device *dev = bp->dev; 13469 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13470 struct netdev_hw_addr *ha; 13471 int i, off = 0, rc; 13472 bool uc_update; 13473 13474 netif_addr_lock_bh(dev); 13475 uc_update = bnxt_uc_list_updated(bp); 13476 netif_addr_unlock_bh(dev); 13477 13478 if (!uc_update) 13479 goto skip_uc; 13480 13481 for (i = 1; i < vnic->uc_filter_count; i++) { 13482 struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; 13483 13484 bnxt_hwrm_l2_filter_free(bp, fltr); 13485 bnxt_del_l2_filter(bp, fltr); 13486 } 13487 13488 vnic->uc_filter_count = 1; 13489 13490 netif_addr_lock_bh(dev); 13491 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 13492 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 13493 } else { 13494 netdev_for_each_uc_addr(ha, dev) { 13495 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 13496 off += ETH_ALEN; 13497 vnic->uc_filter_count++; 13498 } 13499 } 13500 netif_addr_unlock_bh(dev); 13501 13502 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 13503 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 13504 if (rc) { 13505 if (BNXT_VF(bp) && rc == -ENODEV) { 13506 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13507 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); 13508 else 13509 netdev_dbg(bp->dev, "PF still unavailable while configuring 
L2 filters.\n"); 13510 rc = 0; 13511 } else { 13512 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 13513 } 13514 vnic->uc_filter_count = i; 13515 return rc; 13516 } 13517 } 13518 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13519 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); 13520 13521 skip_uc: 13522 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && 13523 !bnxt_promisc_ok(bp)) 13524 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 13525 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 13526 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { 13527 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 13528 rc); 13529 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 13530 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 13531 vnic->mc_list_count = 0; 13532 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 13533 } 13534 if (rc) 13535 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 13536 rc); 13537 13538 return rc; 13539 } 13540 13541 static bool bnxt_can_reserve_rings(struct bnxt *bp) 13542 { 13543 #ifdef CONFIG_BNXT_SRIOV 13544 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 13545 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 13546 13547 /* No minimum rings were provisioned by the PF. Don't 13548 * reserve rings by default when device is down. 13549 */ 13550 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 13551 return true; 13552 13553 if (!netif_running(bp->dev)) 13554 return false; 13555 } 13556 #endif 13557 return true; 13558 } 13559 13560 /* If the chip and firmware supports RFS */ 13561 static bool bnxt_rfs_supported(struct bnxt *bp) 13562 { 13563 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 13564 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 13565 return true; 13566 return false; 13567 } 13568 /* 212 firmware is broken for aRFS */ 13569 if (BNXT_FW_MAJ(bp) == 212) 13570 return false; 13571 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 13572 return true; 13573 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 13574 return true; 13575 return false; 13576 } 13577 13578 /* If runtime conditions support RFS */ 13579 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) 13580 { 13581 struct bnxt_hw_rings hwr = {0}; 13582 int max_vnics, max_rss_ctxs; 13583 13584 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 13585 !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 13586 return bnxt_rfs_supported(bp); 13587 13588 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) 13589 return false; 13590 13591 hwr.grp = bp->rx_nr_rings; 13592 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); 13593 if (new_rss_ctx) 13594 hwr.vnic++; 13595 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 13596 max_vnics = bnxt_get_max_func_vnics(bp); 13597 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 13598 13599 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { 13600 if (bp->rx_nr_rings > 1) 13601 netdev_warn(bp->dev, 13602 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 13603 min(max_rss_ctxs - 1, max_vnics - 1)); 13604 return false; 13605 } 13606 13607 if (!BNXT_NEW_RM(bp)) 13608 return true; 13609 13610 /* Do not reduce VNIC and RSS ctx reservations. There is a FW 13611 * issue that will mess up the default VNIC if we reduce the 13612 * reservations. 
13613 */ 13614 if (hwr.vnic <= bp->hw_resc.resv_vnics && 13615 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 13616 return true; 13617 13618 bnxt_hwrm_reserve_rings(bp, &hwr); 13619 if (hwr.vnic <= bp->hw_resc.resv_vnics && 13620 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 13621 return true; 13622 13623 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 13624 hwr.vnic = 1; 13625 hwr.rss_ctx = 0; 13626 bnxt_hwrm_reserve_rings(bp, &hwr); 13627 return false; 13628 } 13629 13630 static netdev_features_t bnxt_fix_features(struct net_device *dev, 13631 netdev_features_t features) 13632 { 13633 struct bnxt *bp = netdev_priv(dev); 13634 netdev_features_t vlan_features; 13635 13636 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) 13637 features &= ~NETIF_F_NTUPLE; 13638 13639 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) 13640 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 13641 13642 if (!(features & NETIF_F_GRO)) 13643 features &= ~NETIF_F_GRO_HW; 13644 13645 if (features & NETIF_F_GRO_HW) 13646 features &= ~NETIF_F_LRO; 13647 13648 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 13649 * turned on or off together. 13650 */ 13651 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 13652 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 13653 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 13654 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 13655 else if (vlan_features) 13656 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 13657 } 13658 #ifdef CONFIG_BNXT_SRIOV 13659 if (BNXT_VF(bp) && bp->vf.vlan) 13660 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 13661 #endif 13662 return features; 13663 } 13664 13665 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, 13666 bool link_re_init, u32 flags, bool update_tpa) 13667 { 13668 bnxt_close_nic(bp, irq_re_init, link_re_init); 13669 bp->flags = flags; 13670 if (update_tpa) 13671 bnxt_set_ring_params(bp); 13672 return bnxt_open_nic(bp, irq_re_init, link_re_init); 13673 } 13674 13675 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 13676 { 13677 bool update_tpa = false, update_ntuple = false; 13678 struct bnxt *bp = netdev_priv(dev); 13679 u32 flags = bp->flags; 13680 u32 changes; 13681 int rc = 0; 13682 bool re_init = false; 13683 13684 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 13685 if (features & NETIF_F_GRO_HW) 13686 flags |= BNXT_FLAG_GRO; 13687 else if (features & NETIF_F_LRO) 13688 flags |= BNXT_FLAG_LRO; 13689 13690 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 13691 flags &= ~BNXT_FLAG_TPA; 13692 13693 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 13694 flags |= BNXT_FLAG_STRIP_VLAN; 13695 13696 if (features & NETIF_F_NTUPLE) 13697 flags |= BNXT_FLAG_RFS; 13698 else 13699 bnxt_clear_usr_fltrs(bp, true); 13700 13701 changes = flags ^ bp->flags; 13702 if (changes & BNXT_FLAG_TPA) { 13703 update_tpa = true; 13704 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 13705 (flags & BNXT_FLAG_TPA) == 0 || 13706 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 13707 re_init = true; 13708 } 13709 13710 if (changes & ~BNXT_FLAG_TPA) 13711 re_init = true; 13712 13713 if (changes & BNXT_FLAG_RFS) 13714 update_ntuple = true; 13715 13716 if (flags != bp->flags) { 13717 u32 old_flags = bp->flags; 13718 13719 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13720 bp->flags = flags; 13721 if (update_tpa) 13722 bnxt_set_ring_params(bp); 13723 return rc; 13724 } 13725 13726 if (update_ntuple) 13727 return bnxt_reinit_features(bp, true, false, flags, update_tpa); 13728 13729 if (re_init) 13730 
return bnxt_reinit_features(bp, false, false, flags, update_tpa); 13731 13732 if (update_tpa) { 13733 bp->flags = flags; 13734 rc = bnxt_set_tpa(bp, 13735 (flags & BNXT_FLAG_TPA) ? 13736 true : false); 13737 if (rc) 13738 bp->flags = old_flags; 13739 } 13740 } 13741 return rc; 13742 } 13743 13744 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 13745 u8 **nextp) 13746 { 13747 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 13748 struct hop_jumbo_hdr *jhdr; 13749 int hdr_count = 0; 13750 u8 *nexthdr; 13751 int start; 13752 13753 /* Check that there are at most 2 IPv6 extension headers, no 13754 * fragment header, and each is <= 64 bytes. 13755 */ 13756 start = nw_off + sizeof(*ip6h); 13757 nexthdr = &ip6h->nexthdr; 13758 while (ipv6_ext_hdr(*nexthdr)) { 13759 struct ipv6_opt_hdr *hp; 13760 int hdrlen; 13761 13762 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 13763 *nexthdr == NEXTHDR_FRAGMENT) 13764 return false; 13765 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 13766 skb_headlen(skb), NULL); 13767 if (!hp) 13768 return false; 13769 if (*nexthdr == NEXTHDR_AUTH) 13770 hdrlen = ipv6_authlen(hp); 13771 else 13772 hdrlen = ipv6_optlen(hp); 13773 13774 if (hdrlen > 64) 13775 return false; 13776 13777 /* The ext header may be a hop-by-hop header inserted for 13778 * big TCP purposes. This will be removed before sending 13779 * from NIC, so do not count it. 13780 */ 13781 if (*nexthdr == NEXTHDR_HOP) { 13782 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) 13783 goto increment_hdr; 13784 13785 jhdr = (struct hop_jumbo_hdr *)hp; 13786 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || 13787 jhdr->nexthdr != IPPROTO_TCP) 13788 goto increment_hdr; 13789 13790 goto next_hdr; 13791 } 13792 increment_hdr: 13793 hdr_count++; 13794 next_hdr: 13795 nexthdr = &hp->nexthdr; 13796 start += hdrlen; 13797 } 13798 if (nextp) { 13799 /* Caller will check inner protocol */ 13800 if (skb->encapsulation) { 13801 *nextp = nexthdr; 13802 return true; 13803 } 13804 *nextp = NULL; 13805 } 13806 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 13807 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 13808 } 13809 13810 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. 
*/ 13811 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 13812 { 13813 struct udphdr *uh = udp_hdr(skb); 13814 __be16 udp_port = uh->dest; 13815 13816 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && 13817 udp_port != bp->vxlan_gpe_port) 13818 return false; 13819 if (skb->inner_protocol == htons(ETH_P_TEB)) { 13820 struct ethhdr *eh = inner_eth_hdr(skb); 13821 13822 switch (eh->h_proto) { 13823 case htons(ETH_P_IP): 13824 return true; 13825 case htons(ETH_P_IPV6): 13826 return bnxt_exthdr_check(bp, skb, 13827 skb_inner_network_offset(skb), 13828 NULL); 13829 } 13830 } else if (skb->inner_protocol == htons(ETH_P_IP)) { 13831 return true; 13832 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { 13833 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 13834 NULL); 13835 } 13836 return false; 13837 } 13838 13839 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 13840 { 13841 switch (l4_proto) { 13842 case IPPROTO_UDP: 13843 return bnxt_udp_tunl_check(bp, skb); 13844 case IPPROTO_IPIP: 13845 return true; 13846 case IPPROTO_GRE: { 13847 switch (skb->inner_protocol) { 13848 default: 13849 return false; 13850 case htons(ETH_P_IP): 13851 return true; 13852 case htons(ETH_P_IPV6): 13853 fallthrough; 13854 } 13855 } 13856 case IPPROTO_IPV6: 13857 /* Check ext headers of inner ipv6 */ 13858 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 13859 NULL); 13860 } 13861 return false; 13862 } 13863 13864 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 13865 struct net_device *dev, 13866 netdev_features_t features) 13867 { 13868 struct bnxt *bp = netdev_priv(dev); 13869 u8 *l4_proto; 13870 13871 features = vlan_features_check(skb, features); 13872 switch (vlan_get_protocol(skb)) { 13873 case htons(ETH_P_IP): 13874 if (!skb->encapsulation) 13875 return features; 13876 l4_proto = &ip_hdr(skb)->protocol; 13877 if (bnxt_tunl_check(bp, skb, *l4_proto)) 13878 return features; 13879 break; 13880 case htons(ETH_P_IPV6): 13881 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 13882 &l4_proto)) 13883 break; 13884 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 13885 return features; 13886 break; 13887 } 13888 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 13889 } 13890 13891 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 13892 u32 *reg_buf) 13893 { 13894 struct hwrm_dbg_read_direct_output *resp; 13895 struct hwrm_dbg_read_direct_input *req; 13896 __le32 *dbg_reg_buf; 13897 dma_addr_t mapping; 13898 int rc, i; 13899 13900 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); 13901 if (rc) 13902 return rc; 13903 13904 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, 13905 &mapping); 13906 if (!dbg_reg_buf) { 13907 rc = -ENOMEM; 13908 goto dbg_rd_reg_exit; 13909 } 13910 13911 req->host_dest_addr = cpu_to_le64(mapping); 13912 13913 resp = hwrm_req_hold(bp, req); 13914 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 13915 req->read_len32 = cpu_to_le32(num_words); 13916 13917 rc = hwrm_req_send(bp, req); 13918 if (rc || resp->error_code) { 13919 rc = -EIO; 13920 goto dbg_rd_reg_exit; 13921 } 13922 for (i = 0; i < num_words; i++) 13923 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 13924 13925 dbg_rd_reg_exit: 13926 hwrm_req_drop(bp, req); 13927 return rc; 13928 } 13929 13930 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 13931 u32 ring_id, u32 *prod, u32 *cons) 13932 { 13933 struct hwrm_dbg_ring_info_get_output *resp; 13934 
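	/* resp will carry the firmware's view of this ring's producer and
	 * consumer indices; it remains valid only while the HWRM request is
	 * held below.
	 */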
struct hwrm_dbg_ring_info_get_input *req; 13935 int rc; 13936 13937 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); 13938 if (rc) 13939 return rc; 13940 13941 req->ring_type = ring_type; 13942 req->fw_ring_id = cpu_to_le32(ring_id); 13943 resp = hwrm_req_hold(bp, req); 13944 rc = hwrm_req_send(bp, req); 13945 if (!rc) { 13946 *prod = le32_to_cpu(resp->producer_index); 13947 *cons = le32_to_cpu(resp->consumer_index); 13948 } 13949 hwrm_req_drop(bp, req); 13950 return rc; 13951 } 13952 13953 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 13954 { 13955 struct bnxt_tx_ring_info *txr; 13956 int i = bnapi->index, j; 13957 13958 bnxt_for_each_napi_tx(j, bnapi, txr) 13959 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 13960 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 13961 txr->tx_cons); 13962 } 13963 13964 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 13965 { 13966 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 13967 int i = bnapi->index; 13968 13969 if (!rxr) 13970 return; 13971 13972 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 13973 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 13974 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 13975 rxr->rx_sw_agg_prod); 13976 } 13977 13978 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 13979 { 13980 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 13981 int i = bnapi->index; 13982 13983 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 13984 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 13985 } 13986 13987 static void bnxt_dbg_dump_states(struct bnxt *bp) 13988 { 13989 int i; 13990 struct bnxt_napi *bnapi; 13991 13992 for (i = 0; i < bp->cp_nr_rings; i++) { 13993 bnapi = bp->bnapi[i]; 13994 if (netif_msg_drv(bp)) { 13995 bnxt_dump_tx_sw_state(bnapi); 13996 bnxt_dump_rx_sw_state(bnapi); 13997 bnxt_dump_cp_sw_state(bnapi); 13998 } 13999 } 14000 } 14001 14002 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 14003 { 14004 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 14005 struct hwrm_ring_reset_input *req; 14006 struct bnxt_napi *bnapi = rxr->bnapi; 14007 struct bnxt_cp_ring_info *cpr; 14008 u16 cp_ring_id; 14009 int rc; 14010 14011 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); 14012 if (rc) 14013 return rc; 14014 14015 cpr = &bnapi->cp_ring; 14016 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 14017 req->cmpl_ring = cpu_to_le16(cp_ring_id); 14018 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 14019 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 14020 return hwrm_req_send_silent(bp, req); 14021 } 14022 14023 static void bnxt_reset_task(struct bnxt *bp, bool silent) 14024 { 14025 if (!silent) 14026 bnxt_dbg_dump_states(bp); 14027 if (netif_running(bp->dev)) { 14028 bnxt_close_nic(bp, !silent, false); 14029 bnxt_open_nic(bp, !silent, false); 14030 } 14031 } 14032 14033 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 14034 { 14035 struct bnxt *bp = netdev_priv(dev); 14036 14037 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 14038 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 14039 } 14040 14041 static void bnxt_fw_health_check(struct bnxt *bp) 14042 { 14043 struct bnxt_fw_health *fw_health = bp->fw_health; 14044 struct pci_dev *pdev = bp->pdev; 14045 u32 val; 14046 14047 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 14048 return; 14049 14050 /* Make sure 
it is enabled before checking the tmr_counter. */ 14051 smp_rmb(); 14052 if (fw_health->tmr_counter) { 14053 fw_health->tmr_counter--; 14054 return; 14055 } 14056 14057 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 14058 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { 14059 fw_health->arrests++; 14060 goto fw_reset; 14061 } 14062 14063 fw_health->last_fw_heartbeat = val; 14064 14065 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 14066 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 14067 fw_health->discoveries++; 14068 goto fw_reset; 14069 } 14070 14071 fw_health->tmr_counter = fw_health->tmr_multiplier; 14072 return; 14073 14074 fw_reset: 14075 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); 14076 } 14077 14078 static void bnxt_timer(struct timer_list *t) 14079 { 14080 struct bnxt *bp = timer_container_of(bp, t, timer); 14081 struct net_device *dev = bp->dev; 14082 14083 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 14084 return; 14085 14086 if (atomic_read(&bp->intr_sem) != 0) 14087 goto bnxt_restart_timer; 14088 14089 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 14090 bnxt_fw_health_check(bp); 14091 14092 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) 14093 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); 14094 14095 if (bnxt_tc_flower_enabled(bp)) 14096 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); 14097 14098 #ifdef CONFIG_RFS_ACCEL 14099 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) 14100 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 14101 #endif /*CONFIG_RFS_ACCEL*/ 14102 14103 if (bp->link_info.phy_retry) { 14104 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 14105 bp->link_info.phy_retry = false; 14106 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 14107 } else { 14108 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); 14109 } 14110 } 14111 14112 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 14113 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 14114 14115 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) 14116 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); 14117 14118 bnxt_restart_timer: 14119 mod_timer(&bp->timer, jiffies + bp->current_interval); 14120 } 14121 14122 static void bnxt_lock_sp(struct bnxt *bp) 14123 { 14124 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 14125 * set. If the device is being closed, bnxt_close() may be holding 14126 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear. 14127 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev 14128 * instance lock. 
14129 */ 14130 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14131 netdev_lock(bp->dev); 14132 } 14133 14134 static void bnxt_unlock_sp(struct bnxt *bp) 14135 { 14136 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14137 netdev_unlock(bp->dev); 14138 } 14139 14140 /* Only called from bnxt_sp_task() */ 14141 static void bnxt_reset(struct bnxt *bp, bool silent) 14142 { 14143 bnxt_lock_sp(bp); 14144 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 14145 bnxt_reset_task(bp, silent); 14146 bnxt_unlock_sp(bp); 14147 } 14148 14149 /* Only called from bnxt_sp_task() */ 14150 static void bnxt_rx_ring_reset(struct bnxt *bp) 14151 { 14152 int i; 14153 14154 bnxt_lock_sp(bp); 14155 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 14156 bnxt_unlock_sp(bp); 14157 return; 14158 } 14159 /* Disable and flush TPA before resetting the RX ring */ 14160 if (bp->flags & BNXT_FLAG_TPA) 14161 bnxt_set_tpa(bp, false); 14162 for (i = 0; i < bp->rx_nr_rings; i++) { 14163 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 14164 struct bnxt_cp_ring_info *cpr; 14165 int rc; 14166 14167 if (!rxr->bnapi->in_reset) 14168 continue; 14169 14170 rc = bnxt_hwrm_rx_ring_reset(bp, i); 14171 if (rc) { 14172 if (rc == -EINVAL || rc == -EOPNOTSUPP) 14173 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 14174 else 14175 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 14176 rc); 14177 bnxt_reset_task(bp, true); 14178 break; 14179 } 14180 bnxt_free_one_rx_ring_skbs(bp, rxr); 14181 rxr->rx_prod = 0; 14182 rxr->rx_agg_prod = 0; 14183 rxr->rx_sw_agg_prod = 0; 14184 rxr->rx_next_cons = 0; 14185 rxr->bnapi->in_reset = false; 14186 bnxt_alloc_one_rx_ring(bp, i); 14187 cpr = &rxr->bnapi->cp_ring; 14188 cpr->sw_stats->rx.rx_resets++; 14189 if (bp->flags & BNXT_FLAG_AGG_RINGS) 14190 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 14191 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 14192 } 14193 if (bp->flags & BNXT_FLAG_TPA) 14194 bnxt_set_tpa(bp, true); 14195 bnxt_unlock_sp(bp); 14196 } 14197 14198 static void bnxt_fw_fatal_close(struct bnxt *bp) 14199 { 14200 bnxt_tx_disable(bp); 14201 bnxt_disable_napi(bp); 14202 bnxt_disable_int_sync(bp); 14203 bnxt_free_irq(bp); 14204 bnxt_clear_int_mode(bp); 14205 pci_disable_device(bp->pdev); 14206 } 14207 14208 static void bnxt_fw_reset_close(struct bnxt *bp) 14209 { 14210 /* When firmware is in fatal state, quiesce device and disable 14211 * bus master to prevent any potential bad DMAs before freeing 14212 * kernel memory. 
14213 */ 14214 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 14215 u16 val = 0; 14216 14217 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 14218 if (val == 0xffff) 14219 bp->fw_reset_min_dsecs = 0; 14220 bnxt_fw_fatal_close(bp); 14221 } 14222 __bnxt_close_nic(bp, true, false); 14223 bnxt_vf_reps_free(bp); 14224 bnxt_clear_int_mode(bp); 14225 bnxt_hwrm_func_drv_unrgtr(bp); 14226 if (pci_is_enabled(bp->pdev)) 14227 pci_disable_device(bp->pdev); 14228 bnxt_free_ctx_mem(bp, false); 14229 } 14230 14231 static bool is_bnxt_fw_ok(struct bnxt *bp) 14232 { 14233 struct bnxt_fw_health *fw_health = bp->fw_health; 14234 bool no_heartbeat = false, has_reset = false; 14235 u32 val; 14236 14237 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 14238 if (val == fw_health->last_fw_heartbeat) 14239 no_heartbeat = true; 14240 14241 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 14242 if (val != fw_health->last_fw_reset_cnt) 14243 has_reset = true; 14244 14245 if (!no_heartbeat && has_reset) 14246 return true; 14247 14248 return false; 14249 } 14250 14251 /* netdev instance lock is acquired before calling this function */ 14252 static void bnxt_force_fw_reset(struct bnxt *bp) 14253 { 14254 struct bnxt_fw_health *fw_health = bp->fw_health; 14255 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 14256 u32 wait_dsecs; 14257 14258 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 14259 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 14260 return; 14261 14262 /* we have to serialize with bnxt_refclk_read()*/ 14263 if (ptp) { 14264 unsigned long flags; 14265 14266 write_seqlock_irqsave(&ptp->ptp_lock, flags); 14267 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14268 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 14269 } else { 14270 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14271 } 14272 bnxt_fw_reset_close(bp); 14273 wait_dsecs = fw_health->master_func_wait_dsecs; 14274 if (fw_health->primary) { 14275 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 14276 wait_dsecs = 0; 14277 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 14278 } else { 14279 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 14280 wait_dsecs = fw_health->normal_func_wait_dsecs; 14281 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14282 } 14283 14284 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 14285 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 14286 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 14287 } 14288 14289 void bnxt_fw_exception(struct bnxt *bp) 14290 { 14291 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 14292 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 14293 bnxt_ulp_stop(bp); 14294 bnxt_lock_sp(bp); 14295 bnxt_force_fw_reset(bp); 14296 bnxt_unlock_sp(bp); 14297 } 14298 14299 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 14300 * < 0 on error. 
14301 */ 14302 static int bnxt_get_registered_vfs(struct bnxt *bp) 14303 { 14304 #ifdef CONFIG_BNXT_SRIOV 14305 int rc; 14306 14307 if (!BNXT_PF(bp)) 14308 return 0; 14309 14310 rc = bnxt_hwrm_func_qcfg(bp); 14311 if (rc) { 14312 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 14313 return rc; 14314 } 14315 if (bp->pf.registered_vfs) 14316 return bp->pf.registered_vfs; 14317 if (bp->sriov_cfg) 14318 return 1; 14319 #endif 14320 return 0; 14321 } 14322 14323 void bnxt_fw_reset(struct bnxt *bp) 14324 { 14325 bnxt_ulp_stop(bp); 14326 bnxt_lock_sp(bp); 14327 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 14328 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 14329 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 14330 int n = 0, tmo; 14331 14332 /* we have to serialize with bnxt_refclk_read()*/ 14333 if (ptp) { 14334 unsigned long flags; 14335 14336 write_seqlock_irqsave(&ptp->ptp_lock, flags); 14337 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14338 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 14339 } else { 14340 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14341 } 14342 if (bp->pf.active_vfs && 14343 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 14344 n = bnxt_get_registered_vfs(bp); 14345 if (n < 0) { 14346 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 14347 n); 14348 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14349 netif_close(bp->dev); 14350 goto fw_reset_exit; 14351 } else if (n > 0) { 14352 u16 vf_tmo_dsecs = n * 10; 14353 14354 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 14355 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 14356 bp->fw_reset_state = 14357 BNXT_FW_RESET_STATE_POLL_VF; 14358 bnxt_queue_fw_reset_work(bp, HZ / 10); 14359 goto fw_reset_exit; 14360 } 14361 bnxt_fw_reset_close(bp); 14362 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14363 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 14364 tmo = HZ / 10; 14365 } else { 14366 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14367 tmo = bp->fw_reset_min_dsecs * HZ / 10; 14368 } 14369 bnxt_queue_fw_reset_work(bp, tmo); 14370 } 14371 fw_reset_exit: 14372 bnxt_unlock_sp(bp); 14373 } 14374 14375 static void bnxt_chk_missed_irq(struct bnxt *bp) 14376 { 14377 int i; 14378 14379 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 14380 return; 14381 14382 for (i = 0; i < bp->cp_nr_rings; i++) { 14383 struct bnxt_napi *bnapi = bp->bnapi[i]; 14384 struct bnxt_cp_ring_info *cpr; 14385 u32 fw_ring_id; 14386 int j; 14387 14388 if (!bnapi) 14389 continue; 14390 14391 cpr = &bnapi->cp_ring; 14392 for (j = 0; j < cpr->cp_ring_count; j++) { 14393 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 14394 u32 val[2]; 14395 14396 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) 14397 continue; 14398 14399 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 14400 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 14401 continue; 14402 } 14403 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 14404 bnxt_dbg_hwrm_ring_info_get(bp, 14405 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 14406 fw_ring_id, &val[0], &val[1]); 14407 cpr->sw_stats->cmn.missed_irqs++; 14408 } 14409 } 14410 } 14411 14412 static void bnxt_cfg_ntp_filters(struct bnxt *); 14413 14414 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 14415 { 14416 struct bnxt_link_info *link_info = &bp->link_info; 14417 14418 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 14419 link_info->autoneg = BNXT_AUTONEG_SPEED; 14420 if (bp->hwrm_spec_code >= 0x10201) { 14421 if (link_info->auto_pause_setting & 14422 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 14423 link_info->autoneg 
|= BNXT_AUTONEG_FLOW_CTRL; 14424 } else { 14425 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 14426 } 14427 bnxt_set_auto_speed(link_info); 14428 } else { 14429 bnxt_set_force_speed(link_info); 14430 link_info->req_duplex = link_info->duplex_setting; 14431 } 14432 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 14433 link_info->req_flow_ctrl = 14434 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 14435 else 14436 link_info->req_flow_ctrl = link_info->force_pause_setting; 14437 } 14438 14439 static void bnxt_fw_echo_reply(struct bnxt *bp) 14440 { 14441 struct bnxt_fw_health *fw_health = bp->fw_health; 14442 struct hwrm_func_echo_response_input *req; 14443 int rc; 14444 14445 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); 14446 if (rc) 14447 return; 14448 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); 14449 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); 14450 hwrm_req_send(bp, req); 14451 } 14452 14453 static void bnxt_ulp_restart(struct bnxt *bp) 14454 { 14455 bnxt_ulp_stop(bp); 14456 bnxt_ulp_start(bp, 0); 14457 } 14458 14459 static void bnxt_sp_task(struct work_struct *work) 14460 { 14461 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 14462 14463 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14464 smp_mb__after_atomic(); 14465 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 14466 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14467 return; 14468 } 14469 14470 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { 14471 bnxt_ulp_restart(bp); 14472 bnxt_reenable_sriov(bp); 14473 } 14474 14475 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 14476 bnxt_cfg_rx_mode(bp); 14477 14478 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 14479 bnxt_cfg_ntp_filters(bp); 14480 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 14481 bnxt_hwrm_exec_fwd_req(bp); 14482 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 14483 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 14484 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 14485 bnxt_hwrm_port_qstats(bp, 0); 14486 bnxt_hwrm_port_qstats_ext(bp, 0); 14487 bnxt_accumulate_all_stats(bp); 14488 } 14489 14490 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 14491 int rc; 14492 14493 mutex_lock(&bp->link_lock); 14494 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 14495 &bp->sp_event)) 14496 bnxt_hwrm_phy_qcaps(bp); 14497 14498 rc = bnxt_update_link(bp, true); 14499 if (rc) 14500 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 14501 rc); 14502 14503 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 14504 &bp->sp_event)) 14505 bnxt_init_ethtool_link_settings(bp); 14506 mutex_unlock(&bp->link_lock); 14507 } 14508 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 14509 int rc; 14510 14511 mutex_lock(&bp->link_lock); 14512 rc = bnxt_update_phy_setting(bp); 14513 mutex_unlock(&bp->link_lock); 14514 if (rc) { 14515 netdev_warn(bp->dev, "update phy settings retry failed\n"); 14516 } else { 14517 bp->link_info.phy_retry = false; 14518 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 14519 } 14520 } 14521 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 14522 mutex_lock(&bp->link_lock); 14523 bnxt_get_port_module_status(bp); 14524 mutex_unlock(&bp->link_lock); 14525 } 14526 14527 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 14528 bnxt_tc_flow_stats_work(bp); 14529 14530 if 
(test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 14531 bnxt_chk_missed_irq(bp); 14532 14533 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 14534 bnxt_fw_echo_reply(bp); 14535 14536 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) 14537 bnxt_hwmon_notify_event(bp); 14538 14539 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 14540 * must be the last functions to be called before exiting. 14541 */ 14542 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 14543 bnxt_reset(bp, false); 14544 14545 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 14546 bnxt_reset(bp, true); 14547 14548 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 14549 bnxt_rx_ring_reset(bp); 14550 14551 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { 14552 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || 14553 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) 14554 bnxt_devlink_health_fw_report(bp); 14555 else 14556 bnxt_fw_reset(bp); 14557 } 14558 14559 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 14560 if (!is_bnxt_fw_ok(bp)) 14561 bnxt_devlink_health_fw_report(bp); 14562 } 14563 14564 smp_mb__before_atomic(); 14565 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14566 } 14567 14568 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 14569 int *max_cp); 14570 14571 /* Under netdev instance lock */ 14572 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 14573 int tx_xdp) 14574 { 14575 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 14576 struct bnxt_hw_rings hwr = {0}; 14577 int rx_rings = rx; 14578 int rc; 14579 14580 if (tcs) 14581 tx_sets = tcs; 14582 14583 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); 14584 14585 if (max_rx < rx_rings) 14586 return -ENOMEM; 14587 14588 if (bp->flags & BNXT_FLAG_AGG_RINGS) 14589 rx_rings <<= 1; 14590 14591 hwr.rx = rx_rings; 14592 hwr.tx = tx * tx_sets + tx_xdp; 14593 if (max_tx < hwr.tx) 14594 return -ENOMEM; 14595 14596 hwr.vnic = bnxt_get_total_vnics(bp, rx); 14597 14598 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); 14599 hwr.cp = sh ? 
max_t(int, tx_cp, rx) : tx_cp + rx; 14600 if (max_cp < hwr.cp) 14601 return -ENOMEM; 14602 hwr.stat = hwr.cp; 14603 if (BNXT_NEW_RM(bp)) { 14604 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); 14605 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); 14606 hwr.grp = rx; 14607 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 14608 } 14609 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 14610 hwr.cp_p5 = hwr.tx + rx; 14611 rc = bnxt_hwrm_check_rings(bp, &hwr); 14612 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) { 14613 if (!bnxt_ulp_registered(bp->edev)) { 14614 hwr.cp += bnxt_get_ulp_msix_num(bp); 14615 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp)); 14616 } 14617 if (hwr.cp > bp->total_irqs) { 14618 int total_msix = bnxt_change_msix(bp, hwr.cp); 14619 14620 if (total_msix < hwr.cp) { 14621 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n", 14622 hwr.cp, total_msix); 14623 rc = -ENOSPC; 14624 } 14625 } 14626 } 14627 return rc; 14628 } 14629 14630 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 14631 { 14632 if (bp->bar2) { 14633 pci_iounmap(pdev, bp->bar2); 14634 bp->bar2 = NULL; 14635 } 14636 14637 if (bp->bar1) { 14638 pci_iounmap(pdev, bp->bar1); 14639 bp->bar1 = NULL; 14640 } 14641 14642 if (bp->bar0) { 14643 pci_iounmap(pdev, bp->bar0); 14644 bp->bar0 = NULL; 14645 } 14646 } 14647 14648 static void bnxt_cleanup_pci(struct bnxt *bp) 14649 { 14650 bnxt_unmap_bars(bp, bp->pdev); 14651 pci_release_regions(bp->pdev); 14652 if (pci_is_enabled(bp->pdev)) 14653 pci_disable_device(bp->pdev); 14654 } 14655 14656 static void bnxt_init_dflt_coal(struct bnxt *bp) 14657 { 14658 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 14659 struct bnxt_coal *coal; 14660 u16 flags = 0; 14661 14662 if (coal_cap->cmpl_params & 14663 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 14664 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 14665 14666 /* Tick values in micro seconds. 14667 * 1 coal_buf x bufs_per_record = 1 completion record. 14668 */ 14669 coal = &bp->rx_coal; 14670 coal->coal_ticks = 10; 14671 coal->coal_bufs = 30; 14672 coal->coal_ticks_irq = 1; 14673 coal->coal_bufs_irq = 2; 14674 coal->idle_thresh = 50; 14675 coal->bufs_per_record = 2; 14676 coal->budget = 64; /* NAPI budget */ 14677 coal->flags = flags; 14678 14679 coal = &bp->tx_coal; 14680 coal->coal_ticks = 28; 14681 coal->coal_bufs = 30; 14682 coal->coal_ticks_irq = 2; 14683 coal->coal_bufs_irq = 2; 14684 coal->bufs_per_record = 1; 14685 coal->flags = flags; 14686 14687 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 14688 } 14689 14690 /* FW that pre-reserves 1 VNIC per function */ 14691 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) 14692 { 14693 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); 14694 14695 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 14696 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) 14697 return true; 14698 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 14699 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) 14700 return true; 14701 return false; 14702 } 14703 14704 static int bnxt_fw_init_one_p1(struct bnxt *bp) 14705 { 14706 int rc; 14707 14708 bp->fw_cap = 0; 14709 rc = bnxt_hwrm_ver_get(bp); 14710 /* FW may be unresponsive after FLR. FLR must complete within 100 msec 14711 * so wait before continuing with recovery. 
14712 */ 14713 if (rc) 14714 msleep(100); 14715 bnxt_try_map_fw_health_reg(bp); 14716 if (rc) { 14717 rc = bnxt_try_recover_fw(bp); 14718 if (rc) 14719 return rc; 14720 rc = bnxt_hwrm_ver_get(bp); 14721 if (rc) 14722 return rc; 14723 } 14724 14725 bnxt_nvm_cfg_ver_get(bp); 14726 14727 rc = bnxt_hwrm_func_reset(bp); 14728 if (rc) 14729 return -ENODEV; 14730 14731 bnxt_hwrm_fw_set_time(bp); 14732 return 0; 14733 } 14734 14735 static int bnxt_fw_init_one_p2(struct bnxt *bp) 14736 { 14737 int rc; 14738 14739 /* Get the MAX capabilities for this function */ 14740 rc = bnxt_hwrm_func_qcaps(bp); 14741 if (rc) { 14742 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 14743 rc); 14744 return -ENODEV; 14745 } 14746 14747 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 14748 if (rc) 14749 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 14750 rc); 14751 14752 if (bnxt_alloc_fw_health(bp)) { 14753 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 14754 } else { 14755 rc = bnxt_hwrm_error_recovery_qcfg(bp); 14756 if (rc) 14757 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 14758 rc); 14759 } 14760 14761 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 14762 if (rc) 14763 return -ENODEV; 14764 14765 rc = bnxt_alloc_crash_dump_mem(bp); 14766 if (rc) 14767 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n", 14768 rc); 14769 if (!rc) { 14770 rc = bnxt_hwrm_crash_dump_mem_cfg(bp); 14771 if (rc) { 14772 bnxt_free_crash_dump_mem(bp); 14773 netdev_warn(bp->dev, 14774 "hwrm crash dump mem failure rc: %d\n", rc); 14775 } 14776 } 14777 14778 if (bnxt_fw_pre_resv_vnics(bp)) 14779 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; 14780 14781 bnxt_hwrm_func_qcfg(bp); 14782 bnxt_hwrm_vnic_qcaps(bp); 14783 bnxt_hwrm_port_led_qcaps(bp); 14784 bnxt_ethtool_init(bp); 14785 if (bp->fw_cap & BNXT_FW_CAP_PTP) 14786 __bnxt_hwrm_ptp_qcfg(bp); 14787 bnxt_dcb_init(bp); 14788 bnxt_hwmon_init(bp); 14789 return 0; 14790 } 14791 14792 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 14793 { 14794 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; 14795 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 14796 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 14797 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 14798 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 14799 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 14800 bp->rss_hash_delta = bp->rss_hash_cfg; 14801 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 14802 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; 14803 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 14804 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 14805 } 14806 } 14807 14808 static void bnxt_set_dflt_rfs(struct bnxt *bp) 14809 { 14810 struct net_device *dev = bp->dev; 14811 14812 dev->hw_features &= ~NETIF_F_NTUPLE; 14813 dev->features &= ~NETIF_F_NTUPLE; 14814 bp->flags &= ~BNXT_FLAG_RFS; 14815 if (bnxt_rfs_supported(bp)) { 14816 dev->hw_features |= NETIF_F_NTUPLE; 14817 if (bnxt_rfs_capable(bp, false)) { 14818 bp->flags |= BNXT_FLAG_RFS; 14819 dev->features |= NETIF_F_NTUPLE; 14820 } 14821 } 14822 } 14823 14824 static void bnxt_fw_init_one_p3(struct bnxt *bp) 14825 { 14826 struct pci_dev *pdev = bp->pdev; 14827 14828 bnxt_set_dflt_rss_hash_type(bp); 14829 bnxt_set_dflt_rfs(bp); 14830 14831 bnxt_get_wol_settings(bp); 14832 if (bp->flags & BNXT_FLAG_WOL_CAP) 14833 device_set_wakeup_enable(&pdev->dev, bp->wol); 14834 else 14835 device_set_wakeup_capable(&pdev->dev, false); 14836 14837 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 14838 
bnxt_hwrm_coal_params_qcaps(bp); 14839 } 14840 14841 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); 14842 14843 int bnxt_fw_init_one(struct bnxt *bp) 14844 { 14845 int rc; 14846 14847 rc = bnxt_fw_init_one_p1(bp); 14848 if (rc) { 14849 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 14850 return rc; 14851 } 14852 rc = bnxt_fw_init_one_p2(bp); 14853 if (rc) { 14854 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 14855 return rc; 14856 } 14857 rc = bnxt_probe_phy(bp, false); 14858 if (rc) 14859 return rc; 14860 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 14861 if (rc) 14862 return rc; 14863 14864 bnxt_fw_init_one_p3(bp); 14865 return 0; 14866 } 14867 14868 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 14869 { 14870 struct bnxt_fw_health *fw_health = bp->fw_health; 14871 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 14872 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 14873 u32 reg_type, reg_off, delay_msecs; 14874 14875 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 14876 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 14877 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 14878 switch (reg_type) { 14879 case BNXT_FW_HEALTH_REG_TYPE_CFG: 14880 pci_write_config_dword(bp->pdev, reg_off, val); 14881 break; 14882 case BNXT_FW_HEALTH_REG_TYPE_GRC: 14883 writel(reg_off & BNXT_GRC_BASE_MASK, 14884 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 14885 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 14886 fallthrough; 14887 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 14888 writel(val, bp->bar0 + reg_off); 14889 break; 14890 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 14891 writel(val, bp->bar1 + reg_off); 14892 break; 14893 } 14894 if (delay_msecs) { 14895 pci_read_config_dword(bp->pdev, 0, &val); 14896 msleep(delay_msecs); 14897 } 14898 } 14899 14900 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) 14901 { 14902 struct hwrm_func_qcfg_output *resp; 14903 struct hwrm_func_qcfg_input *req; 14904 bool result = true; /* firmware will enforce if unknown */ 14905 14906 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) 14907 return result; 14908 14909 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) 14910 return result; 14911 14912 req->fid = cpu_to_le16(0xffff); 14913 resp = hwrm_req_hold(bp, req); 14914 if (!hwrm_req_send(bp, req)) 14915 result = !!(le16_to_cpu(resp->flags) & 14916 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); 14917 hwrm_req_drop(bp, req); 14918 return result; 14919 } 14920 14921 static void bnxt_reset_all(struct bnxt *bp) 14922 { 14923 struct bnxt_fw_health *fw_health = bp->fw_health; 14924 int i, rc; 14925 14926 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14927 bnxt_fw_reset_via_optee(bp); 14928 bp->fw_reset_timestamp = jiffies; 14929 return; 14930 } 14931 14932 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 14933 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 14934 bnxt_fw_reset_writel(bp, i); 14935 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 14936 struct hwrm_fw_reset_input *req; 14937 14938 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 14939 if (!rc) { 14940 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); 14941 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 14942 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 14943 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 14944 rc = hwrm_req_send(bp, req); 14945 } 14946 if (rc != -ENODEV) 14947 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 14948 } 14949 bp->fw_reset_timestamp = jiffies; 14950 } 14951 14952 static bool 
bnxt_fw_reset_timeout(struct bnxt *bp) 14953 { 14954 return time_after(jiffies, bp->fw_reset_timestamp + 14955 (bp->fw_reset_max_dsecs * HZ / 10)); 14956 } 14957 14958 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) 14959 { 14960 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14961 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 14962 bnxt_dl_health_fw_status_update(bp, false); 14963 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT; 14964 netif_close(bp->dev); 14965 } 14966 14967 static void bnxt_fw_reset_task(struct work_struct *work) 14968 { 14969 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 14970 int rc = 0; 14971 14972 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 14973 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 14974 return; 14975 } 14976 14977 switch (bp->fw_reset_state) { 14978 case BNXT_FW_RESET_STATE_POLL_VF: { 14979 int n = bnxt_get_registered_vfs(bp); 14980 int tmo; 14981 14982 if (n < 0) { 14983 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 14984 n, jiffies_to_msecs(jiffies - 14985 bp->fw_reset_timestamp)); 14986 goto fw_reset_abort; 14987 } else if (n > 0) { 14988 if (bnxt_fw_reset_timeout(bp)) { 14989 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14990 bp->fw_reset_state = 0; 14991 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 14992 n); 14993 goto ulp_start; 14994 } 14995 bnxt_queue_fw_reset_work(bp, HZ / 10); 14996 return; 14997 } 14998 bp->fw_reset_timestamp = jiffies; 14999 netdev_lock(bp->dev); 15000 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 15001 bnxt_fw_reset_abort(bp, rc); 15002 netdev_unlock(bp->dev); 15003 goto ulp_start; 15004 } 15005 bnxt_fw_reset_close(bp); 15006 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 15007 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 15008 tmo = HZ / 10; 15009 } else { 15010 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 15011 tmo = bp->fw_reset_min_dsecs * HZ / 10; 15012 } 15013 netdev_unlock(bp->dev); 15014 bnxt_queue_fw_reset_work(bp, tmo); 15015 return; 15016 } 15017 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 15018 u32 val; 15019 15020 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 15021 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 15022 !bnxt_fw_reset_timeout(bp)) { 15023 bnxt_queue_fw_reset_work(bp, HZ / 5); 15024 return; 15025 } 15026 15027 if (!bp->fw_health->primary) { 15028 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 15029 15030 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 15031 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 15032 return; 15033 } 15034 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 15035 } 15036 fallthrough; 15037 case BNXT_FW_RESET_STATE_RESET_FW: 15038 bnxt_reset_all(bp); 15039 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 15040 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 15041 return; 15042 case BNXT_FW_RESET_STATE_ENABLE_DEV: 15043 bnxt_inv_fw_health_reg(bp); 15044 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 15045 !bp->fw_reset_min_dsecs) { 15046 u16 val; 15047 15048 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 15049 if (val == 0xffff) { 15050 if (bnxt_fw_reset_timeout(bp)) { 15051 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 15052 rc = -ETIMEDOUT; 15053 goto fw_reset_abort; 15054 } 15055 bnxt_queue_fw_reset_work(bp, HZ / 1000); 15056 return; 15057 } 15058 } 15059 
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 15060 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 15061 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && 15062 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) 15063 bnxt_dl_remote_reload(bp); 15064 if (pci_enable_device(bp->pdev)) { 15065 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 15066 rc = -ENODEV; 15067 goto fw_reset_abort; 15068 } 15069 pci_set_master(bp->pdev); 15070 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 15071 fallthrough; 15072 case BNXT_FW_RESET_STATE_POLL_FW: 15073 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 15074 rc = bnxt_hwrm_poll(bp); 15075 if (rc) { 15076 if (bnxt_fw_reset_timeout(bp)) { 15077 netdev_err(bp->dev, "Firmware reset aborted\n"); 15078 goto fw_reset_abort_status; 15079 } 15080 bnxt_queue_fw_reset_work(bp, HZ / 5); 15081 return; 15082 } 15083 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 15084 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 15085 fallthrough; 15086 case BNXT_FW_RESET_STATE_OPENING: 15087 while (!netdev_trylock(bp->dev)) { 15088 bnxt_queue_fw_reset_work(bp, HZ / 10); 15089 return; 15090 } 15091 rc = bnxt_open(bp->dev); 15092 if (rc) { 15093 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); 15094 bnxt_fw_reset_abort(bp, rc); 15095 netdev_unlock(bp->dev); 15096 goto ulp_start; 15097 } 15098 15099 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 15100 bp->fw_health->enabled) { 15101 bp->fw_health->last_fw_reset_cnt = 15102 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 15103 } 15104 bp->fw_reset_state = 0; 15105 /* Make sure fw_reset_state is 0 before clearing the flag */ 15106 smp_mb__before_atomic(); 15107 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 15108 bnxt_ptp_reapply_pps(bp); 15109 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); 15110 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { 15111 bnxt_dl_health_fw_recovery_done(bp); 15112 bnxt_dl_health_fw_status_update(bp, true); 15113 } 15114 netdev_unlock(bp->dev); 15115 bnxt_ulp_start(bp, 0); 15116 bnxt_reenable_sriov(bp); 15117 netdev_lock(bp->dev); 15118 bnxt_vf_reps_alloc(bp); 15119 bnxt_vf_reps_open(bp); 15120 netdev_unlock(bp->dev); 15121 break; 15122 } 15123 return; 15124 15125 fw_reset_abort_status: 15126 if (bp->fw_health->status_reliable || 15127 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 15128 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 15129 15130 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 15131 } 15132 fw_reset_abort: 15133 netdev_lock(bp->dev); 15134 bnxt_fw_reset_abort(bp, rc); 15135 netdev_unlock(bp->dev); 15136 ulp_start: 15137 bnxt_ulp_start(bp, rc); 15138 } 15139 15140 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 15141 { 15142 int rc; 15143 struct bnxt *bp = netdev_priv(dev); 15144 15145 SET_NETDEV_DEV(dev, &pdev->dev); 15146 15147 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 15148 rc = pci_enable_device(pdev); 15149 if (rc) { 15150 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 15151 goto init_err; 15152 } 15153 15154 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 15155 dev_err(&pdev->dev, 15156 "Cannot find PCI device base address, aborting\n"); 15157 rc = -ENODEV; 15158 goto init_err_disable; 15159 } 15160 15161 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 15162 if (rc) { 15163 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 15164 goto init_err_disable; 15165 } 15166 15167 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 15168 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 15169 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 15170 rc = -EIO; 15171 goto init_err_release; 15172 } 15173 15174 pci_set_master(pdev); 15175 15176 bp->dev = dev; 15177 bp->pdev = pdev; 15178 15179 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 15180 * determines the BAR size. 15181 */ 15182 bp->bar0 = pci_ioremap_bar(pdev, 0); 15183 if (!bp->bar0) { 15184 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 15185 rc = -ENOMEM; 15186 goto init_err_release; 15187 } 15188 15189 bp->bar2 = pci_ioremap_bar(pdev, 4); 15190 if (!bp->bar2) { 15191 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 15192 rc = -ENOMEM; 15193 goto init_err_release; 15194 } 15195 15196 INIT_WORK(&bp->sp_task, bnxt_sp_task); 15197 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 15198 15199 spin_lock_init(&bp->ntp_fltr_lock); 15200 #if BITS_PER_LONG == 32 15201 spin_lock_init(&bp->db_lock); 15202 #endif 15203 15204 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 15205 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 15206 15207 timer_setup(&bp->timer, bnxt_timer, 0); 15208 bp->current_interval = BNXT_TIMER_INTERVAL; 15209 15210 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 15211 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 15212 15213 clear_bit(BNXT_STATE_OPEN, &bp->state); 15214 return 0; 15215 15216 init_err_release: 15217 bnxt_unmap_bars(bp, pdev); 15218 pci_release_regions(pdev); 15219 15220 init_err_disable: 15221 pci_disable_device(pdev); 15222 15223 init_err: 15224 return rc; 15225 } 15226 15227 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 15228 { 15229 struct sockaddr *addr = p; 15230 struct bnxt *bp = netdev_priv(dev); 15231 int rc = 0; 15232 15233 netdev_assert_locked(dev); 15234 15235 if (!is_valid_ether_addr(addr->sa_data)) 15236 return -EADDRNOTAVAIL; 15237 15238 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 15239 return 0; 15240 15241 rc = bnxt_approve_mac(bp, addr->sa_data, true); 15242 if (rc) 15243 return rc; 15244 15245 eth_hw_addr_set(dev, addr->sa_data); 15246 bnxt_clear_usr_fltrs(bp, true); 15247 if (netif_running(dev)) { 15248 bnxt_close_nic(bp, false, false); 15249 rc = bnxt_open_nic(bp, false, false); 15250 } 15251 15252 return rc; 15253 } 15254 15255 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 15256 { 15257 struct bnxt *bp = netdev_priv(dev); 15258 15259 netdev_assert_locked(dev); 15260 15261 if (netif_running(dev)) 15262 bnxt_close_nic(bp, true, false); 15263 15264 WRITE_ONCE(dev->mtu, new_mtu); 15265 15266 /* MTU change may change the AGG ring settings if an XDP multi-buffer 15267 * program is attached. We need to set the AGG rings settings and 15268 * rx_skb_func accordingly. 
15269 */ 15270 if (READ_ONCE(bp->xdp_prog)) 15271 bnxt_set_rx_skb_mode(bp, true); 15272 15273 bnxt_set_ring_params(bp); 15274 15275 if (netif_running(dev)) 15276 return bnxt_open_nic(bp, true, false); 15277 15278 return 0; 15279 } 15280 15281 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 15282 { 15283 struct bnxt *bp = netdev_priv(dev); 15284 bool sh = false; 15285 int rc, tx_cp; 15286 15287 if (tc > bp->max_tc) { 15288 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 15289 tc, bp->max_tc); 15290 return -EINVAL; 15291 } 15292 15293 if (bp->num_tc == tc) 15294 return 0; 15295 15296 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 15297 sh = true; 15298 15299 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 15300 sh, tc, bp->tx_nr_rings_xdp); 15301 if (rc) 15302 return rc; 15303 15304 /* Needs to close the device and do hw resource re-allocations */ 15305 if (netif_running(bp->dev)) 15306 bnxt_close_nic(bp, true, false); 15307 15308 if (tc) { 15309 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 15310 netdev_set_num_tc(dev, tc); 15311 bp->num_tc = tc; 15312 } else { 15313 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 15314 netdev_reset_tc(dev); 15315 bp->num_tc = 0; 15316 } 15317 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 15318 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 15319 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : 15320 tx_cp + bp->rx_nr_rings; 15321 15322 if (netif_running(bp->dev)) 15323 return bnxt_open_nic(bp, true, false); 15324 15325 return 0; 15326 } 15327 15328 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 15329 void *cb_priv) 15330 { 15331 struct bnxt *bp = cb_priv; 15332 15333 if (!bnxt_tc_flower_enabled(bp) || 15334 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 15335 return -EOPNOTSUPP; 15336 15337 switch (type) { 15338 case TC_SETUP_CLSFLOWER: 15339 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 15340 default: 15341 return -EOPNOTSUPP; 15342 } 15343 } 15344 15345 LIST_HEAD(bnxt_block_cb_list); 15346 15347 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 15348 void *type_data) 15349 { 15350 struct bnxt *bp = netdev_priv(dev); 15351 15352 switch (type) { 15353 case TC_SETUP_BLOCK: 15354 return flow_block_cb_setup_simple(type_data, 15355 &bnxt_block_cb_list, 15356 bnxt_setup_tc_block_cb, 15357 bp, bp, true); 15358 case TC_SETUP_QDISC_MQPRIO: { 15359 struct tc_mqprio_qopt *mqprio = type_data; 15360 15361 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 15362 15363 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 15364 } 15365 default: 15366 return -EOPNOTSUPP; 15367 } 15368 } 15369 15370 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, 15371 const struct sk_buff *skb) 15372 { 15373 struct bnxt_vnic_info *vnic; 15374 15375 if (skb) 15376 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 15377 15378 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 15379 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); 15380 } 15381 15382 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, 15383 u32 idx) 15384 { 15385 struct hlist_head *head; 15386 int bit_id; 15387 15388 spin_lock_bh(&bp->ntp_fltr_lock); 15389 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); 15390 if (bit_id < 0) { 15391 spin_unlock_bh(&bp->ntp_fltr_lock); 15392 return -ENOMEM; 15393 } 15394 15395 fltr->base.sw_id = (u16)bit_id; 15396 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; 15397 fltr->base.flags |= BNXT_ACT_RING_DST; 15398 head = 
&bp->ntp_fltr_hash_tbl[idx]; 15399 hlist_add_head_rcu(&fltr->base.hash, head); 15400 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 15401 bnxt_insert_usr_fltr(bp, &fltr->base); 15402 bp->ntp_fltr_count++; 15403 spin_unlock_bh(&bp->ntp_fltr_lock); 15404 return 0; 15405 } 15406 15407 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 15408 struct bnxt_ntuple_filter *f2) 15409 { 15410 struct bnxt_flow_masks *masks1 = &f1->fmasks; 15411 struct bnxt_flow_masks *masks2 = &f2->fmasks; 15412 struct flow_keys *keys1 = &f1->fkeys; 15413 struct flow_keys *keys2 = &f2->fkeys; 15414 15415 if (keys1->basic.n_proto != keys2->basic.n_proto || 15416 keys1->basic.ip_proto != keys2->basic.ip_proto) 15417 return false; 15418 15419 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 15420 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 15421 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src || 15422 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst || 15423 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst) 15424 return false; 15425 } else { 15426 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src, 15427 &keys2->addrs.v6addrs.src) || 15428 !ipv6_addr_equal(&masks1->addrs.v6addrs.src, 15429 &masks2->addrs.v6addrs.src) || 15430 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst, 15431 &keys2->addrs.v6addrs.dst) || 15432 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst, 15433 &masks2->addrs.v6addrs.dst)) 15434 return false; 15435 } 15436 15437 return keys1->ports.src == keys2->ports.src && 15438 masks1->ports.src == masks2->ports.src && 15439 keys1->ports.dst == keys2->ports.dst && 15440 masks1->ports.dst == masks2->ports.dst && 15441 keys1->control.flags == keys2->control.flags && 15442 f1->l2_fltr == f2->l2_fltr; 15443 } 15444 15445 struct bnxt_ntuple_filter * 15446 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, 15447 struct bnxt_ntuple_filter *fltr, u32 idx) 15448 { 15449 struct bnxt_ntuple_filter *f; 15450 struct hlist_head *head; 15451 15452 head = &bp->ntp_fltr_hash_tbl[idx]; 15453 hlist_for_each_entry_rcu(f, head, base.hash) { 15454 if (bnxt_fltr_match(f, fltr)) 15455 return f; 15456 } 15457 return NULL; 15458 } 15459 15460 #ifdef CONFIG_RFS_ACCEL 15461 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 15462 u16 rxq_index, u32 flow_id) 15463 { 15464 struct bnxt *bp = netdev_priv(dev); 15465 struct bnxt_ntuple_filter *fltr, *new_fltr; 15466 struct flow_keys *fkeys; 15467 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 15468 struct bnxt_l2_filter *l2_fltr; 15469 int rc = 0, idx; 15470 u32 flags; 15471 15472 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { 15473 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 15474 atomic_inc(&l2_fltr->refcnt); 15475 } else { 15476 struct bnxt_l2_key key; 15477 15478 ether_addr_copy(key.dst_mac_addr, eth->h_dest); 15479 key.vlan = 0; 15480 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); 15481 if (!l2_fltr) 15482 return -EINVAL; 15483 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { 15484 bnxt_del_l2_filter(bp, l2_fltr); 15485 return -EINVAL; 15486 } 15487 } 15488 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 15489 if (!new_fltr) { 15490 bnxt_del_l2_filter(bp, l2_fltr); 15491 return -ENOMEM; 15492 } 15493 15494 fkeys = &new_fltr->fkeys; 15495 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 15496 rc = -EPROTONOSUPPORT; 15497 goto err_free; 15498 } 15499 15500 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 15501 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 15502 ((fkeys->basic.ip_proto != 
IPPROTO_TCP) && 15503 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 15504 rc = -EPROTONOSUPPORT; 15505 goto err_free; 15506 } 15507 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL; 15508 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 15509 if (bp->hwrm_spec_code < 0x10601) { 15510 rc = -EPROTONOSUPPORT; 15511 goto err_free; 15512 } 15513 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL; 15514 } 15515 flags = fkeys->control.flags; 15516 if (((flags & FLOW_DIS_ENCAPSULATION) && 15517 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 15518 rc = -EPROTONOSUPPORT; 15519 goto err_free; 15520 } 15521 new_fltr->l2_fltr = l2_fltr; 15522 15523 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); 15524 rcu_read_lock(); 15525 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); 15526 if (fltr) { 15527 rc = fltr->base.sw_id; 15528 rcu_read_unlock(); 15529 goto err_free; 15530 } 15531 rcu_read_unlock(); 15532 15533 new_fltr->flow_id = flow_id; 15534 new_fltr->base.rxq = rxq_index; 15535 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); 15536 if (!rc) { 15537 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 15538 return new_fltr->base.sw_id; 15539 } 15540 15541 err_free: 15542 bnxt_del_l2_filter(bp, l2_fltr); 15543 kfree(new_fltr); 15544 return rc; 15545 } 15546 #endif 15547 15548 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) 15549 { 15550 spin_lock_bh(&bp->ntp_fltr_lock); 15551 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 15552 spin_unlock_bh(&bp->ntp_fltr_lock); 15553 return; 15554 } 15555 hlist_del_rcu(&fltr->base.hash); 15556 bnxt_del_one_usr_fltr(bp, &fltr->base); 15557 bp->ntp_fltr_count--; 15558 spin_unlock_bh(&bp->ntp_fltr_lock); 15559 bnxt_del_l2_filter(bp, fltr->l2_fltr); 15560 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 15561 kfree_rcu(fltr, base.rcu); 15562 } 15563 15564 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 15565 { 15566 #ifdef CONFIG_RFS_ACCEL 15567 int i; 15568 15569 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 15570 struct hlist_head *head; 15571 struct hlist_node *tmp; 15572 struct bnxt_ntuple_filter *fltr; 15573 int rc; 15574 15575 head = &bp->ntp_fltr_hash_tbl[i]; 15576 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 15577 bool del = false; 15578 15579 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { 15580 if (fltr->base.flags & BNXT_ACT_NO_AGING) 15581 continue; 15582 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, 15583 fltr->flow_id, 15584 fltr->base.sw_id)) { 15585 bnxt_hwrm_cfa_ntuple_filter_free(bp, 15586 fltr); 15587 del = true; 15588 } 15589 } else { 15590 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 15591 fltr); 15592 if (rc) 15593 del = true; 15594 else 15595 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 15596 } 15597 15598 if (del) 15599 bnxt_del_ntp_filter(bp, fltr); 15600 } 15601 } 15602 #endif 15603 } 15604 15605 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, 15606 unsigned int entry, struct udp_tunnel_info *ti) 15607 { 15608 struct bnxt *bp = netdev_priv(netdev); 15609 unsigned int cmd; 15610 15611 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 15612 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 15613 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 15614 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE; 15615 else 15616 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE; 15617 15618 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); 15619 } 15620 15621 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, 15622 
unsigned int entry, struct udp_tunnel_info *ti) 15623 { 15624 struct bnxt *bp = netdev_priv(netdev); 15625 unsigned int cmd; 15626 15627 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 15628 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 15629 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 15630 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 15631 else 15632 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE; 15633 15634 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 15635 } 15636 15637 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 15638 .set_port = bnxt_udp_tunnel_set_port, 15639 .unset_port = bnxt_udp_tunnel_unset_port, 15640 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 15641 .tables = { 15642 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 15643 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 15644 }, 15645 }, bnxt_udp_tunnels_p7 = { 15646 .set_port = bnxt_udp_tunnel_set_port, 15647 .unset_port = bnxt_udp_tunnel_unset_port, 15648 .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 15649 .tables = { 15650 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 15651 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 15652 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, 15653 }, 15654 }; 15655 15656 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 15657 struct net_device *dev, u32 filter_mask, 15658 int nlflags) 15659 { 15660 struct bnxt *bp = netdev_priv(dev); 15661 15662 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 15663 nlflags, filter_mask, NULL); 15664 } 15665 15666 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 15667 u16 flags, struct netlink_ext_ack *extack) 15668 { 15669 struct bnxt *bp = netdev_priv(dev); 15670 struct nlattr *attr, *br_spec; 15671 int rem, rc = 0; 15672 15673 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 15674 return -EOPNOTSUPP; 15675 15676 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 15677 if (!br_spec) 15678 return -EINVAL; 15679 15680 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { 15681 u16 mode; 15682 15683 mode = nla_get_u16(attr); 15684 if (mode == bp->br_mode) 15685 break; 15686 15687 rc = bnxt_hwrm_set_br_mode(bp, mode); 15688 if (!rc) 15689 bp->br_mode = mode; 15690 break; 15691 } 15692 return rc; 15693 } 15694 15695 int bnxt_get_port_parent_id(struct net_device *dev, 15696 struct netdev_phys_item_id *ppid) 15697 { 15698 struct bnxt *bp = netdev_priv(dev); 15699 15700 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 15701 return -EOPNOTSUPP; 15702 15703 /* The PF and it's VF-reps only support the switchdev framework */ 15704 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 15705 return -EOPNOTSUPP; 15706 15707 ppid->id_len = sizeof(bp->dsn); 15708 memcpy(ppid->id, bp->dsn, ppid->id_len); 15709 15710 return 0; 15711 } 15712 15713 static const struct net_device_ops bnxt_netdev_ops = { 15714 .ndo_open = bnxt_open, 15715 .ndo_start_xmit = bnxt_start_xmit, 15716 .ndo_stop = bnxt_close, 15717 .ndo_get_stats64 = bnxt_get_stats64, 15718 .ndo_set_rx_mode = bnxt_set_rx_mode, 15719 .ndo_eth_ioctl = bnxt_ioctl, 15720 .ndo_validate_addr = eth_validate_addr, 15721 .ndo_set_mac_address = bnxt_change_mac_addr, 15722 .ndo_change_mtu = bnxt_change_mtu, 15723 .ndo_fix_features = bnxt_fix_features, 15724 .ndo_set_features = bnxt_set_features, 15725 .ndo_features_check = bnxt_features_check, 15726 .ndo_tx_timeout = bnxt_tx_timeout, 15727 #ifdef CONFIG_BNXT_SRIOV 15728 
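/* VF administration hooks below are available only when SR-IOV
 * support (CONFIG_BNXT_SRIOV) is compiled in.
 */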
.ndo_get_vf_config = bnxt_get_vf_config, 15729 .ndo_set_vf_mac = bnxt_set_vf_mac, 15730 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 15731 .ndo_set_vf_rate = bnxt_set_vf_bw, 15732 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 15733 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 15734 .ndo_set_vf_trust = bnxt_set_vf_trust, 15735 #endif 15736 .ndo_setup_tc = bnxt_setup_tc, 15737 #ifdef CONFIG_RFS_ACCEL 15738 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 15739 #endif 15740 .ndo_bpf = bnxt_xdp, 15741 .ndo_xdp_xmit = bnxt_xdp_xmit, 15742 .ndo_bridge_getlink = bnxt_bridge_getlink, 15743 .ndo_bridge_setlink = bnxt_bridge_setlink, 15744 }; 15745 15746 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, 15747 struct netdev_queue_stats_rx *stats) 15748 { 15749 struct bnxt *bp = netdev_priv(dev); 15750 struct bnxt_cp_ring_info *cpr; 15751 u64 *sw; 15752 15753 if (!bp->bnapi) 15754 return; 15755 15756 cpr = &bp->bnapi[i]->cp_ring; 15757 sw = cpr->stats.sw_stats; 15758 15759 stats->packets = 0; 15760 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 15761 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 15762 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 15763 15764 stats->bytes = 0; 15765 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 15766 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 15767 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 15768 15769 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards; 15770 } 15771 15772 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, 15773 struct netdev_queue_stats_tx *stats) 15774 { 15775 struct bnxt *bp = netdev_priv(dev); 15776 struct bnxt_napi *bnapi; 15777 u64 *sw; 15778 15779 if (!bp->tx_ring) 15780 return; 15781 15782 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; 15783 sw = bnapi->cp_ring.stats.sw_stats; 15784 15785 stats->packets = 0; 15786 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 15787 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 15788 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 15789 15790 stats->bytes = 0; 15791 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 15792 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 15793 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 15794 } 15795 15796 static void bnxt_get_base_stats(struct net_device *dev, 15797 struct netdev_queue_stats_rx *rx, 15798 struct netdev_queue_stats_tx *tx) 15799 { 15800 struct bnxt *bp = netdev_priv(dev); 15801 15802 rx->packets = bp->net_stats_prev.rx_packets; 15803 rx->bytes = bp->net_stats_prev.rx_bytes; 15804 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards; 15805 15806 tx->packets = bp->net_stats_prev.tx_packets; 15807 tx->bytes = bp->net_stats_prev.tx_bytes; 15808 } 15809 15810 static const struct netdev_stat_ops bnxt_stat_ops = { 15811 .get_queue_stats_rx = bnxt_get_queue_stats_rx, 15812 .get_queue_stats_tx = bnxt_get_queue_stats_tx, 15813 .get_base_stats = bnxt_get_base_stats, 15814 }; 15815 15816 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) 15817 { 15818 struct bnxt_rx_ring_info *rxr, *clone; 15819 struct bnxt *bp = netdev_priv(dev); 15820 struct bnxt_ring_struct *ring; 15821 int rc; 15822 15823 if (!bp->rx_ring) 15824 return -ENETDOWN; 15825 15826 rxr = &bp->rx_ring[idx]; 15827 clone = qmem; 15828 memcpy(clone, rxr, sizeof(*rxr)); 15829 bnxt_init_rx_ring_struct(bp, clone); 15830 bnxt_reset_rx_ring_struct(bp, clone); 15831 15832 clone->rx_prod = 0; 15833 
clone->rx_agg_prod = 0; 15834 clone->rx_sw_agg_prod = 0; 15835 clone->rx_next_cons = 0; 15836 clone->need_head_pool = false; 15837 15838 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid); 15839 if (rc) 15840 return rc; 15841 15842 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0); 15843 if (rc < 0) 15844 goto err_page_pool_destroy; 15845 15846 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq, 15847 MEM_TYPE_PAGE_POOL, 15848 clone->page_pool); 15849 if (rc) 15850 goto err_rxq_info_unreg; 15851 15852 ring = &clone->rx_ring_struct; 15853 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 15854 if (rc) 15855 goto err_free_rx_ring; 15856 15857 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 15858 ring = &clone->rx_agg_ring_struct; 15859 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 15860 if (rc) 15861 goto err_free_rx_agg_ring; 15862 15863 rc = bnxt_alloc_rx_agg_bmap(bp, clone); 15864 if (rc) 15865 goto err_free_rx_agg_ring; 15866 } 15867 15868 if (bp->flags & BNXT_FLAG_TPA) { 15869 rc = bnxt_alloc_one_tpa_info(bp, clone); 15870 if (rc) 15871 goto err_free_tpa_info; 15872 } 15873 15874 bnxt_init_one_rx_ring_rxbd(bp, clone); 15875 bnxt_init_one_rx_agg_ring_rxbd(bp, clone); 15876 15877 bnxt_alloc_one_rx_ring_skb(bp, clone, idx); 15878 if (bp->flags & BNXT_FLAG_AGG_RINGS) 15879 bnxt_alloc_one_rx_ring_netmem(bp, clone, idx); 15880 if (bp->flags & BNXT_FLAG_TPA) 15881 bnxt_alloc_one_tpa_info_data(bp, clone); 15882 15883 return 0; 15884 15885 err_free_tpa_info: 15886 bnxt_free_one_tpa_info(bp, clone); 15887 err_free_rx_agg_ring: 15888 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem); 15889 err_free_rx_ring: 15890 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem); 15891 err_rxq_info_unreg: 15892 xdp_rxq_info_unreg(&clone->xdp_rxq); 15893 err_page_pool_destroy: 15894 page_pool_destroy(clone->page_pool); 15895 if (bnxt_separate_head_pool(clone)) 15896 page_pool_destroy(clone->head_pool); 15897 clone->page_pool = NULL; 15898 clone->head_pool = NULL; 15899 return rc; 15900 } 15901 15902 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) 15903 { 15904 struct bnxt_rx_ring_info *rxr = qmem; 15905 struct bnxt *bp = netdev_priv(dev); 15906 struct bnxt_ring_struct *ring; 15907 15908 bnxt_free_one_rx_ring_skbs(bp, rxr); 15909 bnxt_free_one_tpa_info(bp, rxr); 15910 15911 xdp_rxq_info_unreg(&rxr->xdp_rxq); 15912 15913 page_pool_destroy(rxr->page_pool); 15914 if (bnxt_separate_head_pool(rxr)) 15915 page_pool_destroy(rxr->head_pool); 15916 rxr->page_pool = NULL; 15917 rxr->head_pool = NULL; 15918 15919 ring = &rxr->rx_ring_struct; 15920 bnxt_free_ring(bp, &ring->ring_mem); 15921 15922 ring = &rxr->rx_agg_ring_struct; 15923 bnxt_free_ring(bp, &ring->ring_mem); 15924 15925 kfree(rxr->rx_agg_bmap); 15926 rxr->rx_agg_bmap = NULL; 15927 } 15928 15929 static void bnxt_copy_rx_ring(struct bnxt *bp, 15930 struct bnxt_rx_ring_info *dst, 15931 struct bnxt_rx_ring_info *src) 15932 { 15933 struct bnxt_ring_mem_info *dst_rmem, *src_rmem; 15934 struct bnxt_ring_struct *dst_ring, *src_ring; 15935 int i; 15936 15937 dst_ring = &dst->rx_ring_struct; 15938 dst_rmem = &dst_ring->ring_mem; 15939 src_ring = &src->rx_ring_struct; 15940 src_rmem = &src_ring->ring_mem; 15941 15942 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); 15943 WARN_ON(dst_rmem->page_size != src_rmem->page_size); 15944 WARN_ON(dst_rmem->flags != src_rmem->flags); 15945 WARN_ON(dst_rmem->depth != src_rmem->depth); 15946 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); 15947 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); 15948 15949 
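/* Hand the source ring's page table, vmem and data page pointers over
 * to the destination ring; the DMA pages themselves are not copied.
 */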
dst_rmem->pg_tbl = src_rmem->pg_tbl; 15950 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; 15951 *dst_rmem->vmem = *src_rmem->vmem; 15952 for (i = 0; i < dst_rmem->nr_pages; i++) { 15953 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; 15954 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; 15955 } 15956 15957 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 15958 return; 15959 15960 dst_ring = &dst->rx_agg_ring_struct; 15961 dst_rmem = &dst_ring->ring_mem; 15962 src_ring = &src->rx_agg_ring_struct; 15963 src_rmem = &src_ring->ring_mem; 15964 15965 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); 15966 WARN_ON(dst_rmem->page_size != src_rmem->page_size); 15967 WARN_ON(dst_rmem->flags != src_rmem->flags); 15968 WARN_ON(dst_rmem->depth != src_rmem->depth); 15969 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); 15970 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); 15971 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size); 15972 15973 dst_rmem->pg_tbl = src_rmem->pg_tbl; 15974 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; 15975 *dst_rmem->vmem = *src_rmem->vmem; 15976 for (i = 0; i < dst_rmem->nr_pages; i++) { 15977 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; 15978 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; 15979 } 15980 15981 dst->rx_agg_bmap = src->rx_agg_bmap; 15982 } 15983 15984 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) 15985 { 15986 struct bnxt *bp = netdev_priv(dev); 15987 struct bnxt_rx_ring_info *rxr, *clone; 15988 struct bnxt_cp_ring_info *cpr; 15989 struct bnxt_vnic_info *vnic; 15990 struct bnxt_napi *bnapi; 15991 int i, rc; 15992 u16 mru; 15993 15994 rxr = &bp->rx_ring[idx]; 15995 clone = qmem; 15996 15997 rxr->rx_prod = clone->rx_prod; 15998 rxr->rx_agg_prod = clone->rx_agg_prod; 15999 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod; 16000 rxr->rx_next_cons = clone->rx_next_cons; 16001 rxr->rx_tpa = clone->rx_tpa; 16002 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map; 16003 rxr->page_pool = clone->page_pool; 16004 rxr->head_pool = clone->head_pool; 16005 rxr->xdp_rxq = clone->xdp_rxq; 16006 rxr->need_head_pool = clone->need_head_pool; 16007 16008 bnxt_copy_rx_ring(bp, rxr, clone); 16009 16010 bnapi = rxr->bnapi; 16011 cpr = &bnapi->cp_ring; 16012 16013 /* All rings have been reserved and previously allocated. 16014 * Reallocating with the same parameters should never fail. 
16015 */ 16016 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); 16017 if (rc) 16018 goto err_reset; 16019 16020 if (bp->tph_mode) { 16021 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); 16022 if (rc) 16023 goto err_reset; 16024 } 16025 16026 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr); 16027 if (rc) 16028 goto err_reset; 16029 16030 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 16031 if (bp->flags & BNXT_FLAG_AGG_RINGS) 16032 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 16033 16034 if (bp->flags & BNXT_FLAG_SHARED_RINGS) { 16035 rc = bnxt_tx_queue_start(bp, idx); 16036 if (rc) 16037 goto err_reset; 16038 } 16039 16040 bnxt_enable_rx_page_pool(rxr); 16041 napi_enable_locked(&bnapi->napi); 16042 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 16043 16044 mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 16045 for (i = 0; i < bp->nr_vnics; i++) { 16046 vnic = &bp->vnic_info[i]; 16047 16048 rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx); 16049 if (rc) 16050 return rc; 16051 } 16052 return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx); 16053 16054 err_reset: 16055 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n", 16056 rc); 16057 napi_enable_locked(&bnapi->napi); 16058 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 16059 bnxt_reset_task(bp, true); 16060 return rc; 16061 } 16062 16063 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) 16064 { 16065 struct bnxt *bp = netdev_priv(dev); 16066 struct bnxt_rx_ring_info *rxr; 16067 struct bnxt_cp_ring_info *cpr; 16068 struct bnxt_vnic_info *vnic; 16069 struct bnxt_napi *bnapi; 16070 int i; 16071 16072 for (i = 0; i < bp->nr_vnics; i++) { 16073 vnic = &bp->vnic_info[i]; 16074 16075 bnxt_set_vnic_mru_p5(bp, vnic, 0, idx); 16076 } 16077 bnxt_set_rss_ctx_vnic_mru(bp, 0, idx); 16078 /* Make sure NAPI sees that the VNIC is disabled */ 16079 synchronize_net(); 16080 rxr = &bp->rx_ring[idx]; 16081 bnapi = rxr->bnapi; 16082 cpr = &bnapi->cp_ring; 16083 cancel_work_sync(&cpr->dim.work); 16084 bnxt_hwrm_rx_ring_free(bp, rxr, false); 16085 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); 16086 page_pool_disable_direct_recycling(rxr->page_pool); 16087 if (bnxt_separate_head_pool(rxr)) 16088 page_pool_disable_direct_recycling(rxr->head_pool); 16089 16090 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 16091 bnxt_tx_queue_stop(bp, idx); 16092 16093 /* Disable NAPI now after freeing the rings because HWRM_RING_FREE 16094 * completion is handled in NAPI to guarantee no more DMA on that ring 16095 * after seeing the completion. 
16096 */ 16097 napi_disable_locked(&bnapi->napi); 16098 16099 if (bp->tph_mode) { 16100 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr); 16101 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr); 16102 } 16103 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 16104 16105 memcpy(qmem, rxr, sizeof(*rxr)); 16106 bnxt_init_rx_ring_struct(bp, qmem); 16107 16108 return 0; 16109 } 16110 16111 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = { 16112 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info), 16113 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc, 16114 .ndo_queue_mem_free = bnxt_queue_mem_free, 16115 .ndo_queue_start = bnxt_queue_start, 16116 .ndo_queue_stop = bnxt_queue_stop, 16117 }; 16118 16119 static void bnxt_remove_one(struct pci_dev *pdev) 16120 { 16121 struct net_device *dev = pci_get_drvdata(pdev); 16122 struct bnxt *bp = netdev_priv(dev); 16123 16124 if (BNXT_PF(bp)) 16125 bnxt_sriov_disable(bp); 16126 16127 bnxt_rdma_aux_device_del(bp); 16128 16129 unregister_netdev(dev); 16130 bnxt_ptp_clear(bp); 16131 16132 bnxt_rdma_aux_device_uninit(bp); 16133 16134 bnxt_free_l2_filters(bp, true); 16135 bnxt_free_ntp_fltrs(bp, true); 16136 WARN_ON(bp->num_rss_ctx); 16137 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 16138 /* Flush any pending tasks */ 16139 cancel_work_sync(&bp->sp_task); 16140 cancel_delayed_work_sync(&bp->fw_reset_task); 16141 bp->sp_event = 0; 16142 16143 bnxt_dl_fw_reporters_destroy(bp); 16144 bnxt_dl_unregister(bp); 16145 bnxt_shutdown_tc(bp); 16146 16147 bnxt_clear_int_mode(bp); 16148 bnxt_hwrm_func_drv_unrgtr(bp); 16149 bnxt_free_hwrm_resources(bp); 16150 bnxt_hwmon_uninit(bp); 16151 bnxt_ethtool_free(bp); 16152 bnxt_dcb_free(bp); 16153 kfree(bp->ptp_cfg); 16154 bp->ptp_cfg = NULL; 16155 kfree(bp->fw_health); 16156 bp->fw_health = NULL; 16157 bnxt_cleanup_pci(bp); 16158 bnxt_free_ctx_mem(bp, true); 16159 bnxt_free_crash_dump_mem(bp); 16160 kfree(bp->rss_indir_tbl); 16161 bp->rss_indir_tbl = NULL; 16162 bnxt_free_port_stats(bp); 16163 free_netdev(dev); 16164 } 16165 16166 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 16167 { 16168 int rc = 0; 16169 struct bnxt_link_info *link_info = &bp->link_info; 16170 16171 bp->phy_flags = 0; 16172 rc = bnxt_hwrm_phy_qcaps(bp); 16173 if (rc) { 16174 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 16175 rc); 16176 return rc; 16177 } 16178 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) 16179 bp->dev->priv_flags |= IFF_SUPP_NOFCS; 16180 else 16181 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; 16182 16183 bp->mac_flags = 0; 16184 bnxt_hwrm_mac_qcaps(bp); 16185 16186 if (!fw_dflt) 16187 return 0; 16188 16189 mutex_lock(&bp->link_lock); 16190 rc = bnxt_update_link(bp, false); 16191 if (rc) { 16192 mutex_unlock(&bp->link_lock); 16193 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 16194 rc); 16195 return rc; 16196 } 16197 16198 /* Older firmware does not have supported_auto_speeds, so assume 16199 * that all supported speeds can be autonegotiated. 
16200 */ 16201 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 16202 link_info->support_auto_speeds = link_info->support_speeds; 16203 16204 bnxt_init_ethtool_link_settings(bp); 16205 mutex_unlock(&bp->link_lock); 16206 return 0; 16207 } 16208 16209 static int bnxt_get_max_irq(struct pci_dev *pdev) 16210 { 16211 u16 ctrl; 16212 16213 if (!pdev->msix_cap) 16214 return 1; 16215 16216 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 16217 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 16218 } 16219 16220 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 16221 int *max_cp) 16222 { 16223 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 16224 int max_ring_grps = 0, max_irq; 16225 16226 *max_tx = hw_resc->max_tx_rings; 16227 *max_rx = hw_resc->max_rx_rings; 16228 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 16229 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 16230 bnxt_get_ulp_msix_num_in_use(bp), 16231 hw_resc->max_stat_ctxs - 16232 bnxt_get_ulp_stat_ctxs_in_use(bp)); 16233 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 16234 *max_cp = min_t(int, *max_cp, max_irq); 16235 max_ring_grps = hw_resc->max_hw_ring_grps; 16236 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 16237 *max_cp -= 1; 16238 *max_rx -= 2; 16239 } 16240 if (bp->flags & BNXT_FLAG_AGG_RINGS) 16241 *max_rx >>= 1; 16242 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 16243 int rc; 16244 16245 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 16246 if (rc) { 16247 *max_rx = 0; 16248 *max_tx = 0; 16249 } 16250 /* On P5 chips, max_cp output param should be available NQs */ 16251 *max_cp = max_irq; 16252 } 16253 *max_rx = min_t(int, *max_rx, max_ring_grps); 16254 } 16255 16256 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 16257 { 16258 int rx, tx, cp; 16259 16260 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 16261 *max_rx = rx; 16262 *max_tx = tx; 16263 if (!rx || !tx || !cp) 16264 return -ENOMEM; 16265 16266 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 16267 } 16268 16269 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 16270 bool shared) 16271 { 16272 int rc; 16273 16274 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 16275 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 16276 /* Not enough rings, try disabling agg rings. 
*/ 16277 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 16278 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 16279 if (rc) { 16280 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 16281 bp->flags |= BNXT_FLAG_AGG_RINGS; 16282 return rc; 16283 } 16284 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 16285 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 16286 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 16287 bnxt_set_ring_params(bp); 16288 } 16289 16290 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 16291 int max_cp, max_stat, max_irq; 16292 16293 /* Reserve minimum resources for RoCE */ 16294 max_cp = bnxt_get_max_func_cp_rings(bp); 16295 max_stat = bnxt_get_max_func_stat_ctxs(bp); 16296 max_irq = bnxt_get_max_func_irqs(bp); 16297 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 16298 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 16299 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 16300 return 0; 16301 16302 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 16303 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 16304 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 16305 max_cp = min_t(int, max_cp, max_irq); 16306 max_cp = min_t(int, max_cp, max_stat); 16307 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 16308 if (rc) 16309 rc = 0; 16310 } 16311 return rc; 16312 } 16313 16314 /* In initial default shared ring setting, each shared ring must have a 16315 * RX/TX ring pair. 16316 */ 16317 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 16318 { 16319 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 16320 bp->rx_nr_rings = bp->cp_nr_rings; 16321 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 16322 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 16323 } 16324 16325 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 16326 { 16327 int dflt_rings, max_rx_rings, max_tx_rings, rc; 16328 int avail_msix; 16329 16330 if (!bnxt_can_reserve_rings(bp)) 16331 return 0; 16332 16333 if (sh) 16334 bp->flags |= BNXT_FLAG_SHARED_RINGS; 16335 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 16336 /* Reduce default rings on multi-port cards so that total default 16337 * rings do not exceed CPU count. 16338 */ 16339 if (bp->port_count > 1) { 16340 int max_rings = 16341 max_t(int, num_online_cpus() / bp->port_count, 1); 16342 16343 dflt_rings = min_t(int, dflt_rings, max_rings); 16344 } 16345 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 16346 if (rc) 16347 return rc; 16348 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 16349 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 16350 if (sh) 16351 bnxt_trim_dflt_sh_rings(bp); 16352 else 16353 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 16354 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 16355 16356 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; 16357 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { 16358 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want); 16359 16360 bnxt_set_ulp_msix_num(bp, ulp_num_msix); 16361 bnxt_set_dflt_ulp_stat_ctxs(bp); 16362 } 16363 16364 rc = __bnxt_reserve_rings(bp); 16365 if (rc && rc != -ENODEV) 16366 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 16367 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16368 if (sh) 16369 bnxt_trim_dflt_sh_rings(bp); 16370 16371 /* Rings may have been trimmed, re-reserve the trimmed rings. 
*/ 16372 if (bnxt_need_reserve_rings(bp)) { 16373 rc = __bnxt_reserve_rings(bp); 16374 if (rc && rc != -ENODEV) 16375 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 16376 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16377 } 16378 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 16379 bp->rx_nr_rings++; 16380 bp->cp_nr_rings++; 16381 } 16382 if (rc) { 16383 bp->tx_nr_rings = 0; 16384 bp->rx_nr_rings = 0; 16385 } 16386 return rc; 16387 } 16388 16389 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 16390 { 16391 int rc; 16392 16393 if (bp->tx_nr_rings) 16394 return 0; 16395 16396 bnxt_ulp_irq_stop(bp); 16397 bnxt_clear_int_mode(bp); 16398 rc = bnxt_set_dflt_rings(bp, true); 16399 if (rc) { 16400 if (BNXT_VF(bp) && rc == -ENODEV) 16401 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 16402 else 16403 netdev_err(bp->dev, "Not enough rings available.\n"); 16404 goto init_dflt_ring_err; 16405 } 16406 rc = bnxt_init_int_mode(bp); 16407 if (rc) 16408 goto init_dflt_ring_err; 16409 16410 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16411 16412 bnxt_set_dflt_rfs(bp); 16413 16414 init_dflt_ring_err: 16415 bnxt_ulp_irq_restart(bp, rc); 16416 return rc; 16417 } 16418 16419 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 16420 { 16421 int rc; 16422 16423 netdev_ops_assert_locked(bp->dev); 16424 bnxt_hwrm_func_qcaps(bp); 16425 16426 if (netif_running(bp->dev)) 16427 __bnxt_close_nic(bp, true, false); 16428 16429 bnxt_ulp_irq_stop(bp); 16430 bnxt_clear_int_mode(bp); 16431 rc = bnxt_init_int_mode(bp); 16432 bnxt_ulp_irq_restart(bp, rc); 16433 16434 if (netif_running(bp->dev)) { 16435 if (rc) 16436 netif_close(bp->dev); 16437 else 16438 rc = bnxt_open_nic(bp, true, false); 16439 } 16440 16441 return rc; 16442 } 16443 16444 static int bnxt_init_mac_addr(struct bnxt *bp) 16445 { 16446 int rc = 0; 16447 16448 if (BNXT_PF(bp)) { 16449 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); 16450 } else { 16451 #ifdef CONFIG_BNXT_SRIOV 16452 struct bnxt_vf_info *vf = &bp->vf; 16453 bool strict_approval = true; 16454 16455 if (is_valid_ether_addr(vf->mac_addr)) { 16456 /* overwrite netdev dev_addr with admin VF MAC */ 16457 eth_hw_addr_set(bp->dev, vf->mac_addr); 16458 /* Older PF driver or firmware may not approve this 16459 * correctly. 
16460 */ 16461 strict_approval = false; 16462 } else { 16463 eth_hw_addr_random(bp->dev); 16464 } 16465 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 16466 #endif 16467 } 16468 return rc; 16469 } 16470 16471 static void bnxt_vpd_read_info(struct bnxt *bp) 16472 { 16473 struct pci_dev *pdev = bp->pdev; 16474 unsigned int vpd_size, kw_len; 16475 int pos, size; 16476 u8 *vpd_data; 16477 16478 vpd_data = pci_vpd_alloc(pdev, &vpd_size); 16479 if (IS_ERR(vpd_data)) { 16480 pci_warn(pdev, "Unable to read VPD\n"); 16481 return; 16482 } 16483 16484 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 16485 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 16486 if (pos < 0) 16487 goto read_sn; 16488 16489 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 16490 memcpy(bp->board_partno, &vpd_data[pos], size); 16491 16492 read_sn: 16493 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 16494 PCI_VPD_RO_KEYWORD_SERIALNO, 16495 &kw_len); 16496 if (pos < 0) 16497 goto exit; 16498 16499 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 16500 memcpy(bp->board_serialno, &vpd_data[pos], size); 16501 exit: 16502 kfree(vpd_data); 16503 } 16504 16505 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 16506 { 16507 struct pci_dev *pdev = bp->pdev; 16508 u64 qword; 16509 16510 qword = pci_get_dsn(pdev); 16511 if (!qword) { 16512 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 16513 return -EOPNOTSUPP; 16514 } 16515 16516 put_unaligned_le64(qword, dsn); 16517 16518 bp->flags |= BNXT_FLAG_DSN_VALID; 16519 return 0; 16520 } 16521 16522 static int bnxt_map_db_bar(struct bnxt *bp) 16523 { 16524 if (!bp->db_size) 16525 return -ENODEV; 16526 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 16527 if (!bp->bar1) 16528 return -ENOMEM; 16529 return 0; 16530 } 16531 16532 void bnxt_print_device_info(struct bnxt *bp) 16533 { 16534 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", 16535 board_info[bp->board_idx].name, 16536 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); 16537 16538 pcie_print_link_status(bp->pdev); 16539 } 16540 16541 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 16542 { 16543 struct bnxt_hw_resc *hw_resc; 16544 struct net_device *dev; 16545 struct bnxt *bp; 16546 int rc, max_irqs; 16547 16548 if (pci_is_bridge(pdev)) 16549 return -ENODEV; 16550 16551 if (!pdev->msix_cap) { 16552 dev_err(&pdev->dev, "MSIX capability not found, aborting\n"); 16553 return -ENODEV; 16554 } 16555 16556 /* Clear any pending DMA transactions from crash kernel 16557 * while loading driver in capture kernel. 
16558 */ 16559 if (is_kdump_kernel()) { 16560 pci_clear_master(pdev); 16561 pcie_flr(pdev); 16562 } 16563 16564 max_irqs = bnxt_get_max_irq(pdev); 16565 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, 16566 max_irqs); 16567 if (!dev) 16568 return -ENOMEM; 16569 16570 bp = netdev_priv(dev); 16571 bp->board_idx = ent->driver_data; 16572 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 16573 bnxt_set_max_func_irqs(bp, max_irqs); 16574 16575 if (bnxt_vf_pciid(bp->board_idx)) 16576 bp->flags |= BNXT_FLAG_VF; 16577 16578 /* No devlink port registration in case of a VF */ 16579 if (BNXT_PF(bp)) 16580 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); 16581 16582 rc = bnxt_init_board(pdev, dev); 16583 if (rc < 0) 16584 goto init_err_free; 16585 16586 dev->netdev_ops = &bnxt_netdev_ops; 16587 dev->stat_ops = &bnxt_stat_ops; 16588 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 16589 dev->ethtool_ops = &bnxt_ethtool_ops; 16590 pci_set_drvdata(pdev, dev); 16591 16592 rc = bnxt_alloc_hwrm_resources(bp); 16593 if (rc) 16594 goto init_err_pci_clean; 16595 16596 mutex_init(&bp->hwrm_cmd_lock); 16597 mutex_init(&bp->link_lock); 16598 16599 rc = bnxt_fw_init_one_p1(bp); 16600 if (rc) 16601 goto init_err_pci_clean; 16602 16603 if (BNXT_PF(bp)) 16604 bnxt_vpd_read_info(bp); 16605 16606 if (BNXT_CHIP_P5_PLUS(bp)) { 16607 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; 16608 if (BNXT_CHIP_P7(bp)) 16609 bp->flags |= BNXT_FLAG_CHIP_P7; 16610 } 16611 16612 rc = bnxt_alloc_rss_indir_tbl(bp); 16613 if (rc) 16614 goto init_err_pci_clean; 16615 16616 rc = bnxt_fw_init_one_p2(bp); 16617 if (rc) 16618 goto init_err_pci_clean; 16619 16620 rc = bnxt_map_db_bar(bp); 16621 if (rc) { 16622 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 16623 rc); 16624 goto init_err_pci_clean; 16625 } 16626 16627 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 16628 NETIF_F_TSO | NETIF_F_TSO6 | 16629 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 16630 NETIF_F_GSO_IPXIP4 | 16631 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 16632 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 16633 NETIF_F_RXCSUM | NETIF_F_GRO; 16634 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 16635 dev->hw_features |= NETIF_F_GSO_UDP_L4; 16636 16637 if (BNXT_SUPPORTS_TPA(bp)) 16638 dev->hw_features |= NETIF_F_LRO; 16639 16640 dev->hw_enc_features = 16641 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 16642 NETIF_F_TSO | NETIF_F_TSO6 | 16643 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 16644 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 16645 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 16646 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 16647 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 16648 if (bp->flags & BNXT_FLAG_CHIP_P7) 16649 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; 16650 else 16651 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 16652 16653 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 16654 NETIF_F_GSO_GRE_CSUM; 16655 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 16656 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 16657 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 16658 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 16659 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 16660 if (BNXT_SUPPORTS_TPA(bp)) 16661 dev->hw_features |= NETIF_F_GRO_HW; 16662 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 16663 if (dev->features & NETIF_F_GRO_HW) 16664 dev->features &= ~NETIF_F_LRO; 16665 dev->priv_flags |= IFF_UNICAST_FLT; 16666 16667 netif_set_tso_max_size(dev, GSO_MAX_SIZE); 16668 if (bp->tso_max_segs) 16669 
netif_set_tso_max_segs(dev, bp->tso_max_segs); 16670 16671 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 16672 NETDEV_XDP_ACT_RX_SG; 16673 16674 #ifdef CONFIG_BNXT_SRIOV 16675 init_waitqueue_head(&bp->sriov_cfg_wait); 16676 #endif 16677 if (BNXT_SUPPORTS_TPA(bp)) { 16678 bp->gro_func = bnxt_gro_func_5730x; 16679 if (BNXT_CHIP_P4(bp)) 16680 bp->gro_func = bnxt_gro_func_5731x; 16681 else if (BNXT_CHIP_P5_PLUS(bp)) 16682 bp->gro_func = bnxt_gro_func_5750x; 16683 } 16684 if (!BNXT_CHIP_P4_PLUS(bp)) 16685 bp->flags |= BNXT_FLAG_DOUBLE_DB; 16686 16687 rc = bnxt_init_mac_addr(bp); 16688 if (rc) { 16689 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); 16690 rc = -EADDRNOTAVAIL; 16691 goto init_err_pci_clean; 16692 } 16693 16694 if (BNXT_PF(bp)) { 16695 /* Read the adapter's DSN to use as the eswitch switch_id */ 16696 rc = bnxt_pcie_dsn_get(bp, bp->dsn); 16697 } 16698 16699 /* MTU range: 60 - FW defined max */ 16700 dev->min_mtu = ETH_ZLEN; 16701 dev->max_mtu = bp->max_mtu; 16702 16703 rc = bnxt_probe_phy(bp, true); 16704 if (rc) 16705 goto init_err_pci_clean; 16706 16707 hw_resc = &bp->hw_resc; 16708 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + 16709 BNXT_L2_FLTR_MAX_FLTR; 16710 /* Older firmware may not report these filters properly */ 16711 if (bp->max_fltr < BNXT_MAX_FLTR) 16712 bp->max_fltr = BNXT_MAX_FLTR; 16713 bnxt_init_l2_fltr_tbl(bp); 16714 __bnxt_set_rx_skb_mode(bp, false); 16715 bnxt_set_tpa_flags(bp); 16716 bnxt_init_ring_params(bp); 16717 bnxt_set_ring_params(bp); 16718 bnxt_rdma_aux_device_init(bp); 16719 rc = bnxt_set_dflt_rings(bp, true); 16720 if (rc) { 16721 if (BNXT_VF(bp) && rc == -ENODEV) { 16722 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 16723 } else { 16724 netdev_err(bp->dev, "Not enough rings available.\n"); 16725 rc = -ENOMEM; 16726 } 16727 goto init_err_pci_clean; 16728 } 16729 16730 bnxt_fw_init_one_p3(bp); 16731 16732 bnxt_init_dflt_coal(bp); 16733 16734 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) 16735 bp->flags |= BNXT_FLAG_STRIP_VLAN; 16736 16737 rc = bnxt_init_int_mode(bp); 16738 if (rc) 16739 goto init_err_pci_clean; 16740 16741 /* No TC has been set yet and rings may have been trimmed due to 16742 * limited MSIX, so we re-initialize the TX rings per TC. 
16743 */ 16744 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16745 16746 if (BNXT_PF(bp)) { 16747 if (!bnxt_pf_wq) { 16748 bnxt_pf_wq = 16749 create_singlethread_workqueue("bnxt_pf_wq"); 16750 if (!bnxt_pf_wq) { 16751 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 16752 rc = -ENOMEM; 16753 goto init_err_pci_clean; 16754 } 16755 } 16756 rc = bnxt_init_tc(bp); 16757 if (rc) 16758 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", 16759 rc); 16760 } 16761 16762 bnxt_inv_fw_health_reg(bp); 16763 rc = bnxt_dl_register(bp); 16764 if (rc) 16765 goto init_err_dl; 16766 16767 INIT_LIST_HEAD(&bp->usr_fltr_list); 16768 16769 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 16770 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; 16771 if (BNXT_SUPPORTS_QUEUE_API(bp)) 16772 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; 16773 dev->request_ops_lock = true; 16774 dev->netmem_tx = true; 16775 16776 rc = register_netdev(dev); 16777 if (rc) 16778 goto init_err_cleanup; 16779 16780 bnxt_dl_fw_reporters_create(bp); 16781 16782 bnxt_rdma_aux_device_add(bp); 16783 16784 bnxt_print_device_info(bp); 16785 16786 pci_save_state(pdev); 16787 16788 return 0; 16789 init_err_cleanup: 16790 bnxt_rdma_aux_device_uninit(bp); 16791 bnxt_dl_unregister(bp); 16792 init_err_dl: 16793 bnxt_shutdown_tc(bp); 16794 bnxt_clear_int_mode(bp); 16795 16796 init_err_pci_clean: 16797 bnxt_hwrm_func_drv_unrgtr(bp); 16798 bnxt_free_hwrm_resources(bp); 16799 bnxt_hwmon_uninit(bp); 16800 bnxt_ethtool_free(bp); 16801 bnxt_ptp_clear(bp); 16802 kfree(bp->ptp_cfg); 16803 bp->ptp_cfg = NULL; 16804 kfree(bp->fw_health); 16805 bp->fw_health = NULL; 16806 bnxt_cleanup_pci(bp); 16807 bnxt_free_ctx_mem(bp, true); 16808 bnxt_free_crash_dump_mem(bp); 16809 kfree(bp->rss_indir_tbl); 16810 bp->rss_indir_tbl = NULL; 16811 16812 init_err_free: 16813 free_netdev(dev); 16814 return rc; 16815 } 16816 16817 static void bnxt_shutdown(struct pci_dev *pdev) 16818 { 16819 struct net_device *dev = pci_get_drvdata(pdev); 16820 struct bnxt *bp; 16821 16822 if (!dev) 16823 return; 16824 16825 rtnl_lock(); 16826 netdev_lock(dev); 16827 bp = netdev_priv(dev); 16828 if (!bp) 16829 goto shutdown_exit; 16830 16831 if (netif_running(dev)) 16832 netif_close(dev); 16833 16834 bnxt_ptp_clear(bp); 16835 bnxt_clear_int_mode(bp); 16836 pci_disable_device(pdev); 16837 16838 if (system_state == SYSTEM_POWER_OFF) { 16839 pci_wake_from_d3(pdev, bp->wol); 16840 pci_set_power_state(pdev, PCI_D3hot); 16841 } 16842 16843 shutdown_exit: 16844 netdev_unlock(dev); 16845 rtnl_unlock(); 16846 } 16847 16848 #ifdef CONFIG_PM_SLEEP 16849 static int bnxt_suspend(struct device *device) 16850 { 16851 struct net_device *dev = dev_get_drvdata(device); 16852 struct bnxt *bp = netdev_priv(dev); 16853 int rc = 0; 16854 16855 bnxt_ulp_stop(bp); 16856 16857 netdev_lock(dev); 16858 if (netif_running(dev)) { 16859 netif_device_detach(dev); 16860 rc = bnxt_close(dev); 16861 } 16862 bnxt_hwrm_func_drv_unrgtr(bp); 16863 bnxt_ptp_clear(bp); 16864 pci_disable_device(bp->pdev); 16865 bnxt_free_ctx_mem(bp, false); 16866 netdev_unlock(dev); 16867 return rc; 16868 } 16869 16870 static int bnxt_resume(struct device *device) 16871 { 16872 struct net_device *dev = dev_get_drvdata(device); 16873 struct bnxt *bp = netdev_priv(dev); 16874 int rc = 0; 16875 16876 netdev_lock(dev); 16877 rc = pci_enable_device(bp->pdev); 16878 if (rc) { 16879 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", 16880 rc); 16881 goto resume_exit; 16882 } 16883 pci_set_master(bp->pdev); 16884 if (bnxt_hwrm_ver_get(bp)) { 
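/* Firmware did not respond to the version query; abort the resume with -ENODEV. */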
16885 rc = -ENODEV; 16886 goto resume_exit; 16887 } 16888 rc = bnxt_hwrm_func_reset(bp); 16889 if (rc) { 16890 rc = -EBUSY; 16891 goto resume_exit; 16892 } 16893 16894 rc = bnxt_hwrm_func_qcaps(bp); 16895 if (rc) 16896 goto resume_exit; 16897 16898 bnxt_clear_reservations(bp, true); 16899 16900 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { 16901 rc = -ENODEV; 16902 goto resume_exit; 16903 } 16904 if (bp->fw_crash_mem) 16905 bnxt_hwrm_crash_dump_mem_cfg(bp); 16906 16907 if (bnxt_ptp_init(bp)) { 16908 kfree(bp->ptp_cfg); 16909 bp->ptp_cfg = NULL; 16910 } 16911 bnxt_get_wol_settings(bp); 16912 if (netif_running(dev)) { 16913 rc = bnxt_open(dev); 16914 if (!rc) 16915 netif_device_attach(dev); 16916 } 16917 16918 resume_exit: 16919 netdev_unlock(bp->dev); 16920 bnxt_ulp_start(bp, rc); 16921 if (!rc) 16922 bnxt_reenable_sriov(bp); 16923 return rc; 16924 } 16925 16926 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume); 16927 #define BNXT_PM_OPS (&bnxt_pm_ops) 16928 16929 #else 16930 16931 #define BNXT_PM_OPS NULL 16932 16933 #endif /* CONFIG_PM_SLEEP */ 16934 16935 /** 16936 * bnxt_io_error_detected - called when PCI error is detected 16937 * @pdev: Pointer to PCI device 16938 * @state: The current pci connection state 16939 * 16940 * This function is called after a PCI bus error affecting 16941 * this device has been detected. 16942 */ 16943 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, 16944 pci_channel_state_t state) 16945 { 16946 struct net_device *netdev = pci_get_drvdata(pdev); 16947 struct bnxt *bp = netdev_priv(netdev); 16948 bool abort = false; 16949 16950 netdev_info(netdev, "PCI I/O error detected\n"); 16951 16952 bnxt_ulp_stop(bp); 16953 16954 netdev_lock(netdev); 16955 netif_device_detach(netdev); 16956 16957 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 16958 netdev_err(bp->dev, "Firmware reset already in progress\n"); 16959 abort = true; 16960 } 16961 16962 if (abort || state == pci_channel_io_perm_failure) { 16963 netdev_unlock(netdev); 16964 return PCI_ERS_RESULT_DISCONNECT; 16965 } 16966 16967 /* Link is not reliable anymore if state is pci_channel_io_frozen 16968 * so we disable bus master to prevent any potential bad DMAs before 16969 * freeing kernel memory. 16970 */ 16971 if (state == pci_channel_io_frozen) { 16972 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); 16973 bnxt_fw_fatal_close(bp); 16974 } 16975 16976 if (netif_running(netdev)) 16977 __bnxt_close_nic(bp, true, true); 16978 16979 if (pci_is_enabled(pdev)) 16980 pci_disable_device(pdev); 16981 bnxt_free_ctx_mem(bp, false); 16982 netdev_unlock(netdev); 16983 16984 /* Request a slot reset. */ 16985 return PCI_ERS_RESULT_NEED_RESET; 16986 } 16987 16988 /** 16989 * bnxt_io_slot_reset - called after the pci bus has been reset. 16990 * @pdev: Pointer to PCI device 16991 * 16992 * Restart the card from scratch, as if from a cold-boot. 16993 * At this point, the card has experienced a hard reset, 16994 * followed by fixups by BIOS, and has its config space 16995 * set up identically to what it was at cold boot. 
16996 */
16997 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
16998 {
16999 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
17000 struct net_device *netdev = pci_get_drvdata(pdev);
17001 struct bnxt *bp = netdev_priv(netdev);
17002 int retry = 0;
17003 int err = 0;
17004 int off;
17005
17006 netdev_info(bp->dev, "PCI Slot Reset\n");
17007
17008 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
17009 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
17010 msleep(900);
17011
17012 netdev_lock(netdev);
17013
17014 if (pci_enable_device(pdev)) {
17015 dev_err(&pdev->dev,
17016 "Cannot re-enable PCI device after reset.\n");
17017 } else {
17018 pci_set_master(pdev);
17019 /* Upon fatal error, the device's internal logic that latches the
17020 * BAR values is reset and is restored only when the BARs are
17021 * rewritten.
17022 *
17023 * As pci_restore_state() does not rewrite the BARs when they already
17024 * match the previously saved values, the driver must write the BARs
17025 * to 0 to force a restore after a fatal error.
17026 */
17027 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
17028 &bp->state)) {
17029 for (off = PCI_BASE_ADDRESS_0;
17030 off <= PCI_BASE_ADDRESS_5; off += 4)
17031 pci_write_config_dword(bp->pdev, off, 0);
17032 }
17033 pci_restore_state(pdev);
17034 pci_save_state(pdev);
17035
17036 bnxt_inv_fw_health_reg(bp);
17037 bnxt_try_map_fw_health_reg(bp);
17038
17039 /* In some PCIe AER scenarios, firmware may take up to
17040 * 10 seconds to become ready in the worst case.
17041 */
17042 do {
17043 err = bnxt_try_recover_fw(bp);
17044 if (!err)
17045 break;
17046 retry++;
17047 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
17048
17049 if (err) {
17050 dev_err(&pdev->dev, "Firmware not ready\n");
17051 goto reset_exit;
17052 }
17053
17054 err = bnxt_hwrm_func_reset(bp);
17055 if (!err)
17056 result = PCI_ERS_RESULT_RECOVERED;
17057
17058 /* IRQ will be initialized later in bnxt_io_resume */
17059 bnxt_ulp_irq_stop(bp);
17060 bnxt_clear_int_mode(bp);
17061 }
17062
17063 reset_exit:
17064 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
17065 bnxt_clear_reservations(bp, true);
17066 netdev_unlock(netdev);
17067
17068 return result;
17069 }
17070
17071 /**
17072 * bnxt_io_resume - called when traffic can start flowing again.
17073 * @pdev: Pointer to PCI device
17074 *
17075 * This callback is called when the error recovery driver tells
17076 * us that it is OK to resume normal operation.
17077 */ 17078 static void bnxt_io_resume(struct pci_dev *pdev) 17079 { 17080 struct net_device *netdev = pci_get_drvdata(pdev); 17081 struct bnxt *bp = netdev_priv(netdev); 17082 int err; 17083 17084 netdev_info(bp->dev, "PCI Slot Resume\n"); 17085 netdev_lock(netdev); 17086 17087 err = bnxt_hwrm_func_qcaps(bp); 17088 if (!err) { 17089 if (netif_running(netdev)) { 17090 err = bnxt_open(netdev); 17091 } else { 17092 err = bnxt_reserve_rings(bp, true); 17093 if (!err) 17094 err = bnxt_init_int_mode(bp); 17095 } 17096 } 17097 17098 if (!err) 17099 netif_device_attach(netdev); 17100 17101 netdev_unlock(netdev); 17102 bnxt_ulp_start(bp, err); 17103 if (!err) 17104 bnxt_reenable_sriov(bp); 17105 } 17106 17107 static const struct pci_error_handlers bnxt_err_handler = { 17108 .error_detected = bnxt_io_error_detected, 17109 .slot_reset = bnxt_io_slot_reset, 17110 .resume = bnxt_io_resume 17111 }; 17112 17113 static struct pci_driver bnxt_pci_driver = { 17114 .name = DRV_MODULE_NAME, 17115 .id_table = bnxt_pci_tbl, 17116 .probe = bnxt_init_one, 17117 .remove = bnxt_remove_one, 17118 .shutdown = bnxt_shutdown, 17119 .driver.pm = BNXT_PM_OPS, 17120 .err_handler = &bnxt_err_handler, 17121 #if defined(CONFIG_BNXT_SRIOV) 17122 .sriov_configure = bnxt_sriov_configure, 17123 #endif 17124 }; 17125 17126 static int __init bnxt_init(void) 17127 { 17128 int err; 17129 17130 bnxt_debug_init(); 17131 err = pci_register_driver(&bnxt_pci_driver); 17132 if (err) { 17133 bnxt_debug_exit(); 17134 return err; 17135 } 17136 17137 return 0; 17138 } 17139 17140 static void __exit bnxt_exit(void) 17141 { 17142 pci_unregister_driver(&bnxt_pci_driver); 17143 if (bnxt_pf_wq) 17144 destroy_workqueue(bnxt_pf_wq); 17145 bnxt_debug_exit(); 17146 } 17147 17148 module_init(bnxt_init); 17149 module_exit(bnxt_exit); 17150