1 /* Broadcom NetXtreme-C/E network driver. 2 * 3 * Copyright (c) 2014-2016 Broadcom Corporation 4 * Copyright (c) 2016-2019 Broadcom Limited 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation. 9 */ 10 11 #include <linux/module.h> 12 13 #include <linux/stringify.h> 14 #include <linux/kernel.h> 15 #include <linux/timer.h> 16 #include <linux/errno.h> 17 #include <linux/ioport.h> 18 #include <linux/slab.h> 19 #include <linux/vmalloc.h> 20 #include <linux/interrupt.h> 21 #include <linux/pci.h> 22 #include <linux/netdevice.h> 23 #include <linux/etherdevice.h> 24 #include <linux/skbuff.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/bitops.h> 27 #include <linux/io.h> 28 #include <linux/irq.h> 29 #include <linux/delay.h> 30 #include <asm/byteorder.h> 31 #include <asm/page.h> 32 #include <linux/time.h> 33 #include <linux/mii.h> 34 #include <linux/mdio.h> 35 #include <linux/if.h> 36 #include <linux/if_vlan.h> 37 #include <linux/if_bridge.h> 38 #include <linux/rtc.h> 39 #include <linux/bpf.h> 40 #include <net/gro.h> 41 #include <net/ip.h> 42 #include <net/tcp.h> 43 #include <net/udp.h> 44 #include <net/checksum.h> 45 #include <net/ip6_checksum.h> 46 #include <net/udp_tunnel.h> 47 #include <linux/workqueue.h> 48 #include <linux/prefetch.h> 49 #include <linux/cache.h> 50 #include <linux/log2.h> 51 #include <linux/bitmap.h> 52 #include <linux/cpu_rmap.h> 53 #include <linux/cpumask.h> 54 #include <net/pkt_cls.h> 55 #include <net/page_pool/helpers.h> 56 #include <linux/align.h> 57 #include <net/netdev_lock.h> 58 #include <net/netdev_queues.h> 59 #include <net/netdev_rx_queue.h> 60 #include <linux/pci-tph.h> 61 62 #include "bnxt_hsi.h" 63 #include "bnxt.h" 64 #include "bnxt_hwrm.h" 65 #include "bnxt_ulp.h" 66 #include "bnxt_sriov.h" 67 #include "bnxt_ethtool.h" 68 #include "bnxt_dcb.h" 69 #include "bnxt_xdp.h" 70 #include "bnxt_ptp.h" 71 #include "bnxt_vfr.h" 72 #include "bnxt_tc.h" 73 #include "bnxt_devlink.h" 74 #include "bnxt_debugfs.h" 75 #include "bnxt_coredump.h" 76 #include "bnxt_hwmon.h" 77 78 #define BNXT_TX_TIMEOUT (5 * HZ) 79 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ 80 NETIF_MSG_TX_ERR) 81 82 MODULE_IMPORT_NS("NETDEV_INTERNAL"); 83 MODULE_LICENSE("GPL"); 84 MODULE_DESCRIPTION("Broadcom NetXtreme network driver"); 85 86 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) 87 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD 88 89 #define BNXT_TX_PUSH_THRESH 164 90 91 /* indexed by enum board_idx */ 92 static const struct { 93 char *name; 94 } board_info[] = { 95 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" }, 96 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" }, 97 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 98 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, 99 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" }, 100 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, 101 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" }, 102 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, 103 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, 104 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, 105 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, 106 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, 107 [BCM57412] = { "Broadcom 
BCM57412 NetXtreme-E 10Gb Ethernet" }, 108 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, 109 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" }, 110 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, 111 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, 112 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, 113 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, 114 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, 115 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 116 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, 117 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, 118 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, 119 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 120 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, 121 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, 122 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 123 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, 124 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 125 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 126 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, 127 [BCM57608] = { "Broadcom BCM57608 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 128 [BCM57604] = { "Broadcom BCM57604 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, 129 [BCM57602] = { "Broadcom BCM57602 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" }, 130 [BCM57601] = { "Broadcom BCM57601 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, 131 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, 132 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, 133 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, 134 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, 135 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 136 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, 137 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, 138 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, 139 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, 140 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, 141 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, 142 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, 143 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, 144 [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" }, 145 }; 146 147 static const struct pci_device_id bnxt_pci_tbl[] = { 148 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, 149 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, 150 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, 151 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, 152 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 153 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 154 { PCI_VDEVICE(BROADCOM, 0x16ca), 
.driver_data = BCM57304 }, 155 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, 156 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, 157 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 158 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 159 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 160 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 161 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 162 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, 163 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, 164 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 165 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 166 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 167 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 168 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, 169 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 170 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, 171 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, 172 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, 173 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, 174 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, 175 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, 176 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, 177 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, 178 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, 179 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, 180 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, 181 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, 182 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, 183 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, 184 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, 185 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, 186 { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 }, 187 { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 }, 188 { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 }, 189 { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 }, 190 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR }, 191 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, 192 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR }, 193 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR }, 194 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, 195 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR }, 196 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, 197 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, 198 #ifdef CONFIG_BNXT_SRIOV 199 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, 200 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, 201 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, 202 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, 203 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, 204 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, 205 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, 206 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, 207 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, 
208 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, 209 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, 210 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, 211 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, 212 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, 213 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, 214 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, 215 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, 216 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, 217 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, 218 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, 219 { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF }, 220 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, 221 #endif 222 { 0 } 223 }; 224 225 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); 226 227 static const u16 bnxt_vf_req_snif[] = { 228 HWRM_FUNC_CFG, 229 HWRM_FUNC_VF_CFG, 230 HWRM_PORT_PHY_QCFG, 231 HWRM_CFA_L2_FILTER_ALLOC, 232 }; 233 234 static const u16 bnxt_async_events_arr[] = { 235 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 236 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, 237 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 238 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 239 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 240 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 241 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, 242 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, 243 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, 244 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, 245 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, 246 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, 247 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, 248 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, 249 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, 250 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE, 251 ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER, 252 }; 253 254 const u16 bnxt_bstore_to_trace[] = { 255 [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE, 256 [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE, 257 [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE, 258 [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE, 259 [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE, 260 [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE, 261 [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE, 262 [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE, 263 [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE, 264 [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE, 265 [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE, 266 }; 267 268 static struct workqueue_struct *bnxt_pf_wq; 269 270 #define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ 271 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} 272 #define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} 273 274 const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = { 275 .ports = { 276 .src = 0, 277 .dst = 0, 278 }, 279 .addrs = { 280 .v6addrs = { 281 .src = BNXT_IPV6_MASK_NONE, 282 .dst = BNXT_IPV6_MASK_NONE, 283 }, 284 }, 285 }; 286 287 const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = { 288 .ports = { 289 .src = cpu_to_be16(0xffff), 290 .dst = cpu_to_be16(0xffff), 291 }, 292 .addrs = { 293 .v6addrs = { 294 .src = BNXT_IPV6_MASK_ALL, 295 .dst = BNXT_IPV6_MASK_ALL, 296 }, 
297 }, 298 }; 299 300 const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = { 301 .ports = { 302 .src = cpu_to_be16(0xffff), 303 .dst = cpu_to_be16(0xffff), 304 }, 305 .addrs = { 306 .v4addrs = { 307 .src = cpu_to_be32(0xffffffff), 308 .dst = cpu_to_be32(0xffffffff), 309 }, 310 }, 311 }; 312 313 static bool bnxt_vf_pciid(enum board_idx idx) 314 { 315 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || 316 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || 317 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || 318 idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF); 319 } 320 321 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) 322 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) 323 324 #define BNXT_DB_CQ(db, idx) \ 325 writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) 326 327 #define BNXT_DB_NQ_P5(db, idx) \ 328 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\ 329 (db)->doorbell) 330 331 #define BNXT_DB_NQ_P7(db, idx) \ 332 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \ 333 DB_RING_IDX(db, idx), (db)->doorbell) 334 335 #define BNXT_DB_CQ_ARM(db, idx) \ 336 writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) 337 338 #define BNXT_DB_NQ_ARM_P5(db, idx) \ 339 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \ 340 DB_RING_IDX(db, idx), (db)->doorbell) 341 342 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 343 { 344 if (bp->flags & BNXT_FLAG_CHIP_P7) 345 BNXT_DB_NQ_P7(db, idx); 346 else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 347 BNXT_DB_NQ_P5(db, idx); 348 else 349 BNXT_DB_CQ(db, idx); 350 } 351 352 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 353 { 354 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 355 BNXT_DB_NQ_ARM_P5(db, idx); 356 else 357 BNXT_DB_CQ_ARM(db, idx); 358 } 359 360 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) 361 { 362 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 363 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | 364 DB_RING_IDX(db, idx), db->doorbell); 365 else 366 BNXT_DB_CQ(db, idx); 367 } 368 369 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) 370 { 371 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) 372 return; 373 374 if (BNXT_PF(bp)) 375 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); 376 else 377 schedule_delayed_work(&bp->fw_reset_task, delay); 378 } 379 380 static void __bnxt_queue_sp_work(struct bnxt *bp) 381 { 382 if (BNXT_PF(bp)) 383 queue_work(bnxt_pf_wq, &bp->sp_task); 384 else 385 schedule_work(&bp->sp_task); 386 } 387 388 static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event) 389 { 390 set_bit(event, &bp->sp_event); 391 __bnxt_queue_sp_work(bp); 392 } 393 394 static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 395 { 396 if (!rxr->bnapi->in_reset) { 397 rxr->bnapi->in_reset = true; 398 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 399 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); 400 else 401 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); 402 __bnxt_queue_sp_work(bp); 403 } 404 rxr->rx_next_cons = 0xffff; 405 } 406 407 void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 408 u16 curr) 409 { 410 struct bnxt_napi *bnapi = txr->bnapi; 411 412 if (bnapi->tx_fault) 413 return; 414 415 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)", 416 txr->txq_index, txr->tx_hw_cons, 417 txr->tx_cons, txr->tx_prod, curr); 418 WARN_ON_ONCE(1); 419 bnapi->tx_fault = 1; 
420 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 421 } 422 423 const u16 bnxt_lhint_arr[] = { 424 TX_BD_FLAGS_LHINT_512_AND_SMALLER, 425 TX_BD_FLAGS_LHINT_512_TO_1023, 426 TX_BD_FLAGS_LHINT_1024_TO_2047, 427 TX_BD_FLAGS_LHINT_1024_TO_2047, 428 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 429 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 430 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 431 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 432 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 433 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 434 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 435 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 436 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 437 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 438 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 439 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 440 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 441 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 442 TX_BD_FLAGS_LHINT_2048_AND_LARGER, 443 }; 444 445 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) 446 { 447 struct metadata_dst *md_dst = skb_metadata_dst(skb); 448 449 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) 450 return 0; 451 452 return md_dst->u.port_info.port_id; 453 } 454 455 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 456 u16 prod) 457 { 458 /* Sync BD data before updating doorbell */ 459 wmb(); 460 bnxt_db_write(bp, &txr->tx_db, prod); 461 txr->kick_pending = 0; 462 } 463 464 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) 465 { 466 struct bnxt *bp = netdev_priv(dev); 467 struct tx_bd *txbd, *txbd0; 468 struct tx_bd_ext *txbd1; 469 struct netdev_queue *txq; 470 int i; 471 dma_addr_t mapping; 472 unsigned int length, pad = 0; 473 u32 len, free_size, vlan_tag_flags, cfa_action, flags; 474 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 475 struct pci_dev *pdev = bp->pdev; 476 u16 prod, last_frag, txts_prod; 477 struct bnxt_tx_ring_info *txr; 478 struct bnxt_sw_tx_bd *tx_buf; 479 __le32 lflags = 0; 480 481 i = skb_get_queue_mapping(skb); 482 if (unlikely(i >= bp->tx_nr_rings)) { 483 dev_kfree_skb_any(skb); 484 dev_core_stats_tx_dropped_inc(dev); 485 return NETDEV_TX_OK; 486 } 487 488 txq = netdev_get_tx_queue(dev, i); 489 txr = &bp->tx_ring[bp->tx_ring_map[i]]; 490 prod = txr->tx_prod; 491 492 #if (MAX_SKB_FRAGS > TX_MAX_FRAGS) 493 if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) { 494 netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. 
SKB will be linearized.\n", 495 skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS); 496 if (skb_linearize(skb)) { 497 dev_kfree_skb_any(skb); 498 dev_core_stats_tx_dropped_inc(dev); 499 return NETDEV_TX_OK; 500 } 501 } 502 #endif 503 free_size = bnxt_tx_avail(bp, txr); 504 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { 505 /* We must have raced with NAPI cleanup */ 506 if (net_ratelimit() && txr->kick_pending) 507 netif_warn(bp, tx_err, dev, 508 "bnxt: ring busy w/ flush pending!\n"); 509 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 510 bp->tx_wake_thresh)) 511 return NETDEV_TX_BUSY; 512 } 513 514 if (unlikely(ipv6_hopopt_jumbo_remove(skb))) 515 goto tx_free; 516 517 length = skb->len; 518 len = skb_headlen(skb); 519 last_frag = skb_shinfo(skb)->nr_frags; 520 521 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 522 523 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 524 tx_buf->skb = skb; 525 tx_buf->nr_frags = last_frag; 526 527 vlan_tag_flags = 0; 528 cfa_action = bnxt_xmit_get_cfa_action(skb); 529 if (skb_vlan_tag_present(skb)) { 530 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | 531 skb_vlan_tag_get(skb); 532 /* Currently supports 8021Q, 8021AD vlan offloads 533 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated 534 */ 535 if (skb->vlan_proto == htons(ETH_P_8021Q)) 536 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; 537 } 538 539 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp && 540 ptp->tx_tstamp_en) { 541 if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) { 542 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 543 tx_buf->is_ts_pkt = 1; 544 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 545 } else if (!skb_is_gso(skb)) { 546 u16 seq_id, hdr_off; 547 548 if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) && 549 !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) { 550 if (vlan_tag_flags) 551 hdr_off += VLAN_HLEN; 552 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); 553 tx_buf->is_ts_pkt = 1; 554 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 555 556 ptp->txts_req[txts_prod].tx_seqid = seq_id; 557 ptp->txts_req[txts_prod].tx_hdr_off = hdr_off; 558 tx_buf->txts_prod = txts_prod; 559 } 560 } 561 } 562 if (unlikely(skb->no_fcs)) 563 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); 564 565 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && 566 !lflags) { 567 struct tx_push_buffer *tx_push_buf = txr->tx_push; 568 struct tx_push_bd *tx_push = &tx_push_buf->push_bd; 569 struct tx_bd_ext *tx_push1 = &tx_push->txbd2; 570 void __iomem *db = txr->tx_db.doorbell; 571 void *pdata = tx_push_buf->data; 572 u64 *end; 573 int j, push_len; 574 575 /* Set COAL_NOW to be ready quickly for the next push */ 576 tx_push->tx_bd_len_flags_type = 577 cpu_to_le32((length << TX_BD_LEN_SHIFT) | 578 TX_BD_TYPE_LONG_TX_BD | 579 TX_BD_FLAGS_LHINT_512_AND_SMALLER | 580 TX_BD_FLAGS_COAL_NOW | 581 TX_BD_FLAGS_PACKET_END | 582 TX_BD_CNT(2)); 583 584 if (skb->ip_summed == CHECKSUM_PARTIAL) 585 tx_push1->tx_bd_hsize_lflags = 586 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 587 else 588 tx_push1->tx_bd_hsize_lflags = 0; 589 590 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 591 tx_push1->tx_bd_cfa_action = 592 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 593 594 end = pdata + length; 595 end = PTR_ALIGN(end, 8) - 1; 596 *end = 0; 597 598 skb_copy_from_linear_data(skb, pdata, len); 599 pdata += len; 600 for (j = 0; j < last_frag; j++) { 601 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 602 void *fptr; 603 604 fptr = skb_frag_address_safe(frag); 605 if (!fptr) 606 goto normal_tx; 607 608 memcpy(pdata, fptr, 
skb_frag_size(frag)); 609 pdata += skb_frag_size(frag); 610 } 611 612 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; 613 txbd->tx_bd_haddr = txr->data_mapping; 614 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); 615 prod = NEXT_TX(prod); 616 tx_push->tx_bd_opaque = txbd->tx_bd_opaque; 617 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 618 memcpy(txbd, tx_push1, sizeof(*txbd)); 619 prod = NEXT_TX(prod); 620 tx_push->doorbell = 621 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | 622 DB_RING_IDX(&txr->tx_db, prod)); 623 WRITE_ONCE(txr->tx_prod, prod); 624 625 tx_buf->is_push = 1; 626 netdev_tx_sent_queue(txq, skb->len); 627 wmb(); /* Sync is_push and byte queue before pushing data */ 628 629 push_len = (length + sizeof(*tx_push) + 7) / 8; 630 if (push_len > 16) { 631 __iowrite64_copy(db, tx_push_buf, 16); 632 __iowrite32_copy(db + 4, tx_push_buf + 1, 633 (push_len - 16) << 1); 634 } else { 635 __iowrite64_copy(db, tx_push_buf, push_len); 636 } 637 638 goto tx_done; 639 } 640 641 normal_tx: 642 if (length < BNXT_MIN_PKT_SIZE) { 643 pad = BNXT_MIN_PKT_SIZE - length; 644 if (skb_pad(skb, pad)) 645 /* SKB already freed. */ 646 goto tx_kick_pending; 647 length = BNXT_MIN_PKT_SIZE; 648 } 649 650 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); 651 652 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 653 goto tx_free; 654 655 dma_unmap_addr_set(tx_buf, mapping, mapping); 656 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | 657 TX_BD_CNT(last_frag + 2); 658 659 txbd->tx_bd_haddr = cpu_to_le64(mapping); 660 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); 661 662 prod = NEXT_TX(prod); 663 txbd1 = (struct tx_bd_ext *) 664 &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 665 666 txbd1->tx_bd_hsize_lflags = lflags; 667 if (skb_is_gso(skb)) { 668 bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4); 669 u32 hdr_len; 670 671 if (skb->encapsulation) { 672 if (udp_gso) 673 hdr_len = skb_inner_transport_offset(skb) + 674 sizeof(struct udphdr); 675 else 676 hdr_len = skb_inner_tcp_all_headers(skb); 677 } else if (udp_gso) { 678 hdr_len = skb_transport_offset(skb) + 679 sizeof(struct udphdr); 680 } else { 681 hdr_len = skb_tcp_all_headers(skb); 682 } 683 684 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | 685 TX_BD_FLAGS_T_IPID | 686 (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); 687 length = skb_shinfo(skb)->gso_size; 688 txbd1->tx_bd_mss = cpu_to_le32(length); 689 length += hdr_len; 690 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 691 txbd1->tx_bd_hsize_lflags |= 692 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); 693 txbd1->tx_bd_mss = 0; 694 } 695 696 length >>= 9; 697 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { 698 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", 699 skb->len); 700 i = 0; 701 goto tx_dma_error; 702 } 703 flags |= bnxt_lhint_arr[length]; 704 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 705 706 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 707 txbd1->tx_bd_cfa_action = 708 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); 709 txbd0 = txbd; 710 for (i = 0; i < last_frag; i++) { 711 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 712 713 prod = NEXT_TX(prod); 714 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; 715 716 len = skb_frag_size(frag); 717 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, 718 DMA_TO_DEVICE); 719 720 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) 721 goto tx_dma_error; 722 723 tx_buf = 
&txr->tx_buf_ring[RING_TX(bp, prod)]; 724 dma_unmap_addr_set(tx_buf, mapping, mapping); 725 726 txbd->tx_bd_haddr = cpu_to_le64(mapping); 727 728 flags = len << TX_BD_LEN_SHIFT; 729 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); 730 } 731 732 flags &= ~TX_BD_LEN; 733 txbd->tx_bd_len_flags_type = 734 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | 735 TX_BD_FLAGS_PACKET_END); 736 737 netdev_tx_sent_queue(txq, skb->len); 738 739 skb_tx_timestamp(skb); 740 741 prod = NEXT_TX(prod); 742 WRITE_ONCE(txr->tx_prod, prod); 743 744 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { 745 bnxt_txr_db_kick(bp, txr, prod); 746 } else { 747 if (free_size >= bp->tx_wake_thresh) 748 txbd0->tx_bd_len_flags_type |= 749 cpu_to_le32(TX_BD_FLAGS_NO_CMPL); 750 txr->kick_pending = 1; 751 } 752 753 tx_done: 754 755 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { 756 if (netdev_xmit_more() && !tx_buf->is_push) { 757 txbd0->tx_bd_len_flags_type &= 758 cpu_to_le32(~TX_BD_FLAGS_NO_CMPL); 759 bnxt_txr_db_kick(bp, txr, prod); 760 } 761 762 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), 763 bp->tx_wake_thresh); 764 } 765 return NETDEV_TX_OK; 766 767 tx_dma_error: 768 last_frag = i; 769 770 /* start back at beginning and unmap skb */ 771 prod = txr->tx_prod; 772 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 773 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 774 skb_headlen(skb), DMA_TO_DEVICE); 775 prod = NEXT_TX(prod); 776 777 /* unmap remaining mapped pages */ 778 for (i = 0; i < last_frag; i++) { 779 prod = NEXT_TX(prod); 780 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; 781 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 782 skb_frag_size(&skb_shinfo(skb)->frags[i]), 783 DMA_TO_DEVICE); 784 } 785 786 tx_free: 787 dev_kfree_skb_any(skb); 788 tx_kick_pending: 789 if (BNXT_TX_PTP_IS_SET(lflags)) { 790 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0; 791 atomic64_inc(&bp->ptp_cfg->stats.ts_err); 792 if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 793 /* set SKB to err so PTP worker will clean up */ 794 ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO); 795 } 796 if (txr->kick_pending) 797 bnxt_txr_db_kick(bp, txr, txr->tx_prod); 798 txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL; 799 dev_core_stats_tx_dropped_inc(dev); 800 return NETDEV_TX_OK; 801 } 802 803 /* Returns true if some remaining TX packets not processed. 
*/ 804 static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, 805 int budget) 806 { 807 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 808 struct pci_dev *pdev = bp->pdev; 809 u16 hw_cons = txr->tx_hw_cons; 810 unsigned int tx_bytes = 0; 811 u16 cons = txr->tx_cons; 812 int tx_pkts = 0; 813 bool rc = false; 814 815 while (RING_TX(bp, cons) != hw_cons) { 816 struct bnxt_sw_tx_bd *tx_buf; 817 struct sk_buff *skb; 818 bool is_ts_pkt; 819 int j, last; 820 821 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 822 skb = tx_buf->skb; 823 824 if (unlikely(!skb)) { 825 bnxt_sched_reset_txr(bp, txr, cons); 826 return rc; 827 } 828 829 is_ts_pkt = tx_buf->is_ts_pkt; 830 if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) { 831 rc = true; 832 break; 833 } 834 835 cons = NEXT_TX(cons); 836 tx_pkts++; 837 tx_bytes += skb->len; 838 tx_buf->skb = NULL; 839 tx_buf->is_ts_pkt = 0; 840 841 if (tx_buf->is_push) { 842 tx_buf->is_push = 0; 843 goto next_tx_int; 844 } 845 846 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), 847 skb_headlen(skb), DMA_TO_DEVICE); 848 last = tx_buf->nr_frags; 849 850 for (j = 0; j < last; j++) { 851 cons = NEXT_TX(cons); 852 tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; 853 dma_unmap_page( 854 &pdev->dev, 855 dma_unmap_addr(tx_buf, mapping), 856 skb_frag_size(&skb_shinfo(skb)->frags[j]), 857 DMA_TO_DEVICE); 858 } 859 if (unlikely(is_ts_pkt)) { 860 if (BNXT_CHIP_P5(bp)) { 861 /* PTP worker takes ownership of the skb */ 862 bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod); 863 skb = NULL; 864 } 865 } 866 867 next_tx_int: 868 cons = NEXT_TX(cons); 869 870 dev_consume_skb_any(skb); 871 } 872 873 WRITE_ONCE(txr->tx_cons, cons); 874 875 __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, 876 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, 877 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); 878 879 return rc; 880 } 881 882 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 883 { 884 struct bnxt_tx_ring_info *txr; 885 bool more = false; 886 int i; 887 888 bnxt_for_each_napi_tx(i, bnapi, txr) { 889 if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) 890 more |= __bnxt_tx_int(bp, txr, budget); 891 } 892 if (!more) 893 bnapi->events &= ~BNXT_TX_CMP_EVENT; 894 } 895 896 static bool bnxt_separate_head_pool(void) 897 { 898 return PAGE_SIZE > BNXT_RX_PAGE_SIZE; 899 } 900 901 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, 902 struct bnxt_rx_ring_info *rxr, 903 unsigned int *offset, 904 gfp_t gfp) 905 { 906 struct page *page; 907 908 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { 909 page = page_pool_dev_alloc_frag(rxr->page_pool, offset, 910 BNXT_RX_PAGE_SIZE); 911 } else { 912 page = page_pool_dev_alloc_pages(rxr->page_pool); 913 *offset = 0; 914 } 915 if (!page) 916 return NULL; 917 918 *mapping = page_pool_get_dma_addr(page) + *offset; 919 return page; 920 } 921 922 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, 923 struct bnxt_rx_ring_info *rxr, 924 gfp_t gfp) 925 { 926 unsigned int offset; 927 struct page *page; 928 929 page = page_pool_alloc_frag(rxr->head_pool, &offset, 930 bp->rx_buf_size, gfp); 931 if (!page) 932 return NULL; 933 934 *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset; 935 return page_address(page) + offset; 936 } 937 938 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 939 u16 prod, gfp_t gfp) 940 { 941 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 942 struct bnxt_sw_rx_bd 
*rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 943 dma_addr_t mapping; 944 945 if (BNXT_RX_PAGE_MODE(bp)) { 946 unsigned int offset; 947 struct page *page = 948 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 949 950 if (!page) 951 return -ENOMEM; 952 953 mapping += bp->rx_dma_offset; 954 rx_buf->data = page; 955 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; 956 } else { 957 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp); 958 959 if (!data) 960 return -ENOMEM; 961 962 rx_buf->data = data; 963 rx_buf->data_ptr = data + bp->rx_offset; 964 } 965 rx_buf->mapping = mapping; 966 967 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 968 return 0; 969 } 970 971 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) 972 { 973 u16 prod = rxr->rx_prod; 974 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 975 struct bnxt *bp = rxr->bnapi->bp; 976 struct rx_bd *cons_bd, *prod_bd; 977 978 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 979 cons_rx_buf = &rxr->rx_buf_ring[cons]; 980 981 prod_rx_buf->data = data; 982 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; 983 984 prod_rx_buf->mapping = cons_rx_buf->mapping; 985 986 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 987 cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; 988 989 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; 990 } 991 992 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 993 { 994 u16 next, max = rxr->rx_agg_bmap_size; 995 996 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); 997 if (next >= max) 998 next = find_first_zero_bit(rxr->rx_agg_bmap, max); 999 return next; 1000 } 1001 1002 static inline int bnxt_alloc_rx_page(struct bnxt *bp, 1003 struct bnxt_rx_ring_info *rxr, 1004 u16 prod, gfp_t gfp) 1005 { 1006 struct rx_bd *rxbd = 1007 &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 1008 struct bnxt_sw_rx_agg_bd *rx_agg_buf; 1009 struct page *page; 1010 dma_addr_t mapping; 1011 u16 sw_prod = rxr->rx_sw_agg_prod; 1012 unsigned int offset = 0; 1013 1014 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); 1015 1016 if (!page) 1017 return -ENOMEM; 1018 1019 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 1020 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 1021 1022 __set_bit(sw_prod, rxr->rx_agg_bmap); 1023 rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; 1024 rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1025 1026 rx_agg_buf->page = page; 1027 rx_agg_buf->offset = offset; 1028 rx_agg_buf->mapping = mapping; 1029 rxbd->rx_bd_haddr = cpu_to_le64(mapping); 1030 rxbd->rx_bd_opaque = sw_prod; 1031 return 0; 1032 } 1033 1034 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, 1035 struct bnxt_cp_ring_info *cpr, 1036 u16 cp_cons, u16 curr) 1037 { 1038 struct rx_agg_cmp *agg; 1039 1040 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); 1041 agg = (struct rx_agg_cmp *) 1042 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1043 return agg; 1044 } 1045 1046 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, 1047 struct bnxt_rx_ring_info *rxr, 1048 u16 agg_id, u16 curr) 1049 { 1050 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; 1051 1052 return &tpa_info->agg_arr[curr]; 1053 } 1054 1055 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, 1056 u16 start, u32 agg_bufs, bool tpa) 1057 { 1058 struct bnxt_napi *bnapi = cpr->bnapi; 1059 struct bnxt *bp = bnapi->bp; 1060 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1061 u16 prod = rxr->rx_agg_prod; 1062 u16 
sw_prod = rxr->rx_sw_agg_prod; 1063 bool p5_tpa = false; 1064 u32 i; 1065 1066 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1067 p5_tpa = true; 1068 1069 for (i = 0; i < agg_bufs; i++) { 1070 u16 cons; 1071 struct rx_agg_cmp *agg; 1072 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; 1073 struct rx_bd *prod_bd; 1074 struct page *page; 1075 1076 if (p5_tpa) 1077 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); 1078 else 1079 agg = bnxt_get_agg(bp, cpr, idx, start + i); 1080 cons = agg->rx_agg_cmp_opaque; 1081 __clear_bit(cons, rxr->rx_agg_bmap); 1082 1083 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) 1084 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); 1085 1086 __set_bit(sw_prod, rxr->rx_agg_bmap); 1087 prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; 1088 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1089 1090 /* It is possible for sw_prod to be equal to cons, so 1091 * set cons_rx_buf->page to NULL first. 1092 */ 1093 page = cons_rx_buf->page; 1094 cons_rx_buf->page = NULL; 1095 prod_rx_buf->page = page; 1096 prod_rx_buf->offset = cons_rx_buf->offset; 1097 1098 prod_rx_buf->mapping = cons_rx_buf->mapping; 1099 1100 prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; 1101 1102 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); 1103 prod_bd->rx_bd_opaque = sw_prod; 1104 1105 prod = NEXT_RX_AGG(prod); 1106 sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); 1107 } 1108 rxr->rx_agg_prod = prod; 1109 rxr->rx_sw_agg_prod = sw_prod; 1110 } 1111 1112 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, 1113 struct bnxt_rx_ring_info *rxr, 1114 u16 cons, void *data, u8 *data_ptr, 1115 dma_addr_t dma_addr, 1116 unsigned int offset_and_len) 1117 { 1118 unsigned int len = offset_and_len & 0xffff; 1119 struct page *page = data; 1120 u16 prod = rxr->rx_prod; 1121 struct sk_buff *skb; 1122 int err; 1123 1124 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1125 if (unlikely(err)) { 1126 bnxt_reuse_rx_data(rxr, cons, data); 1127 return NULL; 1128 } 1129 dma_addr -= bp->rx_dma_offset; 1130 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1131 bp->rx_dir); 1132 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); 1133 if (!skb) { 1134 page_pool_recycle_direct(rxr->page_pool, page); 1135 return NULL; 1136 } 1137 skb_mark_for_recycle(skb); 1138 skb_reserve(skb, bp->rx_offset); 1139 __skb_put(skb, len); 1140 1141 return skb; 1142 } 1143 1144 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, 1145 struct bnxt_rx_ring_info *rxr, 1146 u16 cons, void *data, u8 *data_ptr, 1147 dma_addr_t dma_addr, 1148 unsigned int offset_and_len) 1149 { 1150 unsigned int payload = offset_and_len >> 16; 1151 unsigned int len = offset_and_len & 0xffff; 1152 skb_frag_t *frag; 1153 struct page *page = data; 1154 u16 prod = rxr->rx_prod; 1155 struct sk_buff *skb; 1156 int off, err; 1157 1158 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1159 if (unlikely(err)) { 1160 bnxt_reuse_rx_data(rxr, cons, data); 1161 return NULL; 1162 } 1163 dma_addr -= bp->rx_dma_offset; 1164 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, 1165 bp->rx_dir); 1166 1167 if (unlikely(!payload)) 1168 payload = eth_get_headlen(bp->dev, data_ptr, len); 1169 1170 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 1171 if (!skb) { 1172 page_pool_recycle_direct(rxr->page_pool, page); 1173 return NULL; 1174 } 1175 1176 skb_mark_for_recycle(skb); 1177 off = (void *)data_ptr - page_address(page); 1178 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); 1179 
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, 1180 payload + NET_IP_ALIGN); 1181 1182 frag = &skb_shinfo(skb)->frags[0]; 1183 skb_frag_size_sub(frag, payload); 1184 skb_frag_off_add(frag, payload); 1185 skb->data_len -= payload; 1186 skb->tail += payload; 1187 1188 return skb; 1189 } 1190 1191 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, 1192 struct bnxt_rx_ring_info *rxr, u16 cons, 1193 void *data, u8 *data_ptr, 1194 dma_addr_t dma_addr, 1195 unsigned int offset_and_len) 1196 { 1197 u16 prod = rxr->rx_prod; 1198 struct sk_buff *skb; 1199 int err; 1200 1201 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); 1202 if (unlikely(err)) { 1203 bnxt_reuse_rx_data(rxr, cons, data); 1204 return NULL; 1205 } 1206 1207 skb = napi_build_skb(data, bp->rx_buf_size); 1208 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 1209 bp->rx_dir); 1210 if (!skb) { 1211 page_pool_free_va(rxr->head_pool, data, true); 1212 return NULL; 1213 } 1214 1215 skb_mark_for_recycle(skb); 1216 skb_reserve(skb, bp->rx_offset); 1217 skb_put(skb, offset_and_len & 0xffff); 1218 return skb; 1219 } 1220 1221 static u32 __bnxt_rx_agg_pages(struct bnxt *bp, 1222 struct bnxt_cp_ring_info *cpr, 1223 struct skb_shared_info *shinfo, 1224 u16 idx, u32 agg_bufs, bool tpa, 1225 struct xdp_buff *xdp) 1226 { 1227 struct bnxt_napi *bnapi = cpr->bnapi; 1228 struct pci_dev *pdev = bp->pdev; 1229 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1230 u16 prod = rxr->rx_agg_prod; 1231 u32 i, total_frag_len = 0; 1232 bool p5_tpa = false; 1233 1234 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) 1235 p5_tpa = true; 1236 1237 for (i = 0; i < agg_bufs; i++) { 1238 skb_frag_t *frag = &shinfo->frags[i]; 1239 u16 cons, frag_len; 1240 struct rx_agg_cmp *agg; 1241 struct bnxt_sw_rx_agg_bd *cons_rx_buf; 1242 struct page *page; 1243 dma_addr_t mapping; 1244 1245 if (p5_tpa) 1246 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); 1247 else 1248 agg = bnxt_get_agg(bp, cpr, idx, i); 1249 cons = agg->rx_agg_cmp_opaque; 1250 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & 1251 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; 1252 1253 cons_rx_buf = &rxr->rx_agg_ring[cons]; 1254 skb_frag_fill_page_desc(frag, cons_rx_buf->page, 1255 cons_rx_buf->offset, frag_len); 1256 shinfo->nr_frags = i + 1; 1257 __clear_bit(cons, rxr->rx_agg_bmap); 1258 1259 /* It is possible for bnxt_alloc_rx_page() to allocate 1260 * a sw_prod index that equals the cons index, so we 1261 * need to clear the cons entry now. 1262 */ 1263 mapping = cons_rx_buf->mapping; 1264 page = cons_rx_buf->page; 1265 cons_rx_buf->page = NULL; 1266 1267 if (xdp && page_is_pfmemalloc(page)) 1268 xdp_buff_set_frag_pfmemalloc(xdp); 1269 1270 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { 1271 --shinfo->nr_frags; 1272 cons_rx_buf->page = page; 1273 1274 /* Update prod since possibly some pages have been 1275 * allocated already. 
1276 */ 1277 rxr->rx_agg_prod = prod; 1278 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); 1279 return 0; 1280 } 1281 1282 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, 1283 bp->rx_dir); 1284 1285 total_frag_len += frag_len; 1286 prod = NEXT_RX_AGG(prod); 1287 } 1288 rxr->rx_agg_prod = prod; 1289 return total_frag_len; 1290 } 1291 1292 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, 1293 struct bnxt_cp_ring_info *cpr, 1294 struct sk_buff *skb, u16 idx, 1295 u32 agg_bufs, bool tpa) 1296 { 1297 struct skb_shared_info *shinfo = skb_shinfo(skb); 1298 u32 total_frag_len = 0; 1299 1300 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, 1301 agg_bufs, tpa, NULL); 1302 if (!total_frag_len) { 1303 skb_mark_for_recycle(skb); 1304 dev_kfree_skb(skb); 1305 return NULL; 1306 } 1307 1308 skb->data_len += total_frag_len; 1309 skb->len += total_frag_len; 1310 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; 1311 return skb; 1312 } 1313 1314 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, 1315 struct bnxt_cp_ring_info *cpr, 1316 struct xdp_buff *xdp, u16 idx, 1317 u32 agg_bufs, bool tpa) 1318 { 1319 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); 1320 u32 total_frag_len = 0; 1321 1322 if (!xdp_buff_has_frags(xdp)) 1323 shinfo->nr_frags = 0; 1324 1325 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, 1326 idx, agg_bufs, tpa, xdp); 1327 if (total_frag_len) { 1328 xdp_buff_set_frags_flag(xdp); 1329 shinfo->nr_frags = agg_bufs; 1330 shinfo->xdp_frags_size = total_frag_len; 1331 } 1332 return total_frag_len; 1333 } 1334 1335 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1336 u8 agg_bufs, u32 *raw_cons) 1337 { 1338 u16 last; 1339 struct rx_agg_cmp *agg; 1340 1341 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); 1342 last = RING_CMP(*raw_cons); 1343 agg = (struct rx_agg_cmp *) 1344 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; 1345 return RX_AGG_CMP_VALID(agg, *raw_cons); 1346 } 1347 1348 static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, 1349 unsigned int len, 1350 dma_addr_t mapping) 1351 { 1352 struct bnxt *bp = bnapi->bp; 1353 struct pci_dev *pdev = bp->pdev; 1354 struct sk_buff *skb; 1355 1356 skb = napi_alloc_skb(&bnapi->napi, len); 1357 if (!skb) 1358 return NULL; 1359 1360 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak, 1361 bp->rx_dir); 1362 1363 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, 1364 len + NET_IP_ALIGN); 1365 1366 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak, 1367 bp->rx_dir); 1368 1369 skb_put(skb, len); 1370 1371 return skb; 1372 } 1373 1374 static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, 1375 unsigned int len, 1376 dma_addr_t mapping) 1377 { 1378 return bnxt_copy_data(bnapi, data, len, mapping); 1379 } 1380 1381 static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, 1382 struct xdp_buff *xdp, 1383 unsigned int len, 1384 dma_addr_t mapping) 1385 { 1386 unsigned int metasize = 0; 1387 u8 *data = xdp->data; 1388 struct sk_buff *skb; 1389 1390 len = xdp->data_end - xdp->data_meta; 1391 metasize = xdp->data - xdp->data_meta; 1392 data = xdp->data_meta; 1393 1394 skb = bnxt_copy_data(bnapi, data, len, mapping); 1395 if (!skb) 1396 return skb; 1397 1398 if (metasize) { 1399 skb_metadata_set(skb, metasize); 1400 __skb_pull(skb, metasize); 1401 } 1402 1403 return skb; 1404 } 1405 1406 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 1407 u32 *raw_cons, void *cmp) 1408 { 
1409 struct rx_cmp *rxcmp = cmp; 1410 u32 tmp_raw_cons = *raw_cons; 1411 u8 cmp_type, agg_bufs = 0; 1412 1413 cmp_type = RX_CMP_TYPE(rxcmp); 1414 1415 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1416 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & 1417 RX_CMP_AGG_BUFS) >> 1418 RX_CMP_AGG_BUFS_SHIFT; 1419 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1420 struct rx_tpa_end_cmp *tpa_end = cmp; 1421 1422 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1423 return 0; 1424 1425 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1426 } 1427 1428 if (agg_bufs) { 1429 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 1430 return -EBUSY; 1431 } 1432 *raw_cons = tmp_raw_cons; 1433 return 0; 1434 } 1435 1436 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1437 { 1438 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1439 u16 idx = agg_id & MAX_TPA_P5_MASK; 1440 1441 if (test_bit(idx, map->agg_idx_bmap)) 1442 idx = find_first_zero_bit(map->agg_idx_bmap, 1443 BNXT_AGG_IDX_BMAP_SIZE); 1444 __set_bit(idx, map->agg_idx_bmap); 1445 map->agg_id_tbl[agg_id] = idx; 1446 return idx; 1447 } 1448 1449 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) 1450 { 1451 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1452 1453 __clear_bit(idx, map->agg_idx_bmap); 1454 } 1455 1456 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) 1457 { 1458 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; 1459 1460 return map->agg_id_tbl[agg_id]; 1461 } 1462 1463 static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info, 1464 struct rx_tpa_start_cmp *tpa_start, 1465 struct rx_tpa_start_cmp_ext *tpa_start1) 1466 { 1467 tpa_info->cfa_code_valid = 1; 1468 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); 1469 tpa_info->vlan_valid = 0; 1470 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { 1471 tpa_info->vlan_valid = 1; 1472 tpa_info->metadata = 1473 le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 1474 } 1475 } 1476 1477 static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info, 1478 struct rx_tpa_start_cmp *tpa_start, 1479 struct rx_tpa_start_cmp_ext *tpa_start1) 1480 { 1481 tpa_info->vlan_valid = 0; 1482 if (TPA_START_VLAN_VALID(tpa_start)) { 1483 u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start); 1484 u32 vlan_proto = ETH_P_8021Q; 1485 1486 tpa_info->vlan_valid = 1; 1487 if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD) 1488 vlan_proto = ETH_P_8021AD; 1489 tpa_info->metadata = vlan_proto << 16 | 1490 TPA_START_METADATA0_TCI(tpa_start1); 1491 } 1492 } 1493 1494 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1495 u8 cmp_type, struct rx_tpa_start_cmp *tpa_start, 1496 struct rx_tpa_start_cmp_ext *tpa_start1) 1497 { 1498 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; 1499 struct bnxt_tpa_info *tpa_info; 1500 u16 cons, prod, agg_id; 1501 struct rx_bd *prod_bd; 1502 dma_addr_t mapping; 1503 1504 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1505 agg_id = TPA_START_AGG_ID_P5(tpa_start); 1506 agg_id = bnxt_alloc_agg_idx(rxr, agg_id); 1507 } else { 1508 agg_id = TPA_START_AGG_ID(tpa_start); 1509 } 1510 cons = tpa_start->rx_tpa_start_cmp_opaque; 1511 prod = rxr->rx_prod; 1512 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1513 prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; 1514 tpa_info = &rxr->rx_tpa[agg_id]; 1515 1516 if (unlikely(cons != rxr->rx_next_cons || 1517 TPA_START_ERROR(tpa_start))) { 1518 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", 1519 cons, rxr->rx_next_cons, 1520 
TPA_START_ERROR_CODE(tpa_start1)); 1521 bnxt_sched_reset_rxr(bp, rxr); 1522 return; 1523 } 1524 prod_rx_buf->data = tpa_info->data; 1525 prod_rx_buf->data_ptr = tpa_info->data_ptr; 1526 1527 mapping = tpa_info->mapping; 1528 prod_rx_buf->mapping = mapping; 1529 1530 prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; 1531 1532 prod_bd->rx_bd_haddr = cpu_to_le64(mapping); 1533 1534 tpa_info->data = cons_rx_buf->data; 1535 tpa_info->data_ptr = cons_rx_buf->data_ptr; 1536 cons_rx_buf->data = NULL; 1537 tpa_info->mapping = cons_rx_buf->mapping; 1538 1539 tpa_info->len = 1540 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> 1541 RX_TPA_START_CMP_LEN_SHIFT; 1542 if (likely(TPA_START_HASH_VALID(tpa_start))) { 1543 tpa_info->hash_type = PKT_HASH_TYPE_L4; 1544 tpa_info->gso_type = SKB_GSO_TCPV4; 1545 if (TPA_START_IS_IPV6(tpa_start1)) 1546 tpa_info->gso_type = SKB_GSO_TCPV6; 1547 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ 1548 else if (!BNXT_CHIP_P4_PLUS(bp) && 1549 TPA_START_HASH_TYPE(tpa_start) == 3) 1550 tpa_info->gso_type = SKB_GSO_TCPV6; 1551 tpa_info->rss_hash = 1552 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); 1553 } else { 1554 tpa_info->hash_type = PKT_HASH_TYPE_NONE; 1555 tpa_info->gso_type = 0; 1556 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); 1557 } 1558 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 1559 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 1560 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) 1561 bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); 1562 else 1563 bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); 1564 tpa_info->agg_count = 0; 1565 1566 rxr->rx_prod = NEXT_RX(prod); 1567 cons = RING_RX(bp, NEXT_RX(cons)); 1568 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 1569 cons_rx_buf = &rxr->rx_buf_ring[cons]; 1570 1571 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); 1572 rxr->rx_prod = NEXT_RX(rxr->rx_prod); 1573 cons_rx_buf->data = NULL; 1574 } 1575 1576 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) 1577 { 1578 if (agg_bufs) 1579 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); 1580 } 1581 1582 #ifdef CONFIG_INET 1583 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) 1584 { 1585 struct udphdr *uh = NULL; 1586 1587 if (ip_proto == htons(ETH_P_IP)) { 1588 struct iphdr *iph = (struct iphdr *)skb->data; 1589 1590 if (iph->protocol == IPPROTO_UDP) 1591 uh = (struct udphdr *)(iph + 1); 1592 } else { 1593 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1594 1595 if (iph->nexthdr == IPPROTO_UDP) 1596 uh = (struct udphdr *)(iph + 1); 1597 } 1598 if (uh) { 1599 if (uh->check) 1600 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 1601 else 1602 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1603 } 1604 } 1605 #endif 1606 1607 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 1608 int payload_off, int tcp_ts, 1609 struct sk_buff *skb) 1610 { 1611 #ifdef CONFIG_INET 1612 struct tcphdr *th; 1613 int len, nw_off; 1614 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1615 u32 hdr_info = tpa_info->hdr_info; 1616 bool loopback = false; 1617 1618 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1619 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1620 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1621 1622 /* If the packet is an internal loopback packet, the offsets will 1623 * have an extra 4 bytes. 
1624 */ 1625 if (inner_mac_off == 4) { 1626 loopback = true; 1627 } else if (inner_mac_off > 4) { 1628 __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 1629 ETH_HLEN - 2)); 1630 1631 /* We only support inner iPv4/ipv6. If we don't see the 1632 * correct protocol ID, it must be a loopback packet where 1633 * the offsets are off by 4. 1634 */ 1635 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) 1636 loopback = true; 1637 } 1638 if (loopback) { 1639 /* internal loopback packet, subtract all offsets by 4 */ 1640 inner_ip_off -= 4; 1641 inner_mac_off -= 4; 1642 outer_ip_off -= 4; 1643 } 1644 1645 nw_off = inner_ip_off - ETH_HLEN; 1646 skb_set_network_header(skb, nw_off); 1647 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 1648 struct ipv6hdr *iph = ipv6_hdr(skb); 1649 1650 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1651 len = skb->len - skb_transport_offset(skb); 1652 th = tcp_hdr(skb); 1653 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1654 } else { 1655 struct iphdr *iph = ip_hdr(skb); 1656 1657 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1658 len = skb->len - skb_transport_offset(skb); 1659 th = tcp_hdr(skb); 1660 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1661 } 1662 1663 if (inner_mac_off) { /* tunnel */ 1664 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1665 ETH_HLEN - 2)); 1666 1667 bnxt_gro_tunnel(skb, proto); 1668 } 1669 #endif 1670 return skb; 1671 } 1672 1673 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, 1674 int payload_off, int tcp_ts, 1675 struct sk_buff *skb) 1676 { 1677 #ifdef CONFIG_INET 1678 u16 outer_ip_off, inner_ip_off, inner_mac_off; 1679 u32 hdr_info = tpa_info->hdr_info; 1680 int iphdr_len, nw_off; 1681 1682 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 1683 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 1684 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 1685 1686 nw_off = inner_ip_off - ETH_HLEN; 1687 skb_set_network_header(skb, nw_off); 1688 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ? 
1689 sizeof(struct ipv6hdr) : sizeof(struct iphdr); 1690 skb_set_transport_header(skb, nw_off + iphdr_len); 1691 1692 if (inner_mac_off) { /* tunnel */ 1693 __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1694 ETH_HLEN - 2)); 1695 1696 bnxt_gro_tunnel(skb, proto); 1697 } 1698 #endif 1699 return skb; 1700 } 1701 1702 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1703 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 1704 1705 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1706 int payload_off, int tcp_ts, 1707 struct sk_buff *skb) 1708 { 1709 #ifdef CONFIG_INET 1710 struct tcphdr *th; 1711 int len, nw_off, tcp_opt_len = 0; 1712 1713 if (tcp_ts) 1714 tcp_opt_len = 12; 1715 1716 if (tpa_info->gso_type == SKB_GSO_TCPV4) { 1717 struct iphdr *iph; 1718 1719 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len - 1720 ETH_HLEN; 1721 skb_set_network_header(skb, nw_off); 1722 iph = ip_hdr(skb); 1723 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 1724 len = skb->len - skb_transport_offset(skb); 1725 th = tcp_hdr(skb); 1726 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 1727 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { 1728 struct ipv6hdr *iph; 1729 1730 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len - 1731 ETH_HLEN; 1732 skb_set_network_header(skb, nw_off); 1733 iph = ipv6_hdr(skb); 1734 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 1735 len = skb->len - skb_transport_offset(skb); 1736 th = tcp_hdr(skb); 1737 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 1738 } else { 1739 dev_kfree_skb_any(skb); 1740 return NULL; 1741 } 1742 1743 if (nw_off) /* tunnel */ 1744 bnxt_gro_tunnel(skb, skb->protocol); 1745 #endif 1746 return skb; 1747 } 1748 1749 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1750 struct bnxt_tpa_info *tpa_info, 1751 struct rx_tpa_end_cmp *tpa_end, 1752 struct rx_tpa_end_cmp_ext *tpa_end1, 1753 struct sk_buff *skb) 1754 { 1755 #ifdef CONFIG_INET 1756 int payload_off; 1757 u16 segs; 1758 1759 segs = TPA_END_TPA_SEGS(tpa_end); 1760 if (segs == 1) 1761 return skb; 1762 1763 NAPI_GRO_CB(skb)->count = segs; 1764 skb_shinfo(skb)->gso_size = 1765 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1766 skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1767 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1768 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); 1769 else 1770 payload_off = TPA_END_PAYLOAD_OFF(tpa_end); 1771 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1772 if (likely(skb)) 1773 tcp_gro_complete(skb); 1774 #endif 1775 return skb; 1776 } 1777 1778 /* Given the cfa_code of a received packet determine which 1779 * netdev (vf-rep or PF) the packet is destined to. 1780 */ 1781 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) 1782 { 1783 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); 1784 1785 /* if vf-rep dev is NULL, the must belongs to the PF */ 1786 return dev ? 
dev : bp->dev; 1787 } 1788 1789 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, 1790 struct bnxt_cp_ring_info *cpr, 1791 u32 *raw_cons, 1792 struct rx_tpa_end_cmp *tpa_end, 1793 struct rx_tpa_end_cmp_ext *tpa_end1, 1794 u8 *event) 1795 { 1796 struct bnxt_napi *bnapi = cpr->bnapi; 1797 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 1798 struct net_device *dev = bp->dev; 1799 u8 *data_ptr, agg_bufs; 1800 unsigned int len; 1801 struct bnxt_tpa_info *tpa_info; 1802 dma_addr_t mapping; 1803 struct sk_buff *skb; 1804 u16 idx = 0, agg_id; 1805 void *data; 1806 bool gro; 1807 1808 if (unlikely(bnapi->in_reset)) { 1809 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); 1810 1811 if (rc < 0) 1812 return ERR_PTR(-EBUSY); 1813 return NULL; 1814 } 1815 1816 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 1817 agg_id = TPA_END_AGG_ID_P5(tpa_end); 1818 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1819 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); 1820 tpa_info = &rxr->rx_tpa[agg_id]; 1821 if (unlikely(agg_bufs != tpa_info->agg_count)) { 1822 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", 1823 agg_bufs, tpa_info->agg_count); 1824 agg_bufs = tpa_info->agg_count; 1825 } 1826 tpa_info->agg_count = 0; 1827 *event |= BNXT_AGG_EVENT; 1828 bnxt_free_agg_idx(rxr, agg_id); 1829 idx = agg_id; 1830 gro = !!(bp->flags & BNXT_FLAG_GRO); 1831 } else { 1832 agg_id = TPA_END_AGG_ID(tpa_end); 1833 agg_bufs = TPA_END_AGG_BUFS(tpa_end); 1834 tpa_info = &rxr->rx_tpa[agg_id]; 1835 idx = RING_CMP(*raw_cons); 1836 if (agg_bufs) { 1837 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) 1838 return ERR_PTR(-EBUSY); 1839 1840 *event |= BNXT_AGG_EVENT; 1841 idx = NEXT_CMP(idx); 1842 } 1843 gro = !!TPA_END_GRO(tpa_end); 1844 } 1845 data = tpa_info->data; 1846 data_ptr = tpa_info->data_ptr; 1847 prefetch(data_ptr); 1848 len = tpa_info->len; 1849 mapping = tpa_info->mapping; 1850 1851 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1852 bnxt_abort_tpa(cpr, idx, agg_bufs); 1853 if (agg_bufs > MAX_SKB_FRAGS) 1854 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1855 agg_bufs, (int)MAX_SKB_FRAGS); 1856 return NULL; 1857 } 1858 1859 if (len <= bp->rx_copybreak) { 1860 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); 1861 if (!skb) { 1862 bnxt_abort_tpa(cpr, idx, agg_bufs); 1863 cpr->sw_stats->rx.rx_oom_discards += 1; 1864 return NULL; 1865 } 1866 } else { 1867 u8 *new_data; 1868 dma_addr_t new_mapping; 1869 1870 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr, 1871 GFP_ATOMIC); 1872 if (!new_data) { 1873 bnxt_abort_tpa(cpr, idx, agg_bufs); 1874 cpr->sw_stats->rx.rx_oom_discards += 1; 1875 return NULL; 1876 } 1877 1878 tpa_info->data = new_data; 1879 tpa_info->data_ptr = new_data + bp->rx_offset; 1880 tpa_info->mapping = new_mapping; 1881 1882 skb = napi_build_skb(data, bp->rx_buf_size); 1883 dma_sync_single_for_cpu(&bp->pdev->dev, mapping, 1884 bp->rx_buf_use_size, bp->rx_dir); 1885 1886 if (!skb) { 1887 page_pool_free_va(rxr->head_pool, data, true); 1888 bnxt_abort_tpa(cpr, idx, agg_bufs); 1889 cpr->sw_stats->rx.rx_oom_discards += 1; 1890 return NULL; 1891 } 1892 skb_mark_for_recycle(skb); 1893 skb_reserve(skb, bp->rx_offset); 1894 skb_put(skb, len); 1895 } 1896 1897 if (agg_bufs) { 1898 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); 1899 if (!skb) { 1900 /* Page reuse already handled by bnxt_rx_pages(). 
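* The aggregation pages have already been recycled and the skb freed by bnxt_rx_agg_pages_skb(), so only the OOM drop needs to be counted here.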
*/ 1901 cpr->sw_stats->rx.rx_oom_discards += 1; 1902 return NULL; 1903 } 1904 } 1905 1906 if (tpa_info->cfa_code_valid) 1907 dev = bnxt_get_pkt_dev(bp, tpa_info->cfa_code); 1908 skb->protocol = eth_type_trans(skb, dev); 1909 1910 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) 1911 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); 1912 1913 if (tpa_info->vlan_valid && 1914 (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { 1915 __be16 vlan_proto = htons(tpa_info->metadata >> 1916 RX_CMP_FLAGS2_METADATA_TPID_SFT); 1917 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1918 1919 if (eth_type_vlan(vlan_proto)) { 1920 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 1921 } else { 1922 dev_kfree_skb(skb); 1923 return NULL; 1924 } 1925 } 1926 1927 skb_checksum_none_assert(skb); 1928 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { 1929 skb->ip_summed = CHECKSUM_UNNECESSARY; 1930 skb->csum_level = 1931 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; 1932 } 1933 1934 if (gro) 1935 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1936 1937 return skb; 1938 } 1939 1940 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, 1941 struct rx_agg_cmp *rx_agg) 1942 { 1943 u16 agg_id = TPA_AGG_AGG_ID(rx_agg); 1944 struct bnxt_tpa_info *tpa_info; 1945 1946 agg_id = bnxt_lookup_agg_idx(rxr, agg_id); 1947 tpa_info = &rxr->rx_tpa[agg_id]; 1948 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); 1949 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; 1950 } 1951 1952 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, 1953 struct sk_buff *skb) 1954 { 1955 skb_mark_for_recycle(skb); 1956 1957 if (skb->dev != bp->dev) { 1958 /* this packet belongs to a vf-rep */ 1959 bnxt_vf_rep_rx(bp, skb); 1960 return; 1961 } 1962 skb_record_rx_queue(skb, bnapi->index); 1963 napi_gro_receive(&bnapi->napi, skb); 1964 } 1965 1966 static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, 1967 struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) 1968 { 1969 u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); 1970 1971 if (BNXT_PTP_RX_TS_VALID(flags)) 1972 goto ts_valid; 1973 if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) 1974 return false; 1975 1976 ts_valid: 1977 *cmpl_ts = ts; 1978 return true; 1979 } 1980 1981 static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, 1982 struct rx_cmp *rxcmp, 1983 struct rx_cmp_ext *rxcmp1) 1984 { 1985 __be16 vlan_proto; 1986 u16 vtag; 1987 1988 if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1989 __le32 flags2 = rxcmp1->rx_cmp_flags2; 1990 u32 meta_data; 1991 1992 if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) 1993 return skb; 1994 1995 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1996 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; 1997 vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); 1998 if (eth_type_vlan(vlan_proto)) 1999 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 2000 else 2001 goto vlan_err; 2002 } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2003 if (RX_CMP_VLAN_VALID(rxcmp)) { 2004 u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); 2005 2006 if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) 2007 vlan_proto = htons(ETH_P_8021Q); 2008 else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) 2009 vlan_proto = htons(ETH_P_8021AD); 2010 else 2011 goto vlan_err; 2012 vtag = RX_CMP_METADATA0_TCI(rxcmp1); 2013 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); 2014 } 2015 } 2016 return skb; 2017 vlan_err: 2018 skb_mark_for_recycle(skb); 2019 dev_kfree_skb(skb); 2020 return NULL; 
2021 } 2022 2023 static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, 2024 struct rx_cmp *rxcmp) 2025 { 2026 u8 ext_op; 2027 2028 ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); 2029 switch (ext_op) { 2030 case EXT_OP_INNER_4: 2031 case EXT_OP_OUTER_4: 2032 case EXT_OP_INNFL_3: 2033 case EXT_OP_OUTFL_3: 2034 return PKT_HASH_TYPE_L4; 2035 default: 2036 return PKT_HASH_TYPE_L3; 2037 } 2038 } 2039 2040 /* returns the following: 2041 * 1 - 1 packet successfully received 2042 * 0 - successful TPA_START, packet not completed yet 2043 * -EBUSY - completion ring does not have all the agg buffers yet 2044 * -ENOMEM - packet aborted due to out of memory 2045 * -EIO - packet aborted due to hw error indicated in BD 2046 */ 2047 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2048 u32 *raw_cons, u8 *event) 2049 { 2050 struct bnxt_napi *bnapi = cpr->bnapi; 2051 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2052 struct net_device *dev = bp->dev; 2053 struct rx_cmp *rxcmp; 2054 struct rx_cmp_ext *rxcmp1; 2055 u32 tmp_raw_cons = *raw_cons; 2056 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); 2057 struct skb_shared_info *sinfo; 2058 struct bnxt_sw_rx_bd *rx_buf; 2059 unsigned int len; 2060 u8 *data_ptr, agg_bufs, cmp_type; 2061 bool xdp_active = false; 2062 dma_addr_t dma_addr; 2063 struct sk_buff *skb; 2064 struct xdp_buff xdp; 2065 u32 flags, misc; 2066 u32 cmpl_ts; 2067 void *data; 2068 int rc = 0; 2069 2070 rxcmp = (struct rx_cmp *) 2071 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2072 2073 cmp_type = RX_CMP_TYPE(rxcmp); 2074 2075 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { 2076 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); 2077 goto next_rx_no_prod_no_len; 2078 } 2079 2080 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2081 cp_cons = RING_CMP(tmp_raw_cons); 2082 rxcmp1 = (struct rx_cmp_ext *) 2083 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2084 2085 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2086 return -EBUSY; 2087 2088 /* The valid test of the entry must be done first before 2089 * reading any further. 
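* The dma_rmb() below provides that ordering: it keeps the CPU from loading the remaining descriptor words before the valid bit written by the device is observed.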
2090 */ 2091 dma_rmb(); 2092 prod = rxr->rx_prod; 2093 2094 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || 2095 cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 2096 bnxt_tpa_start(bp, rxr, cmp_type, 2097 (struct rx_tpa_start_cmp *)rxcmp, 2098 (struct rx_tpa_start_cmp_ext *)rxcmp1); 2099 2100 *event |= BNXT_RX_EVENT; 2101 goto next_rx_no_prod_no_len; 2102 2103 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2104 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, 2105 (struct rx_tpa_end_cmp *)rxcmp, 2106 (struct rx_tpa_end_cmp_ext *)rxcmp1, event); 2107 2108 if (IS_ERR(skb)) 2109 return -EBUSY; 2110 2111 rc = -ENOMEM; 2112 if (likely(skb)) { 2113 bnxt_deliver_skb(bp, bnapi, skb); 2114 rc = 1; 2115 } 2116 *event |= BNXT_RX_EVENT; 2117 goto next_rx_no_prod_no_len; 2118 } 2119 2120 cons = rxcmp->rx_cmp_opaque; 2121 if (unlikely(cons != rxr->rx_next_cons)) { 2122 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); 2123 2124 /* 0xffff is forced error, don't print it */ 2125 if (rxr->rx_next_cons != 0xffff) 2126 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", 2127 cons, rxr->rx_next_cons); 2128 bnxt_sched_reset_rxr(bp, rxr); 2129 if (rc1) 2130 return rc1; 2131 goto next_rx_no_prod_no_len; 2132 } 2133 rx_buf = &rxr->rx_buf_ring[cons]; 2134 data = rx_buf->data; 2135 data_ptr = rx_buf->data_ptr; 2136 prefetch(data_ptr); 2137 2138 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); 2139 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; 2140 2141 if (agg_bufs) { 2142 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) 2143 return -EBUSY; 2144 2145 cp_cons = NEXT_CMP(cp_cons); 2146 *event |= BNXT_AGG_EVENT; 2147 } 2148 *event |= BNXT_RX_EVENT; 2149 2150 rx_buf->data = NULL; 2151 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { 2152 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); 2153 2154 bnxt_reuse_rx_data(rxr, cons, data); 2155 if (agg_bufs) 2156 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, 2157 false); 2158 2159 rc = -EIO; 2160 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { 2161 bnapi->cp_ring.sw_stats->rx.rx_buf_errors++; 2162 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 2163 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { 2164 netdev_warn_once(bp->dev, "RX buffer error %x\n", 2165 rx_err); 2166 bnxt_sched_reset_rxr(bp, rxr); 2167 } 2168 } 2169 goto next_rx_no_len; 2170 } 2171 2172 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); 2173 len = flags >> RX_CMP_LEN_SHIFT; 2174 dma_addr = rx_buf->mapping; 2175 2176 if (bnxt_xdp_attached(bp, rxr)) { 2177 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); 2178 if (agg_bufs) { 2179 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, 2180 cp_cons, agg_bufs, 2181 false); 2182 if (!frag_len) 2183 goto oom_next_rx; 2184 2185 } 2186 xdp_active = true; 2187 } 2188 2189 if (xdp_active) { 2190 if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { 2191 rc = 1; 2192 goto next_rx; 2193 } 2194 if (xdp_buff_has_frags(&xdp)) { 2195 sinfo = xdp_get_shared_info_from_buff(&xdp); 2196 agg_bufs = sinfo->nr_frags; 2197 } else { 2198 agg_bufs = 0; 2199 } 2200 } 2201 2202 if (len <= bp->rx_copybreak) { 2203 if (!xdp_active) 2204 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); 2205 else 2206 skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr); 2207 bnxt_reuse_rx_data(rxr, cons, data); 2208 if (!skb) { 2209 if (agg_bufs) { 2210 if (!xdp_active) 2211 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, 2212 agg_bufs, false); 2213 else 2214 bnxt_xdp_buff_frags_free(rxr, &xdp); 2215 } 2216 goto oom_next_rx; 2217 } 2218 } 
else { 2219 u32 payload; 2220 2221 if (rx_buf->data_ptr == data_ptr) 2222 payload = misc & RX_CMP_PAYLOAD_OFFSET; 2223 else 2224 payload = 0; 2225 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, 2226 payload | len); 2227 if (!skb) 2228 goto oom_next_rx; 2229 } 2230 2231 if (agg_bufs) { 2232 if (!xdp_active) { 2233 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); 2234 if (!skb) 2235 goto oom_next_rx; 2236 } else { 2237 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, 2238 rxr->page_pool, &xdp); 2239 if (!skb) { 2240 /* we should be able to free the old skb here */ 2241 bnxt_xdp_buff_frags_free(rxr, &xdp); 2242 goto oom_next_rx; 2243 } 2244 } 2245 } 2246 2247 if (RX_CMP_HASH_VALID(rxcmp)) { 2248 enum pkt_hash_types type; 2249 2250 if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2251 type = bnxt_rss_ext_op(bp, rxcmp); 2252 } else { 2253 u32 itypes = RX_CMP_ITYPES(rxcmp); 2254 2255 if (itypes == RX_CMP_FLAGS_ITYPE_TCP || 2256 itypes == RX_CMP_FLAGS_ITYPE_UDP) 2257 type = PKT_HASH_TYPE_L4; 2258 else 2259 type = PKT_HASH_TYPE_L3; 2260 } 2261 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); 2262 } 2263 2264 if (cmp_type == CMP_TYPE_RX_L2_CMP) 2265 dev = bnxt_get_pkt_dev(bp, RX_CMP_CFA_CODE(rxcmp1)); 2266 skb->protocol = eth_type_trans(skb, dev); 2267 2268 if (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { 2269 skb = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); 2270 if (!skb) 2271 goto next_rx; 2272 } 2273 2274 skb_checksum_none_assert(skb); 2275 if (RX_CMP_L4_CS_OK(rxcmp1)) { 2276 if (dev->features & NETIF_F_RXCSUM) { 2277 skb->ip_summed = CHECKSUM_UNNECESSARY; 2278 skb->csum_level = RX_CMP_ENCAP(rxcmp1); 2279 } 2280 } else { 2281 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) { 2282 if (dev->features & NETIF_F_RXCSUM) 2283 bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++; 2284 } 2285 } 2286 2287 if (bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { 2288 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 2289 u64 ns, ts; 2290 2291 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { 2292 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2293 2294 ns = bnxt_timecounter_cyc2time(ptp, ts); 2295 memset(skb_hwtstamps(skb), 0, 2296 sizeof(*skb_hwtstamps(skb))); 2297 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); 2298 } 2299 } 2300 } 2301 bnxt_deliver_skb(bp, bnapi, skb); 2302 rc = 1; 2303 2304 next_rx: 2305 cpr->rx_packets += 1; 2306 cpr->rx_bytes += len; 2307 2308 next_rx_no_len: 2309 rxr->rx_prod = NEXT_RX(prod); 2310 rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); 2311 2312 next_rx_no_prod_no_len: 2313 *raw_cons = tmp_raw_cons; 2314 2315 return rc; 2316 2317 oom_next_rx: 2318 cpr->sw_stats->rx.rx_oom_discards += 1; 2319 rc = -ENOMEM; 2320 goto next_rx; 2321 } 2322 2323 /* In netpoll mode, if we are using a combined completion ring, we need to 2324 * discard the rx packets and recycle the buffers. 
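* This is done by flagging the completion as an error (CRC error for L2 completions, TPA errors for TPA end completions) so that bnxt_rx_pkt() drops the packet and recycles the buffers.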
2325 */ 2326 static int bnxt_force_rx_discard(struct bnxt *bp, 2327 struct bnxt_cp_ring_info *cpr, 2328 u32 *raw_cons, u8 *event) 2329 { 2330 u32 tmp_raw_cons = *raw_cons; 2331 struct rx_cmp_ext *rxcmp1; 2332 struct rx_cmp *rxcmp; 2333 u16 cp_cons; 2334 u8 cmp_type; 2335 int rc; 2336 2337 cp_cons = RING_CMP(tmp_raw_cons); 2338 rxcmp = (struct rx_cmp *) 2339 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2340 2341 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 2342 cp_cons = RING_CMP(tmp_raw_cons); 2343 rxcmp1 = (struct rx_cmp_ext *) 2344 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 2345 2346 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 2347 return -EBUSY; 2348 2349 /* The valid test of the entry must be done first before 2350 * reading any further. 2351 */ 2352 dma_rmb(); 2353 cmp_type = RX_CMP_TYPE(rxcmp); 2354 if (cmp_type == CMP_TYPE_RX_L2_CMP || 2355 cmp_type == CMP_TYPE_RX_L2_V3_CMP) { 2356 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 2357 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 2358 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 2359 struct rx_tpa_end_cmp_ext *tpa_end1; 2360 2361 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 2362 tpa_end1->rx_tpa_end_cmp_errors_v2 |= 2363 cpu_to_le32(RX_TPA_END_CMP_ERRORS); 2364 } 2365 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); 2366 if (rc && rc != -EBUSY) 2367 cpr->sw_stats->rx.rx_netpoll_discards += 1; 2368 return rc; 2369 } 2370 2371 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) 2372 { 2373 struct bnxt_fw_health *fw_health = bp->fw_health; 2374 u32 reg = fw_health->regs[reg_idx]; 2375 u32 reg_type, reg_off, val = 0; 2376 2377 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 2378 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 2379 switch (reg_type) { 2380 case BNXT_FW_HEALTH_REG_TYPE_CFG: 2381 pci_read_config_dword(bp->pdev, reg_off, &val); 2382 break; 2383 case BNXT_FW_HEALTH_REG_TYPE_GRC: 2384 reg_off = fw_health->mapped_regs[reg_idx]; 2385 fallthrough; 2386 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 2387 val = readl(bp->bar0 + reg_off); 2388 break; 2389 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 2390 val = readl(bp->bar1 + reg_off); 2391 break; 2392 } 2393 if (reg_idx == BNXT_FW_RESET_INPROG_REG) 2394 val &= fw_health->fw_reset_inprog_reg_mask; 2395 return val; 2396 } 2397 2398 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) 2399 { 2400 int i; 2401 2402 for (i = 0; i < bp->rx_nr_rings; i++) { 2403 u16 grp_idx = bp->rx_ring[i].bnapi->index; 2404 struct bnxt_ring_grp_info *grp_info; 2405 2406 grp_info = &bp->grp_info[grp_idx]; 2407 if (grp_info->agg_fw_ring_id == ring_id) 2408 return grp_idx; 2409 } 2410 return INVALID_HW_RING_ID; 2411 } 2412 2413 static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) 2414 { 2415 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2416 2417 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 2418 return link_info->force_link_speed2; 2419 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) 2420 return link_info->force_pam4_link_speed; 2421 return link_info->force_link_speed; 2422 } 2423 2424 static void bnxt_set_force_speed(struct bnxt_link_info *link_info) 2425 { 2426 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2427 2428 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2429 link_info->req_link_speed = link_info->force_link_speed2; 2430 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2431 switch (link_info->req_link_speed) { 2432 case BNXT_LINK_SPEED_50GB_PAM4: 2433 case BNXT_LINK_SPEED_100GB_PAM4: 2434 case BNXT_LINK_SPEED_200GB_PAM4: 2435 case BNXT_LINK_SPEED_400GB_PAM4: 
2436 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2437 break; 2438 case BNXT_LINK_SPEED_100GB_PAM4_112: 2439 case BNXT_LINK_SPEED_200GB_PAM4_112: 2440 case BNXT_LINK_SPEED_400GB_PAM4_112: 2441 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; 2442 break; 2443 default: 2444 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2445 } 2446 return; 2447 } 2448 link_info->req_link_speed = link_info->force_link_speed; 2449 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; 2450 if (link_info->force_pam4_link_speed) { 2451 link_info->req_link_speed = link_info->force_pam4_link_speed; 2452 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; 2453 } 2454 } 2455 2456 static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) 2457 { 2458 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2459 2460 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2461 link_info->advertising = link_info->auto_link_speeds2; 2462 return; 2463 } 2464 link_info->advertising = link_info->auto_link_speeds; 2465 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; 2466 } 2467 2468 static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) 2469 { 2470 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2471 2472 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2473 if (link_info->req_link_speed != link_info->force_link_speed2) 2474 return true; 2475 return false; 2476 } 2477 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && 2478 link_info->req_link_speed != link_info->force_link_speed) 2479 return true; 2480 if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && 2481 link_info->req_link_speed != link_info->force_pam4_link_speed) 2482 return true; 2483 return false; 2484 } 2485 2486 static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) 2487 { 2488 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 2489 2490 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 2491 if (link_info->advertising != link_info->auto_link_speeds2) 2492 return true; 2493 return false; 2494 } 2495 if (link_info->advertising != link_info->auto_link_speeds || 2496 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) 2497 return true; 2498 return false; 2499 } 2500 2501 bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type) 2502 { 2503 u32 flags = bp->ctx->ctx_arr[type].flags; 2504 2505 return (flags & BNXT_CTX_MEM_TYPE_VALID) && 2506 ((flags & BNXT_CTX_MEM_FW_TRACE) || 2507 (flags & BNXT_CTX_MEM_FW_BIN_TRACE)); 2508 } 2509 2510 static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm) 2511 { 2512 u32 mem_size, pages, rem_bytes, magic_byte_offset; 2513 u16 trace_type = bnxt_bstore_to_trace[ctxm->type]; 2514 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 2515 struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl; 2516 struct bnxt_bs_trace_info *bs_trace; 2517 int last_pg; 2518 2519 if (ctxm->instance_bmap && ctxm->instance_bmap > 1) 2520 return; 2521 2522 mem_size = ctxm->max_entries * ctxm->entry_size; 2523 rem_bytes = mem_size % BNXT_PAGE_SIZE; 2524 pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 2525 2526 last_pg = (pages - 1) & (MAX_CTX_PAGES - 1); 2527 magic_byte_offset = (rem_bytes ? 
rem_bytes : BNXT_PAGE_SIZE) - 1; 2528 2529 rmem = &ctx_pg[0].ring_mem; 2530 bs_trace = &bp->bs_trace[trace_type]; 2531 bs_trace->ctx_type = ctxm->type; 2532 bs_trace->trace_type = trace_type; 2533 if (pages > MAX_CTX_PAGES) { 2534 int last_pg_dir = rmem->nr_pages - 1; 2535 2536 rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem; 2537 bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg]; 2538 } else { 2539 bs_trace->magic_byte = rmem->pg_arr[last_pg]; 2540 } 2541 bs_trace->magic_byte += magic_byte_offset; 2542 *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE; 2543 } 2544 2545 #define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \ 2546 (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\ 2547 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT) 2548 2549 #define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \ 2550 (((data2) & \ 2551 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\ 2552 ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT) 2553 2554 #define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ 2555 ((data2) & \ 2556 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) 2557 2558 #define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ 2559 (((data2) & \ 2560 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\ 2561 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) 2562 2563 #define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ 2564 ((data1) & \ 2565 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) 2566 2567 #define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ 2568 (((data1) & \ 2569 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\ 2570 ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) 2571 2572 /* Return true if the workqueue has to be scheduled */ 2573 static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) 2574 { 2575 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); 2576 2577 switch (err_type) { 2578 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: 2579 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", 2580 BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); 2581 break; 2582 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: 2583 netdev_warn(bp->dev, "Pause Storm detected!\n"); 2584 break; 2585 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: 2586 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n"); 2587 break; 2588 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { 2589 u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); 2590 char *threshold_type; 2591 bool notify = false; 2592 char *dir_str; 2593 2594 switch (type) { 2595 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: 2596 threshold_type = "warning"; 2597 break; 2598 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: 2599 threshold_type = "critical"; 2600 break; 2601 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: 2602 threshold_type = "fatal"; 2603 break; 2604 case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: 2605 threshold_type = "shutdown"; 2606 break; 2607 default: 2608 netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); 2609 return false; 2610 } 2611 if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { 2612 dir_str = "above"; 2613 notify = true; 2614 } else { 2615 dir_str = "below"; 2616 } 2617 netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", 2618 dir_str, threshold_type); 2619 netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", 2620 BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), 2621 BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); 2622 if (notify) { 2623 bp->thermal_threshold_type = type; 2624 set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); 2625 return true; 2626 } 2627 return false; 2628 } 2629 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: 2630 netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); 2631 break; 2632 default: 2633 netdev_err(bp->dev, "FW reported unknown error type %u\n", 2634 err_type); 2635 break; 2636 } 2637 return false; 2638 } 2639 2640 #define BNXT_GET_EVENT_PORT(data) \ 2641 ((data) & \ 2642 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) 2643 2644 #define BNXT_EVENT_RING_TYPE(data2) \ 2645 ((data2) & \ 2646 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) 2647 2648 #define BNXT_EVENT_RING_TYPE_RX(data2) \ 2649 (BNXT_EVENT_RING_TYPE(data2) == \ 2650 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) 2651 2652 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ 2653 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ 2654 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) 2655 2656 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ 2657 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ 2658 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) 2659 2660 #define BNXT_PHC_BITS 48 2661 2662 static int bnxt_async_event_process(struct bnxt *bp, 2663 struct hwrm_async_event_cmpl *cmpl) 2664 { 2665 u16 event_id = le16_to_cpu(cmpl->event_id); 2666 u32 data1 = le32_to_cpu(cmpl->event_data1); 2667 u32 data2 = le32_to_cpu(cmpl->event_data2); 2668 2669 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", 2670 event_id, data1, data2); 2671 2672 /* TODO CHIMP_FW: Define event id's for link change, error etc 
*/ 2673 switch (event_id) { 2674 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { 2675 struct bnxt_link_info *link_info = &bp->link_info; 2676 2677 if (BNXT_VF(bp)) 2678 goto async_event_process_exit; 2679 2680 /* print unsupported speed warning in forced speed mode only */ 2681 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && 2682 (data1 & 0x20000)) { 2683 u16 fw_speed = bnxt_get_force_speed(link_info); 2684 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); 2685 2686 if (speed != SPEED_UNKNOWN) 2687 netdev_warn(bp->dev, "Link speed %d no longer supported\n", 2688 speed); 2689 } 2690 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); 2691 } 2692 fallthrough; 2693 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: 2694 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: 2695 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); 2696 fallthrough; 2697 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: 2698 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); 2699 break; 2700 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: 2701 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); 2702 break; 2703 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { 2704 u16 port_id = BNXT_GET_EVENT_PORT(data1); 2705 2706 if (BNXT_VF(bp)) 2707 break; 2708 2709 if (bp->pf.port_id != port_id) 2710 break; 2711 2712 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 2713 break; 2714 } 2715 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 2716 if (BNXT_PF(bp)) 2717 goto async_event_process_exit; 2718 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 2719 break; 2720 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { 2721 char *type_str = "Solicited"; 2722 2723 if (!bp->fw_health) 2724 goto async_event_process_exit; 2725 2726 bp->fw_reset_timestamp = jiffies; 2727 bp->fw_reset_min_dsecs = cmpl->timestamp_lo; 2728 if (!bp->fw_reset_min_dsecs) 2729 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; 2730 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); 2731 if (!bp->fw_reset_max_dsecs) 2732 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; 2733 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { 2734 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); 2735 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { 2736 type_str = "Fatal"; 2737 bp->fw_health->fatalities++; 2738 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 2739 } else if (data2 && BNXT_FW_STATUS_HEALTHY != 2740 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { 2741 type_str = "Non-fatal"; 2742 bp->fw_health->survivals++; 2743 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 2744 } 2745 netif_warn(bp, hw, bp->dev, 2746 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", 2747 type_str, data1, data2, 2748 bp->fw_reset_min_dsecs * 100, 2749 bp->fw_reset_max_dsecs * 100); 2750 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); 2751 break; 2752 } 2753 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { 2754 struct bnxt_fw_health *fw_health = bp->fw_health; 2755 char *status_desc = "healthy"; 2756 u32 status; 2757 2758 if (!fw_health) 2759 goto async_event_process_exit; 2760 2761 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { 2762 fw_health->enabled = false; 2763 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); 2764 break; 2765 } 2766 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); 2767 fw_health->tmr_multiplier = 2768 DIV_ROUND_UP(fw_health->polling_dsecs * HZ, 2769 bp->current_interval * 10); 2770 fw_health->tmr_counter = fw_health->tmr_multiplier; 2771 if 
(!fw_health->enabled) 2772 fw_health->last_fw_heartbeat = 2773 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 2774 fw_health->last_fw_reset_cnt = 2775 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 2776 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 2777 if (status != BNXT_FW_STATUS_HEALTHY) 2778 status_desc = "unhealthy"; 2779 netif_info(bp, drv, bp->dev, 2780 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", 2781 fw_health->primary ? "primary" : "backup", status, 2782 status_desc, fw_health->last_fw_reset_cnt); 2783 if (!fw_health->enabled) { 2784 /* Make sure tmr_counter is set and visible to 2785 * bnxt_health_check() before setting enabled to true. 2786 */ 2787 smp_wmb(); 2788 fw_health->enabled = true; 2789 } 2790 goto async_event_process_exit; 2791 } 2792 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: 2793 netif_notice(bp, hw, bp->dev, 2794 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", 2795 data1, data2); 2796 goto async_event_process_exit; 2797 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { 2798 struct bnxt_rx_ring_info *rxr; 2799 u16 grp_idx; 2800 2801 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 2802 goto async_event_process_exit; 2803 2804 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", 2805 BNXT_EVENT_RING_TYPE(data2), data1); 2806 if (!BNXT_EVENT_RING_TYPE_RX(data2)) 2807 goto async_event_process_exit; 2808 2809 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); 2810 if (grp_idx == INVALID_HW_RING_ID) { 2811 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n", 2812 data1); 2813 goto async_event_process_exit; 2814 } 2815 rxr = bp->bnapi[grp_idx]->rx_ring; 2816 bnxt_sched_reset_rxr(bp, rxr); 2817 goto async_event_process_exit; 2818 } 2819 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { 2820 struct bnxt_fw_health *fw_health = bp->fw_health; 2821 2822 netif_notice(bp, hw, bp->dev, 2823 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", 2824 data1, data2); 2825 if (fw_health) { 2826 fw_health->echo_req_data1 = data1; 2827 fw_health->echo_req_data2 = data2; 2828 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); 2829 break; 2830 } 2831 goto async_event_process_exit; 2832 } 2833 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { 2834 bnxt_ptp_pps_event(bp, data1, data2); 2835 goto async_event_process_exit; 2836 } 2837 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { 2838 if (bnxt_event_error_report(bp, data1, data2)) 2839 break; 2840 goto async_event_process_exit; 2841 } 2842 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { 2843 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { 2844 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: 2845 if (BNXT_PTP_USE_RTC(bp)) { 2846 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 2847 unsigned long flags; 2848 u64 ns; 2849 2850 if (!ptp) 2851 goto async_event_process_exit; 2852 2853 bnxt_ptp_update_current_time(bp); 2854 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << 2855 BNXT_PHC_BITS) | ptp->current_time); 2856 write_seqlock_irqsave(&ptp->ptp_lock, flags); 2857 bnxt_ptp_rtc_timecounter_init(ptp, ns); 2858 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 2859 } 2860 break; 2861 } 2862 goto async_event_process_exit; 2863 } 2864 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { 2865 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; 2866 2867 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); 2868 goto async_event_process_exit; 2869 } 2870 case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: { 2871 u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1); 
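/* data2 carries the firmware's current producer offset within the trace buffer; it is used below to detect buffer wrap. */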
2872 u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2); 2873 2874 bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset); 2875 goto async_event_process_exit; 2876 } 2877 default: 2878 goto async_event_process_exit; 2879 } 2880 __bnxt_queue_sp_work(bp); 2881 async_event_process_exit: 2882 bnxt_ulp_async_events(bp, cmpl); 2883 return 0; 2884 } 2885 2886 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) 2887 { 2888 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; 2889 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; 2890 struct hwrm_fwd_req_cmpl *fwd_req_cmpl = 2891 (struct hwrm_fwd_req_cmpl *)txcmp; 2892 2893 switch (cmpl_type) { 2894 case CMPL_BASE_TYPE_HWRM_DONE: 2895 seq_id = le16_to_cpu(h_cmpl->sequence_id); 2896 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); 2897 break; 2898 2899 case CMPL_BASE_TYPE_HWRM_FWD_REQ: 2900 vf_id = le16_to_cpu(fwd_req_cmpl->source_id); 2901 2902 if ((vf_id < bp->pf.first_vf_id) || 2903 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { 2904 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", 2905 vf_id); 2906 return -EINVAL; 2907 } 2908 2909 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); 2910 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); 2911 break; 2912 2913 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: 2914 bnxt_async_event_process(bp, 2915 (struct hwrm_async_event_cmpl *)txcmp); 2916 break; 2917 2918 default: 2919 break; 2920 } 2921 2922 return 0; 2923 } 2924 2925 static bool bnxt_vnic_is_active(struct bnxt *bp) 2926 { 2927 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; 2928 2929 return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0; 2930 } 2931 2932 static irqreturn_t bnxt_msix(int irq, void *dev_instance) 2933 { 2934 struct bnxt_napi *bnapi = dev_instance; 2935 struct bnxt *bp = bnapi->bp; 2936 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 2937 u32 cons = RING_CMP(cpr->cp_raw_cons); 2938 2939 cpr->event_ctr++; 2940 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); 2941 napi_schedule(&bnapi->napi); 2942 return IRQ_HANDLED; 2943 } 2944 2945 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 2946 { 2947 u32 raw_cons = cpr->cp_raw_cons; 2948 u16 cons = RING_CMP(raw_cons); 2949 struct tx_cmp *txcmp; 2950 2951 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2952 2953 return TX_CMP_VALID(txcmp, raw_cons); 2954 } 2955 2956 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 2957 int budget) 2958 { 2959 struct bnxt_napi *bnapi = cpr->bnapi; 2960 u32 raw_cons = cpr->cp_raw_cons; 2961 u32 cons; 2962 int rx_pkts = 0; 2963 u8 event = 0; 2964 struct tx_cmp *txcmp; 2965 2966 cpr->has_more_work = 0; 2967 cpr->had_work_done = 1; 2968 while (1) { 2969 u8 cmp_type; 2970 int rc; 2971 2972 cons = RING_CMP(raw_cons); 2973 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 2974 2975 if (!TX_CMP_VALID(txcmp, raw_cons)) 2976 break; 2977 2978 /* The valid test of the entry must be done first before 2979 * reading any further. 
2980 */ 2981 dma_rmb(); 2982 cmp_type = TX_CMP_TYPE(txcmp); 2983 if (cmp_type == CMP_TYPE_TX_L2_CMP || 2984 cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { 2985 u32 opaque = txcmp->tx_cmp_opaque; 2986 struct bnxt_tx_ring_info *txr; 2987 u16 tx_freed; 2988 2989 txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; 2990 event |= BNXT_TX_CMP_EVENT; 2991 if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) 2992 txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); 2993 else 2994 txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); 2995 tx_freed = (txr->tx_hw_cons - txr->tx_cons) & 2996 bp->tx_ring_mask; 2997 /* return full budget so NAPI will complete. */ 2998 if (unlikely(tx_freed >= bp->tx_wake_thresh)) { 2999 rx_pkts = budget; 3000 raw_cons = NEXT_RAW_CMP(raw_cons); 3001 if (budget) 3002 cpr->has_more_work = 1; 3003 break; 3004 } 3005 } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) { 3006 bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp); 3007 } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && 3008 cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { 3009 if (likely(budget)) 3010 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 3011 else 3012 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, 3013 &event); 3014 if (likely(rc >= 0)) 3015 rx_pkts += rc; 3016 /* Increment rx_pkts when rc is -ENOMEM to count towards 3017 * the NAPI budget. Otherwise, we may potentially loop 3018 * here forever if we consistently cannot allocate 3019 * buffers. 3020 */ 3021 else if (rc == -ENOMEM && budget) 3022 rx_pkts++; 3023 else if (rc == -EBUSY) /* partial completion */ 3024 break; 3025 } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || 3026 cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || 3027 cmp_type == CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { 3028 bnxt_hwrm_handler(bp, txcmp); 3029 } 3030 raw_cons = NEXT_RAW_CMP(raw_cons); 3031 3032 if (rx_pkts && rx_pkts == budget) { 3033 cpr->has_more_work = 1; 3034 break; 3035 } 3036 } 3037 3038 if (event & BNXT_REDIRECT_EVENT) { 3039 xdp_do_flush(); 3040 event &= ~BNXT_REDIRECT_EVENT; 3041 } 3042 3043 if (event & BNXT_TX_EVENT) { 3044 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; 3045 u16 prod = txr->tx_prod; 3046 3047 /* Sync BD data before updating doorbell */ 3048 wmb(); 3049 3050 bnxt_db_write_relaxed(bp, &txr->tx_db, prod); 3051 event &= ~BNXT_TX_EVENT; 3052 } 3053 3054 cpr->cp_raw_cons = raw_cons; 3055 bnapi->events |= event; 3056 return rx_pkts; 3057 } 3058 3059 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3060 int budget) 3061 { 3062 if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) 3063 bnapi->tx_int(bp, bnapi, budget); 3064 3065 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { 3066 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3067 3068 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 3069 bnapi->events &= ~BNXT_RX_EVENT; 3070 } 3071 if (bnapi->events & BNXT_AGG_EVENT) { 3072 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3073 3074 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 3075 bnapi->events &= ~BNXT_AGG_EVENT; 3076 } 3077 } 3078 3079 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, 3080 int budget) 3081 { 3082 struct bnxt_napi *bnapi = cpr->bnapi; 3083 int rx_pkts; 3084 3085 rx_pkts = __bnxt_poll_work(bp, cpr, budget); 3086 3087 /* ACK completion ring before freeing tx ring and producing new 3088 * buffers in rx/agg rings to prevent overflowing the completion 3089 * ring. 
3090 */ 3091 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 3092 3093 __bnxt_poll_work_done(bp, bnapi, budget); 3094 return rx_pkts; 3095 } 3096 3097 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) 3098 { 3099 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3100 struct bnxt *bp = bnapi->bp; 3101 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3102 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 3103 struct tx_cmp *txcmp; 3104 struct rx_cmp_ext *rxcmp1; 3105 u32 cp_cons, tmp_raw_cons; 3106 u32 raw_cons = cpr->cp_raw_cons; 3107 bool flush_xdp = false; 3108 u32 rx_pkts = 0; 3109 u8 event = 0; 3110 3111 while (1) { 3112 int rc; 3113 3114 cp_cons = RING_CMP(raw_cons); 3115 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3116 3117 if (!TX_CMP_VALID(txcmp, raw_cons)) 3118 break; 3119 3120 /* The valid test of the entry must be done first before 3121 * reading any further. 3122 */ 3123 dma_rmb(); 3124 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 3125 tmp_raw_cons = NEXT_RAW_CMP(raw_cons); 3126 cp_cons = RING_CMP(tmp_raw_cons); 3127 rxcmp1 = (struct rx_cmp_ext *) 3128 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 3129 3130 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 3131 break; 3132 3133 /* force an error to recycle the buffer */ 3134 rxcmp1->rx_cmp_cfa_code_errors_v2 |= 3135 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 3136 3137 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); 3138 if (likely(rc == -EIO) && budget) 3139 rx_pkts++; 3140 else if (rc == -EBUSY) /* partial completion */ 3141 break; 3142 if (event & BNXT_REDIRECT_EVENT) 3143 flush_xdp = true; 3144 } else if (unlikely(TX_CMP_TYPE(txcmp) == 3145 CMPL_BASE_TYPE_HWRM_DONE)) { 3146 bnxt_hwrm_handler(bp, txcmp); 3147 } else { 3148 netdev_err(bp->dev, 3149 "Invalid completion received on special ring\n"); 3150 } 3151 raw_cons = NEXT_RAW_CMP(raw_cons); 3152 3153 if (rx_pkts == budget) 3154 break; 3155 } 3156 3157 cpr->cp_raw_cons = raw_cons; 3158 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); 3159 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 3160 3161 if (event & BNXT_AGG_EVENT) 3162 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 3163 if (flush_xdp) 3164 xdp_do_flush(); 3165 3166 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { 3167 napi_complete_done(napi, rx_pkts); 3168 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3169 } 3170 return rx_pkts; 3171 } 3172 3173 static int bnxt_poll(struct napi_struct *napi, int budget) 3174 { 3175 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3176 struct bnxt *bp = bnapi->bp; 3177 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3178 int work_done = 0; 3179 3180 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3181 napi_complete(napi); 3182 return 0; 3183 } 3184 while (1) { 3185 work_done += bnxt_poll_work(bp, cpr, budget - work_done); 3186 3187 if (work_done >= budget) { 3188 if (!budget) 3189 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3190 break; 3191 } 3192 3193 if (!bnxt_has_work(bp, cpr)) { 3194 if (napi_complete_done(napi, work_done)) 3195 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); 3196 break; 3197 } 3198 } 3199 if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { 3200 struct dim_sample dim_sample = {}; 3201 3202 dim_update_sample(cpr->event_ctr, 3203 cpr->rx_packets, 3204 cpr->rx_bytes, 3205 &dim_sample); 3206 net_dim(&cpr->dim, &dim_sample); 3207 } 3208 return work_done; 3209 } 3210 3211 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) 3212 { 3213 struct 
bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3214 int i, work_done = 0; 3215 3216 for (i = 0; i < cpr->cp_ring_count; i++) { 3217 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3218 3219 if (cpr2->had_nqe_notify) { 3220 work_done += __bnxt_poll_work(bp, cpr2, 3221 budget - work_done); 3222 cpr->has_more_work |= cpr2->has_more_work; 3223 } 3224 } 3225 return work_done; 3226 } 3227 3228 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, 3229 u64 dbr_type, int budget) 3230 { 3231 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3232 int i; 3233 3234 for (i = 0; i < cpr->cp_ring_count; i++) { 3235 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; 3236 struct bnxt_db_info *db; 3237 3238 if (cpr2->had_work_done) { 3239 u32 tgl = 0; 3240 3241 if (dbr_type == DBR_TYPE_CQ_ARMALL) { 3242 cpr2->had_nqe_notify = 0; 3243 tgl = cpr2->toggle; 3244 } 3245 db = &cpr2->cp_db; 3246 bnxt_writeq(bp, 3247 db->db_key64 | dbr_type | DB_TOGGLE(tgl) | 3248 DB_RING_IDX(db, cpr2->cp_raw_cons), 3249 db->doorbell); 3250 cpr2->had_work_done = 0; 3251 } 3252 } 3253 __bnxt_poll_work_done(bp, bnapi, budget); 3254 } 3255 3256 static int bnxt_poll_p5(struct napi_struct *napi, int budget) 3257 { 3258 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); 3259 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 3260 struct bnxt_cp_ring_info *cpr_rx; 3261 u32 raw_cons = cpr->cp_raw_cons; 3262 struct bnxt *bp = bnapi->bp; 3263 struct nqe_cn *nqcmp; 3264 int work_done = 0; 3265 u32 cons; 3266 3267 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { 3268 napi_complete(napi); 3269 return 0; 3270 } 3271 if (cpr->has_more_work) { 3272 cpr->has_more_work = 0; 3273 work_done = __bnxt_poll_cqs(bp, bnapi, budget); 3274 } 3275 while (1) { 3276 u16 type; 3277 3278 cons = RING_CMP(raw_cons); 3279 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; 3280 3281 if (!NQ_CMP_VALID(nqcmp, raw_cons)) { 3282 if (cpr->has_more_work) 3283 break; 3284 3285 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, 3286 budget); 3287 cpr->cp_raw_cons = raw_cons; 3288 if (napi_complete_done(napi, work_done)) 3289 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, 3290 cpr->cp_raw_cons); 3291 goto poll_done; 3292 } 3293 3294 /* The valid test of the entry must be done first before 3295 * reading any further. 
3296 */ 3297 dma_rmb(); 3298 3299 type = le16_to_cpu(nqcmp->type); 3300 if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { 3301 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 3302 u32 cq_type = BNXT_NQ_HDL_TYPE(idx); 3303 struct bnxt_cp_ring_info *cpr2; 3304 3305 /* No more budget for RX work */ 3306 if (budget && work_done >= budget && 3307 cq_type == BNXT_NQ_HDL_TYPE_RX) 3308 break; 3309 3310 idx = BNXT_NQ_HDL_IDX(idx); 3311 cpr2 = &cpr->cp_ring_arr[idx]; 3312 cpr2->had_nqe_notify = 1; 3313 cpr2->toggle = NQE_CN_TOGGLE(type); 3314 work_done += __bnxt_poll_work(bp, cpr2, 3315 budget - work_done); 3316 cpr->has_more_work |= cpr2->has_more_work; 3317 } else { 3318 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); 3319 } 3320 raw_cons = NEXT_RAW_CMP(raw_cons); 3321 } 3322 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); 3323 if (raw_cons != cpr->cp_raw_cons) { 3324 cpr->cp_raw_cons = raw_cons; 3325 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); 3326 } 3327 poll_done: 3328 cpr_rx = &cpr->cp_ring_arr[0]; 3329 if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && 3330 (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) { 3331 struct dim_sample dim_sample = {}; 3332 3333 dim_update_sample(cpr->event_ctr, 3334 cpr_rx->rx_packets, 3335 cpr_rx->rx_bytes, 3336 &dim_sample); 3337 net_dim(&cpr->dim, &dim_sample); 3338 } 3339 return work_done; 3340 } 3341 3342 static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp, 3343 struct bnxt_tx_ring_info *txr, int idx) 3344 { 3345 int i, max_idx; 3346 struct pci_dev *pdev = bp->pdev; 3347 3348 max_idx = bp->tx_nr_pages * TX_DESC_CNT; 3349 3350 for (i = 0; i < max_idx;) { 3351 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i]; 3352 struct sk_buff *skb; 3353 int j, last; 3354 3355 if (idx < bp->tx_nr_rings_xdp && 3356 tx_buf->action == XDP_REDIRECT) { 3357 dma_unmap_single(&pdev->dev, 3358 dma_unmap_addr(tx_buf, mapping), 3359 dma_unmap_len(tx_buf, len), 3360 DMA_TO_DEVICE); 3361 xdp_return_frame(tx_buf->xdpf); 3362 tx_buf->action = 0; 3363 tx_buf->xdpf = NULL; 3364 i++; 3365 continue; 3366 } 3367 3368 skb = tx_buf->skb; 3369 if (!skb) { 3370 i++; 3371 continue; 3372 } 3373 3374 tx_buf->skb = NULL; 3375 3376 if (tx_buf->is_push) { 3377 dev_kfree_skb(skb); 3378 i += 2; 3379 continue; 3380 } 3381 3382 dma_unmap_single(&pdev->dev, 3383 dma_unmap_addr(tx_buf, mapping), 3384 skb_headlen(skb), 3385 DMA_TO_DEVICE); 3386 3387 last = tx_buf->nr_frags; 3388 i += 2; 3389 for (j = 0; j < last; j++, i++) { 3390 int ring_idx = i & bp->tx_ring_mask; 3391 skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 3392 3393 tx_buf = &txr->tx_buf_ring[ring_idx]; 3394 dma_unmap_page(&pdev->dev, 3395 dma_unmap_addr(tx_buf, mapping), 3396 skb_frag_size(frag), DMA_TO_DEVICE); 3397 } 3398 dev_kfree_skb(skb); 3399 } 3400 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx)); 3401 } 3402 3403 static void bnxt_free_tx_skbs(struct bnxt *bp) 3404 { 3405 int i; 3406 3407 if (!bp->tx_ring) 3408 return; 3409 3410 for (i = 0; i < bp->tx_nr_rings; i++) { 3411 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3412 3413 if (!txr->tx_buf_ring) 3414 continue; 3415 3416 bnxt_free_one_tx_ring_skbs(bp, txr, i); 3417 } 3418 3419 if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) 3420 bnxt_ptp_free_txts_skbs(bp->ptp_cfg); 3421 } 3422 3423 static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3424 { 3425 int i, max_idx; 3426 3427 max_idx = bp->rx_nr_pages * RX_DESC_CNT; 3428 3429 for (i = 0; i < max_idx; i++) { 3430 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; 3431 void *data 
= rx_buf->data; 3432 3433 if (!data) 3434 continue; 3435 3436 rx_buf->data = NULL; 3437 if (BNXT_RX_PAGE_MODE(bp)) 3438 page_pool_recycle_direct(rxr->page_pool, data); 3439 else 3440 page_pool_free_va(rxr->head_pool, data, true); 3441 } 3442 } 3443 3444 static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3445 { 3446 int i, max_idx; 3447 3448 max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; 3449 3450 for (i = 0; i < max_idx; i++) { 3451 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; 3452 struct page *page = rx_agg_buf->page; 3453 3454 if (!page) 3455 continue; 3456 3457 rx_agg_buf->page = NULL; 3458 __clear_bit(i, rxr->rx_agg_bmap); 3459 3460 page_pool_recycle_direct(rxr->page_pool, page); 3461 } 3462 } 3463 3464 static void bnxt_free_one_tpa_info_data(struct bnxt *bp, 3465 struct bnxt_rx_ring_info *rxr) 3466 { 3467 int i; 3468 3469 for (i = 0; i < bp->max_tpa; i++) { 3470 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; 3471 u8 *data = tpa_info->data; 3472 3473 if (!data) 3474 continue; 3475 3476 tpa_info->data = NULL; 3477 page_pool_free_va(rxr->head_pool, data, false); 3478 } 3479 } 3480 3481 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, 3482 struct bnxt_rx_ring_info *rxr) 3483 { 3484 struct bnxt_tpa_idx_map *map; 3485 3486 if (!rxr->rx_tpa) 3487 goto skip_rx_tpa_free; 3488 3489 bnxt_free_one_tpa_info_data(bp, rxr); 3490 3491 skip_rx_tpa_free: 3492 if (!rxr->rx_buf_ring) 3493 goto skip_rx_buf_free; 3494 3495 bnxt_free_one_rx_ring(bp, rxr); 3496 3497 skip_rx_buf_free: 3498 if (!rxr->rx_agg_ring) 3499 goto skip_rx_agg_free; 3500 3501 bnxt_free_one_rx_agg_ring(bp, rxr); 3502 3503 skip_rx_agg_free: 3504 map = rxr->rx_tpa_idx_map; 3505 if (map) 3506 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); 3507 } 3508 3509 static void bnxt_free_rx_skbs(struct bnxt *bp) 3510 { 3511 int i; 3512 3513 if (!bp->rx_ring) 3514 return; 3515 3516 for (i = 0; i < bp->rx_nr_rings; i++) 3517 bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]); 3518 } 3519 3520 static void bnxt_free_skbs(struct bnxt *bp) 3521 { 3522 bnxt_free_tx_skbs(bp); 3523 bnxt_free_rx_skbs(bp); 3524 } 3525 3526 static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) 3527 { 3528 u8 init_val = ctxm->init_value; 3529 u16 offset = ctxm->init_offset; 3530 u8 *p2 = p; 3531 int i; 3532 3533 if (!init_val) 3534 return; 3535 if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { 3536 memset(p, init_val, len); 3537 return; 3538 } 3539 for (i = 0; i < len; i += ctxm->entry_size) 3540 *(p2 + i + offset) = init_val; 3541 } 3542 3543 static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem, 3544 void *buf, size_t offset, size_t head, 3545 size_t tail) 3546 { 3547 int i, head_page, start_idx, source_offset; 3548 size_t len, rem_len, total_len, max_bytes; 3549 3550 head_page = head / rmem->page_size; 3551 source_offset = head % rmem->page_size; 3552 total_len = (tail - head) & MAX_CTX_BYTES_MASK; 3553 if (!total_len) 3554 total_len = MAX_CTX_BYTES; 3555 start_idx = head_page % MAX_CTX_PAGES; 3556 max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size - 3557 source_offset; 3558 total_len = min(total_len, max_bytes); 3559 rem_len = total_len; 3560 3561 for (i = start_idx; rem_len; i++, source_offset = 0) { 3562 len = min((size_t)(rmem->page_size - source_offset), rem_len); 3563 if (buf) 3564 memcpy(buf + offset, rmem->pg_arr[i] + source_offset, 3565 len); 3566 offset += len; 3567 rem_len -= len; 3568 } 3569 return total_len; 3570 } 3571 3572 static void 
bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3573 { 3574 struct pci_dev *pdev = bp->pdev; 3575 int i; 3576 3577 if (!rmem->pg_arr) 3578 goto skip_pages; 3579 3580 for (i = 0; i < rmem->nr_pages; i++) { 3581 if (!rmem->pg_arr[i]) 3582 continue; 3583 3584 dma_free_coherent(&pdev->dev, rmem->page_size, 3585 rmem->pg_arr[i], rmem->dma_arr[i]); 3586 3587 rmem->pg_arr[i] = NULL; 3588 } 3589 skip_pages: 3590 if (rmem->pg_tbl) { 3591 size_t pg_tbl_size = rmem->nr_pages * 8; 3592 3593 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3594 pg_tbl_size = rmem->page_size; 3595 dma_free_coherent(&pdev->dev, pg_tbl_size, 3596 rmem->pg_tbl, rmem->pg_tbl_map); 3597 rmem->pg_tbl = NULL; 3598 } 3599 if (rmem->vmem_size && *rmem->vmem) { 3600 vfree(*rmem->vmem); 3601 *rmem->vmem = NULL; 3602 } 3603 } 3604 3605 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) 3606 { 3607 struct pci_dev *pdev = bp->pdev; 3608 u64 valid_bit = 0; 3609 int i; 3610 3611 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) 3612 valid_bit = PTU_PTE_VALID; 3613 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { 3614 size_t pg_tbl_size = rmem->nr_pages * 8; 3615 3616 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) 3617 pg_tbl_size = rmem->page_size; 3618 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, 3619 &rmem->pg_tbl_map, 3620 GFP_KERNEL); 3621 if (!rmem->pg_tbl) 3622 return -ENOMEM; 3623 } 3624 3625 for (i = 0; i < rmem->nr_pages; i++) { 3626 u64 extra_bits = valid_bit; 3627 3628 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 3629 rmem->page_size, 3630 &rmem->dma_arr[i], 3631 GFP_KERNEL); 3632 if (!rmem->pg_arr[i]) 3633 return -ENOMEM; 3634 3635 if (rmem->ctx_mem) 3636 bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], 3637 rmem->page_size); 3638 if (rmem->nr_pages > 1 || rmem->depth > 0) { 3639 if (i == rmem->nr_pages - 2 && 3640 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3641 extra_bits |= PTU_PTE_NEXT_TO_LAST; 3642 else if (i == rmem->nr_pages - 1 && 3643 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) 3644 extra_bits |= PTU_PTE_LAST; 3645 rmem->pg_tbl[i] = 3646 cpu_to_le64(rmem->dma_arr[i] | extra_bits); 3647 } 3648 } 3649 3650 if (rmem->vmem_size) { 3651 *rmem->vmem = vzalloc(rmem->vmem_size); 3652 if (!(*rmem->vmem)) 3653 return -ENOMEM; 3654 } 3655 return 0; 3656 } 3657 3658 static void bnxt_free_one_tpa_info(struct bnxt *bp, 3659 struct bnxt_rx_ring_info *rxr) 3660 { 3661 int i; 3662 3663 kfree(rxr->rx_tpa_idx_map); 3664 rxr->rx_tpa_idx_map = NULL; 3665 if (rxr->rx_tpa) { 3666 for (i = 0; i < bp->max_tpa; i++) { 3667 kfree(rxr->rx_tpa[i].agg_arr); 3668 rxr->rx_tpa[i].agg_arr = NULL; 3669 } 3670 } 3671 kfree(rxr->rx_tpa); 3672 rxr->rx_tpa = NULL; 3673 } 3674 3675 static void bnxt_free_tpa_info(struct bnxt *bp) 3676 { 3677 int i; 3678 3679 for (i = 0; i < bp->rx_nr_rings; i++) { 3680 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3681 3682 bnxt_free_one_tpa_info(bp, rxr); 3683 } 3684 } 3685 3686 static int bnxt_alloc_one_tpa_info(struct bnxt *bp, 3687 struct bnxt_rx_ring_info *rxr) 3688 { 3689 struct rx_agg_cmp *agg; 3690 int i; 3691 3692 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), 3693 GFP_KERNEL); 3694 if (!rxr->rx_tpa) 3695 return -ENOMEM; 3696 3697 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 3698 return 0; 3699 for (i = 0; i < bp->max_tpa; i++) { 3700 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); 3701 if (!agg) 3702 return -ENOMEM; 3703 rxr->rx_tpa[i].agg_arr = agg; 3704 } 3705 rxr->rx_tpa_idx_map = 
kzalloc(sizeof(*rxr->rx_tpa_idx_map), 3706 GFP_KERNEL); 3707 if (!rxr->rx_tpa_idx_map) 3708 return -ENOMEM; 3709 3710 return 0; 3711 } 3712 3713 static int bnxt_alloc_tpa_info(struct bnxt *bp) 3714 { 3715 int i, rc; 3716 3717 bp->max_tpa = MAX_TPA; 3718 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 3719 if (!bp->max_tpa_v2) 3720 return 0; 3721 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); 3722 } 3723 3724 for (i = 0; i < bp->rx_nr_rings; i++) { 3725 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3726 3727 rc = bnxt_alloc_one_tpa_info(bp, rxr); 3728 if (rc) 3729 return rc; 3730 } 3731 return 0; 3732 } 3733 3734 static void bnxt_free_rx_rings(struct bnxt *bp) 3735 { 3736 int i; 3737 3738 if (!bp->rx_ring) 3739 return; 3740 3741 bnxt_free_tpa_info(bp); 3742 for (i = 0; i < bp->rx_nr_rings; i++) { 3743 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3744 struct bnxt_ring_struct *ring; 3745 3746 if (rxr->xdp_prog) 3747 bpf_prog_put(rxr->xdp_prog); 3748 3749 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) 3750 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3751 3752 page_pool_destroy(rxr->page_pool); 3753 if (bnxt_separate_head_pool()) 3754 page_pool_destroy(rxr->head_pool); 3755 rxr->page_pool = rxr->head_pool = NULL; 3756 3757 kfree(rxr->rx_agg_bmap); 3758 rxr->rx_agg_bmap = NULL; 3759 3760 ring = &rxr->rx_ring_struct; 3761 bnxt_free_ring(bp, &ring->ring_mem); 3762 3763 ring = &rxr->rx_agg_ring_struct; 3764 bnxt_free_ring(bp, &ring->ring_mem); 3765 } 3766 } 3767 3768 static int bnxt_alloc_rx_page_pool(struct bnxt *bp, 3769 struct bnxt_rx_ring_info *rxr, 3770 int numa_node) 3771 { 3772 struct page_pool_params pp = { 0 }; 3773 struct page_pool *pool; 3774 3775 pp.pool_size = bp->rx_agg_ring_size; 3776 if (BNXT_RX_PAGE_MODE(bp)) 3777 pp.pool_size += bp->rx_ring_size; 3778 pp.nid = numa_node; 3779 pp.napi = &rxr->bnapi->napi; 3780 pp.netdev = bp->dev; 3781 pp.dev = &bp->pdev->dev; 3782 pp.dma_dir = bp->rx_dir; 3783 pp.max_len = PAGE_SIZE; 3784 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; 3785 3786 pool = page_pool_create(&pp); 3787 if (IS_ERR(pool)) 3788 return PTR_ERR(pool); 3789 rxr->page_pool = pool; 3790 3791 if (bnxt_separate_head_pool()) { 3792 pp.pool_size = max(bp->rx_ring_size, 1024); 3793 pool = page_pool_create(&pp); 3794 if (IS_ERR(pool)) 3795 goto err_destroy_pp; 3796 } 3797 rxr->head_pool = pool; 3798 3799 return 0; 3800 3801 err_destroy_pp: 3802 page_pool_destroy(rxr->page_pool); 3803 rxr->page_pool = NULL; 3804 return PTR_ERR(pool); 3805 } 3806 3807 static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 3808 { 3809 u16 mem_size; 3810 3811 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; 3812 mem_size = rxr->rx_agg_bmap_size / 8; 3813 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); 3814 if (!rxr->rx_agg_bmap) 3815 return -ENOMEM; 3816 3817 return 0; 3818 } 3819 3820 static int bnxt_alloc_rx_rings(struct bnxt *bp) 3821 { 3822 int numa_node = dev_to_node(&bp->pdev->dev); 3823 int i, rc = 0, agg_rings = 0, cpu; 3824 3825 if (!bp->rx_ring) 3826 return -ENOMEM; 3827 3828 if (bp->flags & BNXT_FLAG_AGG_RINGS) 3829 agg_rings = 1; 3830 3831 for (i = 0; i < bp->rx_nr_rings; i++) { 3832 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 3833 struct bnxt_ring_struct *ring; 3834 int cpu_node; 3835 3836 ring = &rxr->rx_ring_struct; 3837 3838 cpu = cpumask_local_spread(i, numa_node); 3839 cpu_node = cpu_to_node(cpu); 3840 netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", 3841 i, cpu_node); 3842 rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); 3843 
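		/* No unwinding here or below on failure; bnxt_alloc_mem()
		 * cleans up partially set up RX rings via
		 * bnxt_free_rx_rings().
		 */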
if (rc) 3844 return rc; 3845 3846 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); 3847 if (rc < 0) 3848 return rc; 3849 3850 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, 3851 MEM_TYPE_PAGE_POOL, 3852 rxr->page_pool); 3853 if (rc) { 3854 xdp_rxq_info_unreg(&rxr->xdp_rxq); 3855 return rc; 3856 } 3857 3858 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3859 if (rc) 3860 return rc; 3861 3862 ring->grp_idx = i; 3863 if (agg_rings) { 3864 ring = &rxr->rx_agg_ring_struct; 3865 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3866 if (rc) 3867 return rc; 3868 3869 ring->grp_idx = i; 3870 rc = bnxt_alloc_rx_agg_bmap(bp, rxr); 3871 if (rc) 3872 return rc; 3873 } 3874 } 3875 if (bp->flags & BNXT_FLAG_TPA) 3876 rc = bnxt_alloc_tpa_info(bp); 3877 return rc; 3878 } 3879 3880 static void bnxt_free_tx_rings(struct bnxt *bp) 3881 { 3882 int i; 3883 struct pci_dev *pdev = bp->pdev; 3884 3885 if (!bp->tx_ring) 3886 return; 3887 3888 for (i = 0; i < bp->tx_nr_rings; i++) { 3889 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3890 struct bnxt_ring_struct *ring; 3891 3892 if (txr->tx_push) { 3893 dma_free_coherent(&pdev->dev, bp->tx_push_size, 3894 txr->tx_push, txr->tx_push_mapping); 3895 txr->tx_push = NULL; 3896 } 3897 3898 ring = &txr->tx_ring_struct; 3899 3900 bnxt_free_ring(bp, &ring->ring_mem); 3901 } 3902 } 3903 3904 #define BNXT_TC_TO_RING_BASE(bp, tc) \ 3905 ((tc) * (bp)->tx_nr_rings_per_tc) 3906 3907 #define BNXT_RING_TO_TC_OFF(bp, tx) \ 3908 ((tx) % (bp)->tx_nr_rings_per_tc) 3909 3910 #define BNXT_RING_TO_TC(bp, tx) \ 3911 ((tx) / (bp)->tx_nr_rings_per_tc) 3912 3913 static int bnxt_alloc_tx_rings(struct bnxt *bp) 3914 { 3915 int i, j, rc; 3916 struct pci_dev *pdev = bp->pdev; 3917 3918 bp->tx_push_size = 0; 3919 if (bp->tx_push_thresh) { 3920 int push_size; 3921 3922 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 3923 bp->tx_push_thresh); 3924 3925 if (push_size > 256) { 3926 push_size = 0; 3927 bp->tx_push_thresh = 0; 3928 } 3929 3930 bp->tx_push_size = push_size; 3931 } 3932 3933 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { 3934 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 3935 struct bnxt_ring_struct *ring; 3936 u8 qidx; 3937 3938 ring = &txr->tx_ring_struct; 3939 3940 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 3941 if (rc) 3942 return rc; 3943 3944 ring->grp_idx = txr->bnapi->index; 3945 if (bp->tx_push_size) { 3946 dma_addr_t mapping; 3947 3948 /* One pre-allocated DMA buffer to backup 3949 * TX push operation 3950 */ 3951 txr->tx_push = dma_alloc_coherent(&pdev->dev, 3952 bp->tx_push_size, 3953 &txr->tx_push_mapping, 3954 GFP_KERNEL); 3955 3956 if (!txr->tx_push) 3957 return -ENOMEM; 3958 3959 mapping = txr->tx_push_mapping + 3960 sizeof(struct tx_push_bd); 3961 txr->data_mapping = cpu_to_le64(mapping); 3962 } 3963 qidx = bp->tc_to_qidx[j]; 3964 ring->queue_id = bp->q_info[qidx].queue_id; 3965 spin_lock_init(&txr->xdp_tx_lock); 3966 if (i < bp->tx_nr_rings_xdp) 3967 continue; 3968 if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) 3969 j++; 3970 } 3971 return 0; 3972 } 3973 3974 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) 3975 { 3976 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 3977 3978 kfree(cpr->cp_desc_ring); 3979 cpr->cp_desc_ring = NULL; 3980 ring->ring_mem.pg_arr = NULL; 3981 kfree(cpr->cp_desc_mapping); 3982 cpr->cp_desc_mapping = NULL; 3983 ring->ring_mem.dma_arr = NULL; 3984 } 3985 3986 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) 3987 { 3988 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), 
GFP_KERNEL); 3989 if (!cpr->cp_desc_ring) 3990 return -ENOMEM; 3991 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), 3992 GFP_KERNEL); 3993 if (!cpr->cp_desc_mapping) 3994 return -ENOMEM; 3995 return 0; 3996 } 3997 3998 static void bnxt_free_all_cp_arrays(struct bnxt *bp) 3999 { 4000 int i; 4001 4002 if (!bp->bnapi) 4003 return; 4004 for (i = 0; i < bp->cp_nr_rings; i++) { 4005 struct bnxt_napi *bnapi = bp->bnapi[i]; 4006 4007 if (!bnapi) 4008 continue; 4009 bnxt_free_cp_arrays(&bnapi->cp_ring); 4010 } 4011 } 4012 4013 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) 4014 { 4015 int i, n = bp->cp_nr_pages; 4016 4017 for (i = 0; i < bp->cp_nr_rings; i++) { 4018 struct bnxt_napi *bnapi = bp->bnapi[i]; 4019 int rc; 4020 4021 if (!bnapi) 4022 continue; 4023 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); 4024 if (rc) 4025 return rc; 4026 } 4027 return 0; 4028 } 4029 4030 static void bnxt_free_cp_rings(struct bnxt *bp) 4031 { 4032 int i; 4033 4034 if (!bp->bnapi) 4035 return; 4036 4037 for (i = 0; i < bp->cp_nr_rings; i++) { 4038 struct bnxt_napi *bnapi = bp->bnapi[i]; 4039 struct bnxt_cp_ring_info *cpr; 4040 struct bnxt_ring_struct *ring; 4041 int j; 4042 4043 if (!bnapi) 4044 continue; 4045 4046 cpr = &bnapi->cp_ring; 4047 ring = &cpr->cp_ring_struct; 4048 4049 bnxt_free_ring(bp, &ring->ring_mem); 4050 4051 if (!cpr->cp_ring_arr) 4052 continue; 4053 4054 for (j = 0; j < cpr->cp_ring_count; j++) { 4055 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 4056 4057 ring = &cpr2->cp_ring_struct; 4058 bnxt_free_ring(bp, &ring->ring_mem); 4059 bnxt_free_cp_arrays(cpr2); 4060 } 4061 kfree(cpr->cp_ring_arr); 4062 cpr->cp_ring_arr = NULL; 4063 cpr->cp_ring_count = 0; 4064 } 4065 } 4066 4067 static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, 4068 struct bnxt_cp_ring_info *cpr) 4069 { 4070 struct bnxt_ring_mem_info *rmem; 4071 struct bnxt_ring_struct *ring; 4072 int rc; 4073 4074 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); 4075 if (rc) { 4076 bnxt_free_cp_arrays(cpr); 4077 return -ENOMEM; 4078 } 4079 ring = &cpr->cp_ring_struct; 4080 rmem = &ring->ring_mem; 4081 rmem->nr_pages = bp->cp_nr_pages; 4082 rmem->page_size = HW_CMPD_RING_SIZE; 4083 rmem->pg_arr = (void **)cpr->cp_desc_ring; 4084 rmem->dma_arr = cpr->cp_desc_mapping; 4085 rmem->flags = BNXT_RMEM_RING_PTE_FLAG; 4086 rc = bnxt_alloc_ring(bp, rmem); 4087 if (rc) { 4088 bnxt_free_ring(bp, rmem); 4089 bnxt_free_cp_arrays(cpr); 4090 } 4091 return rc; 4092 } 4093 4094 static int bnxt_alloc_cp_rings(struct bnxt *bp) 4095 { 4096 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 4097 int i, j, rc, ulp_msix; 4098 int tcs = bp->num_tc; 4099 4100 if (!tcs) 4101 tcs = 1; 4102 ulp_msix = bnxt_get_ulp_msix_num(bp); 4103 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 4104 struct bnxt_napi *bnapi = bp->bnapi[i]; 4105 struct bnxt_cp_ring_info *cpr, *cpr2; 4106 struct bnxt_ring_struct *ring; 4107 int cp_count = 0, k; 4108 int rx = 0, tx = 0; 4109 4110 if (!bnapi) 4111 continue; 4112 4113 cpr = &bnapi->cp_ring; 4114 cpr->bnapi = bnapi; 4115 ring = &cpr->cp_ring_struct; 4116 4117 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 4118 if (rc) 4119 return rc; 4120 4121 ring->map_idx = ulp_msix + i; 4122 4123 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4124 continue; 4125 4126 if (i < bp->rx_nr_rings) { 4127 cp_count++; 4128 rx = 1; 4129 } 4130 if (i < bp->tx_nr_rings_xdp) { 4131 cp_count++; 4132 tx = 1; 4133 } else if ((sh && i < bp->tx_nr_rings) || 4134 (!sh && i >= bp->rx_nr_rings)) { 4135 cp_count += tcs; 4136 tx = 1; 4137 } 4138 4139 
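		/* On P5+ chips each NQ carries one completion sub-ring for
		 * its RX ring plus one for each of its TX rings (one per TC,
		 * or a single XDP TX ring); allocate and link them below.
		 */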
cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), 4140 GFP_KERNEL); 4141 if (!cpr->cp_ring_arr) 4142 return -ENOMEM; 4143 cpr->cp_ring_count = cp_count; 4144 4145 for (k = 0; k < cp_count; k++) { 4146 cpr2 = &cpr->cp_ring_arr[k]; 4147 rc = bnxt_alloc_cp_sub_ring(bp, cpr2); 4148 if (rc) 4149 return rc; 4150 cpr2->bnapi = bnapi; 4151 cpr2->sw_stats = cpr->sw_stats; 4152 cpr2->cp_idx = k; 4153 if (!k && rx) { 4154 bp->rx_ring[i].rx_cpr = cpr2; 4155 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; 4156 } else { 4157 int n, tc = k - rx; 4158 4159 n = BNXT_TC_TO_RING_BASE(bp, tc) + j; 4160 bp->tx_ring[n].tx_cpr = cpr2; 4161 cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; 4162 } 4163 } 4164 if (tx) 4165 j++; 4166 } 4167 return 0; 4168 } 4169 4170 static void bnxt_init_rx_ring_struct(struct bnxt *bp, 4171 struct bnxt_rx_ring_info *rxr) 4172 { 4173 struct bnxt_ring_mem_info *rmem; 4174 struct bnxt_ring_struct *ring; 4175 4176 ring = &rxr->rx_ring_struct; 4177 rmem = &ring->ring_mem; 4178 rmem->nr_pages = bp->rx_nr_pages; 4179 rmem->page_size = HW_RXBD_RING_SIZE; 4180 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4181 rmem->dma_arr = rxr->rx_desc_mapping; 4182 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4183 rmem->vmem = (void **)&rxr->rx_buf_ring; 4184 4185 ring = &rxr->rx_agg_ring_struct; 4186 rmem = &ring->ring_mem; 4187 rmem->nr_pages = bp->rx_agg_nr_pages; 4188 rmem->page_size = HW_RXBD_RING_SIZE; 4189 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4190 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4191 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4192 rmem->vmem = (void **)&rxr->rx_agg_ring; 4193 } 4194 4195 static void bnxt_reset_rx_ring_struct(struct bnxt *bp, 4196 struct bnxt_rx_ring_info *rxr) 4197 { 4198 struct bnxt_ring_mem_info *rmem; 4199 struct bnxt_ring_struct *ring; 4200 int i; 4201 4202 rxr->page_pool->p.napi = NULL; 4203 rxr->page_pool = NULL; 4204 memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info)); 4205 4206 ring = &rxr->rx_ring_struct; 4207 rmem = &ring->ring_mem; 4208 rmem->pg_tbl = NULL; 4209 rmem->pg_tbl_map = 0; 4210 for (i = 0; i < rmem->nr_pages; i++) { 4211 rmem->pg_arr[i] = NULL; 4212 rmem->dma_arr[i] = 0; 4213 } 4214 *rmem->vmem = NULL; 4215 4216 ring = &rxr->rx_agg_ring_struct; 4217 rmem = &ring->ring_mem; 4218 rmem->pg_tbl = NULL; 4219 rmem->pg_tbl_map = 0; 4220 for (i = 0; i < rmem->nr_pages; i++) { 4221 rmem->pg_arr[i] = NULL; 4222 rmem->dma_arr[i] = 0; 4223 } 4224 *rmem->vmem = NULL; 4225 } 4226 4227 static void bnxt_init_ring_struct(struct bnxt *bp) 4228 { 4229 int i, j; 4230 4231 for (i = 0; i < bp->cp_nr_rings; i++) { 4232 struct bnxt_napi *bnapi = bp->bnapi[i]; 4233 struct bnxt_ring_mem_info *rmem; 4234 struct bnxt_cp_ring_info *cpr; 4235 struct bnxt_rx_ring_info *rxr; 4236 struct bnxt_tx_ring_info *txr; 4237 struct bnxt_ring_struct *ring; 4238 4239 if (!bnapi) 4240 continue; 4241 4242 cpr = &bnapi->cp_ring; 4243 ring = &cpr->cp_ring_struct; 4244 rmem = &ring->ring_mem; 4245 rmem->nr_pages = bp->cp_nr_pages; 4246 rmem->page_size = HW_CMPD_RING_SIZE; 4247 rmem->pg_arr = (void **)cpr->cp_desc_ring; 4248 rmem->dma_arr = cpr->cp_desc_mapping; 4249 rmem->vmem_size = 0; 4250 4251 rxr = bnapi->rx_ring; 4252 if (!rxr) 4253 goto skip_rx; 4254 4255 ring = &rxr->rx_ring_struct; 4256 rmem = &ring->ring_mem; 4257 rmem->nr_pages = bp->rx_nr_pages; 4258 rmem->page_size = HW_RXBD_RING_SIZE; 4259 rmem->pg_arr = (void **)rxr->rx_desc_ring; 4260 rmem->dma_arr = rxr->rx_desc_mapping; 4261 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; 4262 rmem->vmem = (void 
**)&rxr->rx_buf_ring; 4263 4264 ring = &rxr->rx_agg_ring_struct; 4265 rmem = &ring->ring_mem; 4266 rmem->nr_pages = bp->rx_agg_nr_pages; 4267 rmem->page_size = HW_RXBD_RING_SIZE; 4268 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; 4269 rmem->dma_arr = rxr->rx_agg_desc_mapping; 4270 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; 4271 rmem->vmem = (void **)&rxr->rx_agg_ring; 4272 4273 skip_rx: 4274 bnxt_for_each_napi_tx(j, bnapi, txr) { 4275 ring = &txr->tx_ring_struct; 4276 rmem = &ring->ring_mem; 4277 rmem->nr_pages = bp->tx_nr_pages; 4278 rmem->page_size = HW_TXBD_RING_SIZE; 4279 rmem->pg_arr = (void **)txr->tx_desc_ring; 4280 rmem->dma_arr = txr->tx_desc_mapping; 4281 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; 4282 rmem->vmem = (void **)&txr->tx_buf_ring; 4283 } 4284 } 4285 } 4286 4287 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) 4288 { 4289 int i; 4290 u32 prod; 4291 struct rx_bd **rx_buf_ring; 4292 4293 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; 4294 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { 4295 int j; 4296 struct rx_bd *rxbd; 4297 4298 rxbd = rx_buf_ring[i]; 4299 if (!rxbd) 4300 continue; 4301 4302 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) { 4303 rxbd->rx_bd_len_flags_type = cpu_to_le32(type); 4304 rxbd->rx_bd_opaque = prod; 4305 } 4306 } 4307 } 4308 4309 static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp, 4310 struct bnxt_rx_ring_info *rxr, 4311 int ring_nr) 4312 { 4313 u32 prod; 4314 int i; 4315 4316 prod = rxr->rx_prod; 4317 for (i = 0; i < bp->rx_ring_size; i++) { 4318 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { 4319 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", 4320 ring_nr, i, bp->rx_ring_size); 4321 break; 4322 } 4323 prod = NEXT_RX(prod); 4324 } 4325 rxr->rx_prod = prod; 4326 } 4327 4328 static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp, 4329 struct bnxt_rx_ring_info *rxr, 4330 int ring_nr) 4331 { 4332 u32 prod; 4333 int i; 4334 4335 prod = rxr->rx_agg_prod; 4336 for (i = 0; i < bp->rx_agg_ring_size; i++) { 4337 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { 4338 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n", 4339 ring_nr, i, bp->rx_ring_size); 4340 break; 4341 } 4342 prod = NEXT_RX_AGG(prod); 4343 } 4344 rxr->rx_agg_prod = prod; 4345 } 4346 4347 static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp, 4348 struct bnxt_rx_ring_info *rxr) 4349 { 4350 dma_addr_t mapping; 4351 u8 *data; 4352 int i; 4353 4354 for (i = 0; i < bp->max_tpa; i++) { 4355 data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, 4356 GFP_KERNEL); 4357 if (!data) 4358 return -ENOMEM; 4359 4360 rxr->rx_tpa[i].data = data; 4361 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; 4362 rxr->rx_tpa[i].mapping = mapping; 4363 } 4364 4365 return 0; 4366 } 4367 4368 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) 4369 { 4370 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 4371 int rc; 4372 4373 bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr); 4374 4375 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 4376 return 0; 4377 4378 bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr); 4379 4380 if (rxr->rx_tpa) { 4381 rc = bnxt_alloc_one_tpa_info_data(bp, rxr); 4382 if (rc) 4383 return rc; 4384 } 4385 return 0; 4386 } 4387 4388 static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp, 4389 struct bnxt_rx_ring_info *rxr) 4390 { 4391 struct bnxt_ring_struct *ring; 4392 u32 type; 4393 4394 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | 4395 RX_BD_TYPE_RX_PACKET_BD | 
RX_BD_FLAGS_EOP; 4396 4397 if (NET_IP_ALIGN == 2) 4398 type |= RX_BD_FLAGS_SOP; 4399 4400 ring = &rxr->rx_ring_struct; 4401 bnxt_init_rxbd_pages(ring, type); 4402 ring->fw_ring_id = INVALID_HW_RING_ID; 4403 } 4404 4405 static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp, 4406 struct bnxt_rx_ring_info *rxr) 4407 { 4408 struct bnxt_ring_struct *ring; 4409 u32 type; 4410 4411 ring = &rxr->rx_agg_ring_struct; 4412 ring->fw_ring_id = INVALID_HW_RING_ID; 4413 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { 4414 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | 4415 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; 4416 4417 bnxt_init_rxbd_pages(ring, type); 4418 } 4419 } 4420 4421 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) 4422 { 4423 struct bnxt_rx_ring_info *rxr; 4424 4425 rxr = &bp->rx_ring[ring_nr]; 4426 bnxt_init_one_rx_ring_rxbd(bp, rxr); 4427 4428 netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, 4429 &rxr->bnapi->napi); 4430 4431 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { 4432 bpf_prog_add(bp->xdp_prog, 1); 4433 rxr->xdp_prog = bp->xdp_prog; 4434 } 4435 4436 bnxt_init_one_rx_agg_ring_rxbd(bp, rxr); 4437 4438 return bnxt_alloc_one_rx_ring(bp, ring_nr); 4439 } 4440 4441 static void bnxt_init_cp_rings(struct bnxt *bp) 4442 { 4443 int i, j; 4444 4445 for (i = 0; i < bp->cp_nr_rings; i++) { 4446 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; 4447 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4448 4449 ring->fw_ring_id = INVALID_HW_RING_ID; 4450 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4451 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4452 if (!cpr->cp_ring_arr) 4453 continue; 4454 for (j = 0; j < cpr->cp_ring_count; j++) { 4455 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 4456 4457 ring = &cpr2->cp_ring_struct; 4458 ring->fw_ring_id = INVALID_HW_RING_ID; 4459 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; 4460 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; 4461 } 4462 } 4463 } 4464 4465 static int bnxt_init_rx_rings(struct bnxt *bp) 4466 { 4467 int i, rc = 0; 4468 4469 if (BNXT_RX_PAGE_MODE(bp)) { 4470 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; 4471 bp->rx_dma_offset = XDP_PACKET_HEADROOM; 4472 } else { 4473 bp->rx_offset = BNXT_RX_OFFSET; 4474 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; 4475 } 4476 4477 for (i = 0; i < bp->rx_nr_rings; i++) { 4478 rc = bnxt_init_one_rx_ring(bp, i); 4479 if (rc) 4480 break; 4481 } 4482 4483 return rc; 4484 } 4485 4486 static int bnxt_init_tx_rings(struct bnxt *bp) 4487 { 4488 u16 i; 4489 4490 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 4491 BNXT_MIN_TX_DESC_CNT); 4492 4493 for (i = 0; i < bp->tx_nr_rings; i++) { 4494 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 4495 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 4496 4497 ring->fw_ring_id = INVALID_HW_RING_ID; 4498 4499 if (i >= bp->tx_nr_rings_xdp) 4500 netif_queue_set_napi(bp->dev, i - bp->tx_nr_rings_xdp, 4501 NETDEV_QUEUE_TYPE_TX, 4502 &txr->bnapi->napi); 4503 } 4504 4505 return 0; 4506 } 4507 4508 static void bnxt_free_ring_grps(struct bnxt *bp) 4509 { 4510 kfree(bp->grp_info); 4511 bp->grp_info = NULL; 4512 } 4513 4514 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) 4515 { 4516 int i; 4517 4518 if (irq_re_init) { 4519 bp->grp_info = kcalloc(bp->cp_nr_rings, 4520 sizeof(struct bnxt_ring_grp_info), 4521 GFP_KERNEL); 4522 if (!bp->grp_info) 4523 return -ENOMEM; 4524 } 4525 for (i = 0; i < bp->cp_nr_rings; i++) { 4526 if (irq_re_init) 4527 bp->grp_info[i].fw_stats_ctx = 
INVALID_HW_RING_ID; 4528 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 4529 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; 4530 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; 4531 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 4532 } 4533 return 0; 4534 } 4535 4536 static void bnxt_free_vnics(struct bnxt *bp) 4537 { 4538 kfree(bp->vnic_info); 4539 bp->vnic_info = NULL; 4540 bp->nr_vnics = 0; 4541 } 4542 4543 static int bnxt_alloc_vnics(struct bnxt *bp) 4544 { 4545 int num_vnics = 1; 4546 4547 #ifdef CONFIG_RFS_ACCEL 4548 if (bp->flags & BNXT_FLAG_RFS) { 4549 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 4550 num_vnics++; 4551 else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 4552 num_vnics += bp->rx_nr_rings; 4553 } 4554 #endif 4555 4556 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 4557 num_vnics++; 4558 4559 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), 4560 GFP_KERNEL); 4561 if (!bp->vnic_info) 4562 return -ENOMEM; 4563 4564 bp->nr_vnics = num_vnics; 4565 return 0; 4566 } 4567 4568 static void bnxt_init_vnics(struct bnxt *bp) 4569 { 4570 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 4571 int i; 4572 4573 for (i = 0; i < bp->nr_vnics; i++) { 4574 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 4575 int j; 4576 4577 vnic->fw_vnic_id = INVALID_HW_RING_ID; 4578 vnic->vnic_id = i; 4579 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) 4580 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; 4581 4582 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; 4583 4584 if (bp->vnic_info[i].rss_hash_key) { 4585 if (i == BNXT_VNIC_DEFAULT) { 4586 u8 *key = (void *)vnic->rss_hash_key; 4587 int k; 4588 4589 if (!bp->rss_hash_key_valid && 4590 !bp->rss_hash_key_updated) { 4591 get_random_bytes(bp->rss_hash_key, 4592 HW_HASH_KEY_SIZE); 4593 bp->rss_hash_key_updated = true; 4594 } 4595 4596 memcpy(vnic->rss_hash_key, bp->rss_hash_key, 4597 HW_HASH_KEY_SIZE); 4598 4599 if (!bp->rss_hash_key_updated) 4600 continue; 4601 4602 bp->rss_hash_key_updated = false; 4603 bp->rss_hash_key_valid = true; 4604 4605 bp->toeplitz_prefix = 0; 4606 for (k = 0; k < 8; k++) { 4607 bp->toeplitz_prefix <<= 8; 4608 bp->toeplitz_prefix |= key[k]; 4609 } 4610 } else { 4611 memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, 4612 HW_HASH_KEY_SIZE); 4613 } 4614 } 4615 } 4616 } 4617 4618 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) 4619 { 4620 int pages; 4621 4622 pages = ring_size / desc_per_pg; 4623 4624 if (!pages) 4625 return 1; 4626 4627 pages++; 4628 4629 while (pages & (pages - 1)) 4630 pages++; 4631 4632 return pages; 4633 } 4634 4635 void bnxt_set_tpa_flags(struct bnxt *bp) 4636 { 4637 bp->flags &= ~BNXT_FLAG_TPA; 4638 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 4639 return; 4640 if (bp->dev->features & NETIF_F_LRO) 4641 bp->flags |= BNXT_FLAG_LRO; 4642 else if (bp->dev->features & NETIF_F_GRO_HW) 4643 bp->flags |= BNXT_FLAG_GRO; 4644 } 4645 4646 static void bnxt_init_ring_params(struct bnxt *bp) 4647 { 4648 unsigned int rx_size; 4649 4650 bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK; 4651 /* Try to fit 4 chunks into a 4k page */ 4652 rx_size = SZ_1K - 4653 NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4654 bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size); 4655 } 4656 4657 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must 4658 * be set on entry. 
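 *
 * As a rough example, assuming typical values (64-byte cache lines,
 * NET_SKB_PAD of 64, XDP_PACKET_HEADROOM of 256 and a ~320-byte
 * skb_shared_info): a 1500-byte MTU gives rx_size =
 * SKB_DATA_ALIGN(1500 + ETH_HLEN + NET_IP_ALIGN + 8) = 1536 and an
 * rx_space of roughly 2.1KB, which fits in a 4K page, so the MTU alone
 * does not force BNXT_FLAG_JUMBO.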
4659 */ 4660 void bnxt_set_ring_params(struct bnxt *bp) 4661 { 4662 u32 ring_size, rx_size, rx_space, max_rx_cmpl; 4663 u32 agg_factor = 0, agg_ring_size = 0; 4664 4665 /* 8 for CRC and VLAN */ 4666 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); 4667 4668 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + 4669 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4670 4671 ring_size = bp->rx_ring_size; 4672 bp->rx_agg_ring_size = 0; 4673 bp->rx_agg_nr_pages = 0; 4674 4675 if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS) 4676 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); 4677 4678 bp->flags &= ~BNXT_FLAG_JUMBO; 4679 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { 4680 u32 jumbo_factor; 4681 4682 bp->flags |= BNXT_FLAG_JUMBO; 4683 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 4684 if (jumbo_factor > agg_factor) 4685 agg_factor = jumbo_factor; 4686 } 4687 if (agg_factor) { 4688 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { 4689 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; 4690 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", 4691 bp->rx_ring_size, ring_size); 4692 bp->rx_ring_size = ring_size; 4693 } 4694 agg_ring_size = ring_size * agg_factor; 4695 4696 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, 4697 RX_DESC_CNT); 4698 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { 4699 u32 tmp = agg_ring_size; 4700 4701 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; 4702 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; 4703 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", 4704 tmp, agg_ring_size); 4705 } 4706 bp->rx_agg_ring_size = agg_ring_size; 4707 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; 4708 4709 if (BNXT_RX_PAGE_MODE(bp)) { 4710 rx_space = PAGE_SIZE; 4711 rx_size = PAGE_SIZE - 4712 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - 4713 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4714 } else { 4715 rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK, 4716 bp->rx_copybreak, 4717 bp->dev->cfg_pending->hds_thresh); 4718 rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN); 4719 rx_space = rx_size + NET_SKB_PAD + 4720 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4721 } 4722 } 4723 4724 bp->rx_buf_use_size = rx_size; 4725 bp->rx_buf_size = rx_space; 4726 4727 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); 4728 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; 4729 4730 ring_size = bp->tx_ring_size; 4731 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); 4732 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; 4733 4734 max_rx_cmpl = bp->rx_ring_size; 4735 /* MAX TPA needs to be added because TPA_START completions are 4736 * immediately recycled, so the TPA completions are not bound by 4737 * the RX ring size. 
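 * The completion ring is therefore sized for the worst case of one
 * outstanding completion per RX buffer and per TPA ID, plus the
 * aggregation and TX rings.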
4738 */ 4739 if (bp->flags & BNXT_FLAG_TPA) 4740 max_rx_cmpl += bp->max_tpa; 4741 /* RX and TPA completions are 32-byte, all others are 16-byte */ 4742 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; 4743 bp->cp_ring_size = ring_size; 4744 4745 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); 4746 if (bp->cp_nr_pages > MAX_CP_PAGES) { 4747 bp->cp_nr_pages = MAX_CP_PAGES; 4748 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; 4749 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", 4750 ring_size, bp->cp_ring_size); 4751 } 4752 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; 4753 bp->cp_ring_mask = bp->cp_bit - 1; 4754 } 4755 4756 /* Changing allocation mode of RX rings. 4757 * TODO: Update when extending xdp_rxq_info to support allocation modes. 4758 */ 4759 static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4760 { 4761 struct net_device *dev = bp->dev; 4762 4763 if (page_mode) { 4764 bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS); 4765 bp->flags |= BNXT_FLAG_RX_PAGE_MODE; 4766 4767 if (bp->xdp_prog->aux->xdp_has_frags) 4768 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); 4769 else 4770 dev->max_mtu = 4771 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); 4772 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { 4773 bp->flags |= BNXT_FLAG_JUMBO; 4774 bp->rx_skb_func = bnxt_rx_multi_page_skb; 4775 } else { 4776 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 4777 bp->rx_skb_func = bnxt_rx_page_skb; 4778 } 4779 bp->rx_dir = DMA_BIDIRECTIONAL; 4780 } else { 4781 dev->max_mtu = bp->max_mtu; 4782 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; 4783 bp->rx_dir = DMA_FROM_DEVICE; 4784 bp->rx_skb_func = bnxt_rx_skb; 4785 } 4786 } 4787 4788 void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) 4789 { 4790 __bnxt_set_rx_skb_mode(bp, page_mode); 4791 4792 if (!page_mode) { 4793 int rx, tx; 4794 4795 bnxt_get_max_rings(bp, &rx, &tx, true); 4796 if (rx > 1) { 4797 bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; 4798 bp->dev->hw_features |= NETIF_F_LRO; 4799 } 4800 } 4801 4802 /* Update LRO and GRO_HW availability */ 4803 netdev_update_features(bp->dev); 4804 } 4805 4806 static void bnxt_free_vnic_attributes(struct bnxt *bp) 4807 { 4808 int i; 4809 struct bnxt_vnic_info *vnic; 4810 struct pci_dev *pdev = bp->pdev; 4811 4812 if (!bp->vnic_info) 4813 return; 4814 4815 for (i = 0; i < bp->nr_vnics; i++) { 4816 vnic = &bp->vnic_info[i]; 4817 4818 kfree(vnic->fw_grp_ids); 4819 vnic->fw_grp_ids = NULL; 4820 4821 kfree(vnic->uc_list); 4822 vnic->uc_list = NULL; 4823 4824 if (vnic->mc_list) { 4825 dma_free_coherent(&pdev->dev, vnic->mc_list_size, 4826 vnic->mc_list, vnic->mc_list_mapping); 4827 vnic->mc_list = NULL; 4828 } 4829 4830 if (vnic->rss_table) { 4831 dma_free_coherent(&pdev->dev, vnic->rss_table_size, 4832 vnic->rss_table, 4833 vnic->rss_table_dma_addr); 4834 vnic->rss_table = NULL; 4835 } 4836 4837 vnic->rss_hash_key = NULL; 4838 vnic->flags = 0; 4839 } 4840 } 4841 4842 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) 4843 { 4844 int i, rc = 0, size; 4845 struct bnxt_vnic_info *vnic; 4846 struct pci_dev *pdev = bp->pdev; 4847 int max_rings; 4848 4849 for (i = 0; i < bp->nr_vnics; i++) { 4850 vnic = &bp->vnic_info[i]; 4851 4852 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { 4853 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; 4854 4855 if (mem_size > 0) { 4856 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); 4857 if (!vnic->uc_list) { 4858 rc = -ENOMEM; 4859 goto out; 4860 } 4861 } 4862 } 4863 4864 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { 4865 
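			/* The multicast address table is handed to the
			 * firmware by DMA address (see
			 * bnxt_hwrm_cfa_l2_set_rx_mask()), so it must come
			 * from coherent memory.
			 */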
vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; 4866 vnic->mc_list = 4867 dma_alloc_coherent(&pdev->dev, 4868 vnic->mc_list_size, 4869 &vnic->mc_list_mapping, 4870 GFP_KERNEL); 4871 if (!vnic->mc_list) { 4872 rc = -ENOMEM; 4873 goto out; 4874 } 4875 } 4876 4877 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4878 goto vnic_skip_grps; 4879 4880 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 4881 max_rings = bp->rx_nr_rings; 4882 else 4883 max_rings = 1; 4884 4885 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); 4886 if (!vnic->fw_grp_ids) { 4887 rc = -ENOMEM; 4888 goto out; 4889 } 4890 vnic_skip_grps: 4891 if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && 4892 !(vnic->flags & BNXT_VNIC_RSS_FLAG)) 4893 continue; 4894 4895 /* Allocate rss table and hash key */ 4896 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); 4897 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 4898 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); 4899 4900 vnic->rss_table_size = size + HW_HASH_KEY_SIZE; 4901 vnic->rss_table = dma_alloc_coherent(&pdev->dev, 4902 vnic->rss_table_size, 4903 &vnic->rss_table_dma_addr, 4904 GFP_KERNEL); 4905 if (!vnic->rss_table) { 4906 rc = -ENOMEM; 4907 goto out; 4908 } 4909 4910 vnic->rss_hash_key = ((void *)vnic->rss_table) + size; 4911 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; 4912 } 4913 return 0; 4914 4915 out: 4916 return rc; 4917 } 4918 4919 static void bnxt_free_hwrm_resources(struct bnxt *bp) 4920 { 4921 struct bnxt_hwrm_wait_token *token; 4922 4923 dma_pool_destroy(bp->hwrm_dma_pool); 4924 bp->hwrm_dma_pool = NULL; 4925 4926 rcu_read_lock(); 4927 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) 4928 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); 4929 rcu_read_unlock(); 4930 } 4931 4932 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) 4933 { 4934 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, 4935 BNXT_HWRM_DMA_SIZE, 4936 BNXT_HWRM_DMA_ALIGN, 0); 4937 if (!bp->hwrm_dma_pool) 4938 return -ENOMEM; 4939 4940 INIT_HLIST_HEAD(&bp->hwrm_pending_list); 4941 4942 return 0; 4943 } 4944 4945 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) 4946 { 4947 kfree(stats->hw_masks); 4948 stats->hw_masks = NULL; 4949 kfree(stats->sw_stats); 4950 stats->sw_stats = NULL; 4951 if (stats->hw_stats) { 4952 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, 4953 stats->hw_stats_map); 4954 stats->hw_stats = NULL; 4955 } 4956 } 4957 4958 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, 4959 bool alloc_masks) 4960 { 4961 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, 4962 &stats->hw_stats_map, GFP_KERNEL); 4963 if (!stats->hw_stats) 4964 return -ENOMEM; 4965 4966 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); 4967 if (!stats->sw_stats) 4968 goto stats_mem_err; 4969 4970 if (alloc_masks) { 4971 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); 4972 if (!stats->hw_masks) 4973 goto stats_mem_err; 4974 } 4975 return 0; 4976 4977 stats_mem_err: 4978 bnxt_free_stats_mem(bp, stats); 4979 return -ENOMEM; 4980 } 4981 4982 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) 4983 { 4984 int i; 4985 4986 for (i = 0; i < count; i++) 4987 mask_arr[i] = mask; 4988 } 4989 4990 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) 4991 { 4992 int i; 4993 4994 for (i = 0; i < count; i++) 4995 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); 4996 } 4997 4998 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, 4999 struct bnxt_stats_mem *stats) 
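/* Query firmware for the valid width (mask) of each extended counter so
 * that software accumulation can handle counter rollover.  Only available
 * on P5+ chips that advertise extended HW stats support.
 */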
5000 { 5001 struct hwrm_func_qstats_ext_output *resp; 5002 struct hwrm_func_qstats_ext_input *req; 5003 __le64 *hw_masks; 5004 int rc; 5005 5006 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || 5007 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5008 return -EOPNOTSUPP; 5009 5010 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); 5011 if (rc) 5012 return rc; 5013 5014 req->fid = cpu_to_le16(0xffff); 5015 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 5016 5017 resp = hwrm_req_hold(bp, req); 5018 rc = hwrm_req_send(bp, req); 5019 if (!rc) { 5020 hw_masks = &resp->rx_ucast_pkts; 5021 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); 5022 } 5023 hwrm_req_drop(bp, req); 5024 return rc; 5025 } 5026 5027 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); 5028 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); 5029 5030 static void bnxt_init_stats(struct bnxt *bp) 5031 { 5032 struct bnxt_napi *bnapi = bp->bnapi[0]; 5033 struct bnxt_cp_ring_info *cpr; 5034 struct bnxt_stats_mem *stats; 5035 __le64 *rx_stats, *tx_stats; 5036 int rc, rx_count, tx_count; 5037 u64 *rx_masks, *tx_masks; 5038 u64 mask; 5039 u8 flags; 5040 5041 cpr = &bnapi->cp_ring; 5042 stats = &cpr->stats; 5043 rc = bnxt_hwrm_func_qstat_ext(bp, stats); 5044 if (rc) { 5045 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5046 mask = (1ULL << 48) - 1; 5047 else 5048 mask = -1ULL; 5049 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); 5050 } 5051 if (bp->flags & BNXT_FLAG_PORT_STATS) { 5052 stats = &bp->port_stats; 5053 rx_stats = stats->hw_stats; 5054 rx_masks = stats->hw_masks; 5055 rx_count = sizeof(struct rx_port_stats) / 8; 5056 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5057 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 5058 tx_count = sizeof(struct tx_port_stats) / 8; 5059 5060 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; 5061 rc = bnxt_hwrm_port_qstats(bp, flags); 5062 if (rc) { 5063 mask = (1ULL << 40) - 1; 5064 5065 bnxt_fill_masks(rx_masks, mask, rx_count); 5066 bnxt_fill_masks(tx_masks, mask, tx_count); 5067 } else { 5068 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 5069 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); 5070 bnxt_hwrm_port_qstats(bp, 0); 5071 } 5072 } 5073 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 5074 stats = &bp->rx_port_stats_ext; 5075 rx_stats = stats->hw_stats; 5076 rx_masks = stats->hw_masks; 5077 rx_count = sizeof(struct rx_port_stats_ext) / 8; 5078 stats = &bp->tx_port_stats_ext; 5079 tx_stats = stats->hw_stats; 5080 tx_masks = stats->hw_masks; 5081 tx_count = sizeof(struct tx_port_stats_ext) / 8; 5082 5083 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; 5084 rc = bnxt_hwrm_port_qstats_ext(bp, flags); 5085 if (rc) { 5086 mask = (1ULL << 40) - 1; 5087 5088 bnxt_fill_masks(rx_masks, mask, rx_count); 5089 if (tx_stats) 5090 bnxt_fill_masks(tx_masks, mask, tx_count); 5091 } else { 5092 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); 5093 if (tx_stats) 5094 bnxt_copy_hw_masks(tx_masks, tx_stats, 5095 tx_count); 5096 bnxt_hwrm_port_qstats_ext(bp, 0); 5097 } 5098 } 5099 } 5100 5101 static void bnxt_free_port_stats(struct bnxt *bp) 5102 { 5103 bp->flags &= ~BNXT_FLAG_PORT_STATS; 5104 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; 5105 5106 bnxt_free_stats_mem(bp, &bp->port_stats); 5107 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); 5108 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); 5109 } 5110 5111 static void bnxt_free_ring_stats(struct bnxt *bp) 5112 { 5113 int i; 5114 5115 if (!bp->bnapi) 5116 return; 5117 5118 for (i = 0; i < 
bp->cp_nr_rings; i++) { 5119 struct bnxt_napi *bnapi = bp->bnapi[i]; 5120 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5121 5122 bnxt_free_stats_mem(bp, &cpr->stats); 5123 5124 kfree(cpr->sw_stats); 5125 cpr->sw_stats = NULL; 5126 } 5127 } 5128 5129 static int bnxt_alloc_stats(struct bnxt *bp) 5130 { 5131 u32 size, i; 5132 int rc; 5133 5134 size = bp->hw_ring_stats_size; 5135 5136 for (i = 0; i < bp->cp_nr_rings; i++) { 5137 struct bnxt_napi *bnapi = bp->bnapi[i]; 5138 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5139 5140 cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL); 5141 if (!cpr->sw_stats) 5142 return -ENOMEM; 5143 5144 cpr->stats.len = size; 5145 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); 5146 if (rc) 5147 return rc; 5148 5149 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 5150 } 5151 5152 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) 5153 return 0; 5154 5155 if (bp->port_stats.hw_stats) 5156 goto alloc_ext_stats; 5157 5158 bp->port_stats.len = BNXT_PORT_STATS_SIZE; 5159 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); 5160 if (rc) 5161 return rc; 5162 5163 bp->flags |= BNXT_FLAG_PORT_STATS; 5164 5165 alloc_ext_stats: 5166 /* Display extended statistics only if FW supports it */ 5167 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) 5168 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) 5169 return 0; 5170 5171 if (bp->rx_port_stats_ext.hw_stats) 5172 goto alloc_tx_ext_stats; 5173 5174 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); 5175 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); 5176 /* Extended stats are optional */ 5177 if (rc) 5178 return 0; 5179 5180 alloc_tx_ext_stats: 5181 if (bp->tx_port_stats_ext.hw_stats) 5182 return 0; 5183 5184 if (bp->hwrm_spec_code >= 0x10902 || 5185 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { 5186 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); 5187 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); 5188 /* Extended stats are optional */ 5189 if (rc) 5190 return 0; 5191 } 5192 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 5193 return 0; 5194 } 5195 5196 static void bnxt_clear_ring_indices(struct bnxt *bp) 5197 { 5198 int i, j; 5199 5200 if (!bp->bnapi) 5201 return; 5202 5203 for (i = 0; i < bp->cp_nr_rings; i++) { 5204 struct bnxt_napi *bnapi = bp->bnapi[i]; 5205 struct bnxt_cp_ring_info *cpr; 5206 struct bnxt_rx_ring_info *rxr; 5207 struct bnxt_tx_ring_info *txr; 5208 5209 if (!bnapi) 5210 continue; 5211 5212 cpr = &bnapi->cp_ring; 5213 cpr->cp_raw_cons = 0; 5214 5215 bnxt_for_each_napi_tx(j, bnapi, txr) { 5216 txr->tx_prod = 0; 5217 txr->tx_cons = 0; 5218 txr->tx_hw_cons = 0; 5219 } 5220 5221 rxr = bnapi->rx_ring; 5222 if (rxr) { 5223 rxr->rx_prod = 0; 5224 rxr->rx_agg_prod = 0; 5225 rxr->rx_sw_agg_prod = 0; 5226 rxr->rx_next_cons = 0; 5227 } 5228 bnapi->events = 0; 5229 } 5230 } 5231 5232 void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5233 { 5234 u8 type = fltr->type, flags = fltr->flags; 5235 5236 INIT_LIST_HEAD(&fltr->list); 5237 if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) || 5238 (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING)) 5239 list_add_tail(&fltr->list, &bp->usr_fltr_list); 5240 } 5241 5242 void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5243 { 5244 if (!list_empty(&fltr->list)) 5245 list_del_init(&fltr->list); 5246 } 5247 5248 static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) 5249 { 5250 struct bnxt_filter_base *usr_fltr, *tmp; 5251 5252 
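	/* Drop user-created filters from the tracking list; L2 filters are
	 * preserved unless @all is set.
	 */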
list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 5253 if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2) 5254 continue; 5255 bnxt_del_one_usr_fltr(bp, usr_fltr); 5256 } 5257 } 5258 5259 static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 5260 { 5261 hlist_del(&fltr->hash); 5262 bnxt_del_one_usr_fltr(bp, fltr); 5263 if (fltr->flags) { 5264 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); 5265 bp->ntp_fltr_count--; 5266 } 5267 kfree(fltr); 5268 } 5269 5270 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) 5271 { 5272 int i; 5273 5274 netdev_assert_locked(bp->dev); 5275 5276 /* Under netdev instance lock and all our NAPIs have been disabled. 5277 * It's safe to delete the hash table. 5278 */ 5279 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 5280 struct hlist_head *head; 5281 struct hlist_node *tmp; 5282 struct bnxt_ntuple_filter *fltr; 5283 5284 head = &bp->ntp_fltr_hash_tbl[i]; 5285 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5286 bnxt_del_l2_filter(bp, fltr->l2_fltr); 5287 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5288 !list_empty(&fltr->base.list))) 5289 continue; 5290 bnxt_del_fltr(bp, &fltr->base); 5291 } 5292 } 5293 if (!all) 5294 return; 5295 5296 bitmap_free(bp->ntp_fltr_bmap); 5297 bp->ntp_fltr_bmap = NULL; 5298 bp->ntp_fltr_count = 0; 5299 } 5300 5301 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) 5302 { 5303 int i, rc = 0; 5304 5305 if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) 5306 return 0; 5307 5308 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) 5309 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); 5310 5311 bp->ntp_fltr_count = 0; 5312 bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL); 5313 5314 if (!bp->ntp_fltr_bmap) 5315 rc = -ENOMEM; 5316 5317 return rc; 5318 } 5319 5320 static void bnxt_free_l2_filters(struct bnxt *bp, bool all) 5321 { 5322 int i; 5323 5324 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { 5325 struct hlist_head *head; 5326 struct hlist_node *tmp; 5327 struct bnxt_l2_filter *fltr; 5328 5329 head = &bp->l2_fltr_hash_tbl[i]; 5330 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 5331 if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || 5332 !list_empty(&fltr->base.list))) 5333 continue; 5334 bnxt_del_fltr(bp, &fltr->base); 5335 } 5336 } 5337 } 5338 5339 static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) 5340 { 5341 int i; 5342 5343 for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) 5344 INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); 5345 get_random_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); 5346 } 5347 5348 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) 5349 { 5350 bnxt_free_vnic_attributes(bp); 5351 bnxt_free_tx_rings(bp); 5352 bnxt_free_rx_rings(bp); 5353 bnxt_free_cp_rings(bp); 5354 bnxt_free_all_cp_arrays(bp); 5355 bnxt_free_ntp_fltrs(bp, false); 5356 bnxt_free_l2_filters(bp, false); 5357 if (irq_re_init) { 5358 bnxt_free_ring_stats(bp); 5359 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || 5360 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 5361 bnxt_free_port_stats(bp); 5362 bnxt_free_ring_grps(bp); 5363 bnxt_free_vnics(bp); 5364 kfree(bp->tx_ring_map); 5365 bp->tx_ring_map = NULL; 5366 kfree(bp->tx_ring); 5367 bp->tx_ring = NULL; 5368 kfree(bp->rx_ring); 5369 bp->rx_ring = NULL; 5370 kfree(bp->bnapi); 5371 bp->bnapi = NULL; 5372 } else { 5373 bnxt_clear_ring_indices(bp); 5374 } 5375 } 5376 5377 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) 5378 { 5379 int i, j, rc, size, arr_size; 5380 void *bnapi; 5381 5382 if (irq_re_init) { 5383 
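		/* Full (re)initialization: the bnapi array, RX/TX ring info
		 * arrays, stats, filters and VNICs are reallocated from
		 * scratch.  When irq_re_init is false these are kept and
		 * only the ring memory below is set up again.
		 */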
/* Allocate bnapi mem pointer array and mem block for 5384 * all queues 5385 */ 5386 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * 5387 bp->cp_nr_rings); 5388 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); 5389 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); 5390 if (!bnapi) 5391 return -ENOMEM; 5392 5393 bp->bnapi = bnapi; 5394 bnapi += arr_size; 5395 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { 5396 bp->bnapi[i] = bnapi; 5397 bp->bnapi[i]->index = i; 5398 bp->bnapi[i]->bp = bp; 5399 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5400 struct bnxt_cp_ring_info *cpr = 5401 &bp->bnapi[i]->cp_ring; 5402 5403 cpr->cp_ring_struct.ring_mem.flags = 5404 BNXT_RMEM_RING_PTE_FLAG; 5405 } 5406 } 5407 5408 bp->rx_ring = kcalloc(bp->rx_nr_rings, 5409 sizeof(struct bnxt_rx_ring_info), 5410 GFP_KERNEL); 5411 if (!bp->rx_ring) 5412 return -ENOMEM; 5413 5414 for (i = 0; i < bp->rx_nr_rings; i++) { 5415 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5416 5417 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5418 rxr->rx_ring_struct.ring_mem.flags = 5419 BNXT_RMEM_RING_PTE_FLAG; 5420 rxr->rx_agg_ring_struct.ring_mem.flags = 5421 BNXT_RMEM_RING_PTE_FLAG; 5422 } else { 5423 rxr->rx_cpr = &bp->bnapi[i]->cp_ring; 5424 } 5425 rxr->bnapi = bp->bnapi[i]; 5426 bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; 5427 } 5428 5429 bp->tx_ring = kcalloc(bp->tx_nr_rings, 5430 sizeof(struct bnxt_tx_ring_info), 5431 GFP_KERNEL); 5432 if (!bp->tx_ring) 5433 return -ENOMEM; 5434 5435 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), 5436 GFP_KERNEL); 5437 5438 if (!bp->tx_ring_map) 5439 return -ENOMEM; 5440 5441 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 5442 j = 0; 5443 else 5444 j = bp->rx_nr_rings; 5445 5446 for (i = 0; i < bp->tx_nr_rings; i++) { 5447 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 5448 struct bnxt_napi *bnapi2; 5449 5450 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5451 txr->tx_ring_struct.ring_mem.flags = 5452 BNXT_RMEM_RING_PTE_FLAG; 5453 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; 5454 if (i >= bp->tx_nr_rings_xdp) { 5455 int k = j + BNXT_RING_TO_TC_OFF(bp, i); 5456 5457 bnapi2 = bp->bnapi[k]; 5458 txr->txq_index = i - bp->tx_nr_rings_xdp; 5459 txr->tx_napi_idx = 5460 BNXT_RING_TO_TC(bp, txr->txq_index); 5461 bnapi2->tx_ring[txr->tx_napi_idx] = txr; 5462 bnapi2->tx_int = bnxt_tx_int; 5463 } else { 5464 bnapi2 = bp->bnapi[j]; 5465 bnapi2->flags |= BNXT_NAPI_FLAG_XDP; 5466 bnapi2->tx_ring[0] = txr; 5467 bnapi2->tx_int = bnxt_tx_int_xdp; 5468 j++; 5469 } 5470 txr->bnapi = bnapi2; 5471 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 5472 txr->tx_cpr = &bnapi2->cp_ring; 5473 } 5474 5475 rc = bnxt_alloc_stats(bp); 5476 if (rc) 5477 goto alloc_mem_err; 5478 bnxt_init_stats(bp); 5479 5480 rc = bnxt_alloc_ntp_fltrs(bp); 5481 if (rc) 5482 goto alloc_mem_err; 5483 5484 rc = bnxt_alloc_vnics(bp); 5485 if (rc) 5486 goto alloc_mem_err; 5487 } 5488 5489 rc = bnxt_alloc_all_cp_arrays(bp); 5490 if (rc) 5491 goto alloc_mem_err; 5492 5493 bnxt_init_ring_struct(bp); 5494 5495 rc = bnxt_alloc_rx_rings(bp); 5496 if (rc) 5497 goto alloc_mem_err; 5498 5499 rc = bnxt_alloc_tx_rings(bp); 5500 if (rc) 5501 goto alloc_mem_err; 5502 5503 rc = bnxt_alloc_cp_rings(bp); 5504 if (rc) 5505 goto alloc_mem_err; 5506 5507 bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | 5508 BNXT_VNIC_MCAST_FLAG | 5509 BNXT_VNIC_UCAST_FLAG; 5510 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) 5511 bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= 5512 BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; 5513 5514 rc = 
bnxt_alloc_vnic_attributes(bp); 5515 if (rc) 5516 goto alloc_mem_err; 5517 return 0; 5518 5519 alloc_mem_err: 5520 bnxt_free_mem(bp, true); 5521 return rc; 5522 } 5523 5524 static void bnxt_disable_int(struct bnxt *bp) 5525 { 5526 int i; 5527 5528 if (!bp->bnapi) 5529 return; 5530 5531 for (i = 0; i < bp->cp_nr_rings; i++) { 5532 struct bnxt_napi *bnapi = bp->bnapi[i]; 5533 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5534 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 5535 5536 if (ring->fw_ring_id != INVALID_HW_RING_ID) 5537 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 5538 } 5539 } 5540 5541 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) 5542 { 5543 struct bnxt_napi *bnapi = bp->bnapi[n]; 5544 struct bnxt_cp_ring_info *cpr; 5545 5546 cpr = &bnapi->cp_ring; 5547 return cpr->cp_ring_struct.map_idx; 5548 } 5549 5550 static void bnxt_disable_int_sync(struct bnxt *bp) 5551 { 5552 int i; 5553 5554 if (!bp->irq_tbl) 5555 return; 5556 5557 atomic_inc(&bp->intr_sem); 5558 5559 bnxt_disable_int(bp); 5560 for (i = 0; i < bp->cp_nr_rings; i++) { 5561 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 5562 5563 synchronize_irq(bp->irq_tbl[map_idx].vector); 5564 } 5565 } 5566 5567 static void bnxt_enable_int(struct bnxt *bp) 5568 { 5569 int i; 5570 5571 atomic_set(&bp->intr_sem, 0); 5572 for (i = 0; i < bp->cp_nr_rings; i++) { 5573 struct bnxt_napi *bnapi = bp->bnapi[i]; 5574 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5575 5576 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 5577 } 5578 } 5579 5580 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, 5581 bool async_only) 5582 { 5583 DECLARE_BITMAP(async_events_bmap, 256); 5584 u32 *events = (u32 *)async_events_bmap; 5585 struct hwrm_func_drv_rgtr_output *resp; 5586 struct hwrm_func_drv_rgtr_input *req; 5587 u32 flags; 5588 int rc, i; 5589 5590 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); 5591 if (rc) 5592 return rc; 5593 5594 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | 5595 FUNC_DRV_RGTR_REQ_ENABLES_VER | 5596 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5597 5598 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); 5599 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; 5600 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) 5601 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; 5602 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 5603 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | 5604 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; 5605 if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2) 5606 flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT; 5607 req->flags = cpu_to_le32(flags); 5608 req->ver_maj_8b = DRV_VER_MAJ; 5609 req->ver_min_8b = DRV_VER_MIN; 5610 req->ver_upd_8b = DRV_VER_UPD; 5611 req->ver_maj = cpu_to_le16(DRV_VER_MAJ); 5612 req->ver_min = cpu_to_le16(DRV_VER_MIN); 5613 req->ver_upd = cpu_to_le16(DRV_VER_UPD); 5614 5615 if (BNXT_PF(bp)) { 5616 u32 data[8]; 5617 int i; 5618 5619 memset(data, 0, sizeof(data)); 5620 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 5621 u16 cmd = bnxt_vf_req_snif[i]; 5622 unsigned int bit, idx; 5623 5624 idx = cmd / 32; 5625 bit = cmd % 32; 5626 data[idx] |= 1 << bit; 5627 } 5628 5629 for (i = 0; i < 8; i++) 5630 req->vf_req_fwd[i] = cpu_to_le32(data[i]); 5631 5632 req->enables |= 5633 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); 5634 } 5635 5636 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) 5637 req->flags |= cpu_to_le32( 5638 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); 5639 5640 memset(async_events_bmap, 0, 
sizeof(async_events_bmap)); 5641 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { 5642 u16 event_id = bnxt_async_events_arr[i]; 5643 5644 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && 5645 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 5646 continue; 5647 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && 5648 !bp->ptp_cfg) 5649 continue; 5650 __set_bit(bnxt_async_events_arr[i], async_events_bmap); 5651 } 5652 if (bmap && bmap_size) { 5653 for (i = 0; i < bmap_size; i++) { 5654 if (test_bit(i, bmap)) 5655 __set_bit(i, async_events_bmap); 5656 } 5657 } 5658 for (i = 0; i < 8; i++) 5659 req->async_event_fwd[i] |= cpu_to_le32(events[i]); 5660 5661 if (async_only) 5662 req->enables = 5663 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); 5664 5665 resp = hwrm_req_hold(bp, req); 5666 rc = hwrm_req_send(bp, req); 5667 if (!rc) { 5668 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); 5669 if (resp->flags & 5670 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) 5671 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; 5672 } 5673 hwrm_req_drop(bp, req); 5674 return rc; 5675 } 5676 5677 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) 5678 { 5679 struct hwrm_func_drv_unrgtr_input *req; 5680 int rc; 5681 5682 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) 5683 return 0; 5684 5685 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); 5686 if (rc) 5687 return rc; 5688 return hwrm_req_send(bp, req); 5689 } 5690 5691 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa); 5692 5693 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) 5694 { 5695 struct hwrm_tunnel_dst_port_free_input *req; 5696 int rc; 5697 5698 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN && 5699 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID) 5700 return 0; 5701 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE && 5702 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID) 5703 return 0; 5704 5705 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); 5706 if (rc) 5707 return rc; 5708 5709 req->tunnel_type = tunnel_type; 5710 5711 switch (tunnel_type) { 5712 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: 5713 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); 5714 bp->vxlan_port = 0; 5715 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 5716 break; 5717 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: 5718 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); 5719 bp->nge_port = 0; 5720 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 5721 break; 5722 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE: 5723 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); 5724 bp->vxlan_gpe_port = 0; 5725 bp->vxlan_gpe_fw_dst_port_id = INVALID_HW_RING_ID; 5726 break; 5727 default: 5728 break; 5729 } 5730 5731 rc = hwrm_req_send(bp, req); 5732 if (rc) 5733 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. 
rc:%d\n", 5734 rc); 5735 if (bp->flags & BNXT_FLAG_TPA) 5736 bnxt_set_tpa(bp, true); 5737 return rc; 5738 } 5739 5740 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, 5741 u8 tunnel_type) 5742 { 5743 struct hwrm_tunnel_dst_port_alloc_output *resp; 5744 struct hwrm_tunnel_dst_port_alloc_input *req; 5745 int rc; 5746 5747 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); 5748 if (rc) 5749 return rc; 5750 5751 req->tunnel_type = tunnel_type; 5752 req->tunnel_dst_port_val = port; 5753 5754 resp = hwrm_req_hold(bp, req); 5755 rc = hwrm_req_send(bp, req); 5756 if (rc) { 5757 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", 5758 rc); 5759 goto err_out; 5760 } 5761 5762 switch (tunnel_type) { 5763 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: 5764 bp->vxlan_port = port; 5765 bp->vxlan_fw_dst_port_id = 5766 le16_to_cpu(resp->tunnel_dst_port_id); 5767 break; 5768 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: 5769 bp->nge_port = port; 5770 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); 5771 break; 5772 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE: 5773 bp->vxlan_gpe_port = port; 5774 bp->vxlan_gpe_fw_dst_port_id = 5775 le16_to_cpu(resp->tunnel_dst_port_id); 5776 break; 5777 default: 5778 break; 5779 } 5780 if (bp->flags & BNXT_FLAG_TPA) 5781 bnxt_set_tpa(bp, true); 5782 5783 err_out: 5784 hwrm_req_drop(bp, req); 5785 return rc; 5786 } 5787 5788 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) 5789 { 5790 struct hwrm_cfa_l2_set_rx_mask_input *req; 5791 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 5792 int rc; 5793 5794 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); 5795 if (rc) 5796 return rc; 5797 5798 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 5799 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { 5800 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); 5801 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); 5802 } 5803 req->mask = cpu_to_le32(vnic->rx_mask); 5804 return hwrm_req_send_silent(bp, req); 5805 } 5806 5807 void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) 5808 { 5809 if (!atomic_dec_and_test(&fltr->refcnt)) 5810 return; 5811 spin_lock_bh(&bp->ntp_fltr_lock); 5812 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 5813 spin_unlock_bh(&bp->ntp_fltr_lock); 5814 return; 5815 } 5816 hlist_del_rcu(&fltr->base.hash); 5817 bnxt_del_one_usr_fltr(bp, &fltr->base); 5818 if (fltr->base.flags) { 5819 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 5820 bp->ntp_fltr_count--; 5821 } 5822 spin_unlock_bh(&bp->ntp_fltr_lock); 5823 kfree_rcu(fltr, base.rcu); 5824 } 5825 5826 static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, 5827 struct bnxt_l2_key *key, 5828 u32 idx) 5829 { 5830 struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; 5831 struct bnxt_l2_filter *fltr; 5832 5833 hlist_for_each_entry_rcu(fltr, head, base.hash) { 5834 struct bnxt_l2_key *l2_key = &fltr->l2_key; 5835 5836 if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && 5837 l2_key->vlan == key->vlan) 5838 return fltr; 5839 } 5840 return NULL; 5841 } 5842 5843 static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, 5844 struct bnxt_l2_key *key, 5845 u32 idx) 5846 { 5847 struct bnxt_l2_filter *fltr = NULL; 5848 5849 rcu_read_lock(); 5850 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 5851 if (fltr) 5852 atomic_inc(&fltr->refcnt); 5853 rcu_read_unlock(); 5854 return fltr; 5855 } 5856 5857 #define BNXT_IPV4_4TUPLE(bp, fkeys) \ 5858 
(((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5859 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \ 5860 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5861 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)) 5862 5863 #define BNXT_IPV6_4TUPLE(bp, fkeys) \ 5864 (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ 5865 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \ 5866 ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ 5867 (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)) 5868 5869 static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys) 5870 { 5871 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5872 if (BNXT_IPV4_4TUPLE(bp, fkeys)) 5873 return sizeof(fkeys->addrs.v4addrs) + 5874 sizeof(fkeys->ports); 5875 5876 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) 5877 return sizeof(fkeys->addrs.v4addrs); 5878 } 5879 5880 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 5881 if (BNXT_IPV6_4TUPLE(bp, fkeys)) 5882 return sizeof(fkeys->addrs.v6addrs) + 5883 sizeof(fkeys->ports); 5884 5885 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) 5886 return sizeof(fkeys->addrs.v6addrs); 5887 } 5888 5889 return 0; 5890 } 5891 5892 static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, 5893 const unsigned char *key) 5894 { 5895 u64 prefix = bp->toeplitz_prefix, hash = 0; 5896 struct bnxt_ipv4_tuple tuple4; 5897 struct bnxt_ipv6_tuple tuple6; 5898 int i, j, len = 0; 5899 u8 *four_tuple; 5900 5901 len = bnxt_get_rss_flow_tuple_len(bp, fkeys); 5902 if (!len) 5903 return 0; 5904 5905 if (fkeys->basic.n_proto == htons(ETH_P_IP)) { 5906 tuple4.v4addrs = fkeys->addrs.v4addrs; 5907 tuple4.ports = fkeys->ports; 5908 four_tuple = (unsigned char *)&tuple4; 5909 } else { 5910 tuple6.v6addrs = fkeys->addrs.v6addrs; 5911 tuple6.ports = fkeys->ports; 5912 four_tuple = (unsigned char *)&tuple6; 5913 } 5914 5915 for (i = 0, j = 8; i < len; i++, j++) { 5916 u8 byte = four_tuple[i]; 5917 int bit; 5918 5919 for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { 5920 if (byte & 0x80) 5921 hash ^= prefix; 5922 } 5923 prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; 5924 } 5925 5926 /* The valid part of the hash is in the upper 32 bits. 
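 * Conceptually, the loop above is a bit-serial Toeplitz hash: "prefix"
 * holds a sliding 64-bit window of the RSS key (seeded from
 * bp->toeplitz_prefix and refilled one byte at a time from key[j]),
 * and the window is XORed into "hash" for every '1' bit of the flow
 * tuple.  Shifting right by 32 below recovers the conventional 32-bit
 * Toeplitz value, which is then masked down to an ntuple hash index.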
*/ 5927 return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK; 5928 } 5929 5930 #ifdef CONFIG_RFS_ACCEL 5931 static struct bnxt_l2_filter * 5932 bnxt_lookup_l2_filter_from_key(struct bnxt *bp, struct bnxt_l2_key *key) 5933 { 5934 struct bnxt_l2_filter *fltr; 5935 u32 idx; 5936 5937 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5938 BNXT_L2_FLTR_HASH_MASK; 5939 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5940 return fltr; 5941 } 5942 #endif 5943 5944 static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, 5945 struct bnxt_l2_key *key, u32 idx) 5946 { 5947 struct hlist_head *head; 5948 5949 ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); 5950 fltr->l2_key.vlan = key->vlan; 5951 fltr->base.type = BNXT_FLTR_TYPE_L2; 5952 if (fltr->base.flags) { 5953 int bit_id; 5954 5955 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, 5956 bp->max_fltr, 0); 5957 if (bit_id < 0) 5958 return -ENOMEM; 5959 fltr->base.sw_id = (u16)bit_id; 5960 bp->ntp_fltr_count++; 5961 } 5962 head = &bp->l2_fltr_hash_tbl[idx]; 5963 hlist_add_head_rcu(&fltr->base.hash, head); 5964 bnxt_insert_usr_fltr(bp, &fltr->base); 5965 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 5966 atomic_set(&fltr->refcnt, 1); 5967 return 0; 5968 } 5969 5970 static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, 5971 struct bnxt_l2_key *key, 5972 gfp_t gfp) 5973 { 5974 struct bnxt_l2_filter *fltr; 5975 u32 idx; 5976 int rc; 5977 5978 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 5979 BNXT_L2_FLTR_HASH_MASK; 5980 fltr = bnxt_lookup_l2_filter(bp, key, idx); 5981 if (fltr) 5982 return fltr; 5983 5984 fltr = kzalloc(sizeof(*fltr), gfp); 5985 if (!fltr) 5986 return ERR_PTR(-ENOMEM); 5987 spin_lock_bh(&bp->ntp_fltr_lock); 5988 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 5989 spin_unlock_bh(&bp->ntp_fltr_lock); 5990 if (rc) { 5991 bnxt_del_l2_filter(bp, fltr); 5992 fltr = ERR_PTR(rc); 5993 } 5994 return fltr; 5995 } 5996 5997 struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, 5998 struct bnxt_l2_key *key, 5999 u16 flags) 6000 { 6001 struct bnxt_l2_filter *fltr; 6002 u32 idx; 6003 int rc; 6004 6005 idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & 6006 BNXT_L2_FLTR_HASH_MASK; 6007 spin_lock_bh(&bp->ntp_fltr_lock); 6008 fltr = __bnxt_lookup_l2_filter(bp, key, idx); 6009 if (fltr) { 6010 fltr = ERR_PTR(-EEXIST); 6011 goto l2_filter_exit; 6012 } 6013 fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); 6014 if (!fltr) { 6015 fltr = ERR_PTR(-ENOMEM); 6016 goto l2_filter_exit; 6017 } 6018 fltr->base.flags = flags; 6019 rc = bnxt_init_l2_filter(bp, fltr, key, idx); 6020 if (rc) { 6021 spin_unlock_bh(&bp->ntp_fltr_lock); 6022 bnxt_del_l2_filter(bp, fltr); 6023 return ERR_PTR(rc); 6024 } 6025 6026 l2_filter_exit: 6027 spin_unlock_bh(&bp->ntp_fltr_lock); 6028 return fltr; 6029 } 6030 6031 static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) 6032 { 6033 #ifdef CONFIG_BNXT_SRIOV 6034 struct bnxt_vf_info *vf = &pf->vf[vf_idx]; 6035 6036 return vf->fw_fid; 6037 #else 6038 return INVALID_HW_RING_ID; 6039 #endif 6040 } 6041 6042 int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) 6043 { 6044 struct hwrm_cfa_l2_filter_free_input *req; 6045 u16 target_id = 0xffff; 6046 int rc; 6047 6048 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 6049 struct bnxt_pf_info *pf = &bp->pf; 6050 6051 if (fltr->base.vf_idx >= pf->active_vfs) 6052 return -EINVAL; 6053 6054 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 6055 if (target_id == 
INVALID_HW_RING_ID) 6056 return -EINVAL; 6057 } 6058 6059 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); 6060 if (rc) 6061 return rc; 6062 6063 req->target_id = cpu_to_le16(target_id); 6064 req->l2_filter_id = fltr->base.filter_id; 6065 return hwrm_req_send(bp, req); 6066 } 6067 6068 int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) 6069 { 6070 struct hwrm_cfa_l2_filter_alloc_output *resp; 6071 struct hwrm_cfa_l2_filter_alloc_input *req; 6072 u16 target_id = 0xffff; 6073 int rc; 6074 6075 if (fltr->base.flags & BNXT_ACT_FUNC_DST) { 6076 struct bnxt_pf_info *pf = &bp->pf; 6077 6078 if (fltr->base.vf_idx >= pf->active_vfs) 6079 return -EINVAL; 6080 6081 target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); 6082 } 6083 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); 6084 if (rc) 6085 return rc; 6086 6087 req->target_id = cpu_to_le16(target_id); 6088 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); 6089 6090 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) 6091 req->flags |= 6092 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); 6093 req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); 6094 req->enables = 6095 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | 6096 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | 6097 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); 6098 ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); 6099 eth_broadcast_addr(req->l2_addr_mask); 6100 6101 if (fltr->l2_key.vlan) { 6102 req->enables |= 6103 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | 6104 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | 6105 CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); 6106 req->num_vlans = 1; 6107 req->l2_ivlan = cpu_to_le16(fltr->l2_key.vlan); 6108 req->l2_ivlan_mask = cpu_to_le16(0xfff); 6109 } 6110 6111 resp = hwrm_req_hold(bp, req); 6112 rc = hwrm_req_send(bp, req); 6113 if (!rc) { 6114 fltr->base.filter_id = resp->l2_filter_id; 6115 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 6116 } 6117 hwrm_req_drop(bp, req); 6118 return rc; 6119 } 6120 6121 int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, 6122 struct bnxt_ntuple_filter *fltr) 6123 { 6124 struct hwrm_cfa_ntuple_filter_free_input *req; 6125 int rc; 6126 6127 set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); 6128 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); 6129 if (rc) 6130 return rc; 6131 6132 req->ntuple_filter_id = fltr->base.filter_id; 6133 return hwrm_req_send(bp, req); 6134 } 6135 6136 #define BNXT_NTP_FLTR_FLAGS \ 6137 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ 6138 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ 6139 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ 6140 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ 6141 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ 6142 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ 6143 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ 6144 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ 6145 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ 6146 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ 6147 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ 6148 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ 6149 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) 6150 6151 #define BNXT_NTP_TUNNEL_FLTR_FLAG \ 6152 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 6153 6154 void bnxt_fill_ipv6_mask(__be32 mask[4]) 6155 { 6156 int i; 6157 6158 for (i = 0; i < 4; i++) 6159 mask[i] = cpu_to_be32(~0); 6160 } 6161 6162 static void 6163 bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, 6164 struct 
hwrm_cfa_ntuple_filter_alloc_input *req, 6165 struct bnxt_ntuple_filter *fltr) 6166 { 6167 u16 rxq = fltr->base.rxq; 6168 6169 if (fltr->base.flags & BNXT_ACT_RSS_CTX) { 6170 struct ethtool_rxfh_context *ctx; 6171 struct bnxt_rss_ctx *rss_ctx; 6172 struct bnxt_vnic_info *vnic; 6173 6174 ctx = xa_load(&bp->dev->ethtool->rss_ctx, 6175 fltr->base.fw_vnic_id); 6176 if (ctx) { 6177 rss_ctx = ethtool_rxfh_context_priv(ctx); 6178 vnic = &rss_ctx->vnic; 6179 6180 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6181 } 6182 return; 6183 } 6184 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 6185 struct bnxt_vnic_info *vnic; 6186 u32 enables; 6187 6188 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 6189 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6190 enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; 6191 req->enables |= cpu_to_le32(enables); 6192 req->rfs_ring_tbl_idx = cpu_to_le16(rxq); 6193 } else { 6194 u32 flags; 6195 6196 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; 6197 req->flags |= cpu_to_le32(flags); 6198 req->dst_id = cpu_to_le16(rxq); 6199 } 6200 } 6201 6202 int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, 6203 struct bnxt_ntuple_filter *fltr) 6204 { 6205 struct hwrm_cfa_ntuple_filter_alloc_output *resp; 6206 struct hwrm_cfa_ntuple_filter_alloc_input *req; 6207 struct bnxt_flow_masks *masks = &fltr->fmasks; 6208 struct flow_keys *keys = &fltr->fkeys; 6209 struct bnxt_l2_filter *l2_fltr; 6210 struct bnxt_vnic_info *vnic; 6211 int rc; 6212 6213 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); 6214 if (rc) 6215 return rc; 6216 6217 l2_fltr = fltr->l2_fltr; 6218 req->l2_filter_id = l2_fltr->base.filter_id; 6219 6220 if (fltr->base.flags & BNXT_ACT_DROP) { 6221 req->flags = 6222 cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); 6223 } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { 6224 bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); 6225 } else { 6226 vnic = &bp->vnic_info[fltr->base.rxq + 1]; 6227 req->dst_id = cpu_to_le16(vnic->fw_vnic_id); 6228 } 6229 req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); 6230 6231 req->ethertype = htons(ETH_P_IP); 6232 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; 6233 req->ip_protocol = keys->basic.ip_proto; 6234 6235 if (keys->basic.n_proto == htons(ETH_P_IPV6)) { 6236 req->ethertype = htons(ETH_P_IPV6); 6237 req->ip_addr_type = 6238 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; 6239 *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; 6240 *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; 6241 *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; 6242 *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; 6243 } else { 6244 req->src_ipaddr[0] = keys->addrs.v4addrs.src; 6245 req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; 6246 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; 6247 req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; 6248 } 6249 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { 6250 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); 6251 req->tunnel_type = 6252 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; 6253 } 6254 6255 req->src_port = keys->ports.src; 6256 req->src_port_mask = masks->ports.src; 6257 req->dst_port = keys->ports.dst; 6258 req->dst_port_mask = masks->ports.dst; 6259 6260 resp = hwrm_req_hold(bp, req); 6261 rc = hwrm_req_send(bp, req); 6262 if (!rc) 6263 fltr->base.filter_id = resp->ntuple_filter_id; 6264 hwrm_req_drop(bp, req); 6265 return rc; 6266 } 6267 6268 static int 
bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, 6269 const u8 *mac_addr) 6270 { 6271 struct bnxt_l2_filter *fltr; 6272 struct bnxt_l2_key key; 6273 int rc; 6274 6275 ether_addr_copy(key.dst_mac_addr, mac_addr); 6276 key.vlan = 0; 6277 fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); 6278 if (IS_ERR(fltr)) 6279 return PTR_ERR(fltr); 6280 6281 fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; 6282 rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); 6283 if (rc) 6284 bnxt_del_l2_filter(bp, fltr); 6285 else 6286 bp->vnic_info[vnic_id].l2_filters[idx] = fltr; 6287 return rc; 6288 } 6289 6290 static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) 6291 { 6292 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ 6293 6294 /* Any associated ntuple filters will also be cleared by firmware. */ 6295 for (i = 0; i < num_of_vnics; i++) { 6296 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6297 6298 for (j = 0; j < vnic->uc_filter_count; j++) { 6299 struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; 6300 6301 bnxt_hwrm_l2_filter_free(bp, fltr); 6302 bnxt_del_l2_filter(bp, fltr); 6303 } 6304 vnic->uc_filter_count = 0; 6305 } 6306 } 6307 6308 #define BNXT_DFLT_TUNL_TPA_BMAP \ 6309 (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \ 6310 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \ 6311 VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6) 6312 6313 static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, 6314 struct hwrm_vnic_tpa_cfg_input *req) 6315 { 6316 u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP; 6317 6318 if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) 6319 return; 6320 6321 if (bp->vxlan_port) 6322 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN; 6323 if (bp->vxlan_gpe_port) 6324 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE; 6325 if (bp->nge_port) 6326 tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE; 6327 6328 req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN); 6329 req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); 6330 } 6331 6332 int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6333 u32 tpa_flags) 6334 { 6335 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; 6336 struct hwrm_vnic_tpa_cfg_input *req; 6337 int rc; 6338 6339 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) 6340 return 0; 6341 6342 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); 6343 if (rc) 6344 return rc; 6345 6346 if (tpa_flags) { 6347 u16 mss = bp->dev->mtu - 40; 6348 u32 nsegs, n, segs = 0, flags; 6349 6350 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | 6351 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | 6352 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | 6353 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | 6354 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; 6355 if (tpa_flags & BNXT_FLAG_GRO) 6356 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; 6357 6358 req->flags = cpu_to_le32(flags); 6359 6360 req->enables = 6361 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | 6362 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | 6363 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); 6364 6365 /* Number of segs are log2 units, and first packet is not 6366 * included as part of this units. 
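 * Worked example, assuming BNXT_RX_PAGE_SIZE == 4096 and
 * MAX_SKB_FRAGS == 17 (both depend on the kernel configuration):
 * an MTU of 1500 gives mss = 1460, so n = 4096 / 1460 = 2 and
 * nsegs = (17 - 1) * 2 = 32, which is reported to older (pre-P5)
 * chips as ilog2(32) = 5 below.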
6367 */ 6368 if (mss <= BNXT_RX_PAGE_SIZE) { 6369 n = BNXT_RX_PAGE_SIZE / mss; 6370 nsegs = (MAX_SKB_FRAGS - 1) * n; 6371 } else { 6372 n = mss / BNXT_RX_PAGE_SIZE; 6373 if (mss & (BNXT_RX_PAGE_SIZE - 1)) 6374 n++; 6375 nsegs = (MAX_SKB_FRAGS - n) / n; 6376 } 6377 6378 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6379 segs = MAX_TPA_SEGS_P5; 6380 max_aggs = bp->max_tpa; 6381 } else { 6382 segs = ilog2(nsegs); 6383 } 6384 req->max_agg_segs = cpu_to_le16(segs); 6385 req->max_aggs = cpu_to_le16(max_aggs); 6386 6387 req->min_agg_len = cpu_to_le32(512); 6388 bnxt_hwrm_vnic_update_tunl_tpa(bp, req); 6389 } 6390 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6391 6392 return hwrm_req_send(bp, req); 6393 } 6394 6395 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) 6396 { 6397 struct bnxt_ring_grp_info *grp_info; 6398 6399 grp_info = &bp->grp_info[ring->grp_idx]; 6400 return grp_info->cp_fw_ring_id; 6401 } 6402 6403 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) 6404 { 6405 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6406 return rxr->rx_cpr->cp_ring_struct.fw_ring_id; 6407 else 6408 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); 6409 } 6410 6411 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) 6412 { 6413 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6414 return txr->tx_cpr->cp_ring_struct.fw_ring_id; 6415 else 6416 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); 6417 } 6418 6419 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) 6420 { 6421 int entries; 6422 6423 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6424 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; 6425 else 6426 entries = HW_HASH_INDEX_SIZE; 6427 6428 bp->rss_indir_tbl_entries = entries; 6429 bp->rss_indir_tbl = 6430 kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); 6431 if (!bp->rss_indir_tbl) 6432 return -ENOMEM; 6433 6434 return 0; 6435 } 6436 6437 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, 6438 struct ethtool_rxfh_context *rss_ctx) 6439 { 6440 u16 max_rings, max_entries, pad, i; 6441 u32 *rss_indir_tbl; 6442 6443 if (!bp->rx_nr_rings) 6444 return; 6445 6446 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6447 max_rings = bp->rx_nr_rings - 1; 6448 else 6449 max_rings = bp->rx_nr_rings; 6450 6451 max_entries = bnxt_get_rxfh_indir_size(bp->dev); 6452 if (rss_ctx) 6453 rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx); 6454 else 6455 rss_indir_tbl = &bp->rss_indir_tbl[0]; 6456 6457 for (i = 0; i < max_entries; i++) 6458 rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); 6459 6460 pad = bp->rss_indir_tbl_entries - max_entries; 6461 if (pad) 6462 memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); 6463 } 6464 6465 static u16 bnxt_get_max_rss_ring(struct bnxt *bp) 6466 { 6467 u32 i, tbl_size, max_ring = 0; 6468 6469 if (!bp->rss_indir_tbl) 6470 return 0; 6471 6472 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6473 for (i = 0; i < tbl_size; i++) 6474 max_ring = max(max_ring, bp->rss_indir_tbl[i]); 6475 return max_ring; 6476 } 6477 6478 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 6479 { 6480 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6481 if (!rx_rings) 6482 return 0; 6483 return bnxt_calc_nr_ring_pages(rx_rings - 1, 6484 BNXT_RSS_TABLE_ENTRIES_P5); 6485 } 6486 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 6487 return 2; 6488 return 1; 6489 } 6490 6491 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6492 { 6493 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); 6494 u16 i, j; 6495 6496 /* Fill the RSS 
indirection table with ring group ids */ 6497 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { 6498 if (!no_rss) 6499 j = bp->rss_indir_tbl[i]; 6500 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); 6501 } 6502 } 6503 6504 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, 6505 struct bnxt_vnic_info *vnic) 6506 { 6507 __le16 *ring_tbl = vnic->rss_table; 6508 struct bnxt_rx_ring_info *rxr; 6509 u16 tbl_size, i; 6510 6511 tbl_size = bnxt_get_rxfh_indir_size(bp->dev); 6512 6513 for (i = 0; i < tbl_size; i++) { 6514 u16 ring_id, j; 6515 6516 if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) 6517 j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); 6518 else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) 6519 j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; 6520 else 6521 j = bp->rss_indir_tbl[i]; 6522 rxr = &bp->rx_ring[j]; 6523 6524 ring_id = rxr->rx_ring_struct.fw_ring_id; 6525 *ring_tbl++ = cpu_to_le16(ring_id); 6526 ring_id = bnxt_cp_ring_for_rx(bp, rxr); 6527 *ring_tbl++ = cpu_to_le16(ring_id); 6528 } 6529 } 6530 6531 static void 6532 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, 6533 struct bnxt_vnic_info *vnic) 6534 { 6535 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6536 bnxt_fill_hw_rss_tbl_p5(bp, vnic); 6537 if (bp->flags & BNXT_FLAG_CHIP_P7) 6538 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT; 6539 } else { 6540 bnxt_fill_hw_rss_tbl(bp, vnic); 6541 } 6542 6543 if (bp->rss_hash_delta) { 6544 req->hash_type = cpu_to_le32(bp->rss_hash_delta); 6545 if (bp->rss_hash_cfg & bp->rss_hash_delta) 6546 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; 6547 else 6548 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; 6549 } else { 6550 req->hash_type = cpu_to_le32(bp->rss_hash_cfg); 6551 } 6552 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; 6553 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); 6554 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); 6555 } 6556 6557 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6558 bool set_rss) 6559 { 6560 struct hwrm_vnic_rss_cfg_input *req; 6561 int rc; 6562 6563 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || 6564 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) 6565 return 0; 6566 6567 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6568 if (rc) 6569 return rc; 6570 6571 if (set_rss) 6572 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6573 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6574 return hwrm_req_send(bp, req); 6575 } 6576 6577 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, 6578 struct bnxt_vnic_info *vnic, bool set_rss) 6579 { 6580 struct hwrm_vnic_rss_cfg_input *req; 6581 dma_addr_t ring_tbl_map; 6582 u32 i, nr_ctxs; 6583 int rc; 6584 6585 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); 6586 if (rc) 6587 return rc; 6588 6589 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6590 if (!set_rss) 6591 return hwrm_req_send(bp, req); 6592 6593 __bnxt_hwrm_vnic_set_rss(bp, req, vnic); 6594 ring_tbl_map = vnic->rss_table_dma_addr; 6595 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 6596 6597 hwrm_req_hold(bp, req); 6598 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { 6599 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); 6600 req->ring_table_pair_index = i; 6601 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); 6602 rc = hwrm_req_send(bp, req); 6603 if (rc) 6604 goto exit; 6605 } 6606 6607 exit: 6608 hwrm_req_drop(bp, req); 6609 return rc; 6610 } 6611 6612 static void 
bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) 6613 { 6614 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6615 struct hwrm_vnic_rss_qcfg_output *resp; 6616 struct hwrm_vnic_rss_qcfg_input *req; 6617 6618 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) 6619 return; 6620 6621 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6622 /* all contexts configured to same hash_type, zero always exists */ 6623 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6624 resp = hwrm_req_hold(bp, req); 6625 if (!hwrm_req_send(bp, req)) { 6626 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; 6627 bp->rss_hash_delta = 0; 6628 } 6629 hwrm_req_drop(bp, req); 6630 } 6631 6632 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6633 { 6634 u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh; 6635 struct hwrm_vnic_plcmodes_cfg_input *req; 6636 int rc; 6637 6638 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); 6639 if (rc) 6640 return rc; 6641 6642 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); 6643 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); 6644 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); 6645 6646 if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 6647 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | 6648 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); 6649 req->enables |= 6650 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); 6651 req->hds_threshold = cpu_to_le16(hds_thresh); 6652 } 6653 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6654 return hwrm_req_send(bp, req); 6655 } 6656 6657 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, 6658 struct bnxt_vnic_info *vnic, 6659 u16 ctx_idx) 6660 { 6661 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; 6662 6663 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) 6664 return; 6665 6666 req->rss_cos_lb_ctx_id = 6667 cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); 6668 6669 hwrm_req_send(bp, req); 6670 vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; 6671 } 6672 6673 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) 6674 { 6675 int i, j; 6676 6677 for (i = 0; i < bp->nr_vnics; i++) { 6678 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; 6679 6680 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { 6681 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) 6682 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); 6683 } 6684 } 6685 bp->rsscos_nr_ctxs = 0; 6686 } 6687 6688 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, 6689 struct bnxt_vnic_info *vnic, u16 ctx_idx) 6690 { 6691 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; 6692 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; 6693 int rc; 6694 6695 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); 6696 if (rc) 6697 return rc; 6698 6699 resp = hwrm_req_hold(bp, req); 6700 rc = hwrm_req_send(bp, req); 6701 if (!rc) 6702 vnic->fw_rss_cos_lb_ctx[ctx_idx] = 6703 le16_to_cpu(resp->rss_cos_lb_ctx_id); 6704 hwrm_req_drop(bp, req); 6705 6706 return rc; 6707 } 6708 6709 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) 6710 { 6711 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) 6712 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; 6713 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; 6714 } 6715 6716 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) 6717 { 6718 struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 6719 struct hwrm_vnic_cfg_input *req; 6720 unsigned int ring = 0, grp_idx; 
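/* def_vlan is non-zero only for VFs that have a VLAN configured in
 * bp->vf.vlan (typically assigned by the PF); see the CONFIG_BNXT_SRIOV
 * block below.  In that case VLAN stripping is enabled even if
 * BNXT_FLAG_STRIP_VLAN is clear.
 */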
6721 u16 def_vlan = 0; 6722 int rc; 6723 6724 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); 6725 if (rc) 6726 return rc; 6727 6728 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 6729 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; 6730 6731 req->default_rx_ring_id = 6732 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); 6733 req->default_cmpl_ring_id = 6734 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); 6735 req->enables = 6736 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | 6737 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); 6738 goto vnic_mru; 6739 } 6740 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); 6741 /* Only RSS support for now TBD: COS & LB */ 6742 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { 6743 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); 6744 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6745 VNIC_CFG_REQ_ENABLES_MRU); 6746 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { 6747 req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); 6748 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | 6749 VNIC_CFG_REQ_ENABLES_MRU); 6750 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); 6751 } else { 6752 req->rss_rule = cpu_to_le16(0xffff); 6753 } 6754 6755 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && 6756 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { 6757 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); 6758 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); 6759 } else { 6760 req->cos_rule = cpu_to_le16(0xffff); 6761 } 6762 6763 if (vnic->flags & BNXT_VNIC_RSS_FLAG) 6764 ring = 0; 6765 else if (vnic->flags & BNXT_VNIC_RFS_FLAG) 6766 ring = vnic->vnic_id - 1; 6767 else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) 6768 ring = bp->rx_nr_rings - 1; 6769 6770 grp_idx = bp->rx_ring[ring].bnapi->index; 6771 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); 6772 req->lb_rule = cpu_to_le16(0xffff); 6773 vnic_mru: 6774 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 6775 req->mru = cpu_to_le16(vnic->mru); 6776 6777 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); 6778 #ifdef CONFIG_BNXT_SRIOV 6779 if (BNXT_VF(bp)) 6780 def_vlan = bp->vf.vlan; 6781 #endif 6782 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 6783 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 6784 if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) 6785 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); 6786 6787 return hwrm_req_send(bp, req); 6788 } 6789 6790 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, 6791 struct bnxt_vnic_info *vnic) 6792 { 6793 if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { 6794 struct hwrm_vnic_free_input *req; 6795 6796 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) 6797 return; 6798 6799 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 6800 6801 hwrm_req_send(bp, req); 6802 vnic->fw_vnic_id = INVALID_HW_RING_ID; 6803 } 6804 } 6805 6806 static void bnxt_hwrm_vnic_free(struct bnxt *bp) 6807 { 6808 u16 i; 6809 6810 for (i = 0; i < bp->nr_vnics; i++) 6811 bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); 6812 } 6813 6814 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, 6815 unsigned int start_rx_ring_idx, 6816 unsigned int nr_rings) 6817 { 6818 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; 6819 struct hwrm_vnic_alloc_output *resp; 6820 struct hwrm_vnic_alloc_input *req; 6821 int rc; 6822 6823 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); 6824 if (rc) 6825 return rc; 6826 6827 if (bp->flags & 
BNXT_FLAG_CHIP_P5_PLUS) 6828 goto vnic_no_ring_grps; 6829 6830 /* map ring groups to this vnic */ 6831 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { 6832 grp_idx = bp->rx_ring[i].bnapi->index; 6833 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { 6834 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", 6835 j, nr_rings); 6836 break; 6837 } 6838 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; 6839 } 6840 6841 vnic_no_ring_grps: 6842 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) 6843 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; 6844 if (vnic->vnic_id == BNXT_VNIC_DEFAULT) 6845 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); 6846 6847 resp = hwrm_req_hold(bp, req); 6848 rc = hwrm_req_send(bp, req); 6849 if (!rc) 6850 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); 6851 hwrm_req_drop(bp, req); 6852 return rc; 6853 } 6854 6855 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) 6856 { 6857 struct hwrm_vnic_qcaps_output *resp; 6858 struct hwrm_vnic_qcaps_input *req; 6859 int rc; 6860 6861 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); 6862 bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; 6863 bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; 6864 if (bp->hwrm_spec_code < 0x10600) 6865 return 0; 6866 6867 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); 6868 if (rc) 6869 return rc; 6870 6871 resp = hwrm_req_hold(bp, req); 6872 rc = hwrm_req_send(bp, req); 6873 if (!rc) { 6874 u32 flags = le32_to_cpu(resp->flags); 6875 6876 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 6877 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) 6878 bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; 6879 if (flags & 6880 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) 6881 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; 6882 6883 /* Older P5 fw before EXT_HW_STATS support did not set 6884 * VLAN_STRIP_CAP properly. 
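 * The check below therefore also treats a P5 chip whose firmware lacks
 * BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED as supporting RX VLAN stripping,
 * so the feature is not lost on that older firmware.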
6885 */ 6886 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || 6887 (BNXT_CHIP_P5(bp) && 6888 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) 6889 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; 6890 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) 6891 bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; 6892 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) 6893 bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; 6894 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); 6895 if (bp->max_tpa_v2) { 6896 if (BNXT_CHIP_P5(bp)) 6897 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; 6898 else 6899 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; 6900 } 6901 if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) 6902 bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; 6903 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) 6904 bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; 6905 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) 6906 bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; 6907 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) 6908 bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; 6909 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) 6910 bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; 6911 if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP) 6912 bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH; 6913 } 6914 hwrm_req_drop(bp, req); 6915 return rc; 6916 } 6917 6918 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) 6919 { 6920 struct hwrm_ring_grp_alloc_output *resp; 6921 struct hwrm_ring_grp_alloc_input *req; 6922 int rc; 6923 u16 i; 6924 6925 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 6926 return 0; 6927 6928 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); 6929 if (rc) 6930 return rc; 6931 6932 resp = hwrm_req_hold(bp, req); 6933 for (i = 0; i < bp->rx_nr_rings; i++) { 6934 unsigned int grp_idx = bp->rx_ring[i].bnapi->index; 6935 6936 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); 6937 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); 6938 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); 6939 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); 6940 6941 rc = hwrm_req_send(bp, req); 6942 6943 if (rc) 6944 break; 6945 6946 bp->grp_info[grp_idx].fw_grp_id = 6947 le32_to_cpu(resp->ring_group_id); 6948 } 6949 hwrm_req_drop(bp, req); 6950 return rc; 6951 } 6952 6953 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) 6954 { 6955 struct hwrm_ring_grp_free_input *req; 6956 u16 i; 6957 6958 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 6959 return; 6960 6961 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) 6962 return; 6963 6964 hwrm_req_hold(bp, req); 6965 for (i = 0; i < bp->cp_nr_rings; i++) { 6966 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) 6967 continue; 6968 req->ring_group_id = 6969 cpu_to_le32(bp->grp_info[i].fw_grp_id); 6970 6971 hwrm_req_send(bp, req); 6972 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; 6973 } 6974 hwrm_req_drop(bp, req); 6975 } 6976 6977 static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type, 6978 struct hwrm_ring_alloc_input *req, 6979 struct bnxt_ring_struct *ring) 6980 { 6981 struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx]; 6982 u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID | 6983 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID; 6984 6985 if (ring_type == HWRM_RING_ALLOC_AGG) { 6986 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; 6987 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); 6988 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); 6989 
enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID; 6990 } else { 6991 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); 6992 if (NET_IP_ALIGN == 2) 6993 req->flags = 6994 cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD); 6995 } 6996 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 6997 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 6998 req->enables |= cpu_to_le32(enables); 6999 } 7000 7001 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, 7002 struct bnxt_ring_struct *ring, 7003 u32 ring_type, u32 map_index) 7004 { 7005 struct hwrm_ring_alloc_output *resp; 7006 struct hwrm_ring_alloc_input *req; 7007 struct bnxt_ring_mem_info *rmem = &ring->ring_mem; 7008 struct bnxt_ring_grp_info *grp_info; 7009 int rc, err = 0; 7010 u16 ring_id; 7011 7012 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); 7013 if (rc) 7014 goto exit; 7015 7016 req->enables = 0; 7017 if (rmem->nr_pages > 1) { 7018 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); 7019 /* Page size is in log2 units */ 7020 req->page_size = BNXT_PAGE_SHIFT; 7021 req->page_tbl_depth = 1; 7022 } else { 7023 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); 7024 } 7025 req->fbo = 0; 7026 /* Association of ring index with doorbell index and MSIX number */ 7027 req->logical_id = cpu_to_le16(map_index); 7028 7029 switch (ring_type) { 7030 case HWRM_RING_ALLOC_TX: { 7031 struct bnxt_tx_ring_info *txr; 7032 u16 flags = 0; 7033 7034 txr = container_of(ring, struct bnxt_tx_ring_info, 7035 tx_ring_struct); 7036 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; 7037 /* Association of transmit ring with completion ring */ 7038 grp_info = &bp->grp_info[ring->grp_idx]; 7039 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); 7040 req->length = cpu_to_le32(bp->tx_ring_mask + 1); 7041 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); 7042 req->queue_id = cpu_to_le16(ring->queue_id); 7043 if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) 7044 req->cmpl_coal_cnt = 7045 RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; 7046 if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg) 7047 flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE; 7048 req->flags = cpu_to_le16(flags); 7049 break; 7050 } 7051 case HWRM_RING_ALLOC_RX: 7052 case HWRM_RING_ALLOC_AGG: 7053 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; 7054 req->length = (ring_type == HWRM_RING_ALLOC_RX) ? 
7055 cpu_to_le32(bp->rx_ring_mask + 1) : 7056 cpu_to_le32(bp->rx_agg_ring_mask + 1); 7057 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7058 bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring); 7059 break; 7060 case HWRM_RING_ALLOC_CMPL: 7061 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 7062 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 7063 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7064 /* Association of cp ring with nq */ 7065 grp_info = &bp->grp_info[map_index]; 7066 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); 7067 req->cq_handle = cpu_to_le64(ring->handle); 7068 req->enables |= cpu_to_le32( 7069 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); 7070 } else { 7071 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 7072 } 7073 break; 7074 case HWRM_RING_ALLOC_NQ: 7075 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; 7076 req->length = cpu_to_le32(bp->cp_ring_mask + 1); 7077 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; 7078 break; 7079 default: 7080 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", 7081 ring_type); 7082 return -1; 7083 } 7084 7085 resp = hwrm_req_hold(bp, req); 7086 rc = hwrm_req_send(bp, req); 7087 err = le16_to_cpu(resp->error_code); 7088 ring_id = le16_to_cpu(resp->ring_id); 7089 hwrm_req_drop(bp, req); 7090 7091 exit: 7092 if (rc || err) { 7093 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", 7094 ring_type, rc, err); 7095 return -EIO; 7096 } 7097 ring->fw_ring_id = ring_id; 7098 return rc; 7099 } 7100 7101 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) 7102 { 7103 int rc; 7104 7105 if (BNXT_PF(bp)) { 7106 struct hwrm_func_cfg_input *req; 7107 7108 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 7109 if (rc) 7110 return rc; 7111 7112 req->fid = cpu_to_le16(0xffff); 7113 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 7114 req->async_event_cr = cpu_to_le16(idx); 7115 return hwrm_req_send(bp, req); 7116 } else { 7117 struct hwrm_func_vf_cfg_input *req; 7118 7119 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); 7120 if (rc) 7121 return rc; 7122 7123 req->enables = 7124 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); 7125 req->async_event_cr = cpu_to_le16(idx); 7126 return hwrm_req_send(bp, req); 7127 } 7128 } 7129 7130 static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, 7131 u32 ring_type) 7132 { 7133 switch (ring_type) { 7134 case HWRM_RING_ALLOC_TX: 7135 db->db_ring_mask = bp->tx_ring_mask; 7136 break; 7137 case HWRM_RING_ALLOC_RX: 7138 db->db_ring_mask = bp->rx_ring_mask; 7139 break; 7140 case HWRM_RING_ALLOC_AGG: 7141 db->db_ring_mask = bp->rx_agg_ring_mask; 7142 break; 7143 case HWRM_RING_ALLOC_CMPL: 7144 case HWRM_RING_ALLOC_NQ: 7145 db->db_ring_mask = bp->cp_ring_mask; 7146 break; 7147 } 7148 if (bp->flags & BNXT_FLAG_CHIP_P7) { 7149 db->db_epoch_mask = db->db_ring_mask + 1; 7150 db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); 7151 } 7152 } 7153 7154 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, 7155 u32 map_idx, u32 xid) 7156 { 7157 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7158 switch (ring_type) { 7159 case HWRM_RING_ALLOC_TX: 7160 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; 7161 break; 7162 case HWRM_RING_ALLOC_RX: 7163 case HWRM_RING_ALLOC_AGG: 7164 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; 7165 break; 7166 case HWRM_RING_ALLOC_CMPL: 7167 db->db_key64 = DBR_PATH_L2; 7168 break; 7169 case HWRM_RING_ALLOC_NQ: 7170 db->db_key64 = DBR_PATH_L2; 7171 break; 7172 } 7173 db->db_key64 |= (u64)xid << DBR_XID_SFT; 7174 7175 if 
(bp->flags & BNXT_FLAG_CHIP_P7) 7176 db->db_key64 |= DBR_VALID; 7177 7178 db->doorbell = bp->bar1 + bp->db_offset; 7179 } else { 7180 db->doorbell = bp->bar1 + map_idx * 0x80; 7181 switch (ring_type) { 7182 case HWRM_RING_ALLOC_TX: 7183 db->db_key32 = DB_KEY_TX; 7184 break; 7185 case HWRM_RING_ALLOC_RX: 7186 case HWRM_RING_ALLOC_AGG: 7187 db->db_key32 = DB_KEY_RX; 7188 break; 7189 case HWRM_RING_ALLOC_CMPL: 7190 db->db_key32 = DB_KEY_CP; 7191 break; 7192 } 7193 } 7194 bnxt_set_db_mask(bp, db, ring_type); 7195 } 7196 7197 static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, 7198 struct bnxt_rx_ring_info *rxr) 7199 { 7200 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 7201 struct bnxt_napi *bnapi = rxr->bnapi; 7202 u32 type = HWRM_RING_ALLOC_RX; 7203 u32 map_idx = bnapi->index; 7204 int rc; 7205 7206 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7207 if (rc) 7208 return rc; 7209 7210 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 7211 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 7212 7213 return 0; 7214 } 7215 7216 static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp, 7217 struct bnxt_rx_ring_info *rxr) 7218 { 7219 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 7220 u32 type = HWRM_RING_ALLOC_AGG; 7221 u32 grp_idx = ring->grp_idx; 7222 u32 map_idx; 7223 int rc; 7224 7225 map_idx = grp_idx + bp->rx_nr_rings; 7226 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7227 if (rc) 7228 return rc; 7229 7230 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 7231 ring->fw_ring_id); 7232 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 7233 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 7234 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 7235 7236 return 0; 7237 } 7238 7239 static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp, 7240 struct bnxt_cp_ring_info *cpr) 7241 { 7242 const u32 type = HWRM_RING_ALLOC_CMPL; 7243 struct bnxt_napi *bnapi = cpr->bnapi; 7244 struct bnxt_ring_struct *ring; 7245 u32 map_idx = bnapi->index; 7246 int rc; 7247 7248 ring = &cpr->cp_ring_struct; 7249 ring->handle = BNXT_SET_NQ_HDL(cpr); 7250 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7251 if (rc) 7252 return rc; 7253 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 7254 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); 7255 return 0; 7256 } 7257 7258 static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp, 7259 struct bnxt_tx_ring_info *txr, u32 tx_idx) 7260 { 7261 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 7262 const u32 type = HWRM_RING_ALLOC_TX; 7263 int rc; 7264 7265 rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx); 7266 if (rc) 7267 return rc; 7268 bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id); 7269 return 0; 7270 } 7271 7272 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 7273 { 7274 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); 7275 int i, rc = 0; 7276 u32 type; 7277 7278 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7279 type = HWRM_RING_ALLOC_NQ; 7280 else 7281 type = HWRM_RING_ALLOC_CMPL; 7282 for (i = 0; i < bp->cp_nr_rings; i++) { 7283 struct bnxt_napi *bnapi = bp->bnapi[i]; 7284 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7285 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 7286 u32 map_idx = ring->map_idx; 7287 unsigned int vector; 7288 7289 vector = bp->irq_tbl[map_idx].vector; 7290 disable_irq_nosync(vector); 7291 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 7292 if (rc) { 7293 enable_irq(vector); 7294 goto err_out; 7295 } 7296 bnxt_set_db(bp, &cpr->cp_db, type, 
map_idx, ring->fw_ring_id); 7297 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 7298 enable_irq(vector); 7299 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 7300 7301 if (!i) { 7302 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); 7303 if (rc) 7304 netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); 7305 } 7306 } 7307 7308 for (i = 0; i < bp->tx_nr_rings; i++) { 7309 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 7310 7311 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7312 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); 7313 if (rc) 7314 goto err_out; 7315 } 7316 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i); 7317 if (rc) 7318 goto err_out; 7319 } 7320 7321 for (i = 0; i < bp->rx_nr_rings; i++) { 7322 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7323 7324 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); 7325 if (rc) 7326 goto err_out; 7327 /* If we have agg rings, post agg buffers first. */ 7328 if (!agg_rings) 7329 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 7330 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7331 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); 7332 if (rc) 7333 goto err_out; 7334 } 7335 } 7336 7337 if (agg_rings) { 7338 for (i = 0; i < bp->rx_nr_rings; i++) { 7339 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]); 7340 if (rc) 7341 goto err_out; 7342 } 7343 } 7344 err_out: 7345 return rc; 7346 } 7347 7348 static void bnxt_cancel_dim(struct bnxt *bp) 7349 { 7350 int i; 7351 7352 /* DIM work is initialized in bnxt_enable_napi(). Proceed only 7353 * if NAPI is enabled. 7354 */ 7355 if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 7356 return; 7357 7358 /* Make sure NAPI sees that the VNIC is disabled */ 7359 synchronize_net(); 7360 for (i = 0; i < bp->rx_nr_rings; i++) { 7361 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 7362 struct bnxt_napi *bnapi = rxr->bnapi; 7363 7364 cancel_work_sync(&bnapi->cp_ring.dim.work); 7365 } 7366 } 7367 7368 static int hwrm_ring_free_send_msg(struct bnxt *bp, 7369 struct bnxt_ring_struct *ring, 7370 u32 ring_type, int cmpl_ring_id) 7371 { 7372 struct hwrm_ring_free_output *resp; 7373 struct hwrm_ring_free_input *req; 7374 u16 error_code = 0; 7375 int rc; 7376 7377 if (BNXT_NO_FW_ACCESS(bp)) 7378 return 0; 7379 7380 rc = hwrm_req_init(bp, req, HWRM_RING_FREE); 7381 if (rc) 7382 goto exit; 7383 7384 req->cmpl_ring = cpu_to_le16(cmpl_ring_id); 7385 req->ring_type = ring_type; 7386 req->ring_id = cpu_to_le16(ring->fw_ring_id); 7387 7388 resp = hwrm_req_hold(bp, req); 7389 rc = hwrm_req_send(bp, req); 7390 error_code = le16_to_cpu(resp->error_code); 7391 hwrm_req_drop(bp, req); 7392 exit: 7393 if (rc || error_code) { 7394 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", 7395 ring_type, rc, error_code); 7396 return -EIO; 7397 } 7398 return 0; 7399 } 7400 7401 static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, 7402 struct bnxt_tx_ring_info *txr, 7403 bool close_path) 7404 { 7405 struct bnxt_ring_struct *ring = &txr->tx_ring_struct; 7406 u32 cmpl_ring_id; 7407 7408 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7409 return; 7410 7411 cmpl_ring_id = close_path ? 
bnxt_cp_ring_for_tx(bp, txr) : 7412 INVALID_HW_RING_ID; 7413 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, 7414 cmpl_ring_id); 7415 ring->fw_ring_id = INVALID_HW_RING_ID; 7416 } 7417 7418 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp, 7419 struct bnxt_rx_ring_info *rxr, 7420 bool close_path) 7421 { 7422 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; 7423 u32 grp_idx = rxr->bnapi->index; 7424 u32 cmpl_ring_id; 7425 7426 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7427 return; 7428 7429 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7430 hwrm_ring_free_send_msg(bp, ring, 7431 RING_FREE_REQ_RING_TYPE_RX, 7432 close_path ? cmpl_ring_id : 7433 INVALID_HW_RING_ID); 7434 ring->fw_ring_id = INVALID_HW_RING_ID; 7435 bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; 7436 } 7437 7438 static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp, 7439 struct bnxt_rx_ring_info *rxr, 7440 bool close_path) 7441 { 7442 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; 7443 u32 grp_idx = rxr->bnapi->index; 7444 u32 type, cmpl_ring_id; 7445 7446 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7447 type = RING_FREE_REQ_RING_TYPE_RX_AGG; 7448 else 7449 type = RING_FREE_REQ_RING_TYPE_RX; 7450 7451 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7452 return; 7453 7454 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); 7455 hwrm_ring_free_send_msg(bp, ring, type, 7456 close_path ? cmpl_ring_id : 7457 INVALID_HW_RING_ID); 7458 ring->fw_ring_id = INVALID_HW_RING_ID; 7459 bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; 7460 } 7461 7462 static void bnxt_hwrm_cp_ring_free(struct bnxt *bp, 7463 struct bnxt_cp_ring_info *cpr) 7464 { 7465 struct bnxt_ring_struct *ring; 7466 7467 ring = &cpr->cp_ring_struct; 7468 if (ring->fw_ring_id == INVALID_HW_RING_ID) 7469 return; 7470 7471 hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL, 7472 INVALID_HW_RING_ID); 7473 ring->fw_ring_id = INVALID_HW_RING_ID; 7474 } 7475 7476 static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) 7477 { 7478 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 7479 int i, size = ring->ring_mem.page_size; 7480 7481 cpr->cp_raw_cons = 0; 7482 cpr->toggle = 0; 7483 7484 for (i = 0; i < bp->cp_nr_pages; i++) 7485 if (cpr->cp_desc_ring[i]) 7486 memset(cpr->cp_desc_ring[i], 0, size); 7487 } 7488 7489 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) 7490 { 7491 u32 type; 7492 int i; 7493 7494 if (!bp->bnapi) 7495 return; 7496 7497 for (i = 0; i < bp->tx_nr_rings; i++) 7498 bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path); 7499 7500 bnxt_cancel_dim(bp); 7501 for (i = 0; i < bp->rx_nr_rings; i++) { 7502 bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); 7503 bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path); 7504 } 7505 7506 /* The completion rings are about to be freed. After that the 7507 * IRQ doorbell will not work anymore. So we need to disable 7508 * IRQ here. 
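 * bnxt_disable_int_sync() below stops further interrupt generation and
 * synchronizes with any handlers that are still running before the
 * completion/NQ rings are returned to firmware.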
7509 */ 7510 bnxt_disable_int_sync(bp); 7511 7512 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7513 type = RING_FREE_REQ_RING_TYPE_NQ; 7514 else 7515 type = RING_FREE_REQ_RING_TYPE_L2_CMPL; 7516 for (i = 0; i < bp->cp_nr_rings; i++) { 7517 struct bnxt_napi *bnapi = bp->bnapi[i]; 7518 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 7519 struct bnxt_ring_struct *ring; 7520 int j; 7521 7522 for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) 7523 bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]); 7524 7525 ring = &cpr->cp_ring_struct; 7526 if (ring->fw_ring_id != INVALID_HW_RING_ID) { 7527 hwrm_ring_free_send_msg(bp, ring, type, 7528 INVALID_HW_RING_ID); 7529 ring->fw_ring_id = INVALID_HW_RING_ID; 7530 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; 7531 } 7532 } 7533 } 7534 7535 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7536 bool shared); 7537 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 7538 bool shared); 7539 7540 static int bnxt_hwrm_get_rings(struct bnxt *bp) 7541 { 7542 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7543 struct hwrm_func_qcfg_output *resp; 7544 struct hwrm_func_qcfg_input *req; 7545 int rc; 7546 7547 if (bp->hwrm_spec_code < 0x10601) 7548 return 0; 7549 7550 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7551 if (rc) 7552 return rc; 7553 7554 req->fid = cpu_to_le16(0xffff); 7555 resp = hwrm_req_hold(bp, req); 7556 rc = hwrm_req_send(bp, req); 7557 if (rc) { 7558 hwrm_req_drop(bp, req); 7559 return rc; 7560 } 7561 7562 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7563 if (BNXT_NEW_RM(bp)) { 7564 u16 cp, stats; 7565 7566 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); 7567 hw_resc->resv_hw_ring_grps = 7568 le32_to_cpu(resp->alloc_hw_ring_grps); 7569 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); 7570 hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); 7571 cp = le16_to_cpu(resp->alloc_cmpl_rings); 7572 stats = le16_to_cpu(resp->alloc_stat_ctx); 7573 hw_resc->resv_irqs = cp; 7574 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7575 int rx = hw_resc->resv_rx_rings; 7576 int tx = hw_resc->resv_tx_rings; 7577 7578 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7579 rx >>= 1; 7580 if (cp < (rx + tx)) { 7581 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); 7582 if (rc) 7583 goto get_rings_exit; 7584 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7585 rx <<= 1; 7586 hw_resc->resv_rx_rings = rx; 7587 hw_resc->resv_tx_rings = tx; 7588 } 7589 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); 7590 hw_resc->resv_hw_ring_grps = rx; 7591 } 7592 hw_resc->resv_cp_rings = cp; 7593 hw_resc->resv_stat_ctxs = stats; 7594 } 7595 get_rings_exit: 7596 hwrm_req_drop(bp, req); 7597 return rc; 7598 } 7599 7600 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) 7601 { 7602 struct hwrm_func_qcfg_output *resp; 7603 struct hwrm_func_qcfg_input *req; 7604 int rc; 7605 7606 if (bp->hwrm_spec_code < 0x10601) 7607 return 0; 7608 7609 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 7610 if (rc) 7611 return rc; 7612 7613 req->fid = cpu_to_le16(fid); 7614 resp = hwrm_req_hold(bp, req); 7615 rc = hwrm_req_send(bp, req); 7616 if (!rc) 7617 *tx_rings = le16_to_cpu(resp->alloc_tx_rings); 7618 7619 hwrm_req_drop(bp, req); 7620 return rc; 7621 } 7622 7623 static bool bnxt_rfs_supported(struct bnxt *bp); 7624 7625 static struct hwrm_func_cfg_input * 7626 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7627 { 7628 struct hwrm_func_cfg_input *req; 7629 u32 enables = 0; 7630 7631 if 
(bnxt_hwrm_func_cfg_short_req_init(bp, &req)) 7632 return NULL; 7633 7634 req->fid = cpu_to_le16(0xffff); 7635 enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7636 req->num_tx_rings = cpu_to_le16(hwr->tx); 7637 if (BNXT_NEW_RM(bp)) { 7638 enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 7639 enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7640 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7641 enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; 7642 enables |= hwr->cp_p5 ? 7643 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7644 } else { 7645 enables |= hwr->cp ? 7646 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7647 enables |= hwr->grp ? 7648 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7649 } 7650 enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; 7651 enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 7652 0; 7653 req->num_rx_rings = cpu_to_le16(hwr->rx); 7654 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7655 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7656 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7657 req->num_msix = cpu_to_le16(hwr->cp); 7658 } else { 7659 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7660 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7661 } 7662 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7663 req->num_vnics = cpu_to_le16(hwr->vnic); 7664 } 7665 req->enables = cpu_to_le32(enables); 7666 return req; 7667 } 7668 7669 static struct hwrm_func_vf_cfg_input * 7670 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7671 { 7672 struct hwrm_func_vf_cfg_input *req; 7673 u32 enables = 0; 7674 7675 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) 7676 return NULL; 7677 7678 enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 7679 enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | 7680 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7681 enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; 7682 enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; 7683 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7684 enables |= hwr->cp_p5 ? 7685 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7686 } else { 7687 enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; 7688 enables |= hwr->grp ? 7689 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 7690 } 7691 enables |= hwr->vnic ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 7692 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; 7693 7694 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); 7695 req->num_tx_rings = cpu_to_le16(hwr->tx); 7696 req->num_rx_rings = cpu_to_le16(hwr->rx); 7697 req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); 7698 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7699 req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); 7700 } else { 7701 req->num_cmpl_rings = cpu_to_le16(hwr->cp); 7702 req->num_hw_ring_grps = cpu_to_le16(hwr->grp); 7703 } 7704 req->num_stat_ctxs = cpu_to_le16(hwr->stat); 7705 req->num_vnics = cpu_to_le16(hwr->vnic); 7706 7707 req->enables = cpu_to_le32(enables); 7708 return req; 7709 } 7710 7711 static int 7712 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7713 { 7714 struct hwrm_func_cfg_input *req; 7715 int rc; 7716 7717 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 7718 if (!req) 7719 return -ENOMEM; 7720 7721 if (!req->enables) { 7722 hwrm_req_drop(bp, req); 7723 return 0; 7724 } 7725 7726 rc = hwrm_req_send(bp, req); 7727 if (rc) 7728 return rc; 7729 7730 if (bp->hwrm_spec_code < 0x10601) 7731 bp->hw_resc.resv_tx_rings = hwr->tx; 7732 7733 return bnxt_hwrm_get_rings(bp); 7734 } 7735 7736 static int 7737 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7738 { 7739 struct hwrm_func_vf_cfg_input *req; 7740 int rc; 7741 7742 if (!BNXT_NEW_RM(bp)) { 7743 bp->hw_resc.resv_tx_rings = hwr->tx; 7744 return 0; 7745 } 7746 7747 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 7748 if (!req) 7749 return -ENOMEM; 7750 7751 rc = hwrm_req_send(bp, req); 7752 if (rc) 7753 return rc; 7754 7755 return bnxt_hwrm_get_rings(bp); 7756 } 7757 7758 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7759 { 7760 if (BNXT_PF(bp)) 7761 return bnxt_hwrm_reserve_pf_rings(bp, hwr); 7762 else 7763 return bnxt_hwrm_reserve_vf_rings(bp, hwr); 7764 } 7765 7766 int bnxt_nq_rings_in_use(struct bnxt *bp) 7767 { 7768 return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); 7769 } 7770 7771 static int bnxt_cp_rings_in_use(struct bnxt *bp) 7772 { 7773 int cp; 7774 7775 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7776 return bnxt_nq_rings_in_use(bp); 7777 7778 cp = bp->tx_nr_rings + bp->rx_nr_rings; 7779 return cp; 7780 } 7781 7782 static int bnxt_get_func_stat_ctxs(struct bnxt *bp) 7783 { 7784 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); 7785 } 7786 7787 static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7788 { 7789 if (!hwr->grp) 7790 return 0; 7791 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7792 int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); 7793 7794 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7795 rss_ctx *= hwr->vnic; 7796 return rss_ctx; 7797 } 7798 if (BNXT_VF(bp)) 7799 return BNXT_VF_MAX_RSS_CTX; 7800 if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) 7801 return hwr->grp + 1; 7802 return 1; 7803 } 7804 7805 /* Check if a default RSS map needs to be setup. This function is only 7806 * used on older firmware that does not require reserving RX rings. 
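 *
 * The default RSS indirection table is refreshed here only when the RX
 * ring count has changed and the user has not installed a custom table
 * through ethtool (netif_is_rxfh_configured()).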
7807 */ 7808 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) 7809 { 7810 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7811 7812 /* The RSS map is valid for RX rings set to resv_rx_rings */ 7813 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { 7814 hw_resc->resv_rx_rings = bp->rx_nr_rings; 7815 if (!netif_is_rxfh_configured(bp->dev)) 7816 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7817 } 7818 } 7819 7820 static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) 7821 { 7822 if (bp->flags & BNXT_FLAG_RFS) { 7823 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 7824 return 2 + bp->num_rss_ctx; 7825 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 7826 return rx_rings + 1; 7827 } 7828 return 1; 7829 } 7830 7831 static bool bnxt_need_reserve_rings(struct bnxt *bp) 7832 { 7833 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7834 int cp = bnxt_cp_rings_in_use(bp); 7835 int nq = bnxt_nq_rings_in_use(bp); 7836 int rx = bp->rx_nr_rings, stat; 7837 int vnic, grp = rx; 7838 7839 /* Old firmware does not need RX ring reservations but we still 7840 * need to setup a default RSS map when needed. With new firmware 7841 * we go through RX ring reservations first and then set up the 7842 * RSS map for the successfully reserved RX rings when needed. 7843 */ 7844 if (!BNXT_NEW_RM(bp)) 7845 bnxt_check_rss_tbl_no_rmgr(bp); 7846 7847 if (hw_resc->resv_tx_rings != bp->tx_nr_rings && 7848 bp->hwrm_spec_code >= 0x10601) 7849 return true; 7850 7851 if (!BNXT_NEW_RM(bp)) 7852 return false; 7853 7854 vnic = bnxt_get_total_vnics(bp, rx); 7855 7856 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7857 rx <<= 1; 7858 stat = bnxt_get_func_stat_ctxs(bp); 7859 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || 7860 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || 7861 (hw_resc->resv_hw_ring_grps != grp && 7862 !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) 7863 return true; 7864 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && 7865 hw_resc->resv_irqs != nq) 7866 return true; 7867 return false; 7868 } 7869 7870 static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7871 { 7872 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 7873 7874 hwr->tx = hw_resc->resv_tx_rings; 7875 if (BNXT_NEW_RM(bp)) { 7876 hwr->rx = hw_resc->resv_rx_rings; 7877 hwr->cp = hw_resc->resv_irqs; 7878 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7879 hwr->cp_p5 = hw_resc->resv_cp_rings; 7880 hwr->grp = hw_resc->resv_hw_ring_grps; 7881 hwr->vnic = hw_resc->resv_vnics; 7882 hwr->stat = hw_resc->resv_stat_ctxs; 7883 hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; 7884 } 7885 } 7886 7887 static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) 7888 { 7889 return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && 7890 hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); 7891 } 7892 7893 static int bnxt_get_avail_msix(struct bnxt *bp, int num); 7894 7895 static int __bnxt_reserve_rings(struct bnxt *bp) 7896 { 7897 struct bnxt_hw_rings hwr = {0}; 7898 int rx_rings, old_rx_rings, rc; 7899 int cp = bp->cp_nr_rings; 7900 int ulp_msix = 0; 7901 bool sh = false; 7902 int tx_cp; 7903 7904 if (!bnxt_need_reserve_rings(bp)) 7905 return 0; 7906 7907 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 7908 ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 7909 if (!ulp_msix) 7910 bnxt_set_ulp_stat_ctxs(bp, 0); 7911 7912 if (ulp_msix > bp->ulp_num_msix_want) 7913 ulp_msix = bp->ulp_num_msix_want; 7914 hwr.cp = cp + ulp_msix; 7915 } else { 7916 hwr.cp = bnxt_nq_rings_in_use(bp); 7917 } 7918 7919 hwr.tx 
= bp->tx_nr_rings; 7920 hwr.rx = bp->rx_nr_rings; 7921 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 7922 sh = true; 7923 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 7924 hwr.cp_p5 = hwr.rx + hwr.tx; 7925 7926 hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); 7927 7928 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7929 hwr.rx <<= 1; 7930 hwr.grp = bp->rx_nr_rings; 7931 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 7932 hwr.stat = bnxt_get_func_stat_ctxs(bp); 7933 old_rx_rings = bp->hw_resc.resv_rx_rings; 7934 7935 rc = bnxt_hwrm_reserve_rings(bp, &hwr); 7936 if (rc) 7937 return rc; 7938 7939 bnxt_copy_reserved_rings(bp, &hwr); 7940 7941 rx_rings = hwr.rx; 7942 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 7943 if (hwr.rx >= 2) { 7944 rx_rings = hwr.rx >> 1; 7945 } else { 7946 if (netif_running(bp->dev)) 7947 return -ENOMEM; 7948 7949 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 7950 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 7951 bp->dev->hw_features &= ~NETIF_F_LRO; 7952 bp->dev->features &= ~NETIF_F_LRO; 7953 bnxt_set_ring_params(bp); 7954 } 7955 } 7956 rx_rings = min_t(int, rx_rings, hwr.grp); 7957 hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); 7958 if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) 7959 hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); 7960 hwr.cp = min_t(int, hwr.cp, hwr.stat); 7961 rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); 7962 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7963 hwr.rx = rx_rings << 1; 7964 tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); 7965 hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; 7966 bp->tx_nr_rings = hwr.tx; 7967 7968 /* If we cannot reserve all the RX rings, reset the RSS map only 7969 * if absolutely necessary 7970 */ 7971 if (rx_rings != bp->rx_nr_rings) { 7972 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", 7973 rx_rings, bp->rx_nr_rings); 7974 if (netif_is_rxfh_configured(bp->dev) && 7975 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != 7976 bnxt_get_nr_rss_ctxs(bp, rx_rings) || 7977 bnxt_get_max_rss_ring(bp) >= rx_rings)) { 7978 netdev_warn(bp->dev, "RSS table entries reverting to default\n"); 7979 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; 7980 } 7981 } 7982 bp->rx_nr_rings = rx_rings; 7983 bp->cp_nr_rings = hwr.cp; 7984 7985 if (!bnxt_rings_ok(bp, &hwr)) 7986 return -ENOMEM; 7987 7988 if (old_rx_rings != bp->hw_resc.resv_rx_rings && 7989 !netif_is_rxfh_configured(bp->dev)) 7990 bnxt_set_dflt_rss_indir_tbl(bp, NULL); 7991 7992 if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { 7993 int resv_msix, resv_ctx, ulp_ctxs; 7994 struct bnxt_hw_resc *hw_resc; 7995 7996 hw_resc = &bp->hw_resc; 7997 resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; 7998 ulp_msix = min_t(int, resv_msix, ulp_msix); 7999 bnxt_set_ulp_msix_num(bp, ulp_msix); 8000 resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; 8001 ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); 8002 bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); 8003 } 8004 8005 return rc; 8006 } 8007 8008 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8009 { 8010 struct hwrm_func_vf_cfg_input *req; 8011 u32 flags; 8012 8013 if (!BNXT_NEW_RM(bp)) 8014 return 0; 8015 8016 req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); 8017 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 8018 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 8019 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 8020 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 8021 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | 8022 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 8023 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8024 flags |= 
FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 8025 8026 req->flags = cpu_to_le32(flags); 8027 return hwrm_req_send_silent(bp, req); 8028 } 8029 8030 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8031 { 8032 struct hwrm_func_cfg_input *req; 8033 u32 flags; 8034 8035 req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); 8036 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 8037 if (BNXT_NEW_RM(bp)) { 8038 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 8039 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 8040 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 8041 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 8042 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 8043 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | 8044 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; 8045 else 8046 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 8047 } 8048 8049 req->flags = cpu_to_le32(flags); 8050 return hwrm_req_send_silent(bp, req); 8051 } 8052 8053 static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) 8054 { 8055 if (bp->hwrm_spec_code < 0x10801) 8056 return 0; 8057 8058 if (BNXT_PF(bp)) 8059 return bnxt_hwrm_check_pf_rings(bp, hwr); 8060 8061 return bnxt_hwrm_check_vf_rings(bp, hwr); 8062 } 8063 8064 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) 8065 { 8066 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8067 struct hwrm_ring_aggint_qcaps_output *resp; 8068 struct hwrm_ring_aggint_qcaps_input *req; 8069 int rc; 8070 8071 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; 8072 coal_cap->num_cmpl_dma_aggr_max = 63; 8073 coal_cap->num_cmpl_dma_aggr_during_int_max = 63; 8074 coal_cap->cmpl_aggr_dma_tmr_max = 65535; 8075 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; 8076 coal_cap->int_lat_tmr_min_max = 65535; 8077 coal_cap->int_lat_tmr_max_max = 65535; 8078 coal_cap->num_cmpl_aggr_int_max = 65535; 8079 coal_cap->timer_units = 80; 8080 8081 if (bp->hwrm_spec_code < 0x10902) 8082 return; 8083 8084 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) 8085 return; 8086 8087 resp = hwrm_req_hold(bp, req); 8088 rc = hwrm_req_send_silent(bp, req); 8089 if (!rc) { 8090 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); 8091 coal_cap->nq_params = le32_to_cpu(resp->nq_params); 8092 coal_cap->num_cmpl_dma_aggr_max = 8093 le16_to_cpu(resp->num_cmpl_dma_aggr_max); 8094 coal_cap->num_cmpl_dma_aggr_during_int_max = 8095 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); 8096 coal_cap->cmpl_aggr_dma_tmr_max = 8097 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); 8098 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 8099 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); 8100 coal_cap->int_lat_tmr_min_max = 8101 le16_to_cpu(resp->int_lat_tmr_min_max); 8102 coal_cap->int_lat_tmr_max_max = 8103 le16_to_cpu(resp->int_lat_tmr_max_max); 8104 coal_cap->num_cmpl_aggr_int_max = 8105 le16_to_cpu(resp->num_cmpl_aggr_int_max); 8106 coal_cap->timer_units = le16_to_cpu(resp->timer_units); 8107 } 8108 hwrm_req_drop(bp, req); 8109 } 8110 8111 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) 8112 { 8113 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8114 8115 return usec * 1000 / coal_cap->timer_units; 8116 } 8117 8118 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, 8119 struct bnxt_coal *hw_coal, 8120 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8121 { 8122 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8123 u16 val, tmr, max, flags = hw_coal->flags; 8124 u32 cmpl_params = coal_cap->cmpl_params; 8125 8126 max = hw_coal->bufs_per_record * 128; 8127 if (hw_coal->budget) 8128 max = 
hw_coal->bufs_per_record * hw_coal->budget; 8129 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); 8130 8131 val = clamp_t(u16, hw_coal->coal_bufs, 1, max); 8132 req->num_cmpl_aggr_int = cpu_to_le16(val); 8133 8134 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); 8135 req->num_cmpl_dma_aggr = cpu_to_le16(val); 8136 8137 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 8138 coal_cap->num_cmpl_dma_aggr_during_int_max); 8139 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); 8140 8141 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); 8142 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); 8143 req->int_lat_tmr_max = cpu_to_le16(tmr); 8144 8145 /* min timer set to 1/2 of interrupt timer */ 8146 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { 8147 val = tmr / 2; 8148 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); 8149 req->int_lat_tmr_min = cpu_to_le16(val); 8150 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 8151 } 8152 8153 /* buf timer set to 1/4 of interrupt timer */ 8154 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); 8155 req->cmpl_aggr_dma_tmr = cpu_to_le16(val); 8156 8157 if (cmpl_params & 8158 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { 8159 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); 8160 val = clamp_t(u16, tmr, 1, 8161 coal_cap->cmpl_aggr_dma_tmr_during_int_max); 8162 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); 8163 req->enables |= 8164 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); 8165 } 8166 8167 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && 8168 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) 8169 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; 8170 req->flags = cpu_to_le16(flags); 8171 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); 8172 } 8173 8174 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, 8175 struct bnxt_coal *hw_coal) 8176 { 8177 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; 8178 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8179 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 8180 u32 nq_params = coal_cap->nq_params; 8181 u16 tmr; 8182 int rc; 8183 8184 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) 8185 return 0; 8186 8187 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8188 if (rc) 8189 return rc; 8190 8191 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); 8192 req->flags = 8193 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); 8194 8195 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; 8196 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); 8197 req->int_lat_tmr_min = cpu_to_le16(tmr); 8198 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); 8199 return hwrm_req_send(bp, req); 8200 } 8201 8202 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) 8203 { 8204 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; 8205 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8206 struct bnxt_coal coal; 8207 int rc; 8208 8209 /* Tick values in micro seconds. 8210 * 1 coal_buf x bufs_per_record = 1 completion record. 
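	 *
	 * Conversion example (see bnxt_usec_to_coal_tmr()): with the legacy
	 * default timer_units of 80, a coal_ticks value of 100 usec maps to
	 * 100 * 1000 / 80 = 1250 hardware timer units.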
8211 */ 8212 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); 8213 8214 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; 8215 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; 8216 8217 if (!bnapi->rx_ring) 8218 return -ENODEV; 8219 8220 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8221 if (rc) 8222 return rc; 8223 8224 bnxt_hwrm_set_coal_params(bp, &coal, req_rx); 8225 8226 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); 8227 8228 return hwrm_req_send(bp, req_rx); 8229 } 8230 8231 static int 8232 bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 8233 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8234 { 8235 u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); 8236 8237 req->ring_id = cpu_to_le16(ring_id); 8238 return hwrm_req_send(bp, req); 8239 } 8240 8241 static int 8242 bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, 8243 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) 8244 { 8245 struct bnxt_tx_ring_info *txr; 8246 int i, rc; 8247 8248 bnxt_for_each_napi_tx(i, bnapi, txr) { 8249 u16 ring_id; 8250 8251 ring_id = bnxt_cp_ring_for_tx(bp, txr); 8252 req->ring_id = cpu_to_le16(ring_id); 8253 rc = hwrm_req_send(bp, req); 8254 if (rc) 8255 return rc; 8256 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8257 return 0; 8258 } 8259 return 0; 8260 } 8261 8262 int bnxt_hwrm_set_coal(struct bnxt *bp) 8263 { 8264 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; 8265 int i, rc; 8266 8267 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8268 if (rc) 8269 return rc; 8270 8271 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); 8272 if (rc) { 8273 hwrm_req_drop(bp, req_rx); 8274 return rc; 8275 } 8276 8277 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); 8278 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); 8279 8280 hwrm_req_hold(bp, req_rx); 8281 hwrm_req_hold(bp, req_tx); 8282 for (i = 0; i < bp->cp_nr_rings; i++) { 8283 struct bnxt_napi *bnapi = bp->bnapi[i]; 8284 struct bnxt_coal *hw_coal; 8285 8286 if (!bnapi->rx_ring) 8287 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 8288 else 8289 rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); 8290 if (rc) 8291 break; 8292 8293 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 8294 continue; 8295 8296 if (bnapi->rx_ring && bnapi->tx_ring[0]) { 8297 rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); 8298 if (rc) 8299 break; 8300 } 8301 if (bnapi->rx_ring) 8302 hw_coal = &bp->rx_coal; 8303 else 8304 hw_coal = &bp->tx_coal; 8305 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); 8306 } 8307 hwrm_req_drop(bp, req_rx); 8308 hwrm_req_drop(bp, req_tx); 8309 return rc; 8310 } 8311 8312 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) 8313 { 8314 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; 8315 struct hwrm_stat_ctx_free_input *req; 8316 int i; 8317 8318 if (!bp->bnapi) 8319 return; 8320 8321 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8322 return; 8323 8324 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) 8325 return; 8326 if (BNXT_FW_MAJ(bp) <= 20) { 8327 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { 8328 hwrm_req_drop(bp, req); 8329 return; 8330 } 8331 hwrm_req_hold(bp, req0); 8332 } 8333 hwrm_req_hold(bp, req); 8334 for (i = 0; i < bp->cp_nr_rings; i++) { 8335 struct bnxt_napi *bnapi = bp->bnapi[i]; 8336 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8337 8338 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { 8339 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); 8340 if (req0) { 8341 
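				/* Clear the counters before freeing the
				 * context; req0 is only initialized above for
				 * older firmware (major version <= 20).
				 */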
req0->stat_ctx_id = req->stat_ctx_id; 8342 hwrm_req_send(bp, req0); 8343 } 8344 hwrm_req_send(bp, req); 8345 8346 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; 8347 } 8348 } 8349 hwrm_req_drop(bp, req); 8350 if (req0) 8351 hwrm_req_drop(bp, req0); 8352 } 8353 8354 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) 8355 { 8356 struct hwrm_stat_ctx_alloc_output *resp; 8357 struct hwrm_stat_ctx_alloc_input *req; 8358 int rc, i; 8359 8360 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 8361 return 0; 8362 8363 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); 8364 if (rc) 8365 return rc; 8366 8367 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); 8368 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); 8369 8370 resp = hwrm_req_hold(bp, req); 8371 for (i = 0; i < bp->cp_nr_rings; i++) { 8372 struct bnxt_napi *bnapi = bp->bnapi[i]; 8373 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 8374 8375 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); 8376 8377 rc = hwrm_req_send(bp, req); 8378 if (rc) 8379 break; 8380 8381 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); 8382 8383 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; 8384 } 8385 hwrm_req_drop(bp, req); 8386 return rc; 8387 } 8388 8389 static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 8390 { 8391 struct hwrm_func_qcfg_output *resp; 8392 struct hwrm_func_qcfg_input *req; 8393 u16 flags; 8394 int rc; 8395 8396 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); 8397 if (rc) 8398 return rc; 8399 8400 req->fid = cpu_to_le16(0xffff); 8401 resp = hwrm_req_hold(bp, req); 8402 rc = hwrm_req_send(bp, req); 8403 if (rc) 8404 goto func_qcfg_exit; 8405 8406 flags = le16_to_cpu(resp->flags); 8407 #ifdef CONFIG_BNXT_SRIOV 8408 if (BNXT_VF(bp)) { 8409 struct bnxt_vf_info *vf = &bp->vf; 8410 8411 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 8412 if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF) 8413 vf->flags |= BNXT_VF_TRUST; 8414 else 8415 vf->flags &= ~BNXT_VF_TRUST; 8416 } else { 8417 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); 8418 } 8419 #endif 8420 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | 8421 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { 8422 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; 8423 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) 8424 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; 8425 } 8426 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) 8427 bp->flags |= BNXT_FLAG_MULTI_HOST; 8428 8429 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) 8430 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; 8431 8432 if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV) 8433 bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV; 8434 8435 switch (resp->port_partition_type) { 8436 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 8437 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2: 8438 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 8439 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 8440 bp->port_partition_type = resp->port_partition_type; 8441 break; 8442 } 8443 if (bp->hwrm_spec_code < 0x10707 || 8444 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) 8445 bp->br_mode = BRIDGE_MODE_VEB; 8446 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) 8447 bp->br_mode = BRIDGE_MODE_VEPA; 8448 else 8449 bp->br_mode = BRIDGE_MODE_UNDEF; 8450 8451 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); 8452 if (!bp->max_mtu) 8453 bp->max_mtu = BNXT_MAX_MTU; 8454 8455 if (bp->db_size) 8456 goto func_qcfg_exit; 8457 8458 bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; 8459 if (BNXT_CHIP_P5(bp)) { 8460 if 
(BNXT_PF(bp)) 8461 bp->db_offset = DB_PF_OFFSET_P5; 8462 else 8463 bp->db_offset = DB_VF_OFFSET_P5; 8464 } 8465 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * 8466 1024); 8467 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || 8468 bp->db_size <= bp->db_offset) 8469 bp->db_size = pci_resource_len(bp->pdev, 2); 8470 8471 func_qcfg_exit: 8472 hwrm_req_drop(bp, req); 8473 return rc; 8474 } 8475 8476 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, 8477 u8 init_val, u8 init_offset, 8478 bool init_mask_set) 8479 { 8480 ctxm->init_value = init_val; 8481 ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; 8482 if (init_mask_set) 8483 ctxm->init_offset = init_offset * 4; 8484 else 8485 ctxm->init_value = 0; 8486 } 8487 8488 static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) 8489 { 8490 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8491 u16 type; 8492 8493 for (type = 0; type < ctx_max; type++) { 8494 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8495 int n = 1; 8496 8497 if (!ctxm->max_entries || ctxm->pg_info) 8498 continue; 8499 8500 if (ctxm->instance_bmap) 8501 n = hweight32(ctxm->instance_bmap); 8502 ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); 8503 if (!ctxm->pg_info) 8504 return -ENOMEM; 8505 } 8506 return 0; 8507 } 8508 8509 static void bnxt_free_one_ctx_mem(struct bnxt *bp, 8510 struct bnxt_ctx_mem_type *ctxm, bool force); 8511 8512 #define BNXT_CTX_INIT_VALID(flags) \ 8513 (!!((flags) & \ 8514 FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) 8515 8516 static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) 8517 { 8518 struct hwrm_func_backing_store_qcaps_v2_output *resp; 8519 struct hwrm_func_backing_store_qcaps_v2_input *req; 8520 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8521 u16 type; 8522 int rc; 8523 8524 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); 8525 if (rc) 8526 return rc; 8527 8528 if (!ctx) { 8529 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8530 if (!ctx) 8531 return -ENOMEM; 8532 bp->ctx = ctx; 8533 } 8534 8535 resp = hwrm_req_hold(bp, req); 8536 8537 for (type = 0; type < BNXT_CTX_V2_MAX; ) { 8538 struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; 8539 u8 init_val, init_off, i; 8540 u32 max_entries; 8541 u16 entry_size; 8542 __le32 *p; 8543 u32 flags; 8544 8545 req->type = cpu_to_le16(type); 8546 rc = hwrm_req_send(bp, req); 8547 if (rc) 8548 goto ctx_done; 8549 flags = le32_to_cpu(resp->flags); 8550 type = le16_to_cpu(resp->next_valid_type); 8551 if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) { 8552 bnxt_free_one_ctx_mem(bp, ctxm, true); 8553 continue; 8554 } 8555 entry_size = le16_to_cpu(resp->entry_size); 8556 max_entries = le32_to_cpu(resp->max_num_entries); 8557 if (ctxm->mem_valid) { 8558 if (!(flags & BNXT_CTX_MEM_PERSIST) || 8559 ctxm->entry_size != entry_size || 8560 ctxm->max_entries != max_entries) 8561 bnxt_free_one_ctx_mem(bp, ctxm, true); 8562 else 8563 continue; 8564 } 8565 ctxm->type = le16_to_cpu(resp->type); 8566 ctxm->entry_size = entry_size; 8567 ctxm->flags = flags; 8568 ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); 8569 ctxm->entry_multiple = resp->entry_multiple; 8570 ctxm->max_entries = max_entries; 8571 ctxm->min_entries = le32_to_cpu(resp->min_num_entries); 8572 init_val = resp->ctx_init_value; 8573 init_off = resp->ctx_init_offset; 8574 bnxt_init_ctx_initializer(ctxm, init_val, init_off, 8575 BNXT_CTX_INIT_VALID(flags)); 8576 ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, 8577 
BNXT_MAX_SPLIT_ENTRY); 8578 for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; 8579 i++, p++) 8580 ctxm->split[i] = le32_to_cpu(*p); 8581 } 8582 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); 8583 8584 ctx_done: 8585 hwrm_req_drop(bp, req); 8586 return rc; 8587 } 8588 8589 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) 8590 { 8591 struct hwrm_func_backing_store_qcaps_output *resp; 8592 struct hwrm_func_backing_store_qcaps_input *req; 8593 int rc; 8594 8595 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || 8596 (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED)) 8597 return 0; 8598 8599 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 8600 return bnxt_hwrm_func_backing_store_qcaps_v2(bp); 8601 8602 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); 8603 if (rc) 8604 return rc; 8605 8606 resp = hwrm_req_hold(bp, req); 8607 rc = hwrm_req_send_silent(bp, req); 8608 if (!rc) { 8609 struct bnxt_ctx_mem_type *ctxm; 8610 struct bnxt_ctx_mem_info *ctx; 8611 u8 init_val, init_idx = 0; 8612 u16 init_mask; 8613 8614 ctx = bp->ctx; 8615 if (!ctx) { 8616 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 8617 if (!ctx) { 8618 rc = -ENOMEM; 8619 goto ctx_err; 8620 } 8621 bp->ctx = ctx; 8622 } 8623 init_val = resp->ctx_kind_initializer; 8624 init_mask = le16_to_cpu(resp->ctx_init_mask); 8625 8626 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8627 ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); 8628 ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); 8629 ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); 8630 ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); 8631 ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); 8632 bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, 8633 (init_mask & (1 << init_idx++)) != 0); 8634 8635 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8636 ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); 8637 ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); 8638 ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); 8639 bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, 8640 (init_mask & (1 << init_idx++)) != 0); 8641 8642 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8643 ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); 8644 ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); 8645 ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); 8646 bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, 8647 (init_mask & (1 << init_idx++)) != 0); 8648 8649 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8650 ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries); 8651 ctxm->max_entries = ctxm->vnic_entries + 8652 le16_to_cpu(resp->vnic_max_ring_table_entries); 8653 ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); 8654 bnxt_init_ctx_initializer(ctxm, init_val, 8655 resp->vnic_init_offset, 8656 (init_mask & (1 << init_idx++)) != 0); 8657 8658 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8659 ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); 8660 ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); 8661 bnxt_init_ctx_initializer(ctxm, init_val, 8662 resp->stat_init_offset, 8663 (init_mask & (1 << init_idx++)) != 0); 8664 8665 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8666 ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); 8667 ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); 8668 ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring); 8669 ctxm->entry_multiple = resp->tqm_entries_multiple; 8670 if (!ctxm->entry_multiple) 8671 ctxm->entry_multiple = 1; 8672 
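		/* Fast path TQM rings (FTQM) start out with the same sizing
		 * parameters as the slow path TQM ring (STQM); the FTQM
		 * instance bitmap is filled in below once the firmware's
		 * tqm_fp_rings_count is known.
		 */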
8673 memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); 8674 8675 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8676 ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); 8677 ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); 8678 ctxm->mrav_num_entries_units = 8679 le16_to_cpu(resp->mrav_num_entries_units); 8680 bnxt_init_ctx_initializer(ctxm, init_val, 8681 resp->mrav_init_offset, 8682 (init_mask & (1 << init_idx++)) != 0); 8683 8684 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8685 ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); 8686 ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); 8687 8688 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; 8689 if (!ctx->tqm_fp_rings_count) 8690 ctx->tqm_fp_rings_count = bp->max_q; 8691 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) 8692 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; 8693 8694 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 8695 memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); 8696 ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; 8697 8698 rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); 8699 } else { 8700 rc = 0; 8701 } 8702 ctx_err: 8703 hwrm_req_drop(bp, req); 8704 return rc; 8705 } 8706 8707 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, 8708 __le64 *pg_dir) 8709 { 8710 if (!rmem->nr_pages) 8711 return; 8712 8713 BNXT_SET_CTX_PAGE_ATTR(*pg_attr); 8714 if (rmem->depth >= 1) { 8715 if (rmem->depth == 2) 8716 *pg_attr |= 2; 8717 else 8718 *pg_attr |= 1; 8719 *pg_dir = cpu_to_le64(rmem->pg_tbl_map); 8720 } else { 8721 *pg_dir = cpu_to_le64(rmem->dma_arr[0]); 8722 } 8723 } 8724 8725 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ 8726 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ 8727 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ 8728 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ 8729 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ 8730 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) 8731 8732 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) 8733 { 8734 struct hwrm_func_backing_store_cfg_input *req; 8735 struct bnxt_ctx_mem_info *ctx = bp->ctx; 8736 struct bnxt_ctx_pg_info *ctx_pg; 8737 struct bnxt_ctx_mem_type *ctxm; 8738 void **__req = (void **)&req; 8739 u32 req_len = sizeof(*req); 8740 __le32 *num_entries; 8741 __le64 *pg_dir; 8742 u32 flags = 0; 8743 u8 *pg_attr; 8744 u32 ena; 8745 int rc; 8746 int i; 8747 8748 if (!ctx) 8749 return 0; 8750 8751 if (req_len > bp->hwrm_max_ext_req_len) 8752 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; 8753 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); 8754 if (rc) 8755 return rc; 8756 8757 req->enables = cpu_to_le32(enables); 8758 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { 8759 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 8760 ctx_pg = ctxm->pg_info; 8761 req->qp_num_entries = cpu_to_le32(ctx_pg->entries); 8762 req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); 8763 req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); 8764 req->qp_entry_size = cpu_to_le16(ctxm->entry_size); 8765 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8766 &req->qpc_pg_size_qpc_lvl, 8767 &req->qpc_page_dir); 8768 8769 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD) 8770 req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); 8771 } 8772 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { 8773 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 8774 ctx_pg = ctxm->pg_info; 8775 req->srq_num_entries = cpu_to_le32(ctx_pg->entries); 8776 req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); 
8777 req->srq_entry_size = cpu_to_le16(ctxm->entry_size); 8778 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8779 &req->srq_pg_size_srq_lvl, 8780 &req->srq_page_dir); 8781 } 8782 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { 8783 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 8784 ctx_pg = ctxm->pg_info; 8785 req->cq_num_entries = cpu_to_le32(ctx_pg->entries); 8786 req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); 8787 req->cq_entry_size = cpu_to_le16(ctxm->entry_size); 8788 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8789 &req->cq_pg_size_cq_lvl, 8790 &req->cq_page_dir); 8791 } 8792 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { 8793 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 8794 ctx_pg = ctxm->pg_info; 8795 req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); 8796 req->vnic_num_ring_table_entries = 8797 cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries); 8798 req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); 8799 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8800 &req->vnic_pg_size_vnic_lvl, 8801 &req->vnic_page_dir); 8802 } 8803 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { 8804 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 8805 ctx_pg = ctxm->pg_info; 8806 req->stat_num_entries = cpu_to_le32(ctxm->max_entries); 8807 req->stat_entry_size = cpu_to_le16(ctxm->entry_size); 8808 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8809 &req->stat_pg_size_stat_lvl, 8810 &req->stat_page_dir); 8811 } 8812 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { 8813 u32 units; 8814 8815 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 8816 ctx_pg = ctxm->pg_info; 8817 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); 8818 units = ctxm->mrav_num_entries_units; 8819 if (units) { 8820 u32 num_mr, num_ah = ctxm->mrav_av_entries; 8821 u32 entries; 8822 8823 num_mr = ctx_pg->entries - num_ah; 8824 entries = ((num_mr / units) << 16) | (num_ah / units); 8825 req->mrav_num_entries = cpu_to_le32(entries); 8826 flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; 8827 } 8828 req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); 8829 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8830 &req->mrav_pg_size_mrav_lvl, 8831 &req->mrav_page_dir); 8832 } 8833 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { 8834 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 8835 ctx_pg = ctxm->pg_info; 8836 req->tim_num_entries = cpu_to_le32(ctx_pg->entries); 8837 req->tim_entry_size = cpu_to_le16(ctxm->entry_size); 8838 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 8839 &req->tim_pg_size_tim_lvl, 8840 &req->tim_page_dir); 8841 } 8842 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 8843 for (i = 0, num_entries = &req->tqm_sp_num_entries, 8844 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, 8845 pg_dir = &req->tqm_sp_page_dir, 8846 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, 8847 ctx_pg = ctxm->pg_info; 8848 i < BNXT_MAX_TQM_RINGS; 8849 ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], 8850 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { 8851 if (!(enables & ena)) 8852 continue; 8853 8854 req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); 8855 *num_entries = cpu_to_le32(ctx_pg->entries); 8856 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); 8857 } 8858 req->flags = cpu_to_le32(flags); 8859 return hwrm_req_send(bp, req); 8860 } 8861 8862 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, 8863 struct bnxt_ctx_pg_info *ctx_pg) 8864 { 8865 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8866 8867 rmem->page_size = BNXT_PAGE_SIZE; 8868 rmem->pg_arr = ctx_pg->ctx_pg_arr; 8869 rmem->dma_arr = ctx_pg->ctx_dma_arr; 8870 rmem->flags = 
BNXT_RMEM_VALID_PTE_FLAG; 8871 if (rmem->depth >= 1) 8872 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; 8873 return bnxt_alloc_ring(bp, rmem); 8874 } 8875 8876 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, 8877 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, 8878 u8 depth, struct bnxt_ctx_mem_type *ctxm) 8879 { 8880 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8881 int rc; 8882 8883 if (!mem_size) 8884 return -EINVAL; 8885 8886 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8887 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { 8888 ctx_pg->nr_pages = 0; 8889 return -EINVAL; 8890 } 8891 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { 8892 int nr_tbls, i; 8893 8894 rmem->depth = 2; 8895 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), 8896 GFP_KERNEL); 8897 if (!ctx_pg->ctx_pg_tbl) 8898 return -ENOMEM; 8899 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); 8900 rmem->nr_pages = nr_tbls; 8901 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8902 if (rc) 8903 return rc; 8904 for (i = 0; i < nr_tbls; i++) { 8905 struct bnxt_ctx_pg_info *pg_tbl; 8906 8907 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); 8908 if (!pg_tbl) 8909 return -ENOMEM; 8910 ctx_pg->ctx_pg_tbl[i] = pg_tbl; 8911 rmem = &pg_tbl->ring_mem; 8912 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; 8913 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 8914 rmem->depth = 1; 8915 rmem->nr_pages = MAX_CTX_PAGES; 8916 rmem->ctx_mem = ctxm; 8917 if (i == (nr_tbls - 1)) { 8918 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; 8919 8920 if (rem) 8921 rmem->nr_pages = rem; 8922 } 8923 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 8924 if (rc) 8925 break; 8926 } 8927 } else { 8928 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 8929 if (rmem->nr_pages > 1 || depth) 8930 rmem->depth = 1; 8931 rmem->ctx_mem = ctxm; 8932 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); 8933 } 8934 return rc; 8935 } 8936 8937 static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp, 8938 struct bnxt_ctx_pg_info *ctx_pg, 8939 void *buf, size_t offset, size_t head, 8940 size_t tail) 8941 { 8942 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8943 size_t nr_pages = ctx_pg->nr_pages; 8944 int page_size = rmem->page_size; 8945 size_t len = 0, total_len = 0; 8946 u16 depth = rmem->depth; 8947 8948 tail %= nr_pages * page_size; 8949 do { 8950 if (depth > 1) { 8951 int i = head / (page_size * MAX_CTX_PAGES); 8952 struct bnxt_ctx_pg_info *pg_tbl; 8953 8954 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 8955 rmem = &pg_tbl->ring_mem; 8956 } 8957 len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail); 8958 head += len; 8959 offset += len; 8960 total_len += len; 8961 if (head >= nr_pages * page_size) 8962 head = 0; 8963 } while (head != tail); 8964 return total_len; 8965 } 8966 8967 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, 8968 struct bnxt_ctx_pg_info *ctx_pg) 8969 { 8970 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; 8971 8972 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || 8973 ctx_pg->ctx_pg_tbl) { 8974 int i, nr_tbls = rmem->nr_pages; 8975 8976 for (i = 0; i < nr_tbls; i++) { 8977 struct bnxt_ctx_pg_info *pg_tbl; 8978 struct bnxt_ring_mem_info *rmem2; 8979 8980 pg_tbl = ctx_pg->ctx_pg_tbl[i]; 8981 if (!pg_tbl) 8982 continue; 8983 rmem2 = &pg_tbl->ring_mem; 8984 bnxt_free_ring(bp, rmem2); 8985 ctx_pg->ctx_pg_arr[i] = NULL; 8986 kfree(pg_tbl); 8987 ctx_pg->ctx_pg_tbl[i] = NULL; 8988 } 8989 kfree(ctx_pg->ctx_pg_tbl); 8990 ctx_pg->ctx_pg_tbl = NULL; 8991 } 8992 bnxt_free_ring(bp, rmem); 8993 ctx_pg->nr_pages = 0; 8994 } 8995 8996 static int 
bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, 8997 struct bnxt_ctx_mem_type *ctxm, u32 entries, 8998 u8 pg_lvl) 8999 { 9000 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 9001 int i, rc = 0, n = 1; 9002 u32 mem_size; 9003 9004 if (!ctxm->entry_size || !ctx_pg) 9005 return -EINVAL; 9006 if (ctxm->instance_bmap) 9007 n = hweight32(ctxm->instance_bmap); 9008 if (ctxm->entry_multiple) 9009 entries = roundup(entries, ctxm->entry_multiple); 9010 entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); 9011 mem_size = entries * ctxm->entry_size; 9012 for (i = 0; i < n && !rc; i++) { 9013 ctx_pg[i].entries = entries; 9014 rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, 9015 ctxm->init_value ? ctxm : NULL); 9016 } 9017 if (!rc) 9018 ctxm->mem_valid = 1; 9019 return rc; 9020 } 9021 9022 static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, 9023 struct bnxt_ctx_mem_type *ctxm, 9024 bool last) 9025 { 9026 struct hwrm_func_backing_store_cfg_v2_input *req; 9027 u32 instance_bmap = ctxm->instance_bmap; 9028 int i, j, rc = 0, n = 1; 9029 __le32 *p; 9030 9031 if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) 9032 return 0; 9033 9034 if (instance_bmap) 9035 n = hweight32(ctxm->instance_bmap); 9036 else 9037 instance_bmap = 1; 9038 9039 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); 9040 if (rc) 9041 return rc; 9042 hwrm_req_hold(bp, req); 9043 req->type = cpu_to_le16(ctxm->type); 9044 req->entry_size = cpu_to_le16(ctxm->entry_size); 9045 if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) && 9046 bnxt_bs_trace_avail(bp, ctxm->type)) { 9047 struct bnxt_bs_trace_info *bs_trace; 9048 u32 enables; 9049 9050 enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET; 9051 req->enables = cpu_to_le32(enables); 9052 bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]]; 9053 req->next_bs_offset = cpu_to_le32(bs_trace->last_offset); 9054 } 9055 req->subtype_valid_cnt = ctxm->split_entry_cnt; 9056 for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) 9057 p[i] = cpu_to_le32(ctxm->split[i]); 9058 for (i = 0, j = 0; j < n && !rc; i++) { 9059 struct bnxt_ctx_pg_info *ctx_pg; 9060 9061 if (!(instance_bmap & (1 << i))) 9062 continue; 9063 req->instance = cpu_to_le16(i); 9064 ctx_pg = &ctxm->pg_info[j++]; 9065 if (!ctx_pg->entries) 9066 continue; 9067 req->num_entries = cpu_to_le32(ctx_pg->entries); 9068 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, 9069 &req->page_size_pbl_level, 9070 &req->page_dir); 9071 if (last && j == n) 9072 req->flags = 9073 cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); 9074 rc = hwrm_req_send(bp, req); 9075 } 9076 hwrm_req_drop(bp, req); 9077 return rc; 9078 } 9079 9080 static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) 9081 { 9082 struct bnxt_ctx_mem_info *ctx = bp->ctx; 9083 struct bnxt_ctx_mem_type *ctxm; 9084 u16 last_type = BNXT_CTX_INV; 9085 int rc = 0; 9086 u16 type; 9087 9088 for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) { 9089 ctxm = &ctx->ctx_arr[type]; 9090 if (!bnxt_bs_trace_avail(bp, type)) 9091 continue; 9092 if (!ctxm->mem_valid) { 9093 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, 9094 ctxm->max_entries, 1); 9095 if (rc) { 9096 netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n", 9097 type); 9098 continue; 9099 } 9100 bnxt_bs_trace_init(bp, ctxm); 9101 } 9102 last_type = type; 9103 } 9104 9105 if (last_type == BNXT_CTX_INV) { 9106 if (!ena) 9107 return 0; 9108 else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) 9109 last_type = BNXT_CTX_MAX - 1; 9110 else 9111 
last_type = BNXT_CTX_L2_MAX - 1; 9112 } 9113 ctx->ctx_arr[last_type].last = 1; 9114 9115 for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { 9116 ctxm = &ctx->ctx_arr[type]; 9117 9118 if (!ctxm->mem_valid) 9119 continue; 9120 rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); 9121 if (rc) 9122 return rc; 9123 } 9124 return 0; 9125 } 9126 9127 /** 9128 * __bnxt_copy_ctx_mem - copy host context memory 9129 * @bp: The driver context 9130 * @ctxm: The pointer to the context memory type 9131 * @buf: The destination buffer or NULL to just obtain the length 9132 * @offset: The buffer offset to copy the data to 9133 * @head: The head offset of context memory to copy from 9134 * @tail: The tail offset (last byte + 1) of context memory to end the copy 9135 * 9136 * This function is called for debugging purposes to dump the host context 9137 * used by the chip. 9138 * 9139 * Return: Length of memory copied 9140 */ 9141 static size_t __bnxt_copy_ctx_mem(struct bnxt *bp, 9142 struct bnxt_ctx_mem_type *ctxm, void *buf, 9143 size_t offset, size_t head, size_t tail) 9144 { 9145 struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; 9146 size_t len = 0, total_len = 0; 9147 int i, n = 1; 9148 9149 if (!ctx_pg) 9150 return 0; 9151 9152 if (ctxm->instance_bmap) 9153 n = hweight32(ctxm->instance_bmap); 9154 for (i = 0; i < n; i++) { 9155 len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head, 9156 tail); 9157 offset += len; 9158 total_len += len; 9159 } 9160 return total_len; 9161 } 9162 9163 size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm, 9164 void *buf, size_t offset) 9165 { 9166 size_t tail = ctxm->max_entries * ctxm->entry_size; 9167 9168 return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail); 9169 } 9170 9171 static void bnxt_free_one_ctx_mem(struct bnxt *bp, 9172 struct bnxt_ctx_mem_type *ctxm, bool force) 9173 { 9174 struct bnxt_ctx_pg_info *ctx_pg; 9175 int i, n = 1; 9176 9177 ctxm->last = 0; 9178 9179 if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST)) 9180 return; 9181 9182 ctx_pg = ctxm->pg_info; 9183 if (ctx_pg) { 9184 if (ctxm->instance_bmap) 9185 n = hweight32(ctxm->instance_bmap); 9186 for (i = 0; i < n; i++) 9187 bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); 9188 9189 kfree(ctx_pg); 9190 ctxm->pg_info = NULL; 9191 ctxm->mem_valid = 0; 9192 } 9193 memset(ctxm, 0, sizeof(*ctxm)); 9194 } 9195 9196 void bnxt_free_ctx_mem(struct bnxt *bp, bool force) 9197 { 9198 struct bnxt_ctx_mem_info *ctx = bp->ctx; 9199 u16 type; 9200 9201 if (!ctx) 9202 return; 9203 9204 for (type = 0; type < BNXT_CTX_V2_MAX; type++) 9205 bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force); 9206 9207 ctx->flags &= ~BNXT_CTX_FLAG_INITED; 9208 if (force) { 9209 kfree(ctx); 9210 bp->ctx = NULL; 9211 } 9212 } 9213 9214 static int bnxt_alloc_ctx_mem(struct bnxt *bp) 9215 { 9216 struct bnxt_ctx_mem_type *ctxm; 9217 struct bnxt_ctx_mem_info *ctx; 9218 u32 l2_qps, qp1_qps, max_qps; 9219 u32 ena, entries_sp, entries; 9220 u32 srqs, max_srqs, min; 9221 u32 num_mr, num_ah; 9222 u32 extra_srqs = 0; 9223 u32 extra_qps = 0; 9224 u32 fast_qpmd_qps; 9225 u8 pg_lvl = 1; 9226 int i, rc; 9227 9228 rc = bnxt_hwrm_func_backing_store_qcaps(bp); 9229 if (rc) { 9230 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", 9231 rc); 9232 return rc; 9233 } 9234 ctx = bp->ctx; 9235 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) 9236 return 0; 9237 9238 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 9239 l2_qps = ctxm->qp_l2_entries; 9240 qp1_qps = ctxm->qp_qp1_entries; 9241 fast_qpmd_qps = 
ctxm->qp_fast_qpmd_entries; 9242 max_qps = ctxm->max_entries; 9243 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 9244 srqs = ctxm->srq_l2_entries; 9245 max_srqs = ctxm->max_entries; 9246 ena = 0; 9247 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { 9248 pg_lvl = 2; 9249 if (BNXT_SW_RES_LMT(bp)) { 9250 extra_qps = max_qps - l2_qps - qp1_qps; 9251 extra_srqs = max_srqs - srqs; 9252 } else { 9253 extra_qps = min_t(u32, 65536, 9254 max_qps - l2_qps - qp1_qps); 9255 /* allocate extra qps if fw supports RoCE fast qp 9256 * destroy feature 9257 */ 9258 extra_qps += fast_qpmd_qps; 9259 extra_srqs = min_t(u32, 8192, max_srqs - srqs); 9260 } 9261 if (fast_qpmd_qps) 9262 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; 9263 } 9264 9265 ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; 9266 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 9267 pg_lvl); 9268 if (rc) 9269 return rc; 9270 9271 ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; 9272 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); 9273 if (rc) 9274 return rc; 9275 9276 ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; 9277 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + 9278 extra_qps * 2, pg_lvl); 9279 if (rc) 9280 return rc; 9281 9282 ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; 9283 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 9284 if (rc) 9285 return rc; 9286 9287 ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; 9288 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); 9289 if (rc) 9290 return rc; 9291 9292 if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) 9293 goto skip_rdma; 9294 9295 ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; 9296 if (BNXT_SW_RES_LMT(bp) && 9297 ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) { 9298 num_ah = ctxm->mrav_av_entries; 9299 num_mr = ctxm->max_entries - num_ah; 9300 } else { 9301 /* 128K extra is needed to accommodate static AH context 9302 * allocation by f/w. 
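		 *
		 * num_mr is thus capped at 256K entries and num_ah at 128K;
		 * the AH share is saved in ctxm->mrav_av_entries so that the
		 * MRAV entry counts can be split accordingly when configuring
		 * the backing store.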
9303 */ 9304 num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); 9305 num_ah = min_t(u32, num_mr, 1024 * 128); 9306 ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; 9307 if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) 9308 ctxm->mrav_av_entries = num_ah; 9309 } 9310 9311 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); 9312 if (rc) 9313 return rc; 9314 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; 9315 9316 ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; 9317 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); 9318 if (rc) 9319 return rc; 9320 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; 9321 9322 skip_rdma: 9323 ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; 9324 min = ctxm->min_entries; 9325 entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + 9326 2 * (extra_qps + qp1_qps) + min; 9327 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); 9328 if (rc) 9329 return rc; 9330 9331 ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; 9332 entries = l2_qps + 2 * (extra_qps + qp1_qps); 9333 rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); 9334 if (rc) 9335 return rc; 9336 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) 9337 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; 9338 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; 9339 9340 if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) 9341 rc = bnxt_backing_store_cfg_v2(bp, ena); 9342 else 9343 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); 9344 if (rc) { 9345 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", 9346 rc); 9347 return rc; 9348 } 9349 ctx->flags |= BNXT_CTX_FLAG_INITED; 9350 return 0; 9351 } 9352 9353 static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp) 9354 { 9355 struct hwrm_dbg_crashdump_medium_cfg_input *req; 9356 u16 page_attr; 9357 int rc; 9358 9359 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) 9360 return 0; 9361 9362 rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG); 9363 if (rc) 9364 return rc; 9365 9366 if (BNXT_PAGE_SIZE == 0x2000) 9367 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K; 9368 else if (BNXT_PAGE_SIZE == 0x10000) 9369 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K; 9370 else 9371 page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K; 9372 req->pg_size_lvl = cpu_to_le16(page_attr | 9373 bp->fw_crash_mem->ring_mem.depth); 9374 req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map); 9375 req->size = cpu_to_le32(bp->fw_crash_len); 9376 req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR); 9377 return hwrm_req_send(bp, req); 9378 } 9379 9380 static void bnxt_free_crash_dump_mem(struct bnxt *bp) 9381 { 9382 if (bp->fw_crash_mem) { 9383 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); 9384 kfree(bp->fw_crash_mem); 9385 bp->fw_crash_mem = NULL; 9386 } 9387 } 9388 9389 static int bnxt_alloc_crash_dump_mem(struct bnxt *bp) 9390 { 9391 u32 mem_size = 0; 9392 int rc; 9393 9394 if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) 9395 return 0; 9396 9397 rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size); 9398 if (rc) 9399 return rc; 9400 9401 mem_size = round_up(mem_size, 4); 9402 9403 /* keep and use the existing pages */ 9404 if (bp->fw_crash_mem && 9405 mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE) 9406 goto alloc_done; 9407 9408 if (bp->fw_crash_mem) 9409 bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); 9410 else 9411 bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem), 9412 GFP_KERNEL); 9413 if (!bp->fw_crash_mem) 9414 return -ENOMEM; 9415 9416 rc = 
bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL); 9417 if (rc) { 9418 bnxt_free_crash_dump_mem(bp); 9419 return rc; 9420 } 9421 9422 alloc_done: 9423 bp->fw_crash_len = mem_size; 9424 return 0; 9425 } 9426 9427 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) 9428 { 9429 struct hwrm_func_resource_qcaps_output *resp; 9430 struct hwrm_func_resource_qcaps_input *req; 9431 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9432 int rc; 9433 9434 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); 9435 if (rc) 9436 return rc; 9437 9438 req->fid = cpu_to_le16(0xffff); 9439 resp = hwrm_req_hold(bp, req); 9440 rc = hwrm_req_send_silent(bp, req); 9441 if (rc) 9442 goto hwrm_func_resc_qcaps_exit; 9443 9444 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); 9445 if (!all) 9446 goto hwrm_func_resc_qcaps_exit; 9447 9448 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); 9449 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 9450 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); 9451 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 9452 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); 9453 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 9454 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); 9455 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 9456 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); 9457 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); 9458 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); 9459 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 9460 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); 9461 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 9462 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); 9463 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 9464 9465 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 9466 u16 max_msix = le16_to_cpu(resp->max_msix); 9467 9468 hw_resc->max_nqs = max_msix; 9469 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; 9470 } 9471 9472 if (BNXT_PF(bp)) { 9473 struct bnxt_pf_info *pf = &bp->pf; 9474 9475 pf->vf_resv_strategy = 9476 le16_to_cpu(resp->vf_reservation_strategy); 9477 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) 9478 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; 9479 } 9480 hwrm_func_resc_qcaps_exit: 9481 hwrm_req_drop(bp, req); 9482 return rc; 9483 } 9484 9485 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) 9486 { 9487 struct hwrm_port_mac_ptp_qcfg_output *resp; 9488 struct hwrm_port_mac_ptp_qcfg_input *req; 9489 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 9490 u8 flags; 9491 int rc; 9492 9493 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { 9494 rc = -ENODEV; 9495 goto no_ptp; 9496 } 9497 9498 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); 9499 if (rc) 9500 goto no_ptp; 9501 9502 req->port_id = cpu_to_le16(bp->pf.port_id); 9503 resp = hwrm_req_hold(bp, req); 9504 rc = hwrm_req_send(bp, req); 9505 if (rc) 9506 goto exit; 9507 9508 flags = resp->flags; 9509 if (BNXT_CHIP_P5_AND_MINUS(bp) && 9510 !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { 9511 rc = -ENODEV; 9512 goto exit; 9513 } 9514 if (!ptp) { 9515 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); 9516 if (!ptp) { 9517 rc = -ENOMEM; 9518 goto exit; 9519 } 9520 ptp->bp = bp; 9521 bp->ptp_cfg = ptp; 9522 } 9523 9524 if (flags & 9525 (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK | 9526 PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) { 9527 ptp->refclk_regs[0] = 
le32_to_cpu(resp->ts_ref_clock_reg_lower); 9528 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); 9529 } else if (BNXT_CHIP_P5(bp)) { 9530 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; 9531 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; 9532 } else { 9533 rc = -ENODEV; 9534 goto exit; 9535 } 9536 ptp->rtc_configured = 9537 (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; 9538 rc = bnxt_ptp_init(bp); 9539 if (rc) 9540 netdev_warn(bp->dev, "PTP initialization failed.\n"); 9541 exit: 9542 hwrm_req_drop(bp, req); 9543 if (!rc) 9544 return 0; 9545 9546 no_ptp: 9547 bnxt_ptp_clear(bp); 9548 kfree(ptp); 9549 bp->ptp_cfg = NULL; 9550 return rc; 9551 } 9552 9553 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 9554 { 9555 struct hwrm_func_qcaps_output *resp; 9556 struct hwrm_func_qcaps_input *req; 9557 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 9558 u32 flags, flags_ext, flags_ext2; 9559 int rc; 9560 9561 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); 9562 if (rc) 9563 return rc; 9564 9565 req->fid = cpu_to_le16(0xffff); 9566 resp = hwrm_req_hold(bp, req); 9567 rc = hwrm_req_send(bp, req); 9568 if (rc) 9569 goto hwrm_func_qcaps_exit; 9570 9571 flags = le32_to_cpu(resp->flags); 9572 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) 9573 bp->flags |= BNXT_FLAG_ROCEV1_CAP; 9574 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) 9575 bp->flags |= BNXT_FLAG_ROCEV2_CAP; 9576 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) 9577 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; 9578 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) 9579 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; 9580 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) 9581 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; 9582 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) 9583 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; 9584 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) 9585 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; 9586 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) 9587 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; 9588 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) 9589 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; 9590 9591 flags_ext = le32_to_cpu(resp->flags_ext); 9592 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) 9593 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; 9594 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) 9595 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; 9596 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) 9597 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; 9598 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) 9599 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; 9600 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) 9601 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; 9602 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED) 9603 bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2; 9604 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED)) 9605 bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP; 9606 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) 9607 bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; 9608 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) 9609 bp->flags |= BNXT_FLAG_TX_COAL_CMPL; 9610 9611 flags_ext2 = le32_to_cpu(resp->flags_ext2); 9612 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) 9613 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; 9614 if (flags_ext2 & 
FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) 9615 bp->flags |= BNXT_FLAG_UDP_GSO_CAP; 9616 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) 9617 bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; 9618 if (flags_ext2 & 9619 FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED) 9620 bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS; 9621 if (BNXT_PF(bp) && 9622 (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED)) 9623 bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED; 9624 9625 bp->tx_push_thresh = 0; 9626 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && 9627 BNXT_FW_MAJ(bp) > 217) 9628 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; 9629 9630 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); 9631 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); 9632 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); 9633 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); 9634 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); 9635 if (!hw_resc->max_hw_ring_grps) 9636 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; 9637 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); 9638 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); 9639 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); 9640 9641 hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); 9642 hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); 9643 hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); 9644 hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); 9645 hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); 9646 hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); 9647 9648 if (BNXT_PF(bp)) { 9649 struct bnxt_pf_info *pf = &bp->pf; 9650 9651 pf->fw_fid = le16_to_cpu(resp->fid); 9652 pf->port_id = le16_to_cpu(resp->port_id); 9653 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); 9654 pf->first_vf_id = le16_to_cpu(resp->first_vf_id); 9655 pf->max_vfs = le16_to_cpu(resp->max_vfs); 9656 bp->flags &= ~BNXT_FLAG_WOL_CAP; 9657 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) 9658 bp->flags |= BNXT_FLAG_WOL_CAP; 9659 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { 9660 bp->fw_cap |= BNXT_FW_CAP_PTP; 9661 } else { 9662 bnxt_ptp_clear(bp); 9663 kfree(bp->ptp_cfg); 9664 bp->ptp_cfg = NULL; 9665 } 9666 } else { 9667 #ifdef CONFIG_BNXT_SRIOV 9668 struct bnxt_vf_info *vf = &bp->vf; 9669 9670 vf->fw_fid = le16_to_cpu(resp->fid); 9671 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); 9672 #endif 9673 } 9674 bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs); 9675 9676 hwrm_func_qcaps_exit: 9677 hwrm_req_drop(bp, req); 9678 return rc; 9679 } 9680 9681 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) 9682 { 9683 struct hwrm_dbg_qcaps_output *resp; 9684 struct hwrm_dbg_qcaps_input *req; 9685 int rc; 9686 9687 bp->fw_dbg_cap = 0; 9688 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) 9689 return; 9690 9691 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); 9692 if (rc) 9693 return; 9694 9695 req->fid = cpu_to_le16(0xffff); 9696 resp = hwrm_req_hold(bp, req); 9697 rc = hwrm_req_send(bp, req); 9698 if (rc) 9699 goto hwrm_dbg_qcaps_exit; 9700 9701 bp->fw_dbg_cap = le32_to_cpu(resp->flags); 9702 9703 hwrm_dbg_qcaps_exit: 9704 hwrm_req_drop(bp, req); 9705 } 9706 9707 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); 9708 9709 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 9710 { 9711 int rc; 9712 9713 rc = __bnxt_hwrm_func_qcaps(bp); 9714 if (rc) 9715 return rc; 9716 9717 bnxt_hwrm_dbg_qcaps(bp); 
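	/* Descriptive note (added): query queue/port config next; on HWRM
	 * spec 0x10803 or newer, also allocate backing store context memory
	 * and query min/max resources (BNXT_FW_CAP_NEW_RM is set on success).
	 */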
9718 9719 rc = bnxt_hwrm_queue_qportcfg(bp); 9720 if (rc) { 9721 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc); 9722 return rc; 9723 } 9724 if (bp->hwrm_spec_code >= 0x10803) { 9725 rc = bnxt_alloc_ctx_mem(bp); 9726 if (rc) 9727 return rc; 9728 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 9729 if (!rc) 9730 bp->fw_cap |= BNXT_FW_CAP_NEW_RM; 9731 } 9732 return 0; 9733 } 9734 9735 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) 9736 { 9737 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; 9738 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; 9739 u32 flags; 9740 int rc; 9741 9742 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) 9743 return 0; 9744 9745 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); 9746 if (rc) 9747 return rc; 9748 9749 resp = hwrm_req_hold(bp, req); 9750 rc = hwrm_req_send(bp, req); 9751 if (rc) 9752 goto hwrm_cfa_adv_qcaps_exit; 9753 9754 flags = le32_to_cpu(resp->flags); 9755 if (flags & 9756 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) 9757 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; 9758 9759 if (flags & 9760 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) 9761 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; 9762 9763 if (flags & 9764 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) 9765 bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; 9766 9767 hwrm_cfa_adv_qcaps_exit: 9768 hwrm_req_drop(bp, req); 9769 return rc; 9770 } 9771 9772 static int __bnxt_alloc_fw_health(struct bnxt *bp) 9773 { 9774 if (bp->fw_health) 9775 return 0; 9776 9777 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); 9778 if (!bp->fw_health) 9779 return -ENOMEM; 9780 9781 mutex_init(&bp->fw_health->lock); 9782 return 0; 9783 } 9784 9785 static int bnxt_alloc_fw_health(struct bnxt *bp) 9786 { 9787 int rc; 9788 9789 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && 9790 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 9791 return 0; 9792 9793 rc = __bnxt_alloc_fw_health(bp); 9794 if (rc) { 9795 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; 9796 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9797 return rc; 9798 } 9799 9800 return 0; 9801 } 9802 9803 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) 9804 { 9805 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + 9806 BNXT_GRCPF_REG_WINDOW_BASE_OUT + 9807 BNXT_FW_HEALTH_WIN_MAP_OFF); 9808 } 9809 9810 static void bnxt_inv_fw_health_reg(struct bnxt *bp) 9811 { 9812 struct bnxt_fw_health *fw_health = bp->fw_health; 9813 u32 reg_type; 9814 9815 if (!fw_health) 9816 return; 9817 9818 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); 9819 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 9820 fw_health->status_reliable = false; 9821 9822 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); 9823 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) 9824 fw_health->resets_reliable = false; 9825 } 9826 9827 static void bnxt_try_map_fw_health_reg(struct bnxt *bp) 9828 { 9829 void __iomem *hs; 9830 u32 status_loc; 9831 u32 reg_type; 9832 u32 sig; 9833 9834 if (bp->fw_health) 9835 bp->fw_health->status_reliable = false; 9836 9837 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); 9838 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); 9839 9840 sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); 9841 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { 9842 if (!bp->chip_num) { 9843 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); 9844 bp->chip_num = readl(bp->bar0 + 9845 
BNXT_FW_HEALTH_WIN_BASE + 9846 BNXT_GRC_REG_CHIP_NUM); 9847 } 9848 if (!BNXT_CHIP_P5_PLUS(bp)) 9849 return; 9850 9851 status_loc = BNXT_GRC_REG_STATUS_P5 | 9852 BNXT_FW_HEALTH_REG_TYPE_BAR0; 9853 } else { 9854 status_loc = readl(hs + offsetof(struct hcomm_status, 9855 fw_status_loc)); 9856 } 9857 9858 if (__bnxt_alloc_fw_health(bp)) { 9859 netdev_warn(bp->dev, "no memory for firmware status checks\n"); 9860 return; 9861 } 9862 9863 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; 9864 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); 9865 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { 9866 __bnxt_map_fw_health_reg(bp, status_loc); 9867 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = 9868 BNXT_FW_HEALTH_WIN_OFF(status_loc); 9869 } 9870 9871 bp->fw_health->status_reliable = true; 9872 } 9873 9874 static int bnxt_map_fw_health_regs(struct bnxt *bp) 9875 { 9876 struct bnxt_fw_health *fw_health = bp->fw_health; 9877 u32 reg_base = 0xffffffff; 9878 int i; 9879 9880 bp->fw_health->status_reliable = false; 9881 bp->fw_health->resets_reliable = false; 9882 /* Only pre-map the monitoring GRC registers using window 3 */ 9883 for (i = 0; i < 4; i++) { 9884 u32 reg = fw_health->regs[i]; 9885 9886 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) 9887 continue; 9888 if (reg_base == 0xffffffff) 9889 reg_base = reg & BNXT_GRC_BASE_MASK; 9890 if ((reg & BNXT_GRC_BASE_MASK) != reg_base) 9891 return -ERANGE; 9892 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); 9893 } 9894 bp->fw_health->status_reliable = true; 9895 bp->fw_health->resets_reliable = true; 9896 if (reg_base == 0xffffffff) 9897 return 0; 9898 9899 __bnxt_map_fw_health_reg(bp, reg_base); 9900 return 0; 9901 } 9902 9903 static void bnxt_remap_fw_health_regs(struct bnxt *bp) 9904 { 9905 if (!bp->fw_health) 9906 return; 9907 9908 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { 9909 bp->fw_health->status_reliable = true; 9910 bp->fw_health->resets_reliable = true; 9911 } else { 9912 bnxt_try_map_fw_health_reg(bp); 9913 } 9914 } 9915 9916 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) 9917 { 9918 struct bnxt_fw_health *fw_health = bp->fw_health; 9919 struct hwrm_error_recovery_qcfg_output *resp; 9920 struct hwrm_error_recovery_qcfg_input *req; 9921 int rc, i; 9922 9923 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) 9924 return 0; 9925 9926 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); 9927 if (rc) 9928 return rc; 9929 9930 resp = hwrm_req_hold(bp, req); 9931 rc = hwrm_req_send(bp, req); 9932 if (rc) 9933 goto err_recovery_out; 9934 fw_health->flags = le32_to_cpu(resp->flags); 9935 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && 9936 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { 9937 rc = -EINVAL; 9938 goto err_recovery_out; 9939 } 9940 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); 9941 fw_health->master_func_wait_dsecs = 9942 le32_to_cpu(resp->master_func_wait_period); 9943 fw_health->normal_func_wait_dsecs = 9944 le32_to_cpu(resp->normal_func_wait_period); 9945 fw_health->post_reset_wait_dsecs = 9946 le32_to_cpu(resp->master_func_wait_period_after_reset); 9947 fw_health->post_reset_max_wait_dsecs = 9948 le32_to_cpu(resp->max_bailout_time_after_reset); 9949 fw_health->regs[BNXT_FW_HEALTH_REG] = 9950 le32_to_cpu(resp->fw_health_status_reg); 9951 fw_health->regs[BNXT_FW_HEARTBEAT_REG] = 9952 le32_to_cpu(resp->fw_heartbeat_reg); 9953 fw_health->regs[BNXT_FW_RESET_CNT_REG] = 9954 le32_to_cpu(resp->fw_reset_cnt_reg); 9955 fw_health->regs[BNXT_FW_RESET_INPROG_REG] = 9956 
le32_to_cpu(resp->reset_inprogress_reg); 9957 fw_health->fw_reset_inprog_reg_mask = 9958 le32_to_cpu(resp->reset_inprogress_reg_mask); 9959 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; 9960 if (fw_health->fw_reset_seq_cnt >= 16) { 9961 rc = -EINVAL; 9962 goto err_recovery_out; 9963 } 9964 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { 9965 fw_health->fw_reset_seq_regs[i] = 9966 le32_to_cpu(resp->reset_reg[i]); 9967 fw_health->fw_reset_seq_vals[i] = 9968 le32_to_cpu(resp->reset_reg_val[i]); 9969 fw_health->fw_reset_seq_delay_msec[i] = 9970 resp->delay_after_reset[i]; 9971 } 9972 err_recovery_out: 9973 hwrm_req_drop(bp, req); 9974 if (!rc) 9975 rc = bnxt_map_fw_health_regs(bp); 9976 if (rc) 9977 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; 9978 return rc; 9979 } 9980 9981 static int bnxt_hwrm_func_reset(struct bnxt *bp) 9982 { 9983 struct hwrm_func_reset_input *req; 9984 int rc; 9985 9986 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); 9987 if (rc) 9988 return rc; 9989 9990 req->enables = 0; 9991 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); 9992 return hwrm_req_send(bp, req); 9993 } 9994 9995 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) 9996 { 9997 struct hwrm_nvm_get_dev_info_output nvm_info; 9998 9999 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) 10000 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", 10001 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, 10002 nvm_info.nvm_cfg_ver_upd); 10003 } 10004 10005 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) 10006 { 10007 struct hwrm_queue_qportcfg_output *resp; 10008 struct hwrm_queue_qportcfg_input *req; 10009 u8 i, j, *qptr; 10010 bool no_rdma; 10011 int rc = 0; 10012 10013 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); 10014 if (rc) 10015 return rc; 10016 10017 resp = hwrm_req_hold(bp, req); 10018 rc = hwrm_req_send(bp, req); 10019 if (rc) 10020 goto qportcfg_exit; 10021 10022 if (!resp->max_configurable_queues) { 10023 rc = -EINVAL; 10024 goto qportcfg_exit; 10025 } 10026 bp->max_tc = resp->max_configurable_queues; 10027 bp->max_lltc = resp->max_configurable_lossless_queues; 10028 if (bp->max_tc > BNXT_MAX_QUEUE) 10029 bp->max_tc = BNXT_MAX_QUEUE; 10030 10031 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); 10032 qptr = &resp->queue_id0; 10033 for (i = 0, j = 0; i < bp->max_tc; i++) { 10034 bp->q_info[j].queue_id = *qptr; 10035 bp->q_ids[i] = *qptr++; 10036 bp->q_info[j].queue_profile = *qptr++; 10037 bp->tc_to_qidx[j] = j; 10038 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) || 10039 (no_rdma && BNXT_PF(bp))) 10040 j++; 10041 } 10042 bp->max_q = bp->max_tc; 10043 bp->max_tc = max_t(u8, j, 1); 10044 10045 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) 10046 bp->max_tc = 1; 10047 10048 if (bp->max_lltc > bp->max_tc) 10049 bp->max_lltc = bp->max_tc; 10050 10051 qportcfg_exit: 10052 hwrm_req_drop(bp, req); 10053 return rc; 10054 } 10055 10056 static int bnxt_hwrm_poll(struct bnxt *bp) 10057 { 10058 struct hwrm_ver_get_input *req; 10059 int rc; 10060 10061 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 10062 if (rc) 10063 return rc; 10064 10065 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 10066 req->hwrm_intf_min = HWRM_VERSION_MINOR; 10067 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 10068 10069 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); 10070 rc = hwrm_req_send(bp, req); 10071 return rc; 10072 } 10073 10074 static int bnxt_hwrm_ver_get(struct bnxt *bp) 10075 { 10076 struct hwrm_ver_get_output *resp; 10077 struct hwrm_ver_get_input *req; 10078 u16 fw_maj, fw_min, fw_bld, fw_rsv; 
10079 u32 dev_caps_cfg, hwrm_ver; 10080 int rc, len; 10081 10082 rc = hwrm_req_init(bp, req, HWRM_VER_GET); 10083 if (rc) 10084 return rc; 10085 10086 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 10087 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; 10088 req->hwrm_intf_maj = HWRM_VERSION_MAJOR; 10089 req->hwrm_intf_min = HWRM_VERSION_MINOR; 10090 req->hwrm_intf_upd = HWRM_VERSION_UPDATE; 10091 10092 resp = hwrm_req_hold(bp, req); 10093 rc = hwrm_req_send(bp, req); 10094 if (rc) 10095 goto hwrm_ver_get_exit; 10096 10097 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); 10098 10099 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | 10100 resp->hwrm_intf_min_8b << 8 | 10101 resp->hwrm_intf_upd_8b; 10102 if (resp->hwrm_intf_maj_8b < 1) { 10103 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", 10104 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 10105 resp->hwrm_intf_upd_8b); 10106 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); 10107 } 10108 10109 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | 10110 HWRM_VERSION_UPDATE; 10111 10112 if (bp->hwrm_spec_code > hwrm_ver) 10113 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 10114 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, 10115 HWRM_VERSION_UPDATE); 10116 else 10117 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", 10118 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, 10119 resp->hwrm_intf_upd_8b); 10120 10121 fw_maj = le16_to_cpu(resp->hwrm_fw_major); 10122 if (bp->hwrm_spec_code > 0x10803 && fw_maj) { 10123 fw_min = le16_to_cpu(resp->hwrm_fw_minor); 10124 fw_bld = le16_to_cpu(resp->hwrm_fw_build); 10125 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); 10126 len = FW_VER_STR_LEN; 10127 } else { 10128 fw_maj = resp->hwrm_fw_maj_8b; 10129 fw_min = resp->hwrm_fw_min_8b; 10130 fw_bld = resp->hwrm_fw_bld_8b; 10131 fw_rsv = resp->hwrm_fw_rsvd_8b; 10132 len = BC_HWRM_STR_LEN; 10133 } 10134 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); 10135 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, 10136 fw_rsv); 10137 10138 if (strlen(resp->active_pkg_name)) { 10139 int fw_ver_len = strlen(bp->fw_ver_str); 10140 10141 snprintf(bp->fw_ver_str + fw_ver_len, 10142 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", 10143 resp->active_pkg_name); 10144 bp->fw_cap |= BNXT_FW_CAP_PKG_VER; 10145 } 10146 10147 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); 10148 if (!bp->hwrm_cmd_timeout) 10149 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 10150 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; 10151 if (!bp->hwrm_cmd_max_timeout) 10152 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; 10153 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) 10154 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", 10155 bp->hwrm_cmd_max_timeout / 1000); 10156 10157 if (resp->hwrm_intf_maj_8b >= 1) { 10158 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 10159 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); 10160 } 10161 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) 10162 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; 10163 10164 bp->chip_num = le16_to_cpu(resp->chip_num); 10165 bp->chip_rev = resp->chip_rev; 10166 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && 10167 !resp->chip_metal) 10168 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; 10169 10170 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); 10171 if ((dev_caps_cfg & 
VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && 10172 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) 10173 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; 10174 10175 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) 10176 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; 10177 10178 if (dev_caps_cfg & 10179 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) 10180 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; 10181 10182 if (dev_caps_cfg & 10183 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) 10184 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; 10185 10186 if (dev_caps_cfg & 10187 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) 10188 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; 10189 10190 hwrm_ver_get_exit: 10191 hwrm_req_drop(bp, req); 10192 return rc; 10193 } 10194 10195 int bnxt_hwrm_fw_set_time(struct bnxt *bp) 10196 { 10197 struct hwrm_fw_set_time_input *req; 10198 struct tm tm; 10199 time64_t now = ktime_get_real_seconds(); 10200 int rc; 10201 10202 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || 10203 bp->hwrm_spec_code < 0x10400) 10204 return -EOPNOTSUPP; 10205 10206 time64_to_tm(now, 0, &tm); 10207 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); 10208 if (rc) 10209 return rc; 10210 10211 req->year = cpu_to_le16(1900 + tm.tm_year); 10212 req->month = 1 + tm.tm_mon; 10213 req->day = tm.tm_mday; 10214 req->hour = tm.tm_hour; 10215 req->minute = tm.tm_min; 10216 req->second = tm.tm_sec; 10217 return hwrm_req_send(bp, req); 10218 } 10219 10220 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) 10221 { 10222 u64 sw_tmp; 10223 10224 hw &= mask; 10225 sw_tmp = (*sw & ~mask) | hw; 10226 if (hw < (*sw & mask)) 10227 sw_tmp += mask + 1; 10228 WRITE_ONCE(*sw, sw_tmp); 10229 } 10230 10231 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, 10232 int count, bool ignore_zero) 10233 { 10234 int i; 10235 10236 for (i = 0; i < count; i++) { 10237 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); 10238 10239 if (ignore_zero && !hw) 10240 continue; 10241 10242 if (masks[i] == -1ULL) 10243 sw_stats[i] = hw; 10244 else 10245 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); 10246 } 10247 } 10248 10249 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) 10250 { 10251 if (!stats->hw_stats) 10252 return; 10253 10254 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 10255 stats->hw_masks, stats->len / 8, false); 10256 } 10257 10258 static void bnxt_accumulate_all_stats(struct bnxt *bp) 10259 { 10260 struct bnxt_stats_mem *ring0_stats; 10261 bool ignore_zero = false; 10262 int i; 10263 10264 /* Chip bug. Counter intermittently becomes 0. 
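	 * Zero values are therefore ignored when accumulating per-ring
	 * counters on P5_PLUS chips.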
*/ 10265 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10266 ignore_zero = true; 10267 10268 for (i = 0; i < bp->cp_nr_rings; i++) { 10269 struct bnxt_napi *bnapi = bp->bnapi[i]; 10270 struct bnxt_cp_ring_info *cpr; 10271 struct bnxt_stats_mem *stats; 10272 10273 cpr = &bnapi->cp_ring; 10274 stats = &cpr->stats; 10275 if (!i) 10276 ring0_stats = stats; 10277 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, 10278 ring0_stats->hw_masks, 10279 ring0_stats->len / 8, ignore_zero); 10280 } 10281 if (bp->flags & BNXT_FLAG_PORT_STATS) { 10282 struct bnxt_stats_mem *stats = &bp->port_stats; 10283 __le64 *hw_stats = stats->hw_stats; 10284 u64 *sw_stats = stats->sw_stats; 10285 u64 *masks = stats->hw_masks; 10286 int cnt; 10287 10288 cnt = sizeof(struct rx_port_stats) / 8; 10289 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 10290 10291 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10292 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10293 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 10294 cnt = sizeof(struct tx_port_stats) / 8; 10295 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); 10296 } 10297 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { 10298 bnxt_accumulate_stats(&bp->rx_port_stats_ext); 10299 bnxt_accumulate_stats(&bp->tx_port_stats_ext); 10300 } 10301 } 10302 10303 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) 10304 { 10305 struct hwrm_port_qstats_input *req; 10306 struct bnxt_pf_info *pf = &bp->pf; 10307 int rc; 10308 10309 if (!(bp->flags & BNXT_FLAG_PORT_STATS)) 10310 return 0; 10311 10312 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 10313 return -EOPNOTSUPP; 10314 10315 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); 10316 if (rc) 10317 return rc; 10318 10319 req->flags = flags; 10320 req->port_id = cpu_to_le16(pf->port_id); 10321 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + 10322 BNXT_TX_PORT_STATS_BYTE_OFFSET); 10323 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); 10324 return hwrm_req_send(bp, req); 10325 } 10326 10327 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) 10328 { 10329 struct hwrm_queue_pri2cos_qcfg_output *resp_qc; 10330 struct hwrm_queue_pri2cos_qcfg_input *req_qc; 10331 struct hwrm_port_qstats_ext_output *resp_qs; 10332 struct hwrm_port_qstats_ext_input *req_qs; 10333 struct bnxt_pf_info *pf = &bp->pf; 10334 u32 tx_stat_size; 10335 int rc; 10336 10337 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) 10338 return 0; 10339 10340 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) 10341 return -EOPNOTSUPP; 10342 10343 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); 10344 if (rc) 10345 return rc; 10346 10347 req_qs->flags = flags; 10348 req_qs->port_id = cpu_to_le16(pf->port_id); 10349 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); 10350 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); 10351 tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 
		       sizeof(struct tx_port_stats_ext) : 0;
	req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
	req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
	resp_qs = hwrm_req_hold(bp, req_qs);
	rc = hwrm_req_send(bp, req_qs);
	if (!rc) {
		bp->fw_rx_stats_ext_size =
			le16_to_cpu(resp_qs->rx_stat_size) / 8;
		if (BNXT_FW_MAJ(bp) < 220 &&
		    bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
			bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;

		bp->fw_tx_stats_ext_size = tx_stat_size ?
			le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
	} else {
		bp->fw_rx_stats_ext_size = 0;
		bp->fw_tx_stats_ext_size = 0;
	}
	hwrm_req_drop(bp, req_qs);

	if (flags)
		return rc;

	if (bp->fw_tx_stats_ext_size <=
	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
		bp->pri2cos_valid = 0;
		return rc;
	}

	rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
	if (rc)
		return rc;

	req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);

	resp_qc = hwrm_req_hold(bp, req_qc);
	rc = hwrm_req_send(bp, req_qc);
	if (!rc) {
		u8 *pri2cos;
		int i, j;

		pri2cos = &resp_qc->pri0_cos_queue_id;
		for (i = 0; i < 8; i++) {
			u8 queue_id = pri2cos[i];
			u8 queue_idx;

			/* Per port queue IDs start from 0, 10, 20, etc */
			queue_idx = queue_id % 10;
			if (queue_idx > BNXT_MAX_QUEUE) {
				bp->pri2cos_valid = false;
				hwrm_req_drop(bp, req_qc);
				return rc;
			}
			for (j = 0; j < bp->max_q; j++) {
				if (bp->q_ids[j] == queue_id)
					bp->pri2cos_idx[i] = queue_idx;
			}
		}
		bp->pri2cos_valid = true;
	}
	hwrm_req_drop(bp, req_qc);

	return rc;
}

static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	bnxt_hwrm_tunnel_dst_port_free(bp,
		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	bnxt_hwrm_tunnel_dst_port_free(bp,
		TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	else if (BNXT_NO_FW_ACCESS(bp))
		return 0;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
}

static void bnxt_clear_vnic(struct bnxt *bp)
{
	if (!bp->vnic_info)
		return;

	bnxt_hwrm_clear_vnic_filter(bp);
	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) {
		/* clear all RSS settings before freeing the vnic ctx */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
	}
	/* before freeing the vnic, undo the vnic tpa settings */
	if (bp->flags & BNXT_FLAG_TPA)
		bnxt_set_tpa(bp, false);
	bnxt_hwrm_vnic_free(bp);
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		bnxt_hwrm_vnic_ctx_free(bp);
}

static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
10474 { 10475 bnxt_clear_vnic(bp); 10476 bnxt_hwrm_ring_free(bp, close_path); 10477 bnxt_hwrm_ring_grp_free(bp); 10478 if (irq_re_init) { 10479 bnxt_hwrm_stat_ctx_free(bp); 10480 bnxt_hwrm_free_tunnel_ports(bp); 10481 } 10482 } 10483 10484 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) 10485 { 10486 struct hwrm_func_cfg_input *req; 10487 u8 evb_mode; 10488 int rc; 10489 10490 if (br_mode == BRIDGE_MODE_VEB) 10491 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; 10492 else if (br_mode == BRIDGE_MODE_VEPA) 10493 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; 10494 else 10495 return -EINVAL; 10496 10497 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 10498 if (rc) 10499 return rc; 10500 10501 req->fid = cpu_to_le16(0xffff); 10502 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); 10503 req->evb_mode = evb_mode; 10504 return hwrm_req_send(bp, req); 10505 } 10506 10507 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) 10508 { 10509 struct hwrm_func_cfg_input *req; 10510 int rc; 10511 10512 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) 10513 return 0; 10514 10515 rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); 10516 if (rc) 10517 return rc; 10518 10519 req->fid = cpu_to_le16(0xffff); 10520 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); 10521 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; 10522 if (size == 128) 10523 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; 10524 10525 return hwrm_req_send(bp, req); 10526 } 10527 10528 static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10529 { 10530 int rc; 10531 10532 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) 10533 goto skip_rss_ctx; 10534 10535 /* allocate context for vnic */ 10536 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); 10537 if (rc) { 10538 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 10539 vnic->vnic_id, rc); 10540 goto vnic_setup_err; 10541 } 10542 bp->rsscos_nr_ctxs++; 10543 10544 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10545 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); 10546 if (rc) { 10547 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", 10548 vnic->vnic_id, rc); 10549 goto vnic_setup_err; 10550 } 10551 bp->rsscos_nr_ctxs++; 10552 } 10553 10554 skip_rss_ctx: 10555 /* configure default vnic, ring grp */ 10556 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10557 if (rc) { 10558 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 10559 vnic->vnic_id, rc); 10560 goto vnic_setup_err; 10561 } 10562 10563 /* Enable RSS hashing on vnic */ 10564 rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); 10565 if (rc) { 10566 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", 10567 vnic->vnic_id, rc); 10568 goto vnic_setup_err; 10569 } 10570 10571 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 10572 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 10573 if (rc) { 10574 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 10575 vnic->vnic_id, rc); 10576 } 10577 } 10578 10579 vnic_setup_err: 10580 return rc; 10581 } 10582 10583 int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, 10584 u8 valid) 10585 { 10586 struct hwrm_vnic_update_input *req; 10587 int rc; 10588 10589 rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE); 10590 if (rc) 10591 return rc; 10592 10593 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); 10594 10595 if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID) 10596 req->mru = cpu_to_le16(vnic->mru); 10597 10598 req->enables = cpu_to_le32(valid); 10599 10600 return hwrm_req_send(bp, req); 10601 } 10602 10603 int 
bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10604 { 10605 int rc; 10606 10607 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); 10608 if (rc) { 10609 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 10610 vnic->vnic_id, rc); 10611 return rc; 10612 } 10613 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10614 if (rc) 10615 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", 10616 vnic->vnic_id, rc); 10617 return rc; 10618 } 10619 10620 int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10621 { 10622 int rc, i, nr_ctxs; 10623 10624 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); 10625 for (i = 0; i < nr_ctxs; i++) { 10626 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); 10627 if (rc) { 10628 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", 10629 vnic->vnic_id, i, rc); 10630 break; 10631 } 10632 bp->rsscos_nr_ctxs++; 10633 } 10634 if (i < nr_ctxs) 10635 return -ENOMEM; 10636 10637 rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); 10638 if (rc) 10639 return rc; 10640 10641 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 10642 rc = bnxt_hwrm_vnic_set_hds(bp, vnic); 10643 if (rc) { 10644 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", 10645 vnic->vnic_id, rc); 10646 } 10647 } 10648 return rc; 10649 } 10650 10651 static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) 10652 { 10653 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10654 return __bnxt_setup_vnic_p5(bp, vnic); 10655 else 10656 return __bnxt_setup_vnic(bp, vnic); 10657 } 10658 10659 static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, 10660 struct bnxt_vnic_info *vnic, 10661 u16 start_rx_ring_idx, int rx_rings) 10662 { 10663 int rc; 10664 10665 rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); 10666 if (rc) { 10667 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", 10668 vnic->vnic_id, rc); 10669 return rc; 10670 } 10671 return bnxt_setup_vnic(bp, vnic); 10672 } 10673 10674 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) 10675 { 10676 struct bnxt_vnic_info *vnic; 10677 int i, rc = 0; 10678 10679 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { 10680 vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; 10681 return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); 10682 } 10683 10684 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 10685 return 0; 10686 10687 for (i = 0; i < bp->rx_nr_rings; i++) { 10688 u16 vnic_id = i + 1; 10689 u16 ring_id = i; 10690 10691 if (vnic_id >= bp->nr_vnics) 10692 break; 10693 10694 vnic = &bp->vnic_info[vnic_id]; 10695 vnic->flags |= BNXT_VNIC_RFS_FLAG; 10696 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 10697 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; 10698 if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) 10699 break; 10700 } 10701 return rc; 10702 } 10703 10704 void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, 10705 bool all) 10706 { 10707 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10708 struct bnxt_filter_base *usr_fltr, *tmp; 10709 struct bnxt_ntuple_filter *ntp_fltr; 10710 int i; 10711 10712 if (netif_running(bp->dev)) { 10713 bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); 10714 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { 10715 if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) 10716 bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); 10717 } 10718 } 10719 if (!all) 10720 return; 10721 10722 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { 10723 if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && 10724 usr_fltr->fw_vnic_id == rss_ctx->index) { 10725 ntp_fltr = 
container_of(usr_fltr, 10726 struct bnxt_ntuple_filter, 10727 base); 10728 bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); 10729 bnxt_del_ntp_filter(bp, ntp_fltr); 10730 bnxt_del_one_usr_fltr(bp, usr_fltr); 10731 } 10732 } 10733 10734 if (vnic->rss_table) 10735 dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, 10736 vnic->rss_table, 10737 vnic->rss_table_dma_addr); 10738 bp->num_rss_ctx--; 10739 } 10740 10741 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) 10742 { 10743 bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); 10744 struct ethtool_rxfh_context *ctx; 10745 unsigned long context; 10746 10747 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 10748 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10749 struct bnxt_vnic_info *vnic = &rss_ctx->vnic; 10750 10751 if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || 10752 bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || 10753 __bnxt_setup_vnic_p5(bp, vnic)) { 10754 netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", 10755 rss_ctx->index); 10756 bnxt_del_one_rss_ctx(bp, rss_ctx, true); 10757 ethtool_rxfh_context_lost(bp->dev, rss_ctx->index); 10758 } 10759 } 10760 } 10761 10762 static void bnxt_clear_rss_ctxs(struct bnxt *bp) 10763 { 10764 struct ethtool_rxfh_context *ctx; 10765 unsigned long context; 10766 10767 xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { 10768 struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); 10769 10770 bnxt_del_one_rss_ctx(bp, rss_ctx, false); 10771 } 10772 } 10773 10774 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ 10775 static bool bnxt_promisc_ok(struct bnxt *bp) 10776 { 10777 #ifdef CONFIG_BNXT_SRIOV 10778 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) 10779 return false; 10780 #endif 10781 return true; 10782 } 10783 10784 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) 10785 { 10786 struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; 10787 unsigned int rc = 0; 10788 10789 rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); 10790 if (rc) { 10791 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10792 rc); 10793 return rc; 10794 } 10795 10796 rc = bnxt_hwrm_vnic_cfg(bp, vnic); 10797 if (rc) { 10798 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", 10799 rc); 10800 return rc; 10801 } 10802 return rc; 10803 } 10804 10805 static int bnxt_cfg_rx_mode(struct bnxt *); 10806 static bool bnxt_mc_list_updated(struct bnxt *, u32 *); 10807 10808 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) 10809 { 10810 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 10811 int rc = 0; 10812 unsigned int rx_nr_rings = bp->rx_nr_rings; 10813 10814 if (irq_re_init) { 10815 rc = bnxt_hwrm_stat_ctx_alloc(bp); 10816 if (rc) { 10817 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", 10818 rc); 10819 goto err_out; 10820 } 10821 } 10822 10823 rc = bnxt_hwrm_ring_alloc(bp); 10824 if (rc) { 10825 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); 10826 goto err_out; 10827 } 10828 10829 rc = bnxt_hwrm_ring_grp_alloc(bp); 10830 if (rc) { 10831 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); 10832 goto err_out; 10833 } 10834 10835 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 10836 rx_nr_rings--; 10837 10838 /* default vnic 0 */ 10839 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); 10840 if (rc) { 10841 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); 10842 goto err_out; 10843 } 10844 10845 if (BNXT_VF(bp)) 10846 
bnxt_hwrm_func_qcfg(bp); 10847 10848 rc = bnxt_setup_vnic(bp, vnic); 10849 if (rc) 10850 goto err_out; 10851 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 10852 bnxt_hwrm_update_rss_hash_cfg(bp); 10853 10854 if (bp->flags & BNXT_FLAG_RFS) { 10855 rc = bnxt_alloc_rfs_vnics(bp); 10856 if (rc) 10857 goto err_out; 10858 } 10859 10860 if (bp->flags & BNXT_FLAG_TPA) { 10861 rc = bnxt_set_tpa(bp, true); 10862 if (rc) 10863 goto err_out; 10864 } 10865 10866 if (BNXT_VF(bp)) 10867 bnxt_update_vf_mac(bp); 10868 10869 /* Filter for default vnic 0 */ 10870 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); 10871 if (rc) { 10872 if (BNXT_VF(bp) && rc == -ENODEV) 10873 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); 10874 else 10875 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 10876 goto err_out; 10877 } 10878 vnic->uc_filter_count = 1; 10879 10880 vnic->rx_mask = 0; 10881 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) 10882 goto skip_rx_mask; 10883 10884 if (bp->dev->flags & IFF_BROADCAST) 10885 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 10886 10887 if (bp->dev->flags & IFF_PROMISC) 10888 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 10889 10890 if (bp->dev->flags & IFF_ALLMULTI) { 10891 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 10892 vnic->mc_list_count = 0; 10893 } else if (bp->dev->flags & IFF_MULTICAST) { 10894 u32 mask = 0; 10895 10896 bnxt_mc_list_updated(bp, &mask); 10897 vnic->rx_mask |= mask; 10898 } 10899 10900 rc = bnxt_cfg_rx_mode(bp); 10901 if (rc) 10902 goto err_out; 10903 10904 skip_rx_mask: 10905 rc = bnxt_hwrm_set_coal(bp); 10906 if (rc) 10907 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 10908 rc); 10909 10910 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 10911 rc = bnxt_setup_nitroa0_vnic(bp); 10912 if (rc) 10913 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", 10914 rc); 10915 } 10916 10917 if (BNXT_VF(bp)) { 10918 bnxt_hwrm_func_qcfg(bp); 10919 netdev_update_features(bp->dev); 10920 } 10921 10922 return 0; 10923 10924 err_out: 10925 bnxt_hwrm_resource_free(bp, 0, true); 10926 10927 return rc; 10928 } 10929 10930 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) 10931 { 10932 bnxt_hwrm_resource_free(bp, 1, irq_re_init); 10933 return 0; 10934 } 10935 10936 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) 10937 { 10938 bnxt_init_cp_rings(bp); 10939 bnxt_init_rx_rings(bp); 10940 bnxt_init_tx_rings(bp); 10941 bnxt_init_ring_grps(bp, irq_re_init); 10942 bnxt_init_vnics(bp); 10943 10944 return bnxt_init_chip(bp, irq_re_init); 10945 } 10946 10947 static int bnxt_set_real_num_queues(struct bnxt *bp) 10948 { 10949 int rc; 10950 struct net_device *dev = bp->dev; 10951 10952 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - 10953 bp->tx_nr_rings_xdp); 10954 if (rc) 10955 return rc; 10956 10957 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); 10958 if (rc) 10959 return rc; 10960 10961 #ifdef CONFIG_RFS_ACCEL 10962 if (bp->flags & BNXT_FLAG_RFS) 10963 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); 10964 #endif 10965 10966 return rc; 10967 } 10968 10969 static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 10970 bool shared) 10971 { 10972 int _rx = *rx, _tx = *tx; 10973 10974 if (shared) { 10975 *rx = min_t(int, _rx, max); 10976 *tx = min_t(int, _tx, max); 10977 } else { 10978 if (max < 2) 10979 return -ENOMEM; 10980 10981 while (_rx + _tx > max) { 10982 if (_rx > _tx && _rx > 1) 10983 _rx--; 10984 else if 
(_tx > 1) 10985 _tx--; 10986 } 10987 *rx = _rx; 10988 *tx = _tx; 10989 } 10990 return 0; 10991 } 10992 10993 static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp) 10994 { 10995 return (tx - tx_xdp) / tx_sets + tx_xdp; 10996 } 10997 10998 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 10999 { 11000 int tcs = bp->num_tc; 11001 11002 if (!tcs) 11003 tcs = 1; 11004 return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); 11005 } 11006 11007 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 11008 { 11009 int tcs = bp->num_tc; 11010 11011 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 11012 bp->tx_nr_rings_xdp; 11013 } 11014 11015 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, 11016 bool sh) 11017 { 11018 int tx_cp = bnxt_num_tx_to_cp(bp, *tx); 11019 11020 if (tx_cp != *tx) { 11021 int tx_saved = tx_cp, rc; 11022 11023 rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); 11024 if (rc) 11025 return rc; 11026 if (tx_cp != tx_saved) 11027 *tx = bnxt_num_cp_to_tx(bp, tx_cp); 11028 return 0; 11029 } 11030 return __bnxt_trim_rings(bp, rx, tx, max, sh); 11031 } 11032 11033 static void bnxt_setup_msix(struct bnxt *bp) 11034 { 11035 const int len = sizeof(bp->irq_tbl[0].name); 11036 struct net_device *dev = bp->dev; 11037 int tcs, i; 11038 11039 tcs = bp->num_tc; 11040 if (tcs) { 11041 int i, off, count; 11042 11043 for (i = 0; i < tcs; i++) { 11044 count = bp->tx_nr_rings_per_tc; 11045 off = BNXT_TC_TO_RING_BASE(bp, i); 11046 netdev_set_tc_queue(dev, i, count, off); 11047 } 11048 } 11049 11050 for (i = 0; i < bp->cp_nr_rings; i++) { 11051 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11052 char *attr; 11053 11054 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 11055 attr = "TxRx"; 11056 else if (i < bp->rx_nr_rings) 11057 attr = "rx"; 11058 else 11059 attr = "tx"; 11060 11061 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, 11062 attr, i); 11063 bp->irq_tbl[map_idx].handler = bnxt_msix; 11064 } 11065 } 11066 11067 static int bnxt_init_int_mode(struct bnxt *bp); 11068 11069 static int bnxt_change_msix(struct bnxt *bp, int total) 11070 { 11071 struct msi_map map; 11072 int i; 11073 11074 /* add MSIX to the end if needed */ 11075 for (i = bp->total_irqs; i < total; i++) { 11076 map = pci_msix_alloc_irq_at(bp->pdev, i, NULL); 11077 if (map.index < 0) 11078 return bp->total_irqs; 11079 bp->irq_tbl[i].vector = map.virq; 11080 bp->total_irqs++; 11081 } 11082 11083 /* trim MSIX from the end if needed */ 11084 for (i = bp->total_irqs; i > total; i--) { 11085 map.index = i - 1; 11086 map.virq = bp->irq_tbl[i - 1].vector; 11087 pci_msix_free_irq(bp->pdev, map); 11088 bp->total_irqs--; 11089 } 11090 return bp->total_irqs; 11091 } 11092 11093 static int bnxt_setup_int_mode(struct bnxt *bp) 11094 { 11095 int rc; 11096 11097 if (!bp->irq_tbl) { 11098 rc = bnxt_init_int_mode(bp); 11099 if (rc || !bp->irq_tbl) 11100 return rc ?: -ENODEV; 11101 } 11102 11103 bnxt_setup_msix(bp); 11104 11105 rc = bnxt_set_real_num_queues(bp); 11106 return rc; 11107 } 11108 11109 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) 11110 { 11111 return bp->hw_resc.max_rsscos_ctxs; 11112 } 11113 11114 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) 11115 { 11116 return bp->hw_resc.max_vnics; 11117 } 11118 11119 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) 11120 { 11121 return bp->hw_resc.max_stat_ctxs; 11122 } 11123 11124 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) 11125 { 11126 return bp->hw_resc.max_cp_rings; 11127 } 11128 
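/* Descriptive note (added): max completion rings available to the ethernet
 * driver; on chips that are not P5_PLUS, the MSI-X vectors used by the ULP
 * driver are subtracted.
 */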
11129 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) 11130 { 11131 unsigned int cp = bp->hw_resc.max_cp_rings; 11132 11133 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 11134 cp -= bnxt_get_ulp_msix_num(bp); 11135 11136 return cp; 11137 } 11138 11139 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) 11140 { 11141 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 11142 11143 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11144 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); 11145 11146 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 11147 } 11148 11149 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 11150 { 11151 bp->hw_resc.max_irqs = max_irqs; 11152 } 11153 11154 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) 11155 { 11156 unsigned int cp; 11157 11158 cp = bnxt_get_max_func_cp_rings_for_en(bp); 11159 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11160 return cp - bp->rx_nr_rings - bp->tx_nr_rings; 11161 else 11162 return cp - bp->cp_nr_rings; 11163 } 11164 11165 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) 11166 { 11167 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); 11168 } 11169 11170 static int bnxt_get_avail_msix(struct bnxt *bp, int num) 11171 { 11172 int max_irq = bnxt_get_max_func_irqs(bp); 11173 int total_req = bp->cp_nr_rings + num; 11174 11175 if (max_irq < total_req) { 11176 num = max_irq - bp->cp_nr_rings; 11177 if (num <= 0) 11178 return 0; 11179 } 11180 return num; 11181 } 11182 11183 static int bnxt_get_num_msix(struct bnxt *bp) 11184 { 11185 if (!BNXT_NEW_RM(bp)) 11186 return bnxt_get_max_func_irqs(bp); 11187 11188 return bnxt_nq_rings_in_use(bp); 11189 } 11190 11191 static int bnxt_init_int_mode(struct bnxt *bp) 11192 { 11193 int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size; 11194 11195 total_vecs = bnxt_get_num_msix(bp); 11196 max = bnxt_get_max_func_irqs(bp); 11197 if (total_vecs > max) 11198 total_vecs = max; 11199 11200 if (!total_vecs) 11201 return 0; 11202 11203 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 11204 min = 2; 11205 11206 total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs, 11207 PCI_IRQ_MSIX); 11208 ulp_msix = bnxt_get_ulp_msix_num(bp); 11209 if (total_vecs < 0 || total_vecs < ulp_msix) { 11210 rc = -ENODEV; 11211 goto msix_setup_exit; 11212 } 11213 11214 tbl_size = total_vecs; 11215 if (pci_msix_can_alloc_dyn(bp->pdev)) 11216 tbl_size = max; 11217 bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL); 11218 if (bp->irq_tbl) { 11219 for (i = 0; i < total_vecs; i++) 11220 bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i); 11221 11222 bp->total_irqs = total_vecs; 11223 /* Trim rings based upon num of vectors allocated */ 11224 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, 11225 total_vecs - ulp_msix, min == 1); 11226 if (rc) 11227 goto msix_setup_exit; 11228 11229 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 11230 bp->cp_nr_rings = (min == 1) ? 
11231 max_t(int, tx_cp, bp->rx_nr_rings) : 11232 tx_cp + bp->rx_nr_rings; 11233 11234 } else { 11235 rc = -ENOMEM; 11236 goto msix_setup_exit; 11237 } 11238 return 0; 11239 11240 msix_setup_exit: 11241 netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc); 11242 kfree(bp->irq_tbl); 11243 bp->irq_tbl = NULL; 11244 pci_free_irq_vectors(bp->pdev); 11245 return rc; 11246 } 11247 11248 static void bnxt_clear_int_mode(struct bnxt *bp) 11249 { 11250 pci_free_irq_vectors(bp->pdev); 11251 11252 kfree(bp->irq_tbl); 11253 bp->irq_tbl = NULL; 11254 } 11255 11256 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 11257 { 11258 bool irq_cleared = false; 11259 bool irq_change = false; 11260 int tcs = bp->num_tc; 11261 int irqs_required; 11262 int rc; 11263 11264 if (!bnxt_need_reserve_rings(bp)) 11265 return 0; 11266 11267 if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { 11268 int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); 11269 11270 if (ulp_msix > bp->ulp_num_msix_want) 11271 ulp_msix = bp->ulp_num_msix_want; 11272 irqs_required = ulp_msix + bp->cp_nr_rings; 11273 } else { 11274 irqs_required = bnxt_get_num_msix(bp); 11275 } 11276 11277 if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) { 11278 irq_change = true; 11279 if (!pci_msix_can_alloc_dyn(bp->pdev)) { 11280 bnxt_ulp_irq_stop(bp); 11281 bnxt_clear_int_mode(bp); 11282 irq_cleared = true; 11283 } 11284 } 11285 rc = __bnxt_reserve_rings(bp); 11286 if (irq_cleared) { 11287 if (!rc) 11288 rc = bnxt_init_int_mode(bp); 11289 bnxt_ulp_irq_restart(bp, rc); 11290 } else if (irq_change && !rc) { 11291 if (bnxt_change_msix(bp, irqs_required) != irqs_required) 11292 rc = -ENOSPC; 11293 } 11294 if (rc) { 11295 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); 11296 return rc; 11297 } 11298 if (tcs && (bp->tx_nr_rings_per_tc * tcs != 11299 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 11300 netdev_err(bp->dev, "tx ring reservation failure\n"); 11301 netdev_reset_tc(bp->dev); 11302 bp->num_tc = 0; 11303 if (bp->tx_nr_rings_xdp) 11304 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 11305 else 11306 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 11307 return -ENOMEM; 11308 } 11309 return 0; 11310 } 11311 11312 static void bnxt_tx_queue_stop(struct bnxt *bp, int idx) 11313 { 11314 struct bnxt_tx_ring_info *txr; 11315 struct netdev_queue *txq; 11316 struct bnxt_napi *bnapi; 11317 int i; 11318 11319 bnapi = bp->bnapi[idx]; 11320 bnxt_for_each_napi_tx(i, bnapi, txr) { 11321 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 11322 synchronize_net(); 11323 11324 if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) { 11325 txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 11326 if (txq) { 11327 __netif_tx_lock_bh(txq); 11328 netif_tx_stop_queue(txq); 11329 __netif_tx_unlock_bh(txq); 11330 } 11331 } 11332 11333 if (!bp->tph_mode) 11334 continue; 11335 11336 bnxt_hwrm_tx_ring_free(bp, txr, true); 11337 bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr); 11338 bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index); 11339 bnxt_clear_one_cp_ring(bp, txr->tx_cpr); 11340 } 11341 } 11342 11343 static int bnxt_tx_queue_start(struct bnxt *bp, int idx) 11344 { 11345 struct bnxt_tx_ring_info *txr; 11346 struct netdev_queue *txq; 11347 struct bnxt_napi *bnapi; 11348 int rc, i; 11349 11350 bnapi = bp->bnapi[idx]; 11351 /* All rings have been reserved and previously allocated. 11352 * Reallocating with the same parameters should never fail. 
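	 * With TPH enabled, the TX completion ring and TX ring freed in
	 * bnxt_tx_queue_stop() are re-created below before the queue is
	 * restarted.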
11353 */ 11354 bnxt_for_each_napi_tx(i, bnapi, txr) { 11355 if (!bp->tph_mode) 11356 goto start_tx; 11357 11358 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); 11359 if (rc) 11360 return rc; 11361 11362 rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false); 11363 if (rc) 11364 return rc; 11365 11366 txr->tx_prod = 0; 11367 txr->tx_cons = 0; 11368 txr->tx_hw_cons = 0; 11369 start_tx: 11370 WRITE_ONCE(txr->dev_state, 0); 11371 synchronize_net(); 11372 11373 if (bnapi->flags & BNXT_NAPI_FLAG_XDP) 11374 continue; 11375 11376 txq = netdev_get_tx_queue(bp->dev, txr->txq_index); 11377 if (txq) 11378 netif_tx_start_queue(txq); 11379 } 11380 11381 return 0; 11382 } 11383 11384 static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify, 11385 const cpumask_t *mask) 11386 { 11387 struct bnxt_irq *irq; 11388 u16 tag; 11389 int err; 11390 11391 irq = container_of(notify, struct bnxt_irq, affinity_notify); 11392 11393 if (!irq->bp->tph_mode) 11394 return; 11395 11396 cpumask_copy(irq->cpu_mask, mask); 11397 11398 if (irq->ring_nr >= irq->bp->rx_nr_rings) 11399 return; 11400 11401 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, 11402 cpumask_first(irq->cpu_mask), &tag)) 11403 return; 11404 11405 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag)) 11406 return; 11407 11408 netdev_lock(irq->bp->dev); 11409 if (netif_running(irq->bp->dev)) { 11410 err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr); 11411 if (err) 11412 netdev_err(irq->bp->dev, 11413 "RX queue restart failed: err=%d\n", err); 11414 } 11415 netdev_unlock(irq->bp->dev); 11416 } 11417 11418 static void bnxt_irq_affinity_release(struct kref *ref) 11419 { 11420 struct irq_affinity_notify *notify = 11421 container_of(ref, struct irq_affinity_notify, kref); 11422 struct bnxt_irq *irq; 11423 11424 irq = container_of(notify, struct bnxt_irq, affinity_notify); 11425 11426 if (!irq->bp->tph_mode) 11427 return; 11428 11429 if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) { 11430 netdev_err(irq->bp->dev, 11431 "Setting ST=0 for MSIX entry %d failed\n", 11432 irq->msix_nr); 11433 return; 11434 } 11435 } 11436 11437 static void bnxt_release_irq_notifier(struct bnxt_irq *irq) 11438 { 11439 irq_set_affinity_notifier(irq->vector, NULL); 11440 } 11441 11442 static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq) 11443 { 11444 struct irq_affinity_notify *notify; 11445 11446 irq->bp = bp; 11447 11448 /* Nothing to do if TPH is not enabled */ 11449 if (!bp->tph_mode) 11450 return; 11451 11452 /* Register IRQ affinity notifier */ 11453 notify = &irq->affinity_notify; 11454 notify->irq = irq->vector; 11455 notify->notify = bnxt_irq_affinity_notify; 11456 notify->release = bnxt_irq_affinity_release; 11457 11458 irq_set_affinity_notifier(irq->vector, notify); 11459 } 11460 11461 static void bnxt_free_irq(struct bnxt *bp) 11462 { 11463 struct bnxt_irq *irq; 11464 int i; 11465 11466 #ifdef CONFIG_RFS_ACCEL 11467 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); 11468 bp->dev->rx_cpu_rmap = NULL; 11469 #endif 11470 if (!bp->irq_tbl || !bp->bnapi) 11471 return; 11472 11473 for (i = 0; i < bp->cp_nr_rings; i++) { 11474 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11475 11476 irq = &bp->irq_tbl[map_idx]; 11477 if (irq->requested) { 11478 if (irq->have_cpumask) { 11479 irq_update_affinity_hint(irq->vector, NULL); 11480 free_cpumask_var(irq->cpu_mask); 11481 irq->have_cpumask = 0; 11482 } 11483 11484 bnxt_release_irq_notifier(irq); 11485 11486 free_irq(irq->vector, bp->bnapi[i]); 11487 } 11488 11489 irq->requested = 0; 
11490 } 11491 11492 /* Disable TPH support */ 11493 pcie_disable_tph(bp->pdev); 11494 bp->tph_mode = 0; 11495 } 11496 11497 static int bnxt_request_irq(struct bnxt *bp) 11498 { 11499 int i, j, rc = 0; 11500 unsigned long flags = 0; 11501 #ifdef CONFIG_RFS_ACCEL 11502 struct cpu_rmap *rmap; 11503 #endif 11504 11505 rc = bnxt_setup_int_mode(bp); 11506 if (rc) { 11507 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", 11508 rc); 11509 return rc; 11510 } 11511 #ifdef CONFIG_RFS_ACCEL 11512 rmap = bp->dev->rx_cpu_rmap; 11513 #endif 11514 11515 /* Enable TPH support as part of IRQ request */ 11516 rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE); 11517 if (!rc) 11518 bp->tph_mode = PCI_TPH_ST_IV_MODE; 11519 11520 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { 11521 int map_idx = bnxt_cp_num_to_irq_num(bp, i); 11522 struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; 11523 11524 #ifdef CONFIG_RFS_ACCEL 11525 if (rmap && bp->bnapi[i]->rx_ring) { 11526 rc = irq_cpu_rmap_add(rmap, irq->vector); 11527 if (rc) 11528 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", 11529 j); 11530 j++; 11531 } 11532 #endif 11533 rc = request_irq(irq->vector, irq->handler, flags, irq->name, 11534 bp->bnapi[i]); 11535 if (rc) 11536 break; 11537 11538 netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector); 11539 irq->requested = 1; 11540 11541 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { 11542 int numa_node = dev_to_node(&bp->pdev->dev); 11543 u16 tag; 11544 11545 irq->have_cpumask = 1; 11546 irq->msix_nr = map_idx; 11547 irq->ring_nr = i; 11548 cpumask_set_cpu(cpumask_local_spread(i, numa_node), 11549 irq->cpu_mask); 11550 rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask); 11551 if (rc) { 11552 netdev_warn(bp->dev, 11553 "Update affinity hint failed, IRQ = %d\n", 11554 irq->vector); 11555 break; 11556 } 11557 11558 bnxt_register_irq_notifier(bp, irq); 11559 11560 /* Init ST table entry */ 11561 if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, 11562 cpumask_first(irq->cpu_mask), 11563 &tag)) 11564 continue; 11565 11566 pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag); 11567 } 11568 } 11569 return rc; 11570 } 11571 11572 static void bnxt_del_napi(struct bnxt *bp) 11573 { 11574 int i; 11575 11576 if (!bp->bnapi) 11577 return; 11578 11579 for (i = 0; i < bp->rx_nr_rings; i++) 11580 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_RX, NULL); 11581 for (i = 0; i < bp->tx_nr_rings - bp->tx_nr_rings_xdp; i++) 11582 netif_queue_set_napi(bp->dev, i, NETDEV_QUEUE_TYPE_TX, NULL); 11583 11584 for (i = 0; i < bp->cp_nr_rings; i++) { 11585 struct bnxt_napi *bnapi = bp->bnapi[i]; 11586 11587 __netif_napi_del_locked(&bnapi->napi); 11588 } 11589 /* We called __netif_napi_del_locked(), we need 11590 * to respect an RCU grace period before freeing napi structures. 
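 * The synchronize_net() call below provides that grace period.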
11591 */ 11592 synchronize_net(); 11593 } 11594 11595 static void bnxt_init_napi(struct bnxt *bp) 11596 { 11597 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; 11598 unsigned int cp_nr_rings = bp->cp_nr_rings; 11599 struct bnxt_napi *bnapi; 11600 int i; 11601 11602 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 11603 poll_fn = bnxt_poll_p5; 11604 else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 11605 cp_nr_rings--; 11606 11607 set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 11608 11609 for (i = 0; i < cp_nr_rings; i++) { 11610 bnapi = bp->bnapi[i]; 11611 netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn, 11612 bnapi->index); 11613 } 11614 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 11615 bnapi = bp->bnapi[cp_nr_rings]; 11616 netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); 11617 } 11618 } 11619 11620 static void bnxt_disable_napi(struct bnxt *bp) 11621 { 11622 int i; 11623 11624 if (!bp->bnapi || 11625 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) 11626 return; 11627 11628 for (i = 0; i < bp->cp_nr_rings; i++) { 11629 struct bnxt_napi *bnapi = bp->bnapi[i]; 11630 struct bnxt_cp_ring_info *cpr; 11631 11632 cpr = &bnapi->cp_ring; 11633 if (bnapi->tx_fault) 11634 cpr->sw_stats->tx.tx_resets++; 11635 if (bnapi->in_reset) 11636 cpr->sw_stats->rx.rx_resets++; 11637 napi_disable_locked(&bnapi->napi); 11638 } 11639 } 11640 11641 static void bnxt_enable_napi(struct bnxt *bp) 11642 { 11643 int i; 11644 11645 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); 11646 for (i = 0; i < bp->cp_nr_rings; i++) { 11647 struct bnxt_napi *bnapi = bp->bnapi[i]; 11648 struct bnxt_cp_ring_info *cpr; 11649 11650 bnapi->tx_fault = 0; 11651 11652 cpr = &bnapi->cp_ring; 11653 bnapi->in_reset = false; 11654 11655 if (bnapi->rx_ring) { 11656 INIT_WORK(&cpr->dim.work, bnxt_dim_work); 11657 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 11658 } 11659 napi_enable_locked(&bnapi->napi); 11660 } 11661 } 11662 11663 void bnxt_tx_disable(struct bnxt *bp) 11664 { 11665 int i; 11666 struct bnxt_tx_ring_info *txr; 11667 11668 if (bp->tx_ring) { 11669 for (i = 0; i < bp->tx_nr_rings; i++) { 11670 txr = &bp->tx_ring[i]; 11671 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); 11672 } 11673 } 11674 /* Make sure napi polls see @dev_state change */ 11675 synchronize_net(); 11676 /* Drop carrier first to prevent TX timeout */ 11677 netif_carrier_off(bp->dev); 11678 /* Stop all TX queues */ 11679 netif_tx_disable(bp->dev); 11680 } 11681 11682 void bnxt_tx_enable(struct bnxt *bp) 11683 { 11684 int i; 11685 struct bnxt_tx_ring_info *txr; 11686 11687 for (i = 0; i < bp->tx_nr_rings; i++) { 11688 txr = &bp->tx_ring[i]; 11689 WRITE_ONCE(txr->dev_state, 0); 11690 } 11691 /* Make sure napi polls see @dev_state change */ 11692 synchronize_net(); 11693 netif_tx_wake_all_queues(bp->dev); 11694 if (BNXT_LINK_IS_UP(bp)) 11695 netif_carrier_on(bp->dev); 11696 } 11697 11698 static char *bnxt_report_fec(struct bnxt_link_info *link_info) 11699 { 11700 u8 active_fec = link_info->active_fec_sig_mode & 11701 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; 11702 11703 switch (active_fec) { 11704 default: 11705 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: 11706 return "None"; 11707 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: 11708 return "Clause 74 BaseR"; 11709 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: 11710 return "Clause 91 RS(528,514)"; 11711 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: 11712 return "Clause 91 RS544_1XN"; 11713 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: 11714 return 
"Clause 91 RS(544,514)"; 11715 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: 11716 return "Clause 91 RS272_1XN"; 11717 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: 11718 return "Clause 91 RS(272,257)"; 11719 } 11720 } 11721 11722 void bnxt_report_link(struct bnxt *bp) 11723 { 11724 if (BNXT_LINK_IS_UP(bp)) { 11725 const char *signal = ""; 11726 const char *flow_ctrl; 11727 const char *duplex; 11728 u32 speed; 11729 u16 fec; 11730 11731 netif_carrier_on(bp->dev); 11732 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); 11733 if (speed == SPEED_UNKNOWN) { 11734 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); 11735 return; 11736 } 11737 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) 11738 duplex = "full"; 11739 else 11740 duplex = "half"; 11741 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) 11742 flow_ctrl = "ON - receive & transmit"; 11743 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) 11744 flow_ctrl = "ON - transmit"; 11745 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) 11746 flow_ctrl = "ON - receive"; 11747 else 11748 flow_ctrl = "none"; 11749 if (bp->link_info.phy_qcfg_resp.option_flags & 11750 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { 11751 u8 sig_mode = bp->link_info.active_fec_sig_mode & 11752 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; 11753 switch (sig_mode) { 11754 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: 11755 signal = "(NRZ) "; 11756 break; 11757 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: 11758 signal = "(PAM4 56Gbps) "; 11759 break; 11760 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: 11761 signal = "(PAM4 112Gbps) "; 11762 break; 11763 default: 11764 break; 11765 } 11766 } 11767 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", 11768 speed, signal, duplex, flow_ctrl); 11769 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) 11770 netdev_info(bp->dev, "EEE is %s\n", 11771 bp->eee.eee_active ? "active" : 11772 "not active"); 11773 fec = bp->link_info.fec_cfg; 11774 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) 11775 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", 11776 (fec & BNXT_FEC_AUTONEG) ? 
"on" : "off", 11777 bnxt_report_fec(&bp->link_info)); 11778 } else { 11779 netif_carrier_off(bp->dev); 11780 netdev_err(bp->dev, "NIC Link is Down\n"); 11781 } 11782 } 11783 11784 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) 11785 { 11786 if (!resp->supported_speeds_auto_mode && 11787 !resp->supported_speeds_force_mode && 11788 !resp->supported_pam4_speeds_auto_mode && 11789 !resp->supported_pam4_speeds_force_mode && 11790 !resp->supported_speeds2_auto_mode && 11791 !resp->supported_speeds2_force_mode) 11792 return true; 11793 return false; 11794 } 11795 11796 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) 11797 { 11798 struct bnxt_link_info *link_info = &bp->link_info; 11799 struct hwrm_port_phy_qcaps_output *resp; 11800 struct hwrm_port_phy_qcaps_input *req; 11801 int rc = 0; 11802 11803 if (bp->hwrm_spec_code < 0x10201) 11804 return 0; 11805 11806 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); 11807 if (rc) 11808 return rc; 11809 11810 resp = hwrm_req_hold(bp, req); 11811 rc = hwrm_req_send(bp, req); 11812 if (rc) 11813 goto hwrm_phy_qcaps_exit; 11814 11815 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); 11816 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { 11817 struct ethtool_keee *eee = &bp->eee; 11818 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); 11819 11820 _bnxt_fw_to_linkmode(eee->supported, fw_speeds); 11821 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & 11822 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; 11823 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 11824 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 11825 } 11826 11827 if (bp->hwrm_spec_code >= 0x10a01) { 11828 if (bnxt_phy_qcaps_no_speed(resp)) { 11829 link_info->phy_state = BNXT_PHY_STATE_DISABLED; 11830 netdev_warn(bp->dev, "Ethernet link disabled\n"); 11831 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { 11832 link_info->phy_state = BNXT_PHY_STATE_ENABLED; 11833 netdev_info(bp->dev, "Ethernet link enabled\n"); 11834 /* Phy re-enabled, reprobe the speeds */ 11835 link_info->support_auto_speeds = 0; 11836 link_info->support_pam4_auto_speeds = 0; 11837 link_info->support_auto_speeds2 = 0; 11838 } 11839 } 11840 if (resp->supported_speeds_auto_mode) 11841 link_info->support_auto_speeds = 11842 le16_to_cpu(resp->supported_speeds_auto_mode); 11843 if (resp->supported_pam4_speeds_auto_mode) 11844 link_info->support_pam4_auto_speeds = 11845 le16_to_cpu(resp->supported_pam4_speeds_auto_mode); 11846 if (resp->supported_speeds2_auto_mode) 11847 link_info->support_auto_speeds2 = 11848 le16_to_cpu(resp->supported_speeds2_auto_mode); 11849 11850 bp->port_count = resp->port_cnt; 11851 11852 hwrm_phy_qcaps_exit: 11853 hwrm_req_drop(bp, req); 11854 return rc; 11855 } 11856 11857 static void bnxt_hwrm_mac_qcaps(struct bnxt *bp) 11858 { 11859 struct hwrm_port_mac_qcaps_output *resp; 11860 struct hwrm_port_mac_qcaps_input *req; 11861 int rc; 11862 11863 if (bp->hwrm_spec_code < 0x10a03) 11864 return; 11865 11866 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS); 11867 if (rc) 11868 return; 11869 11870 resp = hwrm_req_hold(bp, req); 11871 rc = hwrm_req_send_silent(bp, req); 11872 if (!rc) 11873 bp->mac_flags = resp->flags; 11874 hwrm_req_drop(bp, req); 11875 } 11876 11877 static bool bnxt_support_dropped(u16 advertising, u16 supported) 11878 { 11879 u16 diff = advertising ^ supported; 11880 11881 return ((supported | diff) != supported); 11882 } 11883 11884 static bool bnxt_support_speed_dropped(struct bnxt_link_info 
*link_info) 11885 { 11886 struct bnxt *bp = container_of(link_info, struct bnxt, link_info); 11887 11888 /* Check if any advertised speeds are no longer supported. The caller 11889 * holds the link_lock mutex, so we can modify link_info settings. 11890 */ 11891 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 11892 if (bnxt_support_dropped(link_info->advertising, 11893 link_info->support_auto_speeds2)) { 11894 link_info->advertising = link_info->support_auto_speeds2; 11895 return true; 11896 } 11897 return false; 11898 } 11899 if (bnxt_support_dropped(link_info->advertising, 11900 link_info->support_auto_speeds)) { 11901 link_info->advertising = link_info->support_auto_speeds; 11902 return true; 11903 } 11904 if (bnxt_support_dropped(link_info->advertising_pam4, 11905 link_info->support_pam4_auto_speeds)) { 11906 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; 11907 return true; 11908 } 11909 return false; 11910 } 11911 11912 int bnxt_update_link(struct bnxt *bp, bool chng_link_state) 11913 { 11914 struct bnxt_link_info *link_info = &bp->link_info; 11915 struct hwrm_port_phy_qcfg_output *resp; 11916 struct hwrm_port_phy_qcfg_input *req; 11917 u8 link_state = link_info->link_state; 11918 bool support_changed; 11919 int rc; 11920 11921 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); 11922 if (rc) 11923 return rc; 11924 11925 resp = hwrm_req_hold(bp, req); 11926 rc = hwrm_req_send(bp, req); 11927 if (rc) { 11928 hwrm_req_drop(bp, req); 11929 if (BNXT_VF(bp) && rc == -ENODEV) { 11930 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); 11931 rc = 0; 11932 } 11933 return rc; 11934 } 11935 11936 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); 11937 link_info->phy_link_status = resp->link; 11938 link_info->duplex = resp->duplex_cfg; 11939 if (bp->hwrm_spec_code >= 0x10800) 11940 link_info->duplex = resp->duplex_state; 11941 link_info->pause = resp->pause; 11942 link_info->auto_mode = resp->auto_mode; 11943 link_info->auto_pause_setting = resp->auto_pause; 11944 link_info->lp_pause = resp->link_partner_adv_pause; 11945 link_info->force_pause_setting = resp->force_pause; 11946 link_info->duplex_setting = resp->duplex_cfg; 11947 if (link_info->phy_link_status == BNXT_LINK_LINK) { 11948 link_info->link_speed = le16_to_cpu(resp->link_speed); 11949 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) 11950 link_info->active_lanes = resp->active_lanes; 11951 } else { 11952 link_info->link_speed = 0; 11953 link_info->active_lanes = 0; 11954 } 11955 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); 11956 link_info->force_pam4_link_speed = 11957 le16_to_cpu(resp->force_pam4_link_speed); 11958 link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); 11959 link_info->support_speeds = le16_to_cpu(resp->support_speeds); 11960 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); 11961 link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); 11962 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); 11963 link_info->auto_pam4_link_speeds = 11964 le16_to_cpu(resp->auto_pam4_link_speed_mask); 11965 link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); 11966 link_info->lp_auto_link_speeds = 11967 le16_to_cpu(resp->link_partner_adv_speeds); 11968 link_info->lp_auto_pam4_link_speeds = 11969 resp->link_partner_pam4_adv_speeds; 11970 link_info->preemphasis = le32_to_cpu(resp->preemphasis); 11971 link_info->phy_ver[0] = resp->phy_maj; 11972 link_info->phy_ver[1] = resp->phy_min; 11973 
link_info->phy_ver[2] = resp->phy_bld; 11974 link_info->media_type = resp->media_type; 11975 link_info->phy_type = resp->phy_type; 11976 link_info->transceiver = resp->xcvr_pkg_type; 11977 link_info->phy_addr = resp->eee_config_phy_addr & 11978 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; 11979 link_info->module_status = resp->module_status; 11980 11981 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { 11982 struct ethtool_keee *eee = &bp->eee; 11983 u16 fw_speeds; 11984 11985 eee->eee_active = 0; 11986 if (resp->eee_config_phy_addr & 11987 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { 11988 eee->eee_active = 1; 11989 fw_speeds = le16_to_cpu( 11990 resp->link_partner_adv_eee_link_speed_mask); 11991 _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds); 11992 } 11993 11994 /* Pull initial EEE config */ 11995 if (!chng_link_state) { 11996 if (resp->eee_config_phy_addr & 11997 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) 11998 eee->eee_enabled = 1; 11999 12000 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); 12001 _bnxt_fw_to_linkmode(eee->advertised, fw_speeds); 12002 12003 if (resp->eee_config_phy_addr & 12004 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { 12005 __le32 tmr; 12006 12007 eee->tx_lpi_enabled = 1; 12008 tmr = resp->xcvr_identifier_type_tx_lpi_timer; 12009 eee->tx_lpi_timer = le32_to_cpu(tmr) & 12010 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; 12011 } 12012 } 12013 } 12014 12015 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; 12016 if (bp->hwrm_spec_code >= 0x10504) { 12017 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); 12018 link_info->active_fec_sig_mode = resp->active_fec_signal_mode; 12019 } 12020 /* TODO: need to add more logic to report VF link */ 12021 if (chng_link_state) { 12022 if (link_info->phy_link_status == BNXT_LINK_LINK) 12023 link_info->link_state = BNXT_LINK_STATE_UP; 12024 else 12025 link_info->link_state = BNXT_LINK_STATE_DOWN; 12026 if (link_state != link_info->link_state) 12027 bnxt_report_link(bp); 12028 } else { 12029 /* always link down if not require to update link state */ 12030 link_info->link_state = BNXT_LINK_STATE_DOWN; 12031 } 12032 hwrm_req_drop(bp, req); 12033 12034 if (!BNXT_PHY_CFG_ABLE(bp)) 12035 return 0; 12036 12037 support_changed = bnxt_support_speed_dropped(link_info); 12038 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) 12039 bnxt_hwrm_set_link_setting(bp, true, false); 12040 return 0; 12041 } 12042 12043 static void bnxt_get_port_module_status(struct bnxt *bp) 12044 { 12045 struct bnxt_link_info *link_info = &bp->link_info; 12046 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; 12047 u8 module_status; 12048 12049 if (bnxt_update_link(bp, true)) 12050 return; 12051 12052 module_status = link_info->module_status; 12053 switch (module_status) { 12054 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: 12055 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: 12056 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: 12057 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", 12058 bp->pf.port_id); 12059 if (bp->hwrm_spec_code >= 0x10201) { 12060 netdev_warn(bp->dev, "Module part number %s\n", 12061 resp->phy_vendor_partnumber); 12062 } 12063 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) 12064 netdev_warn(bp->dev, "TX is disabled\n"); 12065 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) 12066 netdev_warn(bp->dev, "SFP+ module is shutdown\n"); 12067 } 12068 } 12069 12070 static void 12071 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 
12072 { 12073 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { 12074 if (bp->hwrm_spec_code >= 0x10201) 12075 req->auto_pause = 12076 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; 12077 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 12078 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; 12079 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 12080 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; 12081 req->enables |= 12082 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 12083 } else { 12084 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) 12085 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; 12086 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) 12087 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; 12088 req->enables |= 12089 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); 12090 if (bp->hwrm_spec_code >= 0x10201) { 12091 req->auto_pause = req->force_pause; 12092 req->enables |= cpu_to_le32( 12093 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); 12094 } 12095 } 12096 } 12097 12098 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) 12099 { 12100 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { 12101 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; 12102 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 12103 req->enables |= 12104 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); 12105 req->auto_link_speeds2_mask = cpu_to_le16(bp->link_info.advertising); 12106 } else if (bp->link_info.advertising) { 12107 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); 12108 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); 12109 } 12110 if (bp->link_info.advertising_pam4) { 12111 req->enables |= 12112 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); 12113 req->auto_link_pam4_speed_mask = 12114 cpu_to_le16(bp->link_info.advertising_pam4); 12115 } 12116 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); 12117 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); 12118 } else { 12119 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); 12120 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { 12121 req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); 12122 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2); 12123 netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", 12124 (u32)bp->link_info.req_link_speed); 12125 } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { 12126 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 12127 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); 12128 } else { 12129 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); 12130 } 12131 } 12132 12133 /* tell chimp that the setting takes effect immediately */ 12134 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); 12135 } 12136 12137 int bnxt_hwrm_set_pause(struct bnxt *bp) 12138 { 12139 struct hwrm_port_phy_cfg_input *req; 12140 int rc; 12141 12142 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 12143 if (rc) 12144 return rc; 12145 12146 bnxt_hwrm_set_pause_common(bp, req); 12147 12148 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 12149 bp->link_info.force_link_chng) 12150 bnxt_hwrm_set_link_common(bp, req); 12151 12152 rc = hwrm_req_send(bp, req); 12153 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { 12154 /* since changing of pause setting doesn't trigger any link 12155 * change event, the driver needs to update the 
current pause 12156 * result upon successful return of the phy_cfg command 12157 */ 12158 bp->link_info.pause = 12159 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; 12160 bp->link_info.auto_pause_setting = 0; 12161 if (!bp->link_info.force_link_chng) 12162 bnxt_report_link(bp); 12163 } 12164 bp->link_info.force_link_chng = false; 12165 return rc; 12166 } 12167 12168 static void bnxt_hwrm_set_eee(struct bnxt *bp, 12169 struct hwrm_port_phy_cfg_input *req) 12170 { 12171 struct ethtool_keee *eee = &bp->eee; 12172 12173 if (eee->eee_enabled) { 12174 u16 eee_speeds; 12175 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; 12176 12177 if (eee->tx_lpi_enabled) 12178 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; 12179 else 12180 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; 12181 12182 req->flags |= cpu_to_le32(flags); 12183 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); 12184 req->eee_link_speed_mask = cpu_to_le16(eee_speeds); 12185 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); 12186 } else { 12187 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); 12188 } 12189 } 12190 12191 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) 12192 { 12193 struct hwrm_port_phy_cfg_input *req; 12194 int rc; 12195 12196 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 12197 if (rc) 12198 return rc; 12199 12200 if (set_pause) 12201 bnxt_hwrm_set_pause_common(bp, req); 12202 12203 bnxt_hwrm_set_link_common(bp, req); 12204 12205 if (set_eee) 12206 bnxt_hwrm_set_eee(bp, req); 12207 return hwrm_req_send(bp, req); 12208 } 12209 12210 static int bnxt_hwrm_shutdown_link(struct bnxt *bp) 12211 { 12212 struct hwrm_port_phy_cfg_input *req; 12213 int rc; 12214 12215 if (!BNXT_SINGLE_PF(bp)) 12216 return 0; 12217 12218 if (pci_num_vf(bp->pdev) && 12219 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) 12220 return 0; 12221 12222 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); 12223 if (rc) 12224 return rc; 12225 12226 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); 12227 rc = hwrm_req_send(bp, req); 12228 if (!rc) { 12229 mutex_lock(&bp->link_lock); 12230 /* The device is not obliged to bring the link down in certain 12231 * scenarios, even when forced. Setting the state unknown is consistent with 12232 * driver startup and will force link state to be reported 12233 * during subsequent open based on PORT_PHY_QCFG.
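 * (bnxt_update_link() performs that PORT_PHY_QCFG query and reports the new link state.)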
12234 */ 12235 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; 12236 mutex_unlock(&bp->link_lock); 12237 } 12238 return rc; 12239 } 12240 12241 static int bnxt_fw_reset_via_optee(struct bnxt *bp) 12242 { 12243 #ifdef CONFIG_TEE_BNXT_FW 12244 int rc = tee_bnxt_fw_load(); 12245 12246 if (rc) 12247 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); 12248 12249 return rc; 12250 #else 12251 netdev_err(bp->dev, "OP-TEE not supported\n"); 12252 return -ENODEV; 12253 #endif 12254 } 12255 12256 static int bnxt_try_recover_fw(struct bnxt *bp) 12257 { 12258 if (bp->fw_health && bp->fw_health->status_reliable) { 12259 int retry = 0, rc; 12260 u32 sts; 12261 12262 do { 12263 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 12264 rc = bnxt_hwrm_poll(bp); 12265 if (!BNXT_FW_IS_BOOTING(sts) && 12266 !BNXT_FW_IS_RECOVERING(sts)) 12267 break; 12268 retry++; 12269 } while (rc == -EBUSY && retry < BNXT_FW_RETRY); 12270 12271 if (!BNXT_FW_IS_HEALTHY(sts)) { 12272 netdev_err(bp->dev, 12273 "Firmware not responding, status: 0x%x\n", 12274 sts); 12275 rc = -ENODEV; 12276 } 12277 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { 12278 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); 12279 return bnxt_fw_reset_via_optee(bp); 12280 } 12281 return rc; 12282 } 12283 12284 return -ENODEV; 12285 } 12286 12287 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) 12288 { 12289 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 12290 12291 if (!BNXT_NEW_RM(bp)) 12292 return; /* no resource reservations required */ 12293 12294 hw_resc->resv_cp_rings = 0; 12295 hw_resc->resv_stat_ctxs = 0; 12296 hw_resc->resv_irqs = 0; 12297 hw_resc->resv_tx_rings = 0; 12298 hw_resc->resv_rx_rings = 0; 12299 hw_resc->resv_hw_ring_grps = 0; 12300 hw_resc->resv_vnics = 0; 12301 hw_resc->resv_rsscos_ctxs = 0; 12302 if (!fw_reset) { 12303 bp->tx_nr_rings = 0; 12304 bp->rx_nr_rings = 0; 12305 } 12306 } 12307 12308 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) 12309 { 12310 int rc; 12311 12312 if (!BNXT_NEW_RM(bp)) 12313 return 0; /* no resource reservations required */ 12314 12315 rc = bnxt_hwrm_func_resc_qcaps(bp, true); 12316 if (rc) 12317 netdev_err(bp->dev, "resc_qcaps failed\n"); 12318 12319 bnxt_clear_reservations(bp, fw_reset); 12320 12321 return rc; 12322 } 12323 12324 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) 12325 { 12326 struct hwrm_func_drv_if_change_output *resp; 12327 struct hwrm_func_drv_if_change_input *req; 12328 bool resc_reinit = false; 12329 bool caps_change = false; 12330 int rc, retry = 0; 12331 bool fw_reset; 12332 u32 flags = 0; 12333 12334 fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT); 12335 bp->fw_reset_state = 0; 12336 12337 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) 12338 return 0; 12339 12340 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); 12341 if (rc) 12342 return rc; 12343 12344 if (up) 12345 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); 12346 resp = hwrm_req_hold(bp, req); 12347 12348 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); 12349 while (retry < BNXT_FW_IF_RETRY) { 12350 rc = hwrm_req_send(bp, req); 12351 if (rc != -EAGAIN) 12352 break; 12353 12354 msleep(50); 12355 retry++; 12356 } 12357 12358 if (rc == -EAGAIN) { 12359 hwrm_req_drop(bp, req); 12360 return rc; 12361 } else if (!rc) { 12362 flags = le32_to_cpu(resp->flags); 12363 } else if (up) { 12364 rc = bnxt_try_recover_fw(bp); 12365 fw_reset = true; 12366 } 12367 hwrm_req_drop(bp, req); 12368 if (rc) 12369 return rc; 12370 12371 if (!up) { 12372 
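/* DOWN transition: only invalidate the cached firmware health register mapping; it will be re-mapped when the function is brought back up. */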
bnxt_inv_fw_health_reg(bp); 12373 return 0; 12374 } 12375 12376 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) 12377 resc_reinit = true; 12378 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || 12379 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) 12380 fw_reset = true; 12381 else 12382 bnxt_remap_fw_health_regs(bp); 12383 12384 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { 12385 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); 12386 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12387 return -ENODEV; 12388 } 12389 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE) 12390 caps_change = true; 12391 12392 if (resc_reinit || fw_reset || caps_change) { 12393 if (fw_reset || caps_change) { 12394 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12395 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12396 bnxt_ulp_irq_stop(bp); 12397 bnxt_free_ctx_mem(bp, false); 12398 bnxt_dcb_free(bp); 12399 rc = bnxt_fw_init_one(bp); 12400 if (rc) { 12401 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12402 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12403 return rc; 12404 } 12405 /* IRQ will be initialized later in bnxt_request_irq()*/ 12406 bnxt_clear_int_mode(bp); 12407 } 12408 rc = bnxt_cancel_reservations(bp, fw_reset); 12409 } 12410 return rc; 12411 } 12412 12413 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) 12414 { 12415 struct hwrm_port_led_qcaps_output *resp; 12416 struct hwrm_port_led_qcaps_input *req; 12417 struct bnxt_pf_info *pf = &bp->pf; 12418 int rc; 12419 12420 bp->num_leds = 0; 12421 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) 12422 return 0; 12423 12424 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); 12425 if (rc) 12426 return rc; 12427 12428 req->port_id = cpu_to_le16(pf->port_id); 12429 resp = hwrm_req_hold(bp, req); 12430 rc = hwrm_req_send(bp, req); 12431 if (rc) { 12432 hwrm_req_drop(bp, req); 12433 return rc; 12434 } 12435 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { 12436 int i; 12437 12438 bp->num_leds = resp->num_leds; 12439 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * 12440 bp->num_leds); 12441 for (i = 0; i < bp->num_leds; i++) { 12442 struct bnxt_led_info *led = &bp->leds[i]; 12443 __le16 caps = led->led_state_caps; 12444 12445 if (!led->led_group_id || 12446 !BNXT_LED_ALT_BLINK_CAP(caps)) { 12447 bp->num_leds = 0; 12448 break; 12449 } 12450 } 12451 } 12452 hwrm_req_drop(bp, req); 12453 return 0; 12454 } 12455 12456 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) 12457 { 12458 struct hwrm_wol_filter_alloc_output *resp; 12459 struct hwrm_wol_filter_alloc_input *req; 12460 int rc; 12461 12462 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); 12463 if (rc) 12464 return rc; 12465 12466 req->port_id = cpu_to_le16(bp->pf.port_id); 12467 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; 12468 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); 12469 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); 12470 12471 resp = hwrm_req_hold(bp, req); 12472 rc = hwrm_req_send(bp, req); 12473 if (!rc) 12474 bp->wol_filter_id = resp->wol_filter_id; 12475 hwrm_req_drop(bp, req); 12476 return rc; 12477 } 12478 12479 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) 12480 { 12481 struct hwrm_wol_filter_free_input *req; 12482 int rc; 12483 12484 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); 12485 if (rc) 12486 return rc; 12487 12488 req->port_id = cpu_to_le16(bp->pf.port_id); 12489 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); 12490 req->wol_filter_id = 
bp->wol_filter_id; 12491 12492 return hwrm_req_send(bp, req); 12493 } 12494 12495 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) 12496 { 12497 struct hwrm_wol_filter_qcfg_output *resp; 12498 struct hwrm_wol_filter_qcfg_input *req; 12499 u16 next_handle = 0; 12500 int rc; 12501 12502 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); 12503 if (rc) 12504 return rc; 12505 12506 req->port_id = cpu_to_le16(bp->pf.port_id); 12507 req->handle = cpu_to_le16(handle); 12508 resp = hwrm_req_hold(bp, req); 12509 rc = hwrm_req_send(bp, req); 12510 if (!rc) { 12511 next_handle = le16_to_cpu(resp->next_handle); 12512 if (next_handle != 0) { 12513 if (resp->wol_type == 12514 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { 12515 bp->wol = 1; 12516 bp->wol_filter_id = resp->wol_filter_id; 12517 } 12518 } 12519 } 12520 hwrm_req_drop(bp, req); 12521 return next_handle; 12522 } 12523 12524 static void bnxt_get_wol_settings(struct bnxt *bp) 12525 { 12526 u16 handle = 0; 12527 12528 bp->wol = 0; 12529 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP)) 12530 return; 12531 12532 do { 12533 handle = bnxt_hwrm_get_wol_fltrs(bp, handle); 12534 } while (handle && handle != 0xffff); 12535 } 12536 12537 static bool bnxt_eee_config_ok(struct bnxt *bp) 12538 { 12539 struct ethtool_keee *eee = &bp->eee; 12540 struct bnxt_link_info *link_info = &bp->link_info; 12541 12542 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) 12543 return true; 12544 12545 if (eee->eee_enabled) { 12546 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); 12547 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); 12548 12549 _bnxt_fw_to_linkmode(advertising, link_info->advertising); 12550 12551 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 12552 eee->eee_enabled = 0; 12553 return false; 12554 } 12555 if (linkmode_andnot(tmp, eee->advertised, advertising)) { 12556 linkmode_and(eee->advertised, advertising, 12557 eee->supported); 12558 return false; 12559 } 12560 } 12561 return true; 12562 } 12563 12564 static int bnxt_update_phy_setting(struct bnxt *bp) 12565 { 12566 int rc; 12567 bool update_link = false; 12568 bool update_pause = false; 12569 bool update_eee = false; 12570 struct bnxt_link_info *link_info = &bp->link_info; 12571 12572 rc = bnxt_update_link(bp, true); 12573 if (rc) { 12574 netdev_err(bp->dev, "failed to update link (rc: %x)\n", 12575 rc); 12576 return rc; 12577 } 12578 if (!BNXT_SINGLE_PF(bp)) 12579 return 0; 12580 12581 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 12582 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != 12583 link_info->req_flow_ctrl) 12584 update_pause = true; 12585 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 12586 link_info->force_pause_setting != link_info->req_flow_ctrl) 12587 update_pause = true; 12588 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 12589 if (BNXT_AUTO_MODE(link_info->auto_mode)) 12590 update_link = true; 12591 if (bnxt_force_speed_updated(link_info)) 12592 update_link = true; 12593 if (link_info->req_duplex != link_info->duplex_setting) 12594 update_link = true; 12595 } else { 12596 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 12597 update_link = true; 12598 if (bnxt_auto_speed_updated(link_info)) 12599 update_link = true; 12600 } 12601 12602 /* The last close may have shutdown the link, so need to call 12603 * PHY_CFG to bring it back up. 
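 * (bnxt_hwrm_shutdown_link() forces the link down at close time and leaves link_state as BNXT_LINK_STATE_UNKNOWN.)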
12604 */ 12605 if (!BNXT_LINK_IS_UP(bp)) 12606 update_link = true; 12607 12608 if (!bnxt_eee_config_ok(bp)) 12609 update_eee = true; 12610 12611 if (update_link) 12612 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); 12613 else if (update_pause) 12614 rc = bnxt_hwrm_set_pause(bp); 12615 if (rc) { 12616 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", 12617 rc); 12618 return rc; 12619 } 12620 12621 return rc; 12622 } 12623 12624 static int bnxt_init_dflt_ring_mode(struct bnxt *bp); 12625 12626 static int bnxt_reinit_after_abort(struct bnxt *bp) 12627 { 12628 int rc; 12629 12630 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12631 return -EBUSY; 12632 12633 if (bp->dev->reg_state == NETREG_UNREGISTERED) 12634 return -ENODEV; 12635 12636 rc = bnxt_fw_init_one(bp); 12637 if (!rc) { 12638 bnxt_clear_int_mode(bp); 12639 rc = bnxt_init_int_mode(bp); 12640 if (!rc) { 12641 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12642 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); 12643 } 12644 } 12645 return rc; 12646 } 12647 12648 static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) 12649 { 12650 struct bnxt_ntuple_filter *ntp_fltr; 12651 struct bnxt_l2_filter *l2_fltr; 12652 12653 if (list_empty(&fltr->list)) 12654 return; 12655 12656 if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { 12657 ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); 12658 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 12659 atomic_inc(&l2_fltr->refcnt); 12660 ntp_fltr->l2_fltr = l2_fltr; 12661 if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { 12662 bnxt_del_ntp_filter(bp, ntp_fltr); 12663 netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", 12664 fltr->sw_id); 12665 } 12666 } else if (fltr->type == BNXT_FLTR_TYPE_L2) { 12667 l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); 12668 if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { 12669 bnxt_del_l2_filter(bp, l2_fltr); 12670 netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", 12671 fltr->sw_id); 12672 } 12673 } 12674 } 12675 12676 static void bnxt_cfg_usr_fltrs(struct bnxt *bp) 12677 { 12678 struct bnxt_filter_base *usr_fltr, *tmp; 12679 12680 list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) 12681 bnxt_cfg_one_usr_fltr(bp, usr_fltr); 12682 } 12683 12684 static int bnxt_set_xps_mapping(struct bnxt *bp) 12685 { 12686 int numa_node = dev_to_node(&bp->pdev->dev); 12687 unsigned int q_idx, map_idx, cpu, i; 12688 const struct cpumask *cpu_mask_ptr; 12689 int nr_cpus = num_online_cpus(); 12690 cpumask_t *q_map; 12691 int rc = 0; 12692 12693 q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); 12694 if (!q_map) 12695 return -ENOMEM; 12696 12697 /* Create CPU mask for all TX queues across MQPRIO traffic classes. 12698 * Each TC has the same number of TX queues. The nth TX queue for each 12699 * TC will have the same CPU mask. 
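 * E.g. with 4 TX rings per TC and 8 online CPUs, q_map[0] ORs in the 1st and 5th CPUs returned by cpumask_local_spread(), q_map[1] the 2nd and 6th, and so on.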
12700 */ 12701 for (i = 0; i < nr_cpus; i++) { 12702 map_idx = i % bp->tx_nr_rings_per_tc; 12703 cpu = cpumask_local_spread(i, numa_node); 12704 cpu_mask_ptr = get_cpu_mask(cpu); 12705 cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); 12706 } 12707 12708 /* Register CPU mask for each TX queue except the ones marked for XDP */ 12709 for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { 12710 map_idx = q_idx % bp->tx_nr_rings_per_tc; 12711 rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); 12712 if (rc) { 12713 netdev_warn(bp->dev, "Error setting XPS for q:%d\n", 12714 q_idx); 12715 break; 12716 } 12717 } 12718 12719 kfree(q_map); 12720 12721 return rc; 12722 } 12723 12724 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12725 { 12726 int rc = 0; 12727 12728 netif_carrier_off(bp->dev); 12729 if (irq_re_init) { 12730 /* Reserve rings now if none were reserved at driver probe. */ 12731 rc = bnxt_init_dflt_ring_mode(bp); 12732 if (rc) { 12733 netdev_err(bp->dev, "Failed to reserve default rings at open\n"); 12734 return rc; 12735 } 12736 } 12737 rc = bnxt_reserve_rings(bp, irq_re_init); 12738 if (rc) 12739 return rc; 12740 12741 rc = bnxt_alloc_mem(bp, irq_re_init); 12742 if (rc) { 12743 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12744 goto open_err_free_mem; 12745 } 12746 12747 if (irq_re_init) { 12748 bnxt_init_napi(bp); 12749 rc = bnxt_request_irq(bp); 12750 if (rc) { 12751 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 12752 goto open_err_irq; 12753 } 12754 } 12755 12756 rc = bnxt_init_nic(bp, irq_re_init); 12757 if (rc) { 12758 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12759 goto open_err_irq; 12760 } 12761 12762 bnxt_enable_napi(bp); 12763 bnxt_debug_dev_init(bp); 12764 12765 if (link_re_init) { 12766 mutex_lock(&bp->link_lock); 12767 rc = bnxt_update_phy_setting(bp); 12768 mutex_unlock(&bp->link_lock); 12769 if (rc) { 12770 netdev_warn(bp->dev, "failed to update phy settings\n"); 12771 if (BNXT_SINGLE_PF(bp)) { 12772 bp->link_info.phy_retry = true; 12773 bp->link_info.phy_retry_expires = 12774 jiffies + 5 * HZ; 12775 } 12776 } 12777 } 12778 12779 if (irq_re_init) { 12780 udp_tunnel_nic_reset_ntf(bp->dev); 12781 rc = bnxt_set_xps_mapping(bp); 12782 if (rc) 12783 netdev_warn(bp->dev, "failed to set xps mapping\n"); 12784 } 12785 12786 if (bp->tx_nr_rings_xdp < num_possible_cpus()) { 12787 if (!static_key_enabled(&bnxt_xdp_locking_key)) 12788 static_branch_enable(&bnxt_xdp_locking_key); 12789 } else if (static_key_enabled(&bnxt_xdp_locking_key)) { 12790 static_branch_disable(&bnxt_xdp_locking_key); 12791 } 12792 set_bit(BNXT_STATE_OPEN, &bp->state); 12793 bnxt_enable_int(bp); 12794 /* Enable TX queues */ 12795 bnxt_tx_enable(bp); 12796 mod_timer(&bp->timer, jiffies + bp->current_interval); 12797 /* Poll link status and check for SFP+ module status */ 12798 mutex_lock(&bp->link_lock); 12799 bnxt_get_port_module_status(bp); 12800 mutex_unlock(&bp->link_lock); 12801 12802 /* VF-reps may need to be re-opened after the PF is re-opened */ 12803 if (BNXT_PF(bp)) 12804 bnxt_vf_reps_open(bp); 12805 bnxt_ptp_init_rtc(bp, true); 12806 bnxt_ptp_cfg_tstamp_filters(bp); 12807 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 12808 bnxt_hwrm_realloc_rss_ctx_vnic(bp); 12809 bnxt_cfg_usr_fltrs(bp); 12810 return 0; 12811 12812 open_err_irq: 12813 bnxt_del_napi(bp); 12814 12815 open_err_free_mem: 12816 bnxt_free_skbs(bp); 12817 bnxt_free_irq(bp); 12818 bnxt_free_mem(bp, true); 12819 return rc; 12820 } 12821 12822 int 
bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12823 { 12824 int rc = 0; 12825 12826 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) 12827 rc = -EIO; 12828 if (!rc) 12829 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); 12830 if (rc) { 12831 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); 12832 netif_close(bp->dev); 12833 } 12834 return rc; 12835 } 12836 12837 /* netdev instance lock held, open the NIC half way by allocating all 12838 * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used 12839 * for offline self tests. 12840 */ 12841 int bnxt_half_open_nic(struct bnxt *bp) 12842 { 12843 int rc = 0; 12844 12845 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12846 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); 12847 rc = -ENODEV; 12848 goto half_open_err; 12849 } 12850 12851 rc = bnxt_alloc_mem(bp, true); 12852 if (rc) { 12853 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 12854 goto half_open_err; 12855 } 12856 bnxt_init_napi(bp); 12857 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12858 rc = bnxt_init_nic(bp, true); 12859 if (rc) { 12860 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12861 bnxt_del_napi(bp); 12862 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 12863 goto half_open_err; 12864 } 12865 return 0; 12866 12867 half_open_err: 12868 bnxt_free_skbs(bp); 12869 bnxt_free_mem(bp, true); 12870 netif_close(bp->dev); 12871 return rc; 12872 } 12873 12874 /* netdev instance lock held, this call can only be made after a previous 12875 * successful call to bnxt_half_open_nic(). 12876 */ 12877 void bnxt_half_close_nic(struct bnxt *bp) 12878 { 12879 bnxt_hwrm_resource_free(bp, false, true); 12880 bnxt_del_napi(bp); 12881 bnxt_free_skbs(bp); 12882 bnxt_free_mem(bp, true); 12883 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 12884 } 12885 12886 void bnxt_reenable_sriov(struct bnxt *bp) 12887 { 12888 if (BNXT_PF(bp)) { 12889 struct bnxt_pf_info *pf = &bp->pf; 12890 int n = pf->active_vfs; 12891 12892 if (n) 12893 bnxt_cfg_hw_sriov(bp, &n, true); 12894 } 12895 } 12896 12897 static int bnxt_open(struct net_device *dev) 12898 { 12899 struct bnxt *bp = netdev_priv(dev); 12900 int rc; 12901 12902 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 12903 rc = bnxt_reinit_after_abort(bp); 12904 if (rc) { 12905 if (rc == -EBUSY) 12906 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); 12907 else 12908 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); 12909 return -ENODEV; 12910 } 12911 } 12912 12913 rc = bnxt_hwrm_if_change(bp, true); 12914 if (rc) 12915 return rc; 12916 12917 rc = __bnxt_open_nic(bp, true, true); 12918 if (rc) { 12919 bnxt_hwrm_if_change(bp, false); 12920 } else { 12921 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { 12922 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 12923 bnxt_queue_sp_work(bp, 12924 BNXT_RESTART_ULP_SP_EVENT); 12925 } 12926 } 12927 12928 return rc; 12929 } 12930 12931 static bool bnxt_drv_busy(struct bnxt *bp) 12932 { 12933 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 12934 test_bit(BNXT_STATE_READ_STATS, &bp->state)); 12935 } 12936 12937 static void bnxt_get_ring_stats(struct bnxt *bp, 12938 struct rtnl_link_stats64 *stats); 12939 12940 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, 12941 bool link_re_init) 12942 { 12943 /* Close the VF-reps before closing PF */ 12944 if (BNXT_PF(bp)) 12945 bnxt_vf_reps_close(bp); 12946 12947 /* Change device state to avoid TX queue 
wake up's */ 12948 bnxt_tx_disable(bp); 12949 12950 clear_bit(BNXT_STATE_OPEN, &bp->state); 12951 smp_mb__after_atomic(); 12952 while (bnxt_drv_busy(bp)) 12953 msleep(20); 12954 12955 if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) 12956 bnxt_clear_rss_ctxs(bp); 12957 /* Flush rings and disable interrupts */ 12958 bnxt_shutdown_nic(bp, irq_re_init); 12959 12960 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ 12961 12962 bnxt_debug_dev_exit(bp); 12963 bnxt_disable_napi(bp); 12964 timer_delete_sync(&bp->timer); 12965 bnxt_free_skbs(bp); 12966 12967 /* Save ring stats before shutdown */ 12968 if (bp->bnapi && irq_re_init) { 12969 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 12970 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); 12971 } 12972 if (irq_re_init) { 12973 bnxt_free_irq(bp); 12974 bnxt_del_napi(bp); 12975 } 12976 bnxt_free_mem(bp, irq_re_init); 12977 } 12978 12979 void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 12980 { 12981 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 12982 /* If we get here, it means firmware reset is in progress 12983 * while we are trying to close. We can safely proceed with 12984 * the close because we are holding netdev instance lock. 12985 * Some firmware messages may fail as we proceed to close. 12986 * We set the ABORT_ERR flag here so that the FW reset thread 12987 * will later abort when it gets the netdev instance lock 12988 * and sees the flag. 12989 */ 12990 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); 12991 set_bit(BNXT_STATE_ABORT_ERR, &bp->state); 12992 } 12993 12994 #ifdef CONFIG_BNXT_SRIOV 12995 if (bp->sriov_cfg) { 12996 int rc; 12997 12998 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, 12999 !bp->sriov_cfg, 13000 BNXT_SRIOV_CFG_WAIT_TMO); 13001 if (!rc) 13002 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); 13003 else if (rc < 0) 13004 netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); 13005 } 13006 #endif 13007 __bnxt_close_nic(bp, irq_re_init, link_re_init); 13008 } 13009 13010 static int bnxt_close(struct net_device *dev) 13011 { 13012 struct bnxt *bp = netdev_priv(dev); 13013 13014 bnxt_close_nic(bp, true, true); 13015 bnxt_hwrm_shutdown_link(bp); 13016 bnxt_hwrm_if_change(bp, false); 13017 return 0; 13018 } 13019 13020 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, 13021 u16 *val) 13022 { 13023 struct hwrm_port_phy_mdio_read_output *resp; 13024 struct hwrm_port_phy_mdio_read_input *req; 13025 int rc; 13026 13027 if (bp->hwrm_spec_code < 0x10a00) 13028 return -EOPNOTSUPP; 13029 13030 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); 13031 if (rc) 13032 return rc; 13033 13034 req->port_id = cpu_to_le16(bp->pf.port_id); 13035 req->phy_addr = phy_addr; 13036 req->reg_addr = cpu_to_le16(reg & 0x1f); 13037 if (mdio_phy_id_is_c45(phy_addr)) { 13038 req->cl45_mdio = 1; 13039 req->phy_addr = mdio_phy_id_prtad(phy_addr); 13040 req->dev_addr = mdio_phy_id_devad(phy_addr); 13041 req->reg_addr = cpu_to_le16(reg); 13042 } 13043 13044 resp = hwrm_req_hold(bp, req); 13045 rc = hwrm_req_send(bp, req); 13046 if (!rc) 13047 *val = le16_to_cpu(resp->reg_data); 13048 hwrm_req_drop(bp, req); 13049 return rc; 13050 } 13051 13052 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, 13053 u16 val) 13054 { 13055 struct hwrm_port_phy_mdio_write_input *req; 13056 int rc; 13057 13058 if (bp->hwrm_spec_code < 0x10a00) 13059 return 
-EOPNOTSUPP; 13060 13061 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); 13062 if (rc) 13063 return rc; 13064 13065 req->port_id = cpu_to_le16(bp->pf.port_id); 13066 req->phy_addr = phy_addr; 13067 req->reg_addr = cpu_to_le16(reg & 0x1f); 13068 if (mdio_phy_id_is_c45(phy_addr)) { 13069 req->cl45_mdio = 1; 13070 req->phy_addr = mdio_phy_id_prtad(phy_addr); 13071 req->dev_addr = mdio_phy_id_devad(phy_addr); 13072 req->reg_addr = cpu_to_le16(reg); 13073 } 13074 req->reg_data = cpu_to_le16(val); 13075 13076 return hwrm_req_send(bp, req); 13077 } 13078 13079 /* netdev instance lock held */ 13080 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 13081 { 13082 struct mii_ioctl_data *mdio = if_mii(ifr); 13083 struct bnxt *bp = netdev_priv(dev); 13084 int rc; 13085 13086 switch (cmd) { 13087 case SIOCGMIIPHY: 13088 mdio->phy_id = bp->link_info.phy_addr; 13089 13090 fallthrough; 13091 case SIOCGMIIREG: { 13092 u16 mii_regval = 0; 13093 13094 if (!netif_running(dev)) 13095 return -EAGAIN; 13096 13097 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, 13098 &mii_regval); 13099 mdio->val_out = mii_regval; 13100 return rc; 13101 } 13102 13103 case SIOCSMIIREG: 13104 if (!netif_running(dev)) 13105 return -EAGAIN; 13106 13107 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, 13108 mdio->val_in); 13109 13110 case SIOCSHWTSTAMP: 13111 return bnxt_hwtstamp_set(dev, ifr); 13112 13113 case SIOCGHWTSTAMP: 13114 return bnxt_hwtstamp_get(dev, ifr); 13115 13116 default: 13117 /* do nothing */ 13118 break; 13119 } 13120 return -EOPNOTSUPP; 13121 } 13122 13123 static void bnxt_get_ring_stats(struct bnxt *bp, 13124 struct rtnl_link_stats64 *stats) 13125 { 13126 int i; 13127 13128 for (i = 0; i < bp->cp_nr_rings; i++) { 13129 struct bnxt_napi *bnapi = bp->bnapi[i]; 13130 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 13131 u64 *sw = cpr->stats.sw_stats; 13132 13133 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 13134 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 13135 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 13136 13137 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 13138 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 13139 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 13140 13141 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 13142 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 13143 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 13144 13145 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 13146 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 13147 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 13148 13149 stats->rx_missed_errors += 13150 BNXT_GET_RING_STATS64(sw, rx_discard_pkts); 13151 13152 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 13153 13154 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); 13155 13156 stats->rx_dropped += 13157 cpr->sw_stats->rx.rx_netpoll_discards + 13158 cpr->sw_stats->rx.rx_oom_discards; 13159 } 13160 } 13161 13162 static void bnxt_add_prev_stats(struct bnxt *bp, 13163 struct rtnl_link_stats64 *stats) 13164 { 13165 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; 13166 13167 stats->rx_packets += prev_stats->rx_packets; 13168 stats->tx_packets += prev_stats->tx_packets; 13169 stats->rx_bytes += prev_stats->rx_bytes; 13170 stats->tx_bytes += prev_stats->tx_bytes; 13171 stats->rx_missed_errors += 
prev_stats->rx_missed_errors; 13172 stats->multicast += prev_stats->multicast; 13173 stats->rx_dropped += prev_stats->rx_dropped; 13174 stats->tx_dropped += prev_stats->tx_dropped; 13175 } 13176 13177 static void 13178 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 13179 { 13180 struct bnxt *bp = netdev_priv(dev); 13181 13182 set_bit(BNXT_STATE_READ_STATS, &bp->state); 13183 /* Make sure bnxt_close_nic() sees that we are reading stats before 13184 * we check the BNXT_STATE_OPEN flag. 13185 */ 13186 smp_mb__after_atomic(); 13187 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13188 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 13189 *stats = bp->net_stats_prev; 13190 return; 13191 } 13192 13193 bnxt_get_ring_stats(bp, stats); 13194 bnxt_add_prev_stats(bp, stats); 13195 13196 if (bp->flags & BNXT_FLAG_PORT_STATS) { 13197 u64 *rx = bp->port_stats.sw_stats; 13198 u64 *tx = bp->port_stats.sw_stats + 13199 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; 13200 13201 stats->rx_crc_errors = 13202 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); 13203 stats->rx_frame_errors = 13204 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); 13205 stats->rx_length_errors = 13206 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + 13207 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + 13208 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); 13209 stats->rx_errors = 13210 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + 13211 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); 13212 stats->collisions = 13213 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); 13214 stats->tx_fifo_errors = 13215 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); 13216 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); 13217 } 13218 clear_bit(BNXT_STATE_READ_STATS, &bp->state); 13219 } 13220 13221 static void bnxt_get_one_ring_err_stats(struct bnxt *bp, 13222 struct bnxt_total_ring_err_stats *stats, 13223 struct bnxt_cp_ring_info *cpr) 13224 { 13225 struct bnxt_sw_stats *sw_stats = cpr->sw_stats; 13226 u64 *hw_stats = cpr->stats.sw_stats; 13227 13228 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; 13229 stats->rx_total_resets += sw_stats->rx.rx_resets; 13230 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; 13231 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; 13232 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; 13233 stats->rx_total_ring_discards += 13234 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); 13235 stats->tx_total_resets += sw_stats->tx.tx_resets; 13236 stats->tx_total_ring_discards += 13237 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); 13238 stats->total_missed_irqs += sw_stats->cmn.missed_irqs; 13239 } 13240 13241 void bnxt_get_ring_err_stats(struct bnxt *bp, 13242 struct bnxt_total_ring_err_stats *stats) 13243 { 13244 int i; 13245 13246 for (i = 0; i < bp->cp_nr_rings; i++) 13247 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); 13248 } 13249 13250 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) 13251 { 13252 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13253 struct net_device *dev = bp->dev; 13254 struct netdev_hw_addr *ha; 13255 u8 *haddr; 13256 int mc_count = 0; 13257 bool update = false; 13258 int off = 0; 13259 13260 netdev_for_each_mc_addr(ha, dev) { 13261 if (mc_count >= BNXT_MAX_MC_ADDRS) { 13262 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 13263 vnic->mc_list_count = 0; 13264 return false; 13265 } 13266 haddr = ha->addr; 13267 if (!ether_addr_equal(haddr, vnic->mc_list + off)) 
{ 13268 memcpy(vnic->mc_list + off, haddr, ETH_ALEN); 13269 update = true; 13270 } 13271 off += ETH_ALEN; 13272 mc_count++; 13273 } 13274 if (mc_count) 13275 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 13276 13277 if (mc_count != vnic->mc_list_count) { 13278 vnic->mc_list_count = mc_count; 13279 update = true; 13280 } 13281 return update; 13282 } 13283 13284 static bool bnxt_uc_list_updated(struct bnxt *bp) 13285 { 13286 struct net_device *dev = bp->dev; 13287 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13288 struct netdev_hw_addr *ha; 13289 int off = 0; 13290 13291 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) 13292 return true; 13293 13294 netdev_for_each_uc_addr(ha, dev) { 13295 if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) 13296 return true; 13297 13298 off += ETH_ALEN; 13299 } 13300 return false; 13301 } 13302 13303 static void bnxt_set_rx_mode(struct net_device *dev) 13304 { 13305 struct bnxt *bp = netdev_priv(dev); 13306 struct bnxt_vnic_info *vnic; 13307 bool mc_update = false; 13308 bool uc_update; 13309 u32 mask; 13310 13311 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) 13312 return; 13313 13314 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13315 mask = vnic->rx_mask; 13316 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 13317 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 13318 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | 13319 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); 13320 13321 if (dev->flags & IFF_PROMISC) 13322 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 13323 13324 uc_update = bnxt_uc_list_updated(bp); 13325 13326 if (dev->flags & IFF_BROADCAST) 13327 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 13328 if (dev->flags & IFF_ALLMULTI) { 13329 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 13330 vnic->mc_list_count = 0; 13331 } else if (dev->flags & IFF_MULTICAST) { 13332 mc_update = bnxt_mc_list_updated(bp, &mask); 13333 } 13334 13335 if (mask != vnic->rx_mask || uc_update || mc_update) { 13336 vnic->rx_mask = mask; 13337 13338 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 13339 } 13340 } 13341 13342 static int bnxt_cfg_rx_mode(struct bnxt *bp) 13343 { 13344 struct net_device *dev = bp->dev; 13345 struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 13346 struct netdev_hw_addr *ha; 13347 int i, off = 0, rc; 13348 bool uc_update; 13349 13350 netif_addr_lock_bh(dev); 13351 uc_update = bnxt_uc_list_updated(bp); 13352 netif_addr_unlock_bh(dev); 13353 13354 if (!uc_update) 13355 goto skip_uc; 13356 13357 for (i = 1; i < vnic->uc_filter_count; i++) { 13358 struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; 13359 13360 bnxt_hwrm_l2_filter_free(bp, fltr); 13361 bnxt_del_l2_filter(bp, fltr); 13362 } 13363 13364 vnic->uc_filter_count = 1; 13365 13366 netif_addr_lock_bh(dev); 13367 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { 13368 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 13369 } else { 13370 netdev_for_each_uc_addr(ha, dev) { 13371 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); 13372 off += ETH_ALEN; 13373 vnic->uc_filter_count++; 13374 } 13375 } 13376 netif_addr_unlock_bh(dev); 13377 13378 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { 13379 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); 13380 if (rc) { 13381 if (BNXT_VF(bp) && rc == -ENODEV) { 13382 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13383 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); 13384 else 13385 netdev_dbg(bp->dev, "PF still unavailable while configuring 
L2 filters.\n"); 13386 rc = 0; 13387 } else { 13388 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); 13389 } 13390 vnic->uc_filter_count = i; 13391 return rc; 13392 } 13393 } 13394 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13395 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); 13396 13397 skip_uc: 13398 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && 13399 !bnxt_promisc_ok(bp)) 13400 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 13401 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 13402 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { 13403 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", 13404 rc); 13405 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; 13406 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 13407 vnic->mc_list_count = 0; 13408 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); 13409 } 13410 if (rc) 13411 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", 13412 rc); 13413 13414 return rc; 13415 } 13416 13417 static bool bnxt_can_reserve_rings(struct bnxt *bp) 13418 { 13419 #ifdef CONFIG_BNXT_SRIOV 13420 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { 13421 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 13422 13423 /* No minimum rings were provisioned by the PF. Don't 13424 * reserve rings by default when device is down. 13425 */ 13426 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) 13427 return true; 13428 13429 if (!netif_running(bp->dev)) 13430 return false; 13431 } 13432 #endif 13433 return true; 13434 } 13435 13436 /* If the chip and firmware supports RFS */ 13437 static bool bnxt_rfs_supported(struct bnxt *bp) 13438 { 13439 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 13440 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) 13441 return true; 13442 return false; 13443 } 13444 /* 212 firmware is broken for aRFS */ 13445 if (BNXT_FW_MAJ(bp) == 212) 13446 return false; 13447 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) 13448 return true; 13449 if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) 13450 return true; 13451 return false; 13452 } 13453 13454 /* If runtime conditions support RFS */ 13455 bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) 13456 { 13457 struct bnxt_hw_rings hwr = {0}; 13458 int max_vnics, max_rss_ctxs; 13459 13460 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 13461 !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 13462 return bnxt_rfs_supported(bp); 13463 13464 if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) 13465 return false; 13466 13467 hwr.grp = bp->rx_nr_rings; 13468 hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); 13469 if (new_rss_ctx) 13470 hwr.vnic++; 13471 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 13472 max_vnics = bnxt_get_max_func_vnics(bp); 13473 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); 13474 13475 if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { 13476 if (bp->rx_nr_rings > 1) 13477 netdev_warn(bp->dev, 13478 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", 13479 min(max_rss_ctxs - 1, max_vnics - 1)); 13480 return false; 13481 } 13482 13483 if (!BNXT_NEW_RM(bp)) 13484 return true; 13485 13486 /* Do not reduce VNIC and RSS ctx reservations. There is a FW 13487 * issue that will mess up the default VNIC if we reduce the 13488 * reservations. 
13489 */ 13490 if (hwr.vnic <= bp->hw_resc.resv_vnics && 13491 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 13492 return true; 13493 13494 bnxt_hwrm_reserve_rings(bp, &hwr); 13495 if (hwr.vnic <= bp->hw_resc.resv_vnics && 13496 hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) 13497 return true; 13498 13499 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); 13500 hwr.vnic = 1; 13501 hwr.rss_ctx = 0; 13502 bnxt_hwrm_reserve_rings(bp, &hwr); 13503 return false; 13504 } 13505 13506 static netdev_features_t bnxt_fix_features(struct net_device *dev, 13507 netdev_features_t features) 13508 { 13509 struct bnxt *bp = netdev_priv(dev); 13510 netdev_features_t vlan_features; 13511 13512 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) 13513 features &= ~NETIF_F_NTUPLE; 13514 13515 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) 13516 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 13517 13518 if (!(features & NETIF_F_GRO)) 13519 features &= ~NETIF_F_GRO_HW; 13520 13521 if (features & NETIF_F_GRO_HW) 13522 features &= ~NETIF_F_LRO; 13523 13524 /* Both CTAG and STAG VLAN acceleration on the RX side have to be 13525 * turned on or off together. 13526 */ 13527 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; 13528 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { 13529 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) 13530 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 13531 else if (vlan_features) 13532 features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 13533 } 13534 #ifdef CONFIG_BNXT_SRIOV 13535 if (BNXT_VF(bp) && bp->vf.vlan) 13536 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; 13537 #endif 13538 return features; 13539 } 13540 13541 static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, 13542 bool link_re_init, u32 flags, bool update_tpa) 13543 { 13544 bnxt_close_nic(bp, irq_re_init, link_re_init); 13545 bp->flags = flags; 13546 if (update_tpa) 13547 bnxt_set_ring_params(bp); 13548 return bnxt_open_nic(bp, irq_re_init, link_re_init); 13549 } 13550 13551 static int bnxt_set_features(struct net_device *dev, netdev_features_t features) 13552 { 13553 bool update_tpa = false, update_ntuple = false; 13554 struct bnxt *bp = netdev_priv(dev); 13555 u32 flags = bp->flags; 13556 u32 changes; 13557 int rc = 0; 13558 bool re_init = false; 13559 13560 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; 13561 if (features & NETIF_F_GRO_HW) 13562 flags |= BNXT_FLAG_GRO; 13563 else if (features & NETIF_F_LRO) 13564 flags |= BNXT_FLAG_LRO; 13565 13566 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) 13567 flags &= ~BNXT_FLAG_TPA; 13568 13569 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) 13570 flags |= BNXT_FLAG_STRIP_VLAN; 13571 13572 if (features & NETIF_F_NTUPLE) 13573 flags |= BNXT_FLAG_RFS; 13574 else 13575 bnxt_clear_usr_fltrs(bp, true); 13576 13577 changes = flags ^ bp->flags; 13578 if (changes & BNXT_FLAG_TPA) { 13579 update_tpa = true; 13580 if ((bp->flags & BNXT_FLAG_TPA) == 0 || 13581 (flags & BNXT_FLAG_TPA) == 0 || 13582 (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 13583 re_init = true; 13584 } 13585 13586 if (changes & ~BNXT_FLAG_TPA) 13587 re_init = true; 13588 13589 if (changes & BNXT_FLAG_RFS) 13590 update_ntuple = true; 13591 13592 if (flags != bp->flags) { 13593 u32 old_flags = bp->flags; 13594 13595 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 13596 bp->flags = flags; 13597 if (update_tpa) 13598 bnxt_set_ring_params(bp); 13599 return rc; 13600 } 13601 13602 if (update_ntuple) 13603 return bnxt_reinit_features(bp, true, false, flags, update_tpa); 13604 13605 if (re_init) 13606 
return bnxt_reinit_features(bp, false, false, flags, update_tpa); 13607 13608 if (update_tpa) { 13609 bp->flags = flags; 13610 rc = bnxt_set_tpa(bp, 13611 (flags & BNXT_FLAG_TPA) ? 13612 true : false); 13613 if (rc) 13614 bp->flags = old_flags; 13615 } 13616 } 13617 return rc; 13618 } 13619 13620 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, 13621 u8 **nextp) 13622 { 13623 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); 13624 struct hop_jumbo_hdr *jhdr; 13625 int hdr_count = 0; 13626 u8 *nexthdr; 13627 int start; 13628 13629 /* Check that there are at most 2 IPv6 extension headers, no 13630 * fragment header, and each is <= 64 bytes. 13631 */ 13632 start = nw_off + sizeof(*ip6h); 13633 nexthdr = &ip6h->nexthdr; 13634 while (ipv6_ext_hdr(*nexthdr)) { 13635 struct ipv6_opt_hdr *hp; 13636 int hdrlen; 13637 13638 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || 13639 *nexthdr == NEXTHDR_FRAGMENT) 13640 return false; 13641 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, 13642 skb_headlen(skb), NULL); 13643 if (!hp) 13644 return false; 13645 if (*nexthdr == NEXTHDR_AUTH) 13646 hdrlen = ipv6_authlen(hp); 13647 else 13648 hdrlen = ipv6_optlen(hp); 13649 13650 if (hdrlen > 64) 13651 return false; 13652 13653 /* The ext header may be a hop-by-hop header inserted for 13654 * big TCP purposes. This will be removed before sending 13655 * from NIC, so do not count it. 13656 */ 13657 if (*nexthdr == NEXTHDR_HOP) { 13658 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) 13659 goto increment_hdr; 13660 13661 jhdr = (struct hop_jumbo_hdr *)hp; 13662 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || 13663 jhdr->nexthdr != IPPROTO_TCP) 13664 goto increment_hdr; 13665 13666 goto next_hdr; 13667 } 13668 increment_hdr: 13669 hdr_count++; 13670 next_hdr: 13671 nexthdr = &hp->nexthdr; 13672 start += hdrlen; 13673 } 13674 if (nextp) { 13675 /* Caller will check inner protocol */ 13676 if (skb->encapsulation) { 13677 *nextp = nexthdr; 13678 return true; 13679 } 13680 *nextp = NULL; 13681 } 13682 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ 13683 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; 13684 } 13685 13686 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. 
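 * We also handle one VXLAN-GPE port.  The outer UDP destination port
 * must match one of the ports programmed into the NIC; otherwise the
 * tunnel is not offloaded and bnxt_features_check() drops the
 * checksum/GSO features for the skb.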
*/ 13687 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) 13688 { 13689 struct udphdr *uh = udp_hdr(skb); 13690 __be16 udp_port = uh->dest; 13691 13692 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && 13693 udp_port != bp->vxlan_gpe_port) 13694 return false; 13695 if (skb->inner_protocol == htons(ETH_P_TEB)) { 13696 struct ethhdr *eh = inner_eth_hdr(skb); 13697 13698 switch (eh->h_proto) { 13699 case htons(ETH_P_IP): 13700 return true; 13701 case htons(ETH_P_IPV6): 13702 return bnxt_exthdr_check(bp, skb, 13703 skb_inner_network_offset(skb), 13704 NULL); 13705 } 13706 } else if (skb->inner_protocol == htons(ETH_P_IP)) { 13707 return true; 13708 } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { 13709 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 13710 NULL); 13711 } 13712 return false; 13713 } 13714 13715 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) 13716 { 13717 switch (l4_proto) { 13718 case IPPROTO_UDP: 13719 return bnxt_udp_tunl_check(bp, skb); 13720 case IPPROTO_IPIP: 13721 return true; 13722 case IPPROTO_GRE: { 13723 switch (skb->inner_protocol) { 13724 default: 13725 return false; 13726 case htons(ETH_P_IP): 13727 return true; 13728 case htons(ETH_P_IPV6): 13729 fallthrough; 13730 } 13731 } 13732 case IPPROTO_IPV6: 13733 /* Check ext headers of inner ipv6 */ 13734 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), 13735 NULL); 13736 } 13737 return false; 13738 } 13739 13740 static netdev_features_t bnxt_features_check(struct sk_buff *skb, 13741 struct net_device *dev, 13742 netdev_features_t features) 13743 { 13744 struct bnxt *bp = netdev_priv(dev); 13745 u8 *l4_proto; 13746 13747 features = vlan_features_check(skb, features); 13748 switch (vlan_get_protocol(skb)) { 13749 case htons(ETH_P_IP): 13750 if (!skb->encapsulation) 13751 return features; 13752 l4_proto = &ip_hdr(skb)->protocol; 13753 if (bnxt_tunl_check(bp, skb, *l4_proto)) 13754 return features; 13755 break; 13756 case htons(ETH_P_IPV6): 13757 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), 13758 &l4_proto)) 13759 break; 13760 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) 13761 return features; 13762 break; 13763 } 13764 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); 13765 } 13766 13767 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, 13768 u32 *reg_buf) 13769 { 13770 struct hwrm_dbg_read_direct_output *resp; 13771 struct hwrm_dbg_read_direct_input *req; 13772 __le32 *dbg_reg_buf; 13773 dma_addr_t mapping; 13774 int rc, i; 13775 13776 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); 13777 if (rc) 13778 return rc; 13779 13780 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, 13781 &mapping); 13782 if (!dbg_reg_buf) { 13783 rc = -ENOMEM; 13784 goto dbg_rd_reg_exit; 13785 } 13786 13787 req->host_dest_addr = cpu_to_le64(mapping); 13788 13789 resp = hwrm_req_hold(bp, req); 13790 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); 13791 req->read_len32 = cpu_to_le32(num_words); 13792 13793 rc = hwrm_req_send(bp, req); 13794 if (rc || resp->error_code) { 13795 rc = -EIO; 13796 goto dbg_rd_reg_exit; 13797 } 13798 for (i = 0; i < num_words; i++) 13799 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); 13800 13801 dbg_rd_reg_exit: 13802 hwrm_req_drop(bp, req); 13803 return rc; 13804 } 13805 13806 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, 13807 u32 ring_id, u32 *prod, u32 *cons) 13808 { 13809 struct hwrm_dbg_ring_info_get_output *resp; 13810 
struct hwrm_dbg_ring_info_get_input *req; 13811 int rc; 13812 13813 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); 13814 if (rc) 13815 return rc; 13816 13817 req->ring_type = ring_type; 13818 req->fw_ring_id = cpu_to_le32(ring_id); 13819 resp = hwrm_req_hold(bp, req); 13820 rc = hwrm_req_send(bp, req); 13821 if (!rc) { 13822 *prod = le32_to_cpu(resp->producer_index); 13823 *cons = le32_to_cpu(resp->consumer_index); 13824 } 13825 hwrm_req_drop(bp, req); 13826 return rc; 13827 } 13828 13829 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) 13830 { 13831 struct bnxt_tx_ring_info *txr; 13832 int i = bnapi->index, j; 13833 13834 bnxt_for_each_napi_tx(j, bnapi, txr) 13835 netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", 13836 i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, 13837 txr->tx_cons); 13838 } 13839 13840 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) 13841 { 13842 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 13843 int i = bnapi->index; 13844 13845 if (!rxr) 13846 return; 13847 13848 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", 13849 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, 13850 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, 13851 rxr->rx_sw_agg_prod); 13852 } 13853 13854 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) 13855 { 13856 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 13857 int i = bnapi->index; 13858 13859 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", 13860 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); 13861 } 13862 13863 static void bnxt_dbg_dump_states(struct bnxt *bp) 13864 { 13865 int i; 13866 struct bnxt_napi *bnapi; 13867 13868 for (i = 0; i < bp->cp_nr_rings; i++) { 13869 bnapi = bp->bnapi[i]; 13870 if (netif_msg_drv(bp)) { 13871 bnxt_dump_tx_sw_state(bnapi); 13872 bnxt_dump_rx_sw_state(bnapi); 13873 bnxt_dump_cp_sw_state(bnapi); 13874 } 13875 } 13876 } 13877 13878 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) 13879 { 13880 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; 13881 struct hwrm_ring_reset_input *req; 13882 struct bnxt_napi *bnapi = rxr->bnapi; 13883 struct bnxt_cp_ring_info *cpr; 13884 u16 cp_ring_id; 13885 int rc; 13886 13887 rc = hwrm_req_init(bp, req, HWRM_RING_RESET); 13888 if (rc) 13889 return rc; 13890 13891 cpr = &bnapi->cp_ring; 13892 cp_ring_id = cpr->cp_ring_struct.fw_ring_id; 13893 req->cmpl_ring = cpu_to_le16(cp_ring_id); 13894 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; 13895 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); 13896 return hwrm_req_send_silent(bp, req); 13897 } 13898 13899 static void bnxt_reset_task(struct bnxt *bp, bool silent) 13900 { 13901 if (!silent) 13902 bnxt_dbg_dump_states(bp); 13903 if (netif_running(bp->dev)) { 13904 bnxt_close_nic(bp, !silent, false); 13905 bnxt_open_nic(bp, !silent, false); 13906 } 13907 } 13908 13909 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) 13910 { 13911 struct bnxt *bp = netdev_priv(dev); 13912 13913 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); 13914 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); 13915 } 13916 13917 static void bnxt_fw_health_check(struct bnxt *bp) 13918 { 13919 struct bnxt_fw_health *fw_health = bp->fw_health; 13920 struct pci_dev *pdev = bp->pdev; 13921 u32 val; 13922 13923 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 13924 return; 13925 13926 /* Make sure 
it is enabled before checking the tmr_counter. */ 13927 smp_rmb(); 13928 if (fw_health->tmr_counter) { 13929 fw_health->tmr_counter--; 13930 return; 13931 } 13932 13933 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 13934 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { 13935 fw_health->arrests++; 13936 goto fw_reset; 13937 } 13938 13939 fw_health->last_fw_heartbeat = val; 13940 13941 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 13942 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { 13943 fw_health->discoveries++; 13944 goto fw_reset; 13945 } 13946 13947 fw_health->tmr_counter = fw_health->tmr_multiplier; 13948 return; 13949 13950 fw_reset: 13951 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); 13952 } 13953 13954 static void bnxt_timer(struct timer_list *t) 13955 { 13956 struct bnxt *bp = from_timer(bp, t, timer); 13957 struct net_device *dev = bp->dev; 13958 13959 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) 13960 return; 13961 13962 if (atomic_read(&bp->intr_sem) != 0) 13963 goto bnxt_restart_timer; 13964 13965 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) 13966 bnxt_fw_health_check(bp); 13967 13968 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) 13969 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); 13970 13971 if (bnxt_tc_flower_enabled(bp)) 13972 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); 13973 13974 #ifdef CONFIG_RFS_ACCEL 13975 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) 13976 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 13977 #endif /*CONFIG_RFS_ACCEL*/ 13978 13979 if (bp->link_info.phy_retry) { 13980 if (time_after(jiffies, bp->link_info.phy_retry_expires)) { 13981 bp->link_info.phy_retry = false; 13982 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); 13983 } else { 13984 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); 13985 } 13986 } 13987 13988 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) 13989 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); 13990 13991 if ((BNXT_CHIP_P5(bp)) && !bp->chip_rev && netif_carrier_ok(dev)) 13992 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); 13993 13994 bnxt_restart_timer: 13995 mod_timer(&bp->timer, jiffies + bp->current_interval); 13996 } 13997 13998 static void bnxt_lock_sp(struct bnxt *bp) 13999 { 14000 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK 14001 * set. If the device is being closed, bnxt_close() may be holding 14002 * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear. 14003 * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev 14004 * instance lock. 
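 * bnxt_unlock_sp() sets the bit again before releasing the lock, so
 * the task stays marked as running until bnxt_sp_task() finally
 * clears it on exit.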
14005 */ 14006 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14007 netdev_lock(bp->dev); 14008 } 14009 14010 static void bnxt_unlock_sp(struct bnxt *bp) 14011 { 14012 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14013 netdev_unlock(bp->dev); 14014 } 14015 14016 /* Only called from bnxt_sp_task() */ 14017 static void bnxt_reset(struct bnxt *bp, bool silent) 14018 { 14019 bnxt_lock_sp(bp); 14020 if (test_bit(BNXT_STATE_OPEN, &bp->state)) 14021 bnxt_reset_task(bp, silent); 14022 bnxt_unlock_sp(bp); 14023 } 14024 14025 /* Only called from bnxt_sp_task() */ 14026 static void bnxt_rx_ring_reset(struct bnxt *bp) 14027 { 14028 int i; 14029 14030 bnxt_lock_sp(bp); 14031 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 14032 bnxt_unlock_sp(bp); 14033 return; 14034 } 14035 /* Disable and flush TPA before resetting the RX ring */ 14036 if (bp->flags & BNXT_FLAG_TPA) 14037 bnxt_set_tpa(bp, false); 14038 for (i = 0; i < bp->rx_nr_rings; i++) { 14039 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 14040 struct bnxt_cp_ring_info *cpr; 14041 int rc; 14042 14043 if (!rxr->bnapi->in_reset) 14044 continue; 14045 14046 rc = bnxt_hwrm_rx_ring_reset(bp, i); 14047 if (rc) { 14048 if (rc == -EINVAL || rc == -EOPNOTSUPP) 14049 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); 14050 else 14051 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", 14052 rc); 14053 bnxt_reset_task(bp, true); 14054 break; 14055 } 14056 bnxt_free_one_rx_ring_skbs(bp, rxr); 14057 rxr->rx_prod = 0; 14058 rxr->rx_agg_prod = 0; 14059 rxr->rx_sw_agg_prod = 0; 14060 rxr->rx_next_cons = 0; 14061 rxr->bnapi->in_reset = false; 14062 bnxt_alloc_one_rx_ring(bp, i); 14063 cpr = &rxr->bnapi->cp_ring; 14064 cpr->sw_stats->rx.rx_resets++; 14065 if (bp->flags & BNXT_FLAG_AGG_RINGS) 14066 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 14067 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 14068 } 14069 if (bp->flags & BNXT_FLAG_TPA) 14070 bnxt_set_tpa(bp, true); 14071 bnxt_unlock_sp(bp); 14072 } 14073 14074 static void bnxt_fw_fatal_close(struct bnxt *bp) 14075 { 14076 bnxt_tx_disable(bp); 14077 bnxt_disable_napi(bp); 14078 bnxt_disable_int_sync(bp); 14079 bnxt_free_irq(bp); 14080 bnxt_clear_int_mode(bp); 14081 pci_disable_device(bp->pdev); 14082 } 14083 14084 static void bnxt_fw_reset_close(struct bnxt *bp) 14085 { 14086 /* When firmware is in fatal state, quiesce device and disable 14087 * bus master to prevent any potential bad DMAs before freeing 14088 * kernel memory. 
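 * If the PCI config space reads back as all ones, the device is no
 * longer accessible (e.g. surprise removal), so skip the minimum
 * firmware reset wait by zeroing fw_reset_min_dsecs.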
14089 */ 14090 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { 14091 u16 val = 0; 14092 14093 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 14094 if (val == 0xffff) 14095 bp->fw_reset_min_dsecs = 0; 14096 bnxt_fw_fatal_close(bp); 14097 } 14098 __bnxt_close_nic(bp, true, false); 14099 bnxt_vf_reps_free(bp); 14100 bnxt_clear_int_mode(bp); 14101 bnxt_hwrm_func_drv_unrgtr(bp); 14102 if (pci_is_enabled(bp->pdev)) 14103 pci_disable_device(bp->pdev); 14104 bnxt_free_ctx_mem(bp, false); 14105 } 14106 14107 static bool is_bnxt_fw_ok(struct bnxt *bp) 14108 { 14109 struct bnxt_fw_health *fw_health = bp->fw_health; 14110 bool no_heartbeat = false, has_reset = false; 14111 u32 val; 14112 14113 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); 14114 if (val == fw_health->last_fw_heartbeat) 14115 no_heartbeat = true; 14116 14117 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 14118 if (val != fw_health->last_fw_reset_cnt) 14119 has_reset = true; 14120 14121 if (!no_heartbeat && has_reset) 14122 return true; 14123 14124 return false; 14125 } 14126 14127 /* netdev instance lock is acquired before calling this function */ 14128 static void bnxt_force_fw_reset(struct bnxt *bp) 14129 { 14130 struct bnxt_fw_health *fw_health = bp->fw_health; 14131 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 14132 u32 wait_dsecs; 14133 14134 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || 14135 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) 14136 return; 14137 14138 /* we have to serialize with bnxt_refclk_read()*/ 14139 if (ptp) { 14140 unsigned long flags; 14141 14142 write_seqlock_irqsave(&ptp->ptp_lock, flags); 14143 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14144 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 14145 } else { 14146 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14147 } 14148 bnxt_fw_reset_close(bp); 14149 wait_dsecs = fw_health->master_func_wait_dsecs; 14150 if (fw_health->primary) { 14151 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) 14152 wait_dsecs = 0; 14153 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 14154 } else { 14155 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; 14156 wait_dsecs = fw_health->normal_func_wait_dsecs; 14157 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14158 } 14159 14160 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; 14161 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; 14162 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 14163 } 14164 14165 void bnxt_fw_exception(struct bnxt *bp) 14166 { 14167 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); 14168 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 14169 bnxt_ulp_stop(bp); 14170 bnxt_lock_sp(bp); 14171 bnxt_force_fw_reset(bp); 14172 bnxt_unlock_sp(bp); 14173 } 14174 14175 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or 14176 * < 0 on error. 
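 * Always returns 0 on a VF; only the PF tracks VF registration.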
14177 */ 14178 static int bnxt_get_registered_vfs(struct bnxt *bp) 14179 { 14180 #ifdef CONFIG_BNXT_SRIOV 14181 int rc; 14182 14183 if (!BNXT_PF(bp)) 14184 return 0; 14185 14186 rc = bnxt_hwrm_func_qcfg(bp); 14187 if (rc) { 14188 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); 14189 return rc; 14190 } 14191 if (bp->pf.registered_vfs) 14192 return bp->pf.registered_vfs; 14193 if (bp->sriov_cfg) 14194 return 1; 14195 #endif 14196 return 0; 14197 } 14198 14199 void bnxt_fw_reset(struct bnxt *bp) 14200 { 14201 bnxt_ulp_stop(bp); 14202 bnxt_lock_sp(bp); 14203 if (test_bit(BNXT_STATE_OPEN, &bp->state) && 14204 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 14205 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; 14206 int n = 0, tmo; 14207 14208 /* we have to serialize with bnxt_refclk_read()*/ 14209 if (ptp) { 14210 unsigned long flags; 14211 14212 write_seqlock_irqsave(&ptp->ptp_lock, flags); 14213 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14214 write_sequnlock_irqrestore(&ptp->ptp_lock, flags); 14215 } else { 14216 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14217 } 14218 if (bp->pf.active_vfs && 14219 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 14220 n = bnxt_get_registered_vfs(bp); 14221 if (n < 0) { 14222 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", 14223 n); 14224 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14225 netif_close(bp->dev); 14226 goto fw_reset_exit; 14227 } else if (n > 0) { 14228 u16 vf_tmo_dsecs = n * 10; 14229 14230 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) 14231 bp->fw_reset_max_dsecs = vf_tmo_dsecs; 14232 bp->fw_reset_state = 14233 BNXT_FW_RESET_STATE_POLL_VF; 14234 bnxt_queue_fw_reset_work(bp, HZ / 10); 14235 goto fw_reset_exit; 14236 } 14237 bnxt_fw_reset_close(bp); 14238 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14239 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 14240 tmo = HZ / 10; 14241 } else { 14242 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14243 tmo = bp->fw_reset_min_dsecs * HZ / 10; 14244 } 14245 bnxt_queue_fw_reset_work(bp, tmo); 14246 } 14247 fw_reset_exit: 14248 bnxt_unlock_sp(bp); 14249 } 14250 14251 static void bnxt_chk_missed_irq(struct bnxt *bp) 14252 { 14253 int i; 14254 14255 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 14256 return; 14257 14258 for (i = 0; i < bp->cp_nr_rings; i++) { 14259 struct bnxt_napi *bnapi = bp->bnapi[i]; 14260 struct bnxt_cp_ring_info *cpr; 14261 u32 fw_ring_id; 14262 int j; 14263 14264 if (!bnapi) 14265 continue; 14266 14267 cpr = &bnapi->cp_ring; 14268 for (j = 0; j < cpr->cp_ring_count; j++) { 14269 struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; 14270 u32 val[2]; 14271 14272 if (cpr2->has_more_work || !bnxt_has_work(bp, cpr2)) 14273 continue; 14274 14275 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) { 14276 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons; 14277 continue; 14278 } 14279 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; 14280 bnxt_dbg_hwrm_ring_info_get(bp, 14281 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, 14282 fw_ring_id, &val[0], &val[1]); 14283 cpr->sw_stats->cmn.missed_irqs++; 14284 } 14285 } 14286 } 14287 14288 static void bnxt_cfg_ntp_filters(struct bnxt *); 14289 14290 static void bnxt_init_ethtool_link_settings(struct bnxt *bp) 14291 { 14292 struct bnxt_link_info *link_info = &bp->link_info; 14293 14294 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 14295 link_info->autoneg = BNXT_AUTONEG_SPEED; 14296 if (bp->hwrm_spec_code >= 0x10201) { 14297 if (link_info->auto_pause_setting & 14298 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) 14299 link_info->autoneg 
|= BNXT_AUTONEG_FLOW_CTRL; 14300 } else { 14301 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 14302 } 14303 bnxt_set_auto_speed(link_info); 14304 } else { 14305 bnxt_set_force_speed(link_info); 14306 link_info->req_duplex = link_info->duplex_setting; 14307 } 14308 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 14309 link_info->req_flow_ctrl = 14310 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; 14311 else 14312 link_info->req_flow_ctrl = link_info->force_pause_setting; 14313 } 14314 14315 static void bnxt_fw_echo_reply(struct bnxt *bp) 14316 { 14317 struct bnxt_fw_health *fw_health = bp->fw_health; 14318 struct hwrm_func_echo_response_input *req; 14319 int rc; 14320 14321 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); 14322 if (rc) 14323 return; 14324 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); 14325 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); 14326 hwrm_req_send(bp, req); 14327 } 14328 14329 static void bnxt_ulp_restart(struct bnxt *bp) 14330 { 14331 bnxt_ulp_stop(bp); 14332 bnxt_ulp_start(bp, 0); 14333 } 14334 14335 static void bnxt_sp_task(struct work_struct *work) 14336 { 14337 struct bnxt *bp = container_of(work, struct bnxt, sp_task); 14338 14339 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14340 smp_mb__after_atomic(); 14341 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 14342 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14343 return; 14344 } 14345 14346 if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { 14347 bnxt_ulp_restart(bp); 14348 bnxt_reenable_sriov(bp); 14349 } 14350 14351 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) 14352 bnxt_cfg_rx_mode(bp); 14353 14354 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) 14355 bnxt_cfg_ntp_filters(bp); 14356 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) 14357 bnxt_hwrm_exec_fwd_req(bp); 14358 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) 14359 netdev_info(bp->dev, "Receive PF driver unload event!\n"); 14360 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { 14361 bnxt_hwrm_port_qstats(bp, 0); 14362 bnxt_hwrm_port_qstats_ext(bp, 0); 14363 bnxt_accumulate_all_stats(bp); 14364 } 14365 14366 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { 14367 int rc; 14368 14369 mutex_lock(&bp->link_lock); 14370 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, 14371 &bp->sp_event)) 14372 bnxt_hwrm_phy_qcaps(bp); 14373 14374 rc = bnxt_update_link(bp, true); 14375 if (rc) 14376 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", 14377 rc); 14378 14379 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, 14380 &bp->sp_event)) 14381 bnxt_init_ethtool_link_settings(bp); 14382 mutex_unlock(&bp->link_lock); 14383 } 14384 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { 14385 int rc; 14386 14387 mutex_lock(&bp->link_lock); 14388 rc = bnxt_update_phy_setting(bp); 14389 mutex_unlock(&bp->link_lock); 14390 if (rc) { 14391 netdev_warn(bp->dev, "update phy settings retry failed\n"); 14392 } else { 14393 bp->link_info.phy_retry = false; 14394 netdev_info(bp->dev, "update phy settings retry succeeded\n"); 14395 } 14396 } 14397 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { 14398 mutex_lock(&bp->link_lock); 14399 bnxt_get_port_module_status(bp); 14400 mutex_unlock(&bp->link_lock); 14401 } 14402 14403 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) 14404 bnxt_tc_flow_stats_work(bp); 14405 14406 if 
(test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) 14407 bnxt_chk_missed_irq(bp); 14408 14409 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) 14410 bnxt_fw_echo_reply(bp); 14411 14412 if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) 14413 bnxt_hwmon_notify_event(bp); 14414 14415 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They 14416 * must be the last functions to be called before exiting. 14417 */ 14418 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 14419 bnxt_reset(bp, false); 14420 14421 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 14422 bnxt_reset(bp, true); 14423 14424 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) 14425 bnxt_rx_ring_reset(bp); 14426 14427 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { 14428 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || 14429 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) 14430 bnxt_devlink_health_fw_report(bp); 14431 else 14432 bnxt_fw_reset(bp); 14433 } 14434 14435 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { 14436 if (!is_bnxt_fw_ok(bp)) 14437 bnxt_devlink_health_fw_report(bp); 14438 } 14439 14440 smp_mb__before_atomic(); 14441 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 14442 } 14443 14444 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 14445 int *max_cp); 14446 14447 /* Under netdev instance lock */ 14448 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 14449 int tx_xdp) 14450 { 14451 int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; 14452 struct bnxt_hw_rings hwr = {0}; 14453 int rx_rings = rx; 14454 int rc; 14455 14456 if (tcs) 14457 tx_sets = tcs; 14458 14459 _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); 14460 14461 if (max_rx < rx_rings) 14462 return -ENOMEM; 14463 14464 if (bp->flags & BNXT_FLAG_AGG_RINGS) 14465 rx_rings <<= 1; 14466 14467 hwr.rx = rx_rings; 14468 hwr.tx = tx * tx_sets + tx_xdp; 14469 if (max_tx < hwr.tx) 14470 return -ENOMEM; 14471 14472 hwr.vnic = bnxt_get_total_vnics(bp, rx); 14473 14474 tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); 14475 hwr.cp = sh ? 
max_t(int, tx_cp, rx) : tx_cp + rx; 14476 if (max_cp < hwr.cp) 14477 return -ENOMEM; 14478 hwr.stat = hwr.cp; 14479 if (BNXT_NEW_RM(bp)) { 14480 hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); 14481 hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); 14482 hwr.grp = rx; 14483 hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); 14484 } 14485 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 14486 hwr.cp_p5 = hwr.tx + rx; 14487 rc = bnxt_hwrm_check_rings(bp, &hwr); 14488 if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) { 14489 if (!bnxt_ulp_registered(bp->edev)) { 14490 hwr.cp += bnxt_get_ulp_msix_num(bp); 14491 hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp)); 14492 } 14493 if (hwr.cp > bp->total_irqs) { 14494 int total_msix = bnxt_change_msix(bp, hwr.cp); 14495 14496 if (total_msix < hwr.cp) { 14497 netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n", 14498 hwr.cp, total_msix); 14499 rc = -ENOSPC; 14500 } 14501 } 14502 } 14503 return rc; 14504 } 14505 14506 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 14507 { 14508 if (bp->bar2) { 14509 pci_iounmap(pdev, bp->bar2); 14510 bp->bar2 = NULL; 14511 } 14512 14513 if (bp->bar1) { 14514 pci_iounmap(pdev, bp->bar1); 14515 bp->bar1 = NULL; 14516 } 14517 14518 if (bp->bar0) { 14519 pci_iounmap(pdev, bp->bar0); 14520 bp->bar0 = NULL; 14521 } 14522 } 14523 14524 static void bnxt_cleanup_pci(struct bnxt *bp) 14525 { 14526 bnxt_unmap_bars(bp, bp->pdev); 14527 pci_release_regions(bp->pdev); 14528 if (pci_is_enabled(bp->pdev)) 14529 pci_disable_device(bp->pdev); 14530 } 14531 14532 static void bnxt_init_dflt_coal(struct bnxt *bp) 14533 { 14534 struct bnxt_coal_cap *coal_cap = &bp->coal_cap; 14535 struct bnxt_coal *coal; 14536 u16 flags = 0; 14537 14538 if (coal_cap->cmpl_params & 14539 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) 14540 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; 14541 14542 /* Tick values in micro seconds. 14543 * 1 coal_buf x bufs_per_record = 1 completion record. 14544 */ 14545 coal = &bp->rx_coal; 14546 coal->coal_ticks = 10; 14547 coal->coal_bufs = 30; 14548 coal->coal_ticks_irq = 1; 14549 coal->coal_bufs_irq = 2; 14550 coal->idle_thresh = 50; 14551 coal->bufs_per_record = 2; 14552 coal->budget = 64; /* NAPI budget */ 14553 coal->flags = flags; 14554 14555 coal = &bp->tx_coal; 14556 coal->coal_ticks = 28; 14557 coal->coal_bufs = 30; 14558 coal->coal_ticks_irq = 2; 14559 coal->coal_bufs_irq = 2; 14560 coal->bufs_per_record = 1; 14561 coal->flags = flags; 14562 14563 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; 14564 } 14565 14566 /* FW that pre-reserves 1 VNIC per function */ 14567 static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp) 14568 { 14569 u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); 14570 14571 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 14572 (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) 14573 return true; 14574 if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 14575 (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) 14576 return true; 14577 return false; 14578 } 14579 14580 static int bnxt_fw_init_one_p1(struct bnxt *bp) 14581 { 14582 int rc; 14583 14584 bp->fw_cap = 0; 14585 rc = bnxt_hwrm_ver_get(bp); 14586 /* FW may be unresponsive after FLR. FLR must complete within 100 msec 14587 * so wait before continuing with recovery. 
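 * If VER_GET still fails after the wait, attempt firmware recovery
 * and retry VER_GET once more before giving up.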
14588 */ 14589 if (rc) 14590 msleep(100); 14591 bnxt_try_map_fw_health_reg(bp); 14592 if (rc) { 14593 rc = bnxt_try_recover_fw(bp); 14594 if (rc) 14595 return rc; 14596 rc = bnxt_hwrm_ver_get(bp); 14597 if (rc) 14598 return rc; 14599 } 14600 14601 bnxt_nvm_cfg_ver_get(bp); 14602 14603 rc = bnxt_hwrm_func_reset(bp); 14604 if (rc) 14605 return -ENODEV; 14606 14607 bnxt_hwrm_fw_set_time(bp); 14608 return 0; 14609 } 14610 14611 static int bnxt_fw_init_one_p2(struct bnxt *bp) 14612 { 14613 int rc; 14614 14615 /* Get the MAX capabilities for this function */ 14616 rc = bnxt_hwrm_func_qcaps(bp); 14617 if (rc) { 14618 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", 14619 rc); 14620 return -ENODEV; 14621 } 14622 14623 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); 14624 if (rc) 14625 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", 14626 rc); 14627 14628 if (bnxt_alloc_fw_health(bp)) { 14629 netdev_warn(bp->dev, "no memory for firmware error recovery\n"); 14630 } else { 14631 rc = bnxt_hwrm_error_recovery_qcfg(bp); 14632 if (rc) 14633 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", 14634 rc); 14635 } 14636 14637 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); 14638 if (rc) 14639 return -ENODEV; 14640 14641 rc = bnxt_alloc_crash_dump_mem(bp); 14642 if (rc) 14643 netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n", 14644 rc); 14645 if (!rc) { 14646 rc = bnxt_hwrm_crash_dump_mem_cfg(bp); 14647 if (rc) { 14648 bnxt_free_crash_dump_mem(bp); 14649 netdev_warn(bp->dev, 14650 "hwrm crash dump mem failure rc: %d\n", rc); 14651 } 14652 } 14653 14654 if (bnxt_fw_pre_resv_vnics(bp)) 14655 bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS; 14656 14657 bnxt_hwrm_func_qcfg(bp); 14658 bnxt_hwrm_vnic_qcaps(bp); 14659 bnxt_hwrm_port_led_qcaps(bp); 14660 bnxt_ethtool_init(bp); 14661 if (bp->fw_cap & BNXT_FW_CAP_PTP) 14662 __bnxt_hwrm_ptp_qcfg(bp); 14663 bnxt_dcb_init(bp); 14664 bnxt_hwmon_init(bp); 14665 return 0; 14666 } 14667 14668 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) 14669 { 14670 bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; 14671 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | 14672 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | 14673 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | 14674 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; 14675 if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) 14676 bp->rss_hash_delta = bp->rss_hash_cfg; 14677 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { 14678 bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; 14679 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | 14680 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; 14681 } 14682 } 14683 14684 static void bnxt_set_dflt_rfs(struct bnxt *bp) 14685 { 14686 struct net_device *dev = bp->dev; 14687 14688 dev->hw_features &= ~NETIF_F_NTUPLE; 14689 dev->features &= ~NETIF_F_NTUPLE; 14690 bp->flags &= ~BNXT_FLAG_RFS; 14691 if (bnxt_rfs_supported(bp)) { 14692 dev->hw_features |= NETIF_F_NTUPLE; 14693 if (bnxt_rfs_capable(bp, false)) { 14694 bp->flags |= BNXT_FLAG_RFS; 14695 dev->features |= NETIF_F_NTUPLE; 14696 } 14697 } 14698 } 14699 14700 static void bnxt_fw_init_one_p3(struct bnxt *bp) 14701 { 14702 struct pci_dev *pdev = bp->pdev; 14703 14704 bnxt_set_dflt_rss_hash_type(bp); 14705 bnxt_set_dflt_rfs(bp); 14706 14707 bnxt_get_wol_settings(bp); 14708 if (bp->flags & BNXT_FLAG_WOL_CAP) 14709 device_set_wakeup_enable(&pdev->dev, bp->wol); 14710 else 14711 device_set_wakeup_capable(&pdev->dev, false); 14712 14713 bnxt_hwrm_set_cache_line_size(bp, cache_line_size()); 14714 
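/* Query the coalescing parameter capabilities (bp->coal_cap) from
 * the firmware; bnxt_init_dflt_coal() checks these when picking the
 * default interrupt coalescing settings.
 */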
bnxt_hwrm_coal_params_qcaps(bp); 14715 } 14716 14717 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); 14718 14719 int bnxt_fw_init_one(struct bnxt *bp) 14720 { 14721 int rc; 14722 14723 rc = bnxt_fw_init_one_p1(bp); 14724 if (rc) { 14725 netdev_err(bp->dev, "Firmware init phase 1 failed\n"); 14726 return rc; 14727 } 14728 rc = bnxt_fw_init_one_p2(bp); 14729 if (rc) { 14730 netdev_err(bp->dev, "Firmware init phase 2 failed\n"); 14731 return rc; 14732 } 14733 rc = bnxt_probe_phy(bp, false); 14734 if (rc) 14735 return rc; 14736 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); 14737 if (rc) 14738 return rc; 14739 14740 bnxt_fw_init_one_p3(bp); 14741 return 0; 14742 } 14743 14744 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) 14745 { 14746 struct bnxt_fw_health *fw_health = bp->fw_health; 14747 u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; 14748 u32 val = fw_health->fw_reset_seq_vals[reg_idx]; 14749 u32 reg_type, reg_off, delay_msecs; 14750 14751 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; 14752 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); 14753 reg_off = BNXT_FW_HEALTH_REG_OFF(reg); 14754 switch (reg_type) { 14755 case BNXT_FW_HEALTH_REG_TYPE_CFG: 14756 pci_write_config_dword(bp->pdev, reg_off, val); 14757 break; 14758 case BNXT_FW_HEALTH_REG_TYPE_GRC: 14759 writel(reg_off & BNXT_GRC_BASE_MASK, 14760 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); 14761 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000; 14762 fallthrough; 14763 case BNXT_FW_HEALTH_REG_TYPE_BAR0: 14764 writel(val, bp->bar0 + reg_off); 14765 break; 14766 case BNXT_FW_HEALTH_REG_TYPE_BAR1: 14767 writel(val, bp->bar1 + reg_off); 14768 break; 14769 } 14770 if (delay_msecs) { 14771 pci_read_config_dword(bp->pdev, 0, &val); 14772 msleep(delay_msecs); 14773 } 14774 } 14775 14776 bool bnxt_hwrm_reset_permitted(struct bnxt *bp) 14777 { 14778 struct hwrm_func_qcfg_output *resp; 14779 struct hwrm_func_qcfg_input *req; 14780 bool result = true; /* firmware will enforce if unknown */ 14781 14782 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) 14783 return result; 14784 14785 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) 14786 return result; 14787 14788 req->fid = cpu_to_le16(0xffff); 14789 resp = hwrm_req_hold(bp, req); 14790 if (!hwrm_req_send(bp, req)) 14791 result = !!(le16_to_cpu(resp->flags) & 14792 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); 14793 hwrm_req_drop(bp, req); 14794 return result; 14795 } 14796 14797 static void bnxt_reset_all(struct bnxt *bp) 14798 { 14799 struct bnxt_fw_health *fw_health = bp->fw_health; 14800 int i, rc; 14801 14802 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14803 bnxt_fw_reset_via_optee(bp); 14804 bp->fw_reset_timestamp = jiffies; 14805 return; 14806 } 14807 14808 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { 14809 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) 14810 bnxt_fw_reset_writel(bp, i); 14811 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { 14812 struct hwrm_fw_reset_input *req; 14813 14814 rc = hwrm_req_init(bp, req, HWRM_FW_RESET); 14815 if (!rc) { 14816 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); 14817 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; 14818 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; 14819 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; 14820 rc = hwrm_req_send(bp, req); 14821 } 14822 if (rc != -ENODEV) 14823 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); 14824 } 14825 bp->fw_reset_timestamp = jiffies; 14826 } 14827 14828 static bool 
bnxt_fw_reset_timeout(struct bnxt *bp) 14829 { 14830 return time_after(jiffies, bp->fw_reset_timestamp + 14831 (bp->fw_reset_max_dsecs * HZ / 10)); 14832 } 14833 14834 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) 14835 { 14836 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14837 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) 14838 bnxt_dl_health_fw_status_update(bp, false); 14839 bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT; 14840 netif_close(bp->dev); 14841 } 14842 14843 static void bnxt_fw_reset_task(struct work_struct *work) 14844 { 14845 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); 14846 int rc = 0; 14847 14848 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 14849 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); 14850 return; 14851 } 14852 14853 switch (bp->fw_reset_state) { 14854 case BNXT_FW_RESET_STATE_POLL_VF: { 14855 int n = bnxt_get_registered_vfs(bp); 14856 int tmo; 14857 14858 if (n < 0) { 14859 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", 14860 n, jiffies_to_msecs(jiffies - 14861 bp->fw_reset_timestamp)); 14862 goto fw_reset_abort; 14863 } else if (n > 0) { 14864 if (bnxt_fw_reset_timeout(bp)) { 14865 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14866 bp->fw_reset_state = 0; 14867 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", 14868 n); 14869 goto ulp_start; 14870 } 14871 bnxt_queue_fw_reset_work(bp, HZ / 10); 14872 return; 14873 } 14874 bp->fw_reset_timestamp = jiffies; 14875 netdev_lock(bp->dev); 14876 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { 14877 bnxt_fw_reset_abort(bp, rc); 14878 netdev_unlock(bp->dev); 14879 goto ulp_start; 14880 } 14881 bnxt_fw_reset_close(bp); 14882 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { 14883 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; 14884 tmo = HZ / 10; 14885 } else { 14886 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14887 tmo = bp->fw_reset_min_dsecs * HZ / 10; 14888 } 14889 netdev_unlock(bp->dev); 14890 bnxt_queue_fw_reset_work(bp, tmo); 14891 return; 14892 } 14893 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { 14894 u32 val; 14895 14896 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 14897 if (!(val & BNXT_FW_STATUS_SHUTDOWN) && 14898 !bnxt_fw_reset_timeout(bp)) { 14899 bnxt_queue_fw_reset_work(bp, HZ / 5); 14900 return; 14901 } 14902 14903 if (!bp->fw_health->primary) { 14904 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs; 14905 14906 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14907 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); 14908 return; 14909 } 14910 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; 14911 } 14912 fallthrough; 14913 case BNXT_FW_RESET_STATE_RESET_FW: 14914 bnxt_reset_all(bp); 14915 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; 14916 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); 14917 return; 14918 case BNXT_FW_RESET_STATE_ENABLE_DEV: 14919 bnxt_inv_fw_health_reg(bp); 14920 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && 14921 !bp->fw_reset_min_dsecs) { 14922 u16 val; 14923 14924 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); 14925 if (val == 0xffff) { 14926 if (bnxt_fw_reset_timeout(bp)) { 14927 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); 14928 rc = -ETIMEDOUT; 14929 goto fw_reset_abort; 14930 } 14931 bnxt_queue_fw_reset_work(bp, HZ / 1000); 14932 return; 14933 } 14934 } 14935 
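/* Firmware is expected to be back: clear the fatal state flags,
 * re-enable the PCI device and bus mastering, then poll the firmware
 * until it responds to HWRM commands.
 */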
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); 14936 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); 14937 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && 14938 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) 14939 bnxt_dl_remote_reload(bp); 14940 if (pci_enable_device(bp->pdev)) { 14941 netdev_err(bp->dev, "Cannot re-enable PCI device\n"); 14942 rc = -ENODEV; 14943 goto fw_reset_abort; 14944 } 14945 pci_set_master(bp->pdev); 14946 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; 14947 fallthrough; 14948 case BNXT_FW_RESET_STATE_POLL_FW: 14949 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; 14950 rc = bnxt_hwrm_poll(bp); 14951 if (rc) { 14952 if (bnxt_fw_reset_timeout(bp)) { 14953 netdev_err(bp->dev, "Firmware reset aborted\n"); 14954 goto fw_reset_abort_status; 14955 } 14956 bnxt_queue_fw_reset_work(bp, HZ / 5); 14957 return; 14958 } 14959 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; 14960 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; 14961 fallthrough; 14962 case BNXT_FW_RESET_STATE_OPENING: 14963 while (!netdev_trylock(bp->dev)) { 14964 bnxt_queue_fw_reset_work(bp, HZ / 10); 14965 return; 14966 } 14967 rc = bnxt_open(bp->dev); 14968 if (rc) { 14969 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); 14970 bnxt_fw_reset_abort(bp, rc); 14971 netdev_unlock(bp->dev); 14972 goto ulp_start; 14973 } 14974 14975 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && 14976 bp->fw_health->enabled) { 14977 bp->fw_health->last_fw_reset_cnt = 14978 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); 14979 } 14980 bp->fw_reset_state = 0; 14981 /* Make sure fw_reset_state is 0 before clearing the flag */ 14982 smp_mb__before_atomic(); 14983 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 14984 bnxt_ptp_reapply_pps(bp); 14985 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); 14986 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { 14987 bnxt_dl_health_fw_recovery_done(bp); 14988 bnxt_dl_health_fw_status_update(bp, true); 14989 } 14990 netdev_unlock(bp->dev); 14991 bnxt_ulp_start(bp, 0); 14992 bnxt_reenable_sriov(bp); 14993 netdev_lock(bp->dev); 14994 bnxt_vf_reps_alloc(bp); 14995 bnxt_vf_reps_open(bp); 14996 netdev_unlock(bp->dev); 14997 break; 14998 } 14999 return; 15000 15001 fw_reset_abort_status: 15002 if (bp->fw_health->status_reliable || 15003 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { 15004 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); 15005 15006 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); 15007 } 15008 fw_reset_abort: 15009 netdev_lock(bp->dev); 15010 bnxt_fw_reset_abort(bp, rc); 15011 netdev_unlock(bp->dev); 15012 ulp_start: 15013 bnxt_ulp_start(bp, rc); 15014 } 15015 15016 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) 15017 { 15018 int rc; 15019 struct bnxt *bp = netdev_priv(dev); 15020 15021 SET_NETDEV_DEV(dev, &pdev->dev); 15022 15023 /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ 15024 rc = pci_enable_device(pdev); 15025 if (rc) { 15026 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 15027 goto init_err; 15028 } 15029 15030 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 15031 dev_err(&pdev->dev, 15032 "Cannot find PCI device base address, aborting\n"); 15033 rc = -ENODEV; 15034 goto init_err_disable; 15035 } 15036 15037 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 15038 if (rc) { 15039 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); 15040 goto init_err_disable; 15041 } 15042 15043 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && 15044 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { 15045 dev_err(&pdev->dev, "System does not support DMA, aborting\n"); 15046 rc = -EIO; 15047 goto init_err_release; 15048 } 15049 15050 pci_set_master(pdev); 15051 15052 bp->dev = dev; 15053 bp->pdev = pdev; 15054 15055 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() 15056 * determines the BAR size. 15057 */ 15058 bp->bar0 = pci_ioremap_bar(pdev, 0); 15059 if (!bp->bar0) { 15060 dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); 15061 rc = -ENOMEM; 15062 goto init_err_release; 15063 } 15064 15065 bp->bar2 = pci_ioremap_bar(pdev, 4); 15066 if (!bp->bar2) { 15067 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); 15068 rc = -ENOMEM; 15069 goto init_err_release; 15070 } 15071 15072 INIT_WORK(&bp->sp_task, bnxt_sp_task); 15073 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); 15074 15075 spin_lock_init(&bp->ntp_fltr_lock); 15076 #if BITS_PER_LONG == 32 15077 spin_lock_init(&bp->db_lock); 15078 #endif 15079 15080 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; 15081 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; 15082 15083 timer_setup(&bp->timer, bnxt_timer, 0); 15084 bp->current_interval = BNXT_TIMER_INTERVAL; 15085 15086 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; 15087 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; 15088 15089 clear_bit(BNXT_STATE_OPEN, &bp->state); 15090 return 0; 15091 15092 init_err_release: 15093 bnxt_unmap_bars(bp, pdev); 15094 pci_release_regions(pdev); 15095 15096 init_err_disable: 15097 pci_disable_device(pdev); 15098 15099 init_err: 15100 return rc; 15101 } 15102 15103 static int bnxt_change_mac_addr(struct net_device *dev, void *p) 15104 { 15105 struct sockaddr *addr = p; 15106 struct bnxt *bp = netdev_priv(dev); 15107 int rc = 0; 15108 15109 netdev_assert_locked(dev); 15110 15111 if (!is_valid_ether_addr(addr->sa_data)) 15112 return -EADDRNOTAVAIL; 15113 15114 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 15115 return 0; 15116 15117 rc = bnxt_approve_mac(bp, addr->sa_data, true); 15118 if (rc) 15119 return rc; 15120 15121 eth_hw_addr_set(dev, addr->sa_data); 15122 bnxt_clear_usr_fltrs(bp, true); 15123 if (netif_running(dev)) { 15124 bnxt_close_nic(bp, false, false); 15125 rc = bnxt_open_nic(bp, false, false); 15126 } 15127 15128 return rc; 15129 } 15130 15131 static int bnxt_change_mtu(struct net_device *dev, int new_mtu) 15132 { 15133 struct bnxt *bp = netdev_priv(dev); 15134 15135 netdev_assert_locked(dev); 15136 15137 if (netif_running(dev)) 15138 bnxt_close_nic(bp, true, false); 15139 15140 WRITE_ONCE(dev->mtu, new_mtu); 15141 15142 /* MTU change may change the AGG ring settings if an XDP multi-buffer 15143 * program is attached. We need to set the AGG rings settings and 15144 * rx_skb_func accordingly. 
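 * bnxt_set_rx_skb_mode() re-evaluates those settings for the new MTU
 * before the ring parameters are recomputed below.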
15145 */ 15146 if (READ_ONCE(bp->xdp_prog)) 15147 bnxt_set_rx_skb_mode(bp, true); 15148 15149 bnxt_set_ring_params(bp); 15150 15151 if (netif_running(dev)) 15152 return bnxt_open_nic(bp, true, false); 15153 15154 return 0; 15155 } 15156 15157 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) 15158 { 15159 struct bnxt *bp = netdev_priv(dev); 15160 bool sh = false; 15161 int rc, tx_cp; 15162 15163 if (tc > bp->max_tc) { 15164 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", 15165 tc, bp->max_tc); 15166 return -EINVAL; 15167 } 15168 15169 if (bp->num_tc == tc) 15170 return 0; 15171 15172 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 15173 sh = true; 15174 15175 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 15176 sh, tc, bp->tx_nr_rings_xdp); 15177 if (rc) 15178 return rc; 15179 15180 /* Needs to close the device and do hw resource re-allocations */ 15181 if (netif_running(bp->dev)) 15182 bnxt_close_nic(bp, true, false); 15183 15184 if (tc) { 15185 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 15186 netdev_set_num_tc(dev, tc); 15187 bp->num_tc = tc; 15188 } else { 15189 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 15190 netdev_reset_tc(dev); 15191 bp->num_tc = 0; 15192 } 15193 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 15194 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); 15195 bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : 15196 tx_cp + bp->rx_nr_rings; 15197 15198 if (netif_running(bp->dev)) 15199 return bnxt_open_nic(bp, true, false); 15200 15201 return 0; 15202 } 15203 15204 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 15205 void *cb_priv) 15206 { 15207 struct bnxt *bp = cb_priv; 15208 15209 if (!bnxt_tc_flower_enabled(bp) || 15210 !tc_cls_can_offload_and_chain0(bp->dev, type_data)) 15211 return -EOPNOTSUPP; 15212 15213 switch (type) { 15214 case TC_SETUP_CLSFLOWER: 15215 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); 15216 default: 15217 return -EOPNOTSUPP; 15218 } 15219 } 15220 15221 LIST_HEAD(bnxt_block_cb_list); 15222 15223 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, 15224 void *type_data) 15225 { 15226 struct bnxt *bp = netdev_priv(dev); 15227 15228 switch (type) { 15229 case TC_SETUP_BLOCK: 15230 return flow_block_cb_setup_simple(type_data, 15231 &bnxt_block_cb_list, 15232 bnxt_setup_tc_block_cb, 15233 bp, bp, true); 15234 case TC_SETUP_QDISC_MQPRIO: { 15235 struct tc_mqprio_qopt *mqprio = type_data; 15236 15237 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 15238 15239 return bnxt_setup_mq_tc(dev, mqprio->num_tc); 15240 } 15241 default: 15242 return -EOPNOTSUPP; 15243 } 15244 } 15245 15246 u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, 15247 const struct sk_buff *skb) 15248 { 15249 struct bnxt_vnic_info *vnic; 15250 15251 if (skb) 15252 return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; 15253 15254 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; 15255 return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); 15256 } 15257 15258 int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, 15259 u32 idx) 15260 { 15261 struct hlist_head *head; 15262 int bit_id; 15263 15264 spin_lock_bh(&bp->ntp_fltr_lock); 15265 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); 15266 if (bit_id < 0) { 15267 spin_unlock_bh(&bp->ntp_fltr_lock); 15268 return -ENOMEM; 15269 } 15270 15271 fltr->base.sw_id = (u16)bit_id; 15272 fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; 15273 fltr->base.flags |= BNXT_ACT_RING_DST; 15274 head = 
&bp->ntp_fltr_hash_tbl[idx]; 15275 hlist_add_head_rcu(&fltr->base.hash, head); 15276 set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); 15277 bnxt_insert_usr_fltr(bp, &fltr->base); 15278 bp->ntp_fltr_count++; 15279 spin_unlock_bh(&bp->ntp_fltr_lock); 15280 return 0; 15281 } 15282 15283 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, 15284 struct bnxt_ntuple_filter *f2) 15285 { 15286 struct bnxt_flow_masks *masks1 = &f1->fmasks; 15287 struct bnxt_flow_masks *masks2 = &f2->fmasks; 15288 struct flow_keys *keys1 = &f1->fkeys; 15289 struct flow_keys *keys2 = &f2->fkeys; 15290 15291 if (keys1->basic.n_proto != keys2->basic.n_proto || 15292 keys1->basic.ip_proto != keys2->basic.ip_proto) 15293 return false; 15294 15295 if (keys1->basic.n_proto == htons(ETH_P_IP)) { 15296 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || 15297 masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src || 15298 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst || 15299 masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst) 15300 return false; 15301 } else { 15302 if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src, 15303 &keys2->addrs.v6addrs.src) || 15304 !ipv6_addr_equal(&masks1->addrs.v6addrs.src, 15305 &masks2->addrs.v6addrs.src) || 15306 !ipv6_addr_equal(&keys1->addrs.v6addrs.dst, 15307 &keys2->addrs.v6addrs.dst) || 15308 !ipv6_addr_equal(&masks1->addrs.v6addrs.dst, 15309 &masks2->addrs.v6addrs.dst)) 15310 return false; 15311 } 15312 15313 return keys1->ports.src == keys2->ports.src && 15314 masks1->ports.src == masks2->ports.src && 15315 keys1->ports.dst == keys2->ports.dst && 15316 masks1->ports.dst == masks2->ports.dst && 15317 keys1->control.flags == keys2->control.flags && 15318 f1->l2_fltr == f2->l2_fltr; 15319 } 15320 15321 struct bnxt_ntuple_filter * 15322 bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, 15323 struct bnxt_ntuple_filter *fltr, u32 idx) 15324 { 15325 struct bnxt_ntuple_filter *f; 15326 struct hlist_head *head; 15327 15328 head = &bp->ntp_fltr_hash_tbl[idx]; 15329 hlist_for_each_entry_rcu(f, head, base.hash) { 15330 if (bnxt_fltr_match(f, fltr)) 15331 return f; 15332 } 15333 return NULL; 15334 } 15335 15336 #ifdef CONFIG_RFS_ACCEL 15337 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, 15338 u16 rxq_index, u32 flow_id) 15339 { 15340 struct bnxt *bp = netdev_priv(dev); 15341 struct bnxt_ntuple_filter *fltr, *new_fltr; 15342 struct flow_keys *fkeys; 15343 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); 15344 struct bnxt_l2_filter *l2_fltr; 15345 int rc = 0, idx; 15346 u32 flags; 15347 15348 if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { 15349 l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; 15350 atomic_inc(&l2_fltr->refcnt); 15351 } else { 15352 struct bnxt_l2_key key; 15353 15354 ether_addr_copy(key.dst_mac_addr, eth->h_dest); 15355 key.vlan = 0; 15356 l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); 15357 if (!l2_fltr) 15358 return -EINVAL; 15359 if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { 15360 bnxt_del_l2_filter(bp, l2_fltr); 15361 return -EINVAL; 15362 } 15363 } 15364 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); 15365 if (!new_fltr) { 15366 bnxt_del_l2_filter(bp, l2_fltr); 15367 return -ENOMEM; 15368 } 15369 15370 fkeys = &new_fltr->fkeys; 15371 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { 15372 rc = -EPROTONOSUPPORT; 15373 goto err_free; 15374 } 15375 15376 if ((fkeys->basic.n_proto != htons(ETH_P_IP) && 15377 fkeys->basic.n_proto != htons(ETH_P_IPV6)) || 15378 ((fkeys->basic.ip_proto != 
IPPROTO_TCP) && 15379 (fkeys->basic.ip_proto != IPPROTO_UDP))) { 15380 rc = -EPROTONOSUPPORT; 15381 goto err_free; 15382 } 15383 new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL; 15384 if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { 15385 if (bp->hwrm_spec_code < 0x10601) { 15386 rc = -EPROTONOSUPPORT; 15387 goto err_free; 15388 } 15389 new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL; 15390 } 15391 flags = fkeys->control.flags; 15392 if (((flags & FLOW_DIS_ENCAPSULATION) && 15393 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { 15394 rc = -EPROTONOSUPPORT; 15395 goto err_free; 15396 } 15397 new_fltr->l2_fltr = l2_fltr; 15398 15399 idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); 15400 rcu_read_lock(); 15401 fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); 15402 if (fltr) { 15403 rc = fltr->base.sw_id; 15404 rcu_read_unlock(); 15405 goto err_free; 15406 } 15407 rcu_read_unlock(); 15408 15409 new_fltr->flow_id = flow_id; 15410 new_fltr->base.rxq = rxq_index; 15411 rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); 15412 if (!rc) { 15413 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); 15414 return new_fltr->base.sw_id; 15415 } 15416 15417 err_free: 15418 bnxt_del_l2_filter(bp, l2_fltr); 15419 kfree(new_fltr); 15420 return rc; 15421 } 15422 #endif 15423 15424 void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) 15425 { 15426 spin_lock_bh(&bp->ntp_fltr_lock); 15427 if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { 15428 spin_unlock_bh(&bp->ntp_fltr_lock); 15429 return; 15430 } 15431 hlist_del_rcu(&fltr->base.hash); 15432 bnxt_del_one_usr_fltr(bp, &fltr->base); 15433 bp->ntp_fltr_count--; 15434 spin_unlock_bh(&bp->ntp_fltr_lock); 15435 bnxt_del_l2_filter(bp, fltr->l2_fltr); 15436 clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); 15437 kfree_rcu(fltr, base.rcu); 15438 } 15439 15440 static void bnxt_cfg_ntp_filters(struct bnxt *bp) 15441 { 15442 #ifdef CONFIG_RFS_ACCEL 15443 int i; 15444 15445 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { 15446 struct hlist_head *head; 15447 struct hlist_node *tmp; 15448 struct bnxt_ntuple_filter *fltr; 15449 int rc; 15450 15451 head = &bp->ntp_fltr_hash_tbl[i]; 15452 hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { 15453 bool del = false; 15454 15455 if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) { 15456 if (fltr->base.flags & BNXT_ACT_NO_AGING) 15457 continue; 15458 if (rps_may_expire_flow(bp->dev, fltr->base.rxq, 15459 fltr->flow_id, 15460 fltr->base.sw_id)) { 15461 bnxt_hwrm_cfa_ntuple_filter_free(bp, 15462 fltr); 15463 del = true; 15464 } 15465 } else { 15466 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, 15467 fltr); 15468 if (rc) 15469 del = true; 15470 else 15471 set_bit(BNXT_FLTR_VALID, &fltr->base.state); 15472 } 15473 15474 if (del) 15475 bnxt_del_ntp_filter(bp, fltr); 15476 } 15477 } 15478 #endif 15479 } 15480 15481 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, 15482 unsigned int entry, struct udp_tunnel_info *ti) 15483 { 15484 struct bnxt *bp = netdev_priv(netdev); 15485 unsigned int cmd; 15486 15487 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 15488 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN; 15489 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 15490 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE; 15491 else 15492 cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE; 15493 15494 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); 15495 } 15496 15497 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, 15498 
unsigned int entry, struct udp_tunnel_info *ti) 15499 { 15500 struct bnxt *bp = netdev_priv(netdev); 15501 unsigned int cmd; 15502 15503 if (ti->type == UDP_TUNNEL_TYPE_VXLAN) 15504 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; 15505 else if (ti->type == UDP_TUNNEL_TYPE_GENEVE) 15506 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; 15507 else 15508 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE; 15509 15510 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); 15511 } 15512 15513 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { 15514 .set_port = bnxt_udp_tunnel_set_port, 15515 .unset_port = bnxt_udp_tunnel_unset_port, 15516 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 15517 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 15518 .tables = { 15519 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 15520 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 15521 }, 15522 }, bnxt_udp_tunnels_p7 = { 15523 .set_port = bnxt_udp_tunnel_set_port, 15524 .unset_port = bnxt_udp_tunnel_unset_port, 15525 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | 15526 UDP_TUNNEL_NIC_INFO_OPEN_ONLY, 15527 .tables = { 15528 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 15529 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, 15530 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, }, 15531 }, 15532 }; 15533 15534 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 15535 struct net_device *dev, u32 filter_mask, 15536 int nlflags) 15537 { 15538 struct bnxt *bp = netdev_priv(dev); 15539 15540 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, 15541 nlflags, filter_mask, NULL); 15542 } 15543 15544 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, 15545 u16 flags, struct netlink_ext_ack *extack) 15546 { 15547 struct bnxt *bp = netdev_priv(dev); 15548 struct nlattr *attr, *br_spec; 15549 int rem, rc = 0; 15550 15551 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) 15552 return -EOPNOTSUPP; 15553 15554 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 15555 if (!br_spec) 15556 return -EINVAL; 15557 15558 nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { 15559 u16 mode; 15560 15561 mode = nla_get_u16(attr); 15562 if (mode == bp->br_mode) 15563 break; 15564 15565 rc = bnxt_hwrm_set_br_mode(bp, mode); 15566 if (!rc) 15567 bp->br_mode = mode; 15568 break; 15569 } 15570 return rc; 15571 } 15572 15573 int bnxt_get_port_parent_id(struct net_device *dev, 15574 struct netdev_phys_item_id *ppid) 15575 { 15576 struct bnxt *bp = netdev_priv(dev); 15577 15578 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 15579 return -EOPNOTSUPP; 15580 15581 /* The PF and it's VF-reps only support the switchdev framework */ 15582 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) 15583 return -EOPNOTSUPP; 15584 15585 ppid->id_len = sizeof(bp->dsn); 15586 memcpy(ppid->id, bp->dsn, ppid->id_len); 15587 15588 return 0; 15589 } 15590 15591 static const struct net_device_ops bnxt_netdev_ops = { 15592 .ndo_open = bnxt_open, 15593 .ndo_start_xmit = bnxt_start_xmit, 15594 .ndo_stop = bnxt_close, 15595 .ndo_get_stats64 = bnxt_get_stats64, 15596 .ndo_set_rx_mode = bnxt_set_rx_mode, 15597 .ndo_eth_ioctl = bnxt_ioctl, 15598 .ndo_validate_addr = eth_validate_addr, 15599 .ndo_set_mac_address = bnxt_change_mac_addr, 15600 .ndo_change_mtu = bnxt_change_mtu, 15601 .ndo_fix_features = bnxt_fix_features, 15602 .ndo_set_features = bnxt_set_features, 15603 .ndo_features_check = bnxt_features_check, 15604 
.ndo_tx_timeout = bnxt_tx_timeout, 15605 #ifdef CONFIG_BNXT_SRIOV 15606 .ndo_get_vf_config = bnxt_get_vf_config, 15607 .ndo_set_vf_mac = bnxt_set_vf_mac, 15608 .ndo_set_vf_vlan = bnxt_set_vf_vlan, 15609 .ndo_set_vf_rate = bnxt_set_vf_bw, 15610 .ndo_set_vf_link_state = bnxt_set_vf_link_state, 15611 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 15612 .ndo_set_vf_trust = bnxt_set_vf_trust, 15613 #endif 15614 .ndo_setup_tc = bnxt_setup_tc, 15615 #ifdef CONFIG_RFS_ACCEL 15616 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 15617 #endif 15618 .ndo_bpf = bnxt_xdp, 15619 .ndo_xdp_xmit = bnxt_xdp_xmit, 15620 .ndo_bridge_getlink = bnxt_bridge_getlink, 15621 .ndo_bridge_setlink = bnxt_bridge_setlink, 15622 }; 15623 15624 static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, 15625 struct netdev_queue_stats_rx *stats) 15626 { 15627 struct bnxt *bp = netdev_priv(dev); 15628 struct bnxt_cp_ring_info *cpr; 15629 u64 *sw; 15630 15631 if (!bp->bnapi) 15632 return; 15633 15634 cpr = &bp->bnapi[i]->cp_ring; 15635 sw = cpr->stats.sw_stats; 15636 15637 stats->packets = 0; 15638 stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); 15639 stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); 15640 stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); 15641 15642 stats->bytes = 0; 15643 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); 15644 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); 15645 stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); 15646 15647 stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards; 15648 } 15649 15650 static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, 15651 struct netdev_queue_stats_tx *stats) 15652 { 15653 struct bnxt *bp = netdev_priv(dev); 15654 struct bnxt_napi *bnapi; 15655 u64 *sw; 15656 15657 if (!bp->tx_ring) 15658 return; 15659 15660 bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; 15661 sw = bnapi->cp_ring.stats.sw_stats; 15662 15663 stats->packets = 0; 15664 stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); 15665 stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); 15666 stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); 15667 15668 stats->bytes = 0; 15669 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); 15670 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); 15671 stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); 15672 } 15673 15674 static void bnxt_get_base_stats(struct net_device *dev, 15675 struct netdev_queue_stats_rx *rx, 15676 struct netdev_queue_stats_tx *tx) 15677 { 15678 struct bnxt *bp = netdev_priv(dev); 15679 15680 rx->packets = bp->net_stats_prev.rx_packets; 15681 rx->bytes = bp->net_stats_prev.rx_bytes; 15682 rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards; 15683 15684 tx->packets = bp->net_stats_prev.tx_packets; 15685 tx->bytes = bp->net_stats_prev.tx_bytes; 15686 } 15687 15688 static const struct netdev_stat_ops bnxt_stat_ops = { 15689 .get_queue_stats_rx = bnxt_get_queue_stats_rx, 15690 .get_queue_stats_tx = bnxt_get_queue_stats_tx, 15691 .get_base_stats = bnxt_get_base_stats, 15692 }; 15693 15694 static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) 15695 { 15696 struct bnxt_rx_ring_info *rxr, *clone; 15697 struct bnxt *bp = netdev_priv(dev); 15698 struct bnxt_ring_struct *ring; 15699 int rc; 15700 15701 if (!bp->rx_ring) 15702 return -ENETDOWN; 15703 15704 rxr = &bp->rx_ring[idx]; 15705 clone = qmem; 15706 memcpy(clone, rxr, sizeof(*rxr)); 15707 bnxt_init_rx_ring_struct(bp, clone); 15708 
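	/* Everything allocated below belongs to the clone only; if the
	 * restart goes ahead, bnxt_queue_start() copies this state into
	 * the live ring.
	 */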
bnxt_reset_rx_ring_struct(bp, clone); 15709 15710 clone->rx_prod = 0; 15711 clone->rx_agg_prod = 0; 15712 clone->rx_sw_agg_prod = 0; 15713 clone->rx_next_cons = 0; 15714 15715 rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid); 15716 if (rc) 15717 return rc; 15718 15719 rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0); 15720 if (rc < 0) 15721 goto err_page_pool_destroy; 15722 15723 rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq, 15724 MEM_TYPE_PAGE_POOL, 15725 clone->page_pool); 15726 if (rc) 15727 goto err_rxq_info_unreg; 15728 15729 ring = &clone->rx_ring_struct; 15730 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 15731 if (rc) 15732 goto err_free_rx_ring; 15733 15734 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 15735 ring = &clone->rx_agg_ring_struct; 15736 rc = bnxt_alloc_ring(bp, &ring->ring_mem); 15737 if (rc) 15738 goto err_free_rx_agg_ring; 15739 15740 rc = bnxt_alloc_rx_agg_bmap(bp, clone); 15741 if (rc) 15742 goto err_free_rx_agg_ring; 15743 } 15744 15745 if (bp->flags & BNXT_FLAG_TPA) { 15746 rc = bnxt_alloc_one_tpa_info(bp, clone); 15747 if (rc) 15748 goto err_free_tpa_info; 15749 } 15750 15751 bnxt_init_one_rx_ring_rxbd(bp, clone); 15752 bnxt_init_one_rx_agg_ring_rxbd(bp, clone); 15753 15754 bnxt_alloc_one_rx_ring_skb(bp, clone, idx); 15755 if (bp->flags & BNXT_FLAG_AGG_RINGS) 15756 bnxt_alloc_one_rx_ring_page(bp, clone, idx); 15757 if (bp->flags & BNXT_FLAG_TPA) 15758 bnxt_alloc_one_tpa_info_data(bp, clone); 15759 15760 return 0; 15761 15762 err_free_tpa_info: 15763 bnxt_free_one_tpa_info(bp, clone); 15764 err_free_rx_agg_ring: 15765 bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem); 15766 err_free_rx_ring: 15767 bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem); 15768 err_rxq_info_unreg: 15769 xdp_rxq_info_unreg(&clone->xdp_rxq); 15770 err_page_pool_destroy: 15771 page_pool_destroy(clone->page_pool); 15772 if (bnxt_separate_head_pool()) 15773 page_pool_destroy(clone->head_pool); 15774 clone->page_pool = NULL; 15775 clone->head_pool = NULL; 15776 return rc; 15777 } 15778 15779 static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) 15780 { 15781 struct bnxt_rx_ring_info *rxr = qmem; 15782 struct bnxt *bp = netdev_priv(dev); 15783 struct bnxt_ring_struct *ring; 15784 15785 bnxt_free_one_rx_ring_skbs(bp, rxr); 15786 bnxt_free_one_tpa_info(bp, rxr); 15787 15788 xdp_rxq_info_unreg(&rxr->xdp_rxq); 15789 15790 page_pool_destroy(rxr->page_pool); 15791 if (bnxt_separate_head_pool()) 15792 page_pool_destroy(rxr->head_pool); 15793 rxr->page_pool = NULL; 15794 rxr->head_pool = NULL; 15795 15796 ring = &rxr->rx_ring_struct; 15797 bnxt_free_ring(bp, &ring->ring_mem); 15798 15799 ring = &rxr->rx_agg_ring_struct; 15800 bnxt_free_ring(bp, &ring->ring_mem); 15801 15802 kfree(rxr->rx_agg_bmap); 15803 rxr->rx_agg_bmap = NULL; 15804 } 15805 15806 static void bnxt_copy_rx_ring(struct bnxt *bp, 15807 struct bnxt_rx_ring_info *dst, 15808 struct bnxt_rx_ring_info *src) 15809 { 15810 struct bnxt_ring_mem_info *dst_rmem, *src_rmem; 15811 struct bnxt_ring_struct *dst_ring, *src_ring; 15812 int i; 15813 15814 dst_ring = &dst->rx_ring_struct; 15815 dst_rmem = &dst_ring->ring_mem; 15816 src_ring = &src->rx_ring_struct; 15817 src_rmem = &src_ring->ring_mem; 15818 15819 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); 15820 WARN_ON(dst_rmem->page_size != src_rmem->page_size); 15821 WARN_ON(dst_rmem->flags != src_rmem->flags); 15822 WARN_ON(dst_rmem->depth != src_rmem->depth); 15823 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); 15824 WARN_ON(dst_rmem->ctx_mem != 
src_rmem->ctx_mem); 15825 15826 dst_rmem->pg_tbl = src_rmem->pg_tbl; 15827 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; 15828 *dst_rmem->vmem = *src_rmem->vmem; 15829 for (i = 0; i < dst_rmem->nr_pages; i++) { 15830 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; 15831 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; 15832 } 15833 15834 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) 15835 return; 15836 15837 dst_ring = &dst->rx_agg_ring_struct; 15838 dst_rmem = &dst_ring->ring_mem; 15839 src_ring = &src->rx_agg_ring_struct; 15840 src_rmem = &src_ring->ring_mem; 15841 15842 WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); 15843 WARN_ON(dst_rmem->page_size != src_rmem->page_size); 15844 WARN_ON(dst_rmem->flags != src_rmem->flags); 15845 WARN_ON(dst_rmem->depth != src_rmem->depth); 15846 WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); 15847 WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); 15848 WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size); 15849 15850 dst_rmem->pg_tbl = src_rmem->pg_tbl; 15851 dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; 15852 *dst_rmem->vmem = *src_rmem->vmem; 15853 for (i = 0; i < dst_rmem->nr_pages; i++) { 15854 dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; 15855 dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; 15856 } 15857 15858 dst->rx_agg_bmap = src->rx_agg_bmap; 15859 } 15860 15861 static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) 15862 { 15863 struct bnxt *bp = netdev_priv(dev); 15864 struct bnxt_rx_ring_info *rxr, *clone; 15865 struct bnxt_cp_ring_info *cpr; 15866 struct bnxt_vnic_info *vnic; 15867 struct bnxt_napi *bnapi; 15868 int i, rc; 15869 15870 rxr = &bp->rx_ring[idx]; 15871 clone = qmem; 15872 15873 rxr->rx_prod = clone->rx_prod; 15874 rxr->rx_agg_prod = clone->rx_agg_prod; 15875 rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod; 15876 rxr->rx_next_cons = clone->rx_next_cons; 15877 rxr->rx_tpa = clone->rx_tpa; 15878 rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map; 15879 rxr->page_pool = clone->page_pool; 15880 rxr->head_pool = clone->head_pool; 15881 rxr->xdp_rxq = clone->xdp_rxq; 15882 15883 bnxt_copy_rx_ring(bp, rxr, clone); 15884 15885 bnapi = rxr->bnapi; 15886 cpr = &bnapi->cp_ring; 15887 15888 /* All rings have been reserved and previously allocated. 15889 * Reallocating with the same parameters should never fail. 
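 * If it does fail, the err_reset path below logs the error and
 * schedules a full reset via bnxt_reset_task().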
15890 */ 15891 rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); 15892 if (rc) 15893 goto err_reset; 15894 15895 if (bp->tph_mode) { 15896 rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); 15897 if (rc) 15898 goto err_reset; 15899 } 15900 15901 rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr); 15902 if (rc) 15903 goto err_reset; 15904 15905 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 15906 if (bp->flags & BNXT_FLAG_AGG_RINGS) 15907 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 15908 15909 if (bp->flags & BNXT_FLAG_SHARED_RINGS) { 15910 rc = bnxt_tx_queue_start(bp, idx); 15911 if (rc) 15912 goto err_reset; 15913 } 15914 15915 napi_enable_locked(&bnapi->napi); 15916 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 15917 15918 for (i = 0; i < bp->nr_vnics; i++) { 15919 vnic = &bp->vnic_info[i]; 15920 15921 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); 15922 if (rc) { 15923 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", 15924 vnic->vnic_id, rc); 15925 return rc; 15926 } 15927 vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; 15928 bnxt_hwrm_vnic_update(bp, vnic, 15929 VNIC_UPDATE_REQ_ENABLES_MRU_VALID); 15930 } 15931 15932 return 0; 15933 15934 err_reset: 15935 netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n", 15936 rc); 15937 napi_enable_locked(&bnapi->napi); 15938 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); 15939 bnxt_reset_task(bp, true); 15940 return rc; 15941 } 15942 15943 static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) 15944 { 15945 struct bnxt *bp = netdev_priv(dev); 15946 struct bnxt_rx_ring_info *rxr; 15947 struct bnxt_cp_ring_info *cpr; 15948 struct bnxt_vnic_info *vnic; 15949 struct bnxt_napi *bnapi; 15950 int i; 15951 15952 for (i = 0; i < bp->nr_vnics; i++) { 15953 vnic = &bp->vnic_info[i]; 15954 vnic->mru = 0; 15955 bnxt_hwrm_vnic_update(bp, vnic, 15956 VNIC_UPDATE_REQ_ENABLES_MRU_VALID); 15957 } 15958 /* Make sure NAPI sees that the VNIC is disabled */ 15959 synchronize_net(); 15960 rxr = &bp->rx_ring[idx]; 15961 bnapi = rxr->bnapi; 15962 cpr = &bnapi->cp_ring; 15963 cancel_work_sync(&cpr->dim.work); 15964 bnxt_hwrm_rx_ring_free(bp, rxr, false); 15965 bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); 15966 page_pool_disable_direct_recycling(rxr->page_pool); 15967 if (bnxt_separate_head_pool()) 15968 page_pool_disable_direct_recycling(rxr->head_pool); 15969 15970 if (bp->flags & BNXT_FLAG_SHARED_RINGS) 15971 bnxt_tx_queue_stop(bp, idx); 15972 15973 /* Disable NAPI now after freeing the rings because HWRM_RING_FREE 15974 * completion is handled in NAPI to guarantee no more DMA on that ring 15975 * after seeing the completion. 
15976 */ 15977 napi_disable_locked(&bnapi->napi); 15978 15979 if (bp->tph_mode) { 15980 bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr); 15981 bnxt_clear_one_cp_ring(bp, rxr->rx_cpr); 15982 } 15983 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 15984 15985 memcpy(qmem, rxr, sizeof(*rxr)); 15986 bnxt_init_rx_ring_struct(bp, qmem); 15987 15988 return 0; 15989 } 15990 15991 static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = { 15992 .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info), 15993 .ndo_queue_mem_alloc = bnxt_queue_mem_alloc, 15994 .ndo_queue_mem_free = bnxt_queue_mem_free, 15995 .ndo_queue_start = bnxt_queue_start, 15996 .ndo_queue_stop = bnxt_queue_stop, 15997 }; 15998 15999 static void bnxt_remove_one(struct pci_dev *pdev) 16000 { 16001 struct net_device *dev = pci_get_drvdata(pdev); 16002 struct bnxt *bp = netdev_priv(dev); 16003 16004 if (BNXT_PF(bp)) 16005 bnxt_sriov_disable(bp); 16006 16007 bnxt_rdma_aux_device_del(bp); 16008 16009 unregister_netdev(dev); 16010 bnxt_ptp_clear(bp); 16011 16012 bnxt_rdma_aux_device_uninit(bp); 16013 16014 bnxt_free_l2_filters(bp, true); 16015 bnxt_free_ntp_fltrs(bp, true); 16016 WARN_ON(bp->num_rss_ctx); 16017 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 16018 /* Flush any pending tasks */ 16019 cancel_work_sync(&bp->sp_task); 16020 cancel_delayed_work_sync(&bp->fw_reset_task); 16021 bp->sp_event = 0; 16022 16023 bnxt_dl_fw_reporters_destroy(bp); 16024 bnxt_dl_unregister(bp); 16025 bnxt_shutdown_tc(bp); 16026 16027 bnxt_clear_int_mode(bp); 16028 bnxt_hwrm_func_drv_unrgtr(bp); 16029 bnxt_free_hwrm_resources(bp); 16030 bnxt_hwmon_uninit(bp); 16031 bnxt_ethtool_free(bp); 16032 bnxt_dcb_free(bp); 16033 kfree(bp->ptp_cfg); 16034 bp->ptp_cfg = NULL; 16035 kfree(bp->fw_health); 16036 bp->fw_health = NULL; 16037 bnxt_cleanup_pci(bp); 16038 bnxt_free_ctx_mem(bp, true); 16039 bnxt_free_crash_dump_mem(bp); 16040 kfree(bp->rss_indir_tbl); 16041 bp->rss_indir_tbl = NULL; 16042 bnxt_free_port_stats(bp); 16043 free_netdev(dev); 16044 } 16045 16046 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) 16047 { 16048 int rc = 0; 16049 struct bnxt_link_info *link_info = &bp->link_info; 16050 16051 bp->phy_flags = 0; 16052 rc = bnxt_hwrm_phy_qcaps(bp); 16053 if (rc) { 16054 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", 16055 rc); 16056 return rc; 16057 } 16058 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS) 16059 bp->dev->priv_flags |= IFF_SUPP_NOFCS; 16060 else 16061 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS; 16062 16063 bp->mac_flags = 0; 16064 bnxt_hwrm_mac_qcaps(bp); 16065 16066 if (!fw_dflt) 16067 return 0; 16068 16069 mutex_lock(&bp->link_lock); 16070 rc = bnxt_update_link(bp, false); 16071 if (rc) { 16072 mutex_unlock(&bp->link_lock); 16073 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", 16074 rc); 16075 return rc; 16076 } 16077 16078 /* Older firmware does not have supported_auto_speeds, so assume 16079 * that all supported speeds can be autonegotiated. 
16080 */ 16081 if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 16082 link_info->support_auto_speeds = link_info->support_speeds; 16083 16084 bnxt_init_ethtool_link_settings(bp); 16085 mutex_unlock(&bp->link_lock); 16086 return 0; 16087 } 16088 16089 static int bnxt_get_max_irq(struct pci_dev *pdev) 16090 { 16091 u16 ctrl; 16092 16093 if (!pdev->msix_cap) 16094 return 1; 16095 16096 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 16097 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; 16098 } 16099 16100 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, 16101 int *max_cp) 16102 { 16103 struct bnxt_hw_resc *hw_resc = &bp->hw_resc; 16104 int max_ring_grps = 0, max_irq; 16105 16106 *max_tx = hw_resc->max_tx_rings; 16107 *max_rx = hw_resc->max_rx_rings; 16108 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); 16109 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - 16110 bnxt_get_ulp_msix_num_in_use(bp), 16111 hw_resc->max_stat_ctxs - 16112 bnxt_get_ulp_stat_ctxs_in_use(bp)); 16113 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) 16114 *max_cp = min_t(int, *max_cp, max_irq); 16115 max_ring_grps = hw_resc->max_hw_ring_grps; 16116 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { 16117 *max_cp -= 1; 16118 *max_rx -= 2; 16119 } 16120 if (bp->flags & BNXT_FLAG_AGG_RINGS) 16121 *max_rx >>= 1; 16122 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 16123 int rc; 16124 16125 rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false); 16126 if (rc) { 16127 *max_rx = 0; 16128 *max_tx = 0; 16129 } 16130 /* On P5 chips, max_cp output param should be available NQs */ 16131 *max_cp = max_irq; 16132 } 16133 *max_rx = min_t(int, *max_rx, max_ring_grps); 16134 } 16135 16136 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared) 16137 { 16138 int rx, tx, cp; 16139 16140 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 16141 *max_rx = rx; 16142 *max_tx = tx; 16143 if (!rx || !tx || !cp) 16144 return -ENOMEM; 16145 16146 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 16147 } 16148 16149 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx, 16150 bool shared) 16151 { 16152 int rc; 16153 16154 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 16155 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) { 16156 /* Not enough rings, try disabling agg rings. 
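 * Hardware GRO and LRO depend on the agg rings, so they are turned
 * off as well.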
*/ 16157 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 16158 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 16159 if (rc) { 16160 /* set BNXT_FLAG_AGG_RINGS back for consistency */ 16161 bp->flags |= BNXT_FLAG_AGG_RINGS; 16162 return rc; 16163 } 16164 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 16165 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 16166 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 16167 bnxt_set_ring_params(bp); 16168 } 16169 16170 if (bp->flags & BNXT_FLAG_ROCE_CAP) { 16171 int max_cp, max_stat, max_irq; 16172 16173 /* Reserve minimum resources for RoCE */ 16174 max_cp = bnxt_get_max_func_cp_rings(bp); 16175 max_stat = bnxt_get_max_func_stat_ctxs(bp); 16176 max_irq = bnxt_get_max_func_irqs(bp); 16177 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS || 16178 max_irq <= BNXT_MIN_ROCE_CP_RINGS || 16179 max_stat <= BNXT_MIN_ROCE_STAT_CTXS) 16180 return 0; 16181 16182 max_cp -= BNXT_MIN_ROCE_CP_RINGS; 16183 max_irq -= BNXT_MIN_ROCE_CP_RINGS; 16184 max_stat -= BNXT_MIN_ROCE_STAT_CTXS; 16185 max_cp = min_t(int, max_cp, max_irq); 16186 max_cp = min_t(int, max_cp, max_stat); 16187 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared); 16188 if (rc) 16189 rc = 0; 16190 } 16191 return rc; 16192 } 16193 16194 /* In initial default shared ring setting, each shared ring must have a 16195 * RX/TX ring pair. 16196 */ 16197 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp) 16198 { 16199 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings); 16200 bp->rx_nr_rings = bp->cp_nr_rings; 16201 bp->tx_nr_rings_per_tc = bp->cp_nr_rings; 16202 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 16203 } 16204 16205 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) 16206 { 16207 int dflt_rings, max_rx_rings, max_tx_rings, rc; 16208 int avail_msix; 16209 16210 if (!bnxt_can_reserve_rings(bp)) 16211 return 0; 16212 16213 if (sh) 16214 bp->flags |= BNXT_FLAG_SHARED_RINGS; 16215 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues(); 16216 /* Reduce default rings on multi-port cards so that total default 16217 * rings do not exceed CPU count. 16218 */ 16219 if (bp->port_count > 1) { 16220 int max_rings = 16221 max_t(int, num_online_cpus() / bp->port_count, 1); 16222 16223 dflt_rings = min_t(int, dflt_rings, max_rings); 16224 } 16225 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); 16226 if (rc) 16227 return rc; 16228 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); 16229 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); 16230 if (sh) 16231 bnxt_trim_dflt_sh_rings(bp); 16232 else 16233 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings; 16234 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 16235 16236 avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings; 16237 if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) { 16238 int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want); 16239 16240 bnxt_set_ulp_msix_num(bp, ulp_num_msix); 16241 bnxt_set_dflt_ulp_stat_ctxs(bp); 16242 } 16243 16244 rc = __bnxt_reserve_rings(bp); 16245 if (rc && rc != -ENODEV) 16246 netdev_warn(bp->dev, "Unable to reserve tx rings\n"); 16247 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16248 if (sh) 16249 bnxt_trim_dflt_sh_rings(bp); 16250 16251 /* Rings may have been trimmed, re-reserve the trimmed rings. 
*/ 16252 if (bnxt_need_reserve_rings(bp)) { 16253 rc = __bnxt_reserve_rings(bp); 16254 if (rc && rc != -ENODEV) 16255 netdev_warn(bp->dev, "2nd rings reservation failed.\n"); 16256 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16257 } 16258 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { 16259 bp->rx_nr_rings++; 16260 bp->cp_nr_rings++; 16261 } 16262 if (rc) { 16263 bp->tx_nr_rings = 0; 16264 bp->rx_nr_rings = 0; 16265 } 16266 return rc; 16267 } 16268 16269 static int bnxt_init_dflt_ring_mode(struct bnxt *bp) 16270 { 16271 int rc; 16272 16273 if (bp->tx_nr_rings) 16274 return 0; 16275 16276 bnxt_ulp_irq_stop(bp); 16277 bnxt_clear_int_mode(bp); 16278 rc = bnxt_set_dflt_rings(bp, true); 16279 if (rc) { 16280 if (BNXT_VF(bp) && rc == -ENODEV) 16281 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 16282 else 16283 netdev_err(bp->dev, "Not enough rings available.\n"); 16284 goto init_dflt_ring_err; 16285 } 16286 rc = bnxt_init_int_mode(bp); 16287 if (rc) 16288 goto init_dflt_ring_err; 16289 16290 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16291 16292 bnxt_set_dflt_rfs(bp); 16293 16294 init_dflt_ring_err: 16295 bnxt_ulp_irq_restart(bp, rc); 16296 return rc; 16297 } 16298 16299 int bnxt_restore_pf_fw_resources(struct bnxt *bp) 16300 { 16301 int rc; 16302 16303 netdev_ops_assert_locked(bp->dev); 16304 bnxt_hwrm_func_qcaps(bp); 16305 16306 if (netif_running(bp->dev)) 16307 __bnxt_close_nic(bp, true, false); 16308 16309 bnxt_ulp_irq_stop(bp); 16310 bnxt_clear_int_mode(bp); 16311 rc = bnxt_init_int_mode(bp); 16312 bnxt_ulp_irq_restart(bp, rc); 16313 16314 if (netif_running(bp->dev)) { 16315 if (rc) 16316 netif_close(bp->dev); 16317 else 16318 rc = bnxt_open_nic(bp, true, false); 16319 } 16320 16321 return rc; 16322 } 16323 16324 static int bnxt_init_mac_addr(struct bnxt *bp) 16325 { 16326 int rc = 0; 16327 16328 if (BNXT_PF(bp)) { 16329 eth_hw_addr_set(bp->dev, bp->pf.mac_addr); 16330 } else { 16331 #ifdef CONFIG_BNXT_SRIOV 16332 struct bnxt_vf_info *vf = &bp->vf; 16333 bool strict_approval = true; 16334 16335 if (is_valid_ether_addr(vf->mac_addr)) { 16336 /* overwrite netdev dev_addr with admin VF MAC */ 16337 eth_hw_addr_set(bp->dev, vf->mac_addr); 16338 /* Older PF driver or firmware may not approve this 16339 * correctly. 
16340 */ 16341 strict_approval = false; 16342 } else { 16343 eth_hw_addr_random(bp->dev); 16344 } 16345 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); 16346 #endif 16347 } 16348 return rc; 16349 } 16350 16351 static void bnxt_vpd_read_info(struct bnxt *bp) 16352 { 16353 struct pci_dev *pdev = bp->pdev; 16354 unsigned int vpd_size, kw_len; 16355 int pos, size; 16356 u8 *vpd_data; 16357 16358 vpd_data = pci_vpd_alloc(pdev, &vpd_size); 16359 if (IS_ERR(vpd_data)) { 16360 pci_warn(pdev, "Unable to read VPD\n"); 16361 return; 16362 } 16363 16364 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 16365 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); 16366 if (pos < 0) 16367 goto read_sn; 16368 16369 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 16370 memcpy(bp->board_partno, &vpd_data[pos], size); 16371 16372 read_sn: 16373 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, 16374 PCI_VPD_RO_KEYWORD_SERIALNO, 16375 &kw_len); 16376 if (pos < 0) 16377 goto exit; 16378 16379 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); 16380 memcpy(bp->board_serialno, &vpd_data[pos], size); 16381 exit: 16382 kfree(vpd_data); 16383 } 16384 16385 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) 16386 { 16387 struct pci_dev *pdev = bp->pdev; 16388 u64 qword; 16389 16390 qword = pci_get_dsn(pdev); 16391 if (!qword) { 16392 netdev_info(bp->dev, "Unable to read adapter's DSN\n"); 16393 return -EOPNOTSUPP; 16394 } 16395 16396 put_unaligned_le64(qword, dsn); 16397 16398 bp->flags |= BNXT_FLAG_DSN_VALID; 16399 return 0; 16400 } 16401 16402 static int bnxt_map_db_bar(struct bnxt *bp) 16403 { 16404 if (!bp->db_size) 16405 return -ENODEV; 16406 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size); 16407 if (!bp->bar1) 16408 return -ENOMEM; 16409 return 0; 16410 } 16411 16412 void bnxt_print_device_info(struct bnxt *bp) 16413 { 16414 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n", 16415 board_info[bp->board_idx].name, 16416 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr); 16417 16418 pcie_print_link_status(bp->pdev); 16419 } 16420 16421 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 16422 { 16423 struct bnxt_hw_resc *hw_resc; 16424 struct net_device *dev; 16425 struct bnxt *bp; 16426 int rc, max_irqs; 16427 16428 if (pci_is_bridge(pdev)) 16429 return -ENODEV; 16430 16431 if (!pdev->msix_cap) { 16432 dev_err(&pdev->dev, "MSIX capability not found, aborting\n"); 16433 return -ENODEV; 16434 } 16435 16436 /* Clear any pending DMA transactions from crash kernel 16437 * while loading driver in capture kernel. 
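 * A function level reset below quiesces the device before it is
 * reinitialized.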
16438 */ 16439 if (is_kdump_kernel()) { 16440 pci_clear_master(pdev); 16441 pcie_flr(pdev); 16442 } 16443 16444 max_irqs = bnxt_get_max_irq(pdev); 16445 dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, 16446 max_irqs); 16447 if (!dev) 16448 return -ENOMEM; 16449 16450 bp = netdev_priv(dev); 16451 bp->board_idx = ent->driver_data; 16452 bp->msg_enable = BNXT_DEF_MSG_ENABLE; 16453 bnxt_set_max_func_irqs(bp, max_irqs); 16454 16455 if (bnxt_vf_pciid(bp->board_idx)) 16456 bp->flags |= BNXT_FLAG_VF; 16457 16458 /* No devlink port registration in case of a VF */ 16459 if (BNXT_PF(bp)) 16460 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); 16461 16462 rc = bnxt_init_board(pdev, dev); 16463 if (rc < 0) 16464 goto init_err_free; 16465 16466 dev->netdev_ops = &bnxt_netdev_ops; 16467 dev->stat_ops = &bnxt_stat_ops; 16468 dev->watchdog_timeo = BNXT_TX_TIMEOUT; 16469 dev->ethtool_ops = &bnxt_ethtool_ops; 16470 pci_set_drvdata(pdev, dev); 16471 16472 rc = bnxt_alloc_hwrm_resources(bp); 16473 if (rc) 16474 goto init_err_pci_clean; 16475 16476 mutex_init(&bp->hwrm_cmd_lock); 16477 mutex_init(&bp->link_lock); 16478 16479 rc = bnxt_fw_init_one_p1(bp); 16480 if (rc) 16481 goto init_err_pci_clean; 16482 16483 if (BNXT_PF(bp)) 16484 bnxt_vpd_read_info(bp); 16485 16486 if (BNXT_CHIP_P5_PLUS(bp)) { 16487 bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; 16488 if (BNXT_CHIP_P7(bp)) 16489 bp->flags |= BNXT_FLAG_CHIP_P7; 16490 } 16491 16492 rc = bnxt_alloc_rss_indir_tbl(bp); 16493 if (rc) 16494 goto init_err_pci_clean; 16495 16496 rc = bnxt_fw_init_one_p2(bp); 16497 if (rc) 16498 goto init_err_pci_clean; 16499 16500 rc = bnxt_map_db_bar(bp); 16501 if (rc) { 16502 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", 16503 rc); 16504 goto init_err_pci_clean; 16505 } 16506 16507 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 16508 NETIF_F_TSO | NETIF_F_TSO6 | 16509 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 16510 NETIF_F_GSO_IPXIP4 | 16511 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 16512 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | 16513 NETIF_F_RXCSUM | NETIF_F_GRO; 16514 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 16515 dev->hw_features |= NETIF_F_GSO_UDP_L4; 16516 16517 if (BNXT_SUPPORTS_TPA(bp)) 16518 dev->hw_features |= NETIF_F_LRO; 16519 16520 dev->hw_enc_features = 16521 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | 16522 NETIF_F_TSO | NETIF_F_TSO6 | 16523 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | 16524 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | 16525 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; 16526 if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) 16527 dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; 16528 if (bp->flags & BNXT_FLAG_CHIP_P7) 16529 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; 16530 else 16531 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; 16532 16533 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 16534 NETIF_F_GSO_GRE_CSUM; 16535 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; 16536 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) 16537 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; 16538 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) 16539 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; 16540 if (BNXT_SUPPORTS_TPA(bp)) 16541 dev->hw_features |= NETIF_F_GRO_HW; 16542 dev->features |= dev->hw_features | NETIF_F_HIGHDMA; 16543 if (dev->features & NETIF_F_GRO_HW) 16544 dev->features &= ~NETIF_F_LRO; 16545 dev->priv_flags |= IFF_UNICAST_FLT; 16546 16547 netif_set_tso_max_size(dev, GSO_MAX_SIZE); 16548 if (bp->tso_max_segs) 16549 
netif_set_tso_max_segs(dev, bp->tso_max_segs); 16550 16551 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | 16552 NETDEV_XDP_ACT_RX_SG; 16553 16554 #ifdef CONFIG_BNXT_SRIOV 16555 init_waitqueue_head(&bp->sriov_cfg_wait); 16556 #endif 16557 if (BNXT_SUPPORTS_TPA(bp)) { 16558 bp->gro_func = bnxt_gro_func_5730x; 16559 if (BNXT_CHIP_P4(bp)) 16560 bp->gro_func = bnxt_gro_func_5731x; 16561 else if (BNXT_CHIP_P5_PLUS(bp)) 16562 bp->gro_func = bnxt_gro_func_5750x; 16563 } 16564 if (!BNXT_CHIP_P4_PLUS(bp)) 16565 bp->flags |= BNXT_FLAG_DOUBLE_DB; 16566 16567 rc = bnxt_init_mac_addr(bp); 16568 if (rc) { 16569 dev_err(&pdev->dev, "Unable to initialize mac address.\n"); 16570 rc = -EADDRNOTAVAIL; 16571 goto init_err_pci_clean; 16572 } 16573 16574 if (BNXT_PF(bp)) { 16575 /* Read the adapter's DSN to use as the eswitch switch_id */ 16576 rc = bnxt_pcie_dsn_get(bp, bp->dsn); 16577 } 16578 16579 /* MTU range: 60 - FW defined max */ 16580 dev->min_mtu = ETH_ZLEN; 16581 dev->max_mtu = bp->max_mtu; 16582 16583 rc = bnxt_probe_phy(bp, true); 16584 if (rc) 16585 goto init_err_pci_clean; 16586 16587 hw_resc = &bp->hw_resc; 16588 bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + 16589 BNXT_L2_FLTR_MAX_FLTR; 16590 /* Older firmware may not report these filters properly */ 16591 if (bp->max_fltr < BNXT_MAX_FLTR) 16592 bp->max_fltr = BNXT_MAX_FLTR; 16593 bnxt_init_l2_fltr_tbl(bp); 16594 __bnxt_set_rx_skb_mode(bp, false); 16595 bnxt_set_tpa_flags(bp); 16596 bnxt_init_ring_params(bp); 16597 bnxt_set_ring_params(bp); 16598 bnxt_rdma_aux_device_init(bp); 16599 rc = bnxt_set_dflt_rings(bp, true); 16600 if (rc) { 16601 if (BNXT_VF(bp) && rc == -ENODEV) { 16602 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); 16603 } else { 16604 netdev_err(bp->dev, "Not enough rings available.\n"); 16605 rc = -ENOMEM; 16606 } 16607 goto init_err_pci_clean; 16608 } 16609 16610 bnxt_fw_init_one_p3(bp); 16611 16612 bnxt_init_dflt_coal(bp); 16613 16614 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) 16615 bp->flags |= BNXT_FLAG_STRIP_VLAN; 16616 16617 rc = bnxt_init_int_mode(bp); 16618 if (rc) 16619 goto init_err_pci_clean; 16620 16621 /* No TC has been set yet and rings may have been trimmed due to 16622 * limited MSIX, so we re-initialize the TX rings per TC. 
16623 */ 16624 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 16625 16626 if (BNXT_PF(bp)) { 16627 if (!bnxt_pf_wq) { 16628 bnxt_pf_wq = 16629 create_singlethread_workqueue("bnxt_pf_wq"); 16630 if (!bnxt_pf_wq) { 16631 dev_err(&pdev->dev, "Unable to create workqueue.\n"); 16632 rc = -ENOMEM; 16633 goto init_err_pci_clean; 16634 } 16635 } 16636 rc = bnxt_init_tc(bp); 16637 if (rc) 16638 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", 16639 rc); 16640 } 16641 16642 bnxt_inv_fw_health_reg(bp); 16643 rc = bnxt_dl_register(bp); 16644 if (rc) 16645 goto init_err_dl; 16646 16647 INIT_LIST_HEAD(&bp->usr_fltr_list); 16648 16649 if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) 16650 bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; 16651 if (BNXT_SUPPORTS_QUEUE_API(bp)) 16652 dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; 16653 dev->request_ops_lock = true; 16654 16655 rc = register_netdev(dev); 16656 if (rc) 16657 goto init_err_cleanup; 16658 16659 bnxt_dl_fw_reporters_create(bp); 16660 16661 bnxt_rdma_aux_device_add(bp); 16662 16663 bnxt_print_device_info(bp); 16664 16665 pci_save_state(pdev); 16666 16667 return 0; 16668 init_err_cleanup: 16669 bnxt_rdma_aux_device_uninit(bp); 16670 bnxt_dl_unregister(bp); 16671 init_err_dl: 16672 bnxt_shutdown_tc(bp); 16673 bnxt_clear_int_mode(bp); 16674 16675 init_err_pci_clean: 16676 bnxt_hwrm_func_drv_unrgtr(bp); 16677 bnxt_free_hwrm_resources(bp); 16678 bnxt_hwmon_uninit(bp); 16679 bnxt_ethtool_free(bp); 16680 bnxt_ptp_clear(bp); 16681 kfree(bp->ptp_cfg); 16682 bp->ptp_cfg = NULL; 16683 kfree(bp->fw_health); 16684 bp->fw_health = NULL; 16685 bnxt_cleanup_pci(bp); 16686 bnxt_free_ctx_mem(bp, true); 16687 bnxt_free_crash_dump_mem(bp); 16688 kfree(bp->rss_indir_tbl); 16689 bp->rss_indir_tbl = NULL; 16690 16691 init_err_free: 16692 free_netdev(dev); 16693 return rc; 16694 } 16695 16696 static void bnxt_shutdown(struct pci_dev *pdev) 16697 { 16698 struct net_device *dev = pci_get_drvdata(pdev); 16699 struct bnxt *bp; 16700 16701 if (!dev) 16702 return; 16703 16704 rtnl_lock(); 16705 netdev_lock(dev); 16706 bp = netdev_priv(dev); 16707 if (!bp) 16708 goto shutdown_exit; 16709 16710 if (netif_running(dev)) 16711 netif_close(dev); 16712 16713 bnxt_ptp_clear(bp); 16714 bnxt_clear_int_mode(bp); 16715 pci_disable_device(pdev); 16716 16717 if (system_state == SYSTEM_POWER_OFF) { 16718 pci_wake_from_d3(pdev, bp->wol); 16719 pci_set_power_state(pdev, PCI_D3hot); 16720 } 16721 16722 shutdown_exit: 16723 netdev_unlock(dev); 16724 rtnl_unlock(); 16725 } 16726 16727 #ifdef CONFIG_PM_SLEEP 16728 static int bnxt_suspend(struct device *device) 16729 { 16730 struct net_device *dev = dev_get_drvdata(device); 16731 struct bnxt *bp = netdev_priv(dev); 16732 int rc = 0; 16733 16734 bnxt_ulp_stop(bp); 16735 16736 netdev_lock(dev); 16737 if (netif_running(dev)) { 16738 netif_device_detach(dev); 16739 rc = bnxt_close(dev); 16740 } 16741 bnxt_hwrm_func_drv_unrgtr(bp); 16742 bnxt_ptp_clear(bp); 16743 pci_disable_device(bp->pdev); 16744 bnxt_free_ctx_mem(bp, false); 16745 netdev_unlock(dev); 16746 return rc; 16747 } 16748 16749 static int bnxt_resume(struct device *device) 16750 { 16751 struct net_device *dev = dev_get_drvdata(device); 16752 struct bnxt *bp = netdev_priv(dev); 16753 int rc = 0; 16754 16755 netdev_lock(dev); 16756 rc = pci_enable_device(bp->pdev); 16757 if (rc) { 16758 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", 16759 rc); 16760 goto resume_exit; 16761 } 16762 pci_set_master(bp->pdev); 16763 if (bnxt_hwrm_ver_get(bp)) { 16764 rc = -ENODEV; 16765 goto 
resume_exit; 16766 } 16767 rc = bnxt_hwrm_func_reset(bp); 16768 if (rc) { 16769 rc = -EBUSY; 16770 goto resume_exit; 16771 } 16772 16773 rc = bnxt_hwrm_func_qcaps(bp); 16774 if (rc) 16775 goto resume_exit; 16776 16777 bnxt_clear_reservations(bp, true); 16778 16779 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { 16780 rc = -ENODEV; 16781 goto resume_exit; 16782 } 16783 if (bp->fw_crash_mem) 16784 bnxt_hwrm_crash_dump_mem_cfg(bp); 16785 16786 if (bnxt_ptp_init(bp)) { 16787 kfree(bp->ptp_cfg); 16788 bp->ptp_cfg = NULL; 16789 } 16790 bnxt_get_wol_settings(bp); 16791 if (netif_running(dev)) { 16792 rc = bnxt_open(dev); 16793 if (!rc) 16794 netif_device_attach(dev); 16795 } 16796 16797 resume_exit: 16798 netdev_unlock(bp->dev); 16799 bnxt_ulp_start(bp, rc); 16800 if (!rc) 16801 bnxt_reenable_sriov(bp); 16802 return rc; 16803 } 16804 16805 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume); 16806 #define BNXT_PM_OPS (&bnxt_pm_ops) 16807 16808 #else 16809 16810 #define BNXT_PM_OPS NULL 16811 16812 #endif /* CONFIG_PM_SLEEP */ 16813 16814 /** 16815 * bnxt_io_error_detected - called when PCI error is detected 16816 * @pdev: Pointer to PCI device 16817 * @state: The current pci connection state 16818 * 16819 * This function is called after a PCI bus error affecting 16820 * this device has been detected. 16821 */ 16822 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, 16823 pci_channel_state_t state) 16824 { 16825 struct net_device *netdev = pci_get_drvdata(pdev); 16826 struct bnxt *bp = netdev_priv(netdev); 16827 bool abort = false; 16828 16829 netdev_info(netdev, "PCI I/O error detected\n"); 16830 16831 bnxt_ulp_stop(bp); 16832 16833 netdev_lock(netdev); 16834 netif_device_detach(netdev); 16835 16836 if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { 16837 netdev_err(bp->dev, "Firmware reset already in progress\n"); 16838 abort = true; 16839 } 16840 16841 if (abort || state == pci_channel_io_perm_failure) { 16842 netdev_unlock(netdev); 16843 return PCI_ERS_RESULT_DISCONNECT; 16844 } 16845 16846 /* Link is not reliable anymore if state is pci_channel_io_frozen 16847 * so we disable bus master to prevent any potential bad DMAs before 16848 * freeing kernel memory. 16849 */ 16850 if (state == pci_channel_io_frozen) { 16851 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); 16852 bnxt_fw_fatal_close(bp); 16853 } 16854 16855 if (netif_running(netdev)) 16856 __bnxt_close_nic(bp, true, true); 16857 16858 if (pci_is_enabled(pdev)) 16859 pci_disable_device(pdev); 16860 bnxt_free_ctx_mem(bp, false); 16861 netdev_unlock(netdev); 16862 16863 /* Request a slot slot reset. */ 16864 return PCI_ERS_RESULT_NEED_RESET; 16865 } 16866 16867 /** 16868 * bnxt_io_slot_reset - called after the pci bus has been reset. 16869 * @pdev: Pointer to PCI device 16870 * 16871 * Restart the card from scratch, as if from a cold-boot. 16872 * At this point, the card has experienced a hard reset, 16873 * followed by fixups by BIOS, and has its config space 16874 * set up identically to what it was at cold boot. 
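 *
 * Return: PCI_ERS_RESULT_RECOVERED if the firmware and function were
 * reset successfully, PCI_ERS_RESULT_DISCONNECT otherwise.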
16875 */ 16876 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) 16877 { 16878 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; 16879 struct net_device *netdev = pci_get_drvdata(pdev); 16880 struct bnxt *bp = netdev_priv(netdev); 16881 int retry = 0; 16882 int err = 0; 16883 int off; 16884 16885 netdev_info(bp->dev, "PCI Slot Reset\n"); 16886 16887 if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && 16888 test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) 16889 msleep(900); 16890 16891 netdev_lock(netdev); 16892 16893 if (pci_enable_device(pdev)) { 16894 dev_err(&pdev->dev, 16895 "Cannot re-enable PCI device after reset.\n"); 16896 } else { 16897 pci_set_master(pdev); 16898 /* Upon fatal error, our device internal logic that latches to 16899 * BAR value is getting reset and will restore only upon 16900 * rewriting the BARs. 16901 * 16902 * As pci_restore_state() does not re-write the BARs if the 16903 * value is same as saved value earlier, driver needs to 16904 * write the BARs to 0 to force restore, in case of fatal error. 16905 */ 16906 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, 16907 &bp->state)) { 16908 for (off = PCI_BASE_ADDRESS_0; 16909 off <= PCI_BASE_ADDRESS_5; off += 4) 16910 pci_write_config_dword(bp->pdev, off, 0); 16911 } 16912 pci_restore_state(pdev); 16913 pci_save_state(pdev); 16914 16915 bnxt_inv_fw_health_reg(bp); 16916 bnxt_try_map_fw_health_reg(bp); 16917 16918 /* In some PCIe AER scenarios, firmware may take up to 16919 * 10 seconds to become ready in the worst case. 16920 */ 16921 do { 16922 err = bnxt_try_recover_fw(bp); 16923 if (!err) 16924 break; 16925 retry++; 16926 } while (retry < BNXT_FW_SLOT_RESET_RETRY); 16927 16928 if (err) { 16929 dev_err(&pdev->dev, "Firmware not ready\n"); 16930 goto reset_exit; 16931 } 16932 16933 err = bnxt_hwrm_func_reset(bp); 16934 if (!err) 16935 result = PCI_ERS_RESULT_RECOVERED; 16936 16937 /* IRQ will be initialized later in bnxt_io_resume */ 16938 bnxt_ulp_irq_stop(bp); 16939 bnxt_clear_int_mode(bp); 16940 } 16941 16942 reset_exit: 16943 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); 16944 bnxt_clear_reservations(bp, true); 16945 netdev_unlock(netdev); 16946 16947 return result; 16948 } 16949 16950 /** 16951 * bnxt_io_resume - called when traffic can start flowing again. 16952 * @pdev: Pointer to PCI device 16953 * 16954 * This callback is called when the error recovery driver tells 16955 * us that its OK to resume normal operation. 
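 * Re-queries device resources, reopens the interface if it was
 * running, and re-attaches the netdev on success.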
16956 */ 16957 static void bnxt_io_resume(struct pci_dev *pdev) 16958 { 16959 struct net_device *netdev = pci_get_drvdata(pdev); 16960 struct bnxt *bp = netdev_priv(netdev); 16961 int err; 16962 16963 netdev_info(bp->dev, "PCI Slot Resume\n"); 16964 netdev_lock(netdev); 16965 16966 err = bnxt_hwrm_func_qcaps(bp); 16967 if (!err) { 16968 if (netif_running(netdev)) { 16969 err = bnxt_open(netdev); 16970 } else { 16971 err = bnxt_reserve_rings(bp, true); 16972 if (!err) 16973 err = bnxt_init_int_mode(bp); 16974 } 16975 } 16976 16977 if (!err) 16978 netif_device_attach(netdev); 16979 16980 netdev_unlock(netdev); 16981 bnxt_ulp_start(bp, err); 16982 if (!err) 16983 bnxt_reenable_sriov(bp); 16984 } 16985 16986 static const struct pci_error_handlers bnxt_err_handler = { 16987 .error_detected = bnxt_io_error_detected, 16988 .slot_reset = bnxt_io_slot_reset, 16989 .resume = bnxt_io_resume 16990 }; 16991 16992 static struct pci_driver bnxt_pci_driver = { 16993 .name = DRV_MODULE_NAME, 16994 .id_table = bnxt_pci_tbl, 16995 .probe = bnxt_init_one, 16996 .remove = bnxt_remove_one, 16997 .shutdown = bnxt_shutdown, 16998 .driver.pm = BNXT_PM_OPS, 16999 .err_handler = &bnxt_err_handler, 17000 #if defined(CONFIG_BNXT_SRIOV) 17001 .sriov_configure = bnxt_sriov_configure, 17002 #endif 17003 }; 17004 17005 static int __init bnxt_init(void) 17006 { 17007 int err; 17008 17009 bnxt_debug_init(); 17010 err = pci_register_driver(&bnxt_pci_driver); 17011 if (err) { 17012 bnxt_debug_exit(); 17013 return err; 17014 } 17015 17016 return 0; 17017 } 17018 17019 static void __exit bnxt_exit(void) 17020 { 17021 pci_unregister_driver(&bnxt_pci_driver); 17022 if (bnxt_pf_wq) 17023 destroy_workqueue(bnxt_pf_wq); 17024 bnxt_debug_exit(); 17025 } 17026 17027 module_init(bnxt_init); 17028 module_exit(bnxt_exit); 17029